Dataset columns:
  hip_filename    string, length 5 to 84
  hip_content     string, length 79 to 9.69M
  cuda_filename   string, length 4 to 83
  cuda_content    string, length 19 to 9.69M
215cc143a84faf168fe37e6b734e3e34edcca820.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"

#define TX 32
#define TY 32

__device__ unsigned char clip(int n) {
  return n > 255 ? 255 : (n < 0 ? 0 : n);
}

__global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
  const int c = blockIdx.x*blockDim.x + threadIdx.x;
  const int r = blockIdx.y*blockDim.y + threadIdx.y;
  if ((c >= w) || (r >= h)) return; // Check if within image bounds
  const int i = c + r*w; // 1D indexing
  const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y));
  const unsigned char intensity = clip(255 - dist);
  d_out[i].x = intensity;
  d_out[i].y = intensity;
  d_out[i].z = 0;
  d_out[i].w = 255;
}

/* Note that there is no host array, computation and display are both handled
   from the device No hipMalloc either, the render function in main.cpp
   declares a pointer d_out that gets its value from
   CudaGraphicsResourceGetMappedPointer() and provides the CUDA pointer to the
   memory allocated in the pixel buffer */

void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
  const dim3 blockSize(TX, TY);
  const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
  hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
215cc143a84faf168fe37e6b734e3e34edcca820.cu
#include "kernel.h" #define TX 32 #define TY 32 __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r = blockIdx.y*blockDim.y + threadIdx.y; if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist); d_out[i].x = intensity; d_out[i].y = intensity; d_out[i].z = 0; d_out[i].w = 255; } /* Note that there is no host array, computation and display are both handled from the device No cudaMalloc either, the render function in main.cpp declares a pointer d_out that gets its value from CudaGraphicsResourceGetMappedPointer() and provides the CUDA pointer to the memory allocated in the pixel buffer */ void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); }
f7bc02703723d137febc380310fa08a6ee82875c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "tfcc_cudacellinterface.h" #include <type_traits> #include "exceptions/tfcc_cudaruntimeerror.h" #include "exceptions/tfcc_cudnnruntimeerror.h" #include "exceptions/tfcc_invalidargumenterror.h" #include "exceptions/tfcc_notimplementederror.h" #include "framework/tfcc_cudasession.h" #include "framework/tfcc_session.h" #include "framework/tfcc_types.h" namespace tfcc { // cuda functionsz template <class T> static inline T __device__ sigmoid(const T value) { return static_cast<T>(1) / (static_cast<T>(1) + exp(-value)); } template <class T> static void __global__ _cuda_process_lstm_cell( unsigned batch, unsigned units, const T* stateC, const T* inputValue, const T* stateHValue, const T* bias, T forget, T* result, T* resultState) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; const unsigned total = batch * units; for (unsigned i = tid; i < total; i += skip) { unsigned row = i / units; unsigned col = i % units; const T* currentInputValue = inputValue + row * units * 4; const T* currentStateHValue = stateHValue + row * units * 4; T xi = currentInputValue[col] + currentStateHValue[col] + bias[col]; T xc = currentInputValue[col + units] + currentStateHValue[col + units] + bias[col + units]; T xf = currentInputValue[col + units * 2] + currentStateHValue[col + units * 2] + bias[col + units * 2]; T xo = currentInputValue[col + units * 3] + currentStateHValue[col + units * 3] + bias[col + units * 3]; xi = sigmoid(xi); xc = tanh(xc); xf = sigmoid(xf + forget); xo = sigmoid(xo); T cs = xc * xi + stateC[i] * xf; T rs = tanh(cs) * xo; result[i] = rs; resultState[i] = cs; } } template <class T> static void __global__ _cuda_process_gru_cell_gates( unsigned batch, unsigned units, unsigned inputSize, const T* state, const T* inputs, const T* value, const T* bias, T* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; const unsigned total = batch * units; for (unsigned i = tid; i < total; i += skip) { unsigned row = i / units; unsigned col = i % units; T r = sigmoid(value[row * units * 2 + col] + bias[col]); T s = r * state[i]; result[row * (inputSize + units) + col + inputSize] = s; } const unsigned inputTotal = batch * inputSize; for (unsigned i = tid; i < inputTotal; i += skip) { unsigned row = i / inputSize; unsigned col = i % inputSize; result[row * (inputSize + units) + col] = inputs[i]; } } template <class T> static void __global__ _cuda_process_gru_cell_candidate( unsigned batch, unsigned units, const T* state, const T* value, const T* bias, const T* cValue, const T* cBias, T* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; const unsigned total = batch * units; for (unsigned i = tid; i < total; i += skip) { unsigned row = i / units; unsigned col = i % units; T r = sigmoid(value[row * units * 2 + col] + bias[col]); T u = sigmoid(value[row * units * 2 + col + units] + bias[col + units]); T c = tanh(cValue[i] + cBias[col]); result[i] = u * state[i] + (static_cast<T>(1) - u) * c; } } template <class T> static void __global__ _cuda_process_pytorch_gru_cell( unsigned batch, unsigned units, const T* state, const T* inputValue, const T* stateValue, const T* gateBias, const T* candidateIBias, const T* candidateHBias, T* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * 
gridDim.x; const unsigned total = batch * units; for (unsigned i = tid; i < total; i += skip) { unsigned row = i / units; unsigned col = i % units; const T* currentInputValue = inputValue + row * units * 3; const T* currentStateValue = stateValue + row * units * 3; T r = currentInputValue[col] + currentStateValue[col] + gateBias[col]; r = sigmoid(r); T z = currentInputValue[col + units] + currentStateValue[col + units] + gateBias[col + units]; z = sigmoid(z); T ni = currentInputValue[col + units * 2] + candidateIBias[col]; T nh = currentStateValue[col + units * 2] + candidateHBias[col]; T n = tanh(ni + r * nh); result[i] = n + z * (state[i] - n); } } // helper functions template <class T> static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, std::tuple<Variable<T>, Variable<T>>>::type _process_lstm_cell_helper( const Tensor<T>& stateC, const Tensor<T>& inputValue, const Tensor<T>& stateHValue, const Tensor<T>& bias, T forget, size_t blockCount, size_t threadCount) { unsigned batch = stateC.shape(0); unsigned units = stateC.shape(1); Variable<T> result(stateC.shape()); Variable<T> resultState(stateC.shape()); tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault()); hipLaunchKernelGGL(( _cuda_process_lstm_cell), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(), batch, units, stateC.data(), inputValue.data(), stateHValue.data(), bias.data(), forget, result.data(), resultState.data()); hipError_t ret = hipGetLastError(); if (ret != hipSuccess) throw CUDARuntimeError(ret); return std::make_tuple(std::move(result), std::move(resultState)); } template <class T, class ST> static inline std::tuple<Variable<T>, Variable<T>> _process_lstm_cell_helper( const Tensor<T>& stateC, const Tensor<T>& inputValue, const Tensor<T>& stateHValue, const Tensor<T>& bias, T forget, ST blockCount, ST threadCount) { throw NotImplementedError(); } template <class T> static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type _process_gru_cell_gates_helper( const Tensor<T>& state, const Tensor<T>& inputs, const Tensor<T>& value, const Tensor<T>& bias, size_t blockCount, size_t threadCount) { unsigned batch = state.shape(0); unsigned units = state.shape(1); unsigned inputSize = inputs.shape(1); Variable<T> result({batch, inputSize + units}); tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault()); hipLaunchKernelGGL(( _cuda_process_gru_cell_gates), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(), batch, units, inputSize, state.data(), inputs.data(), value.data(), bias.data(), result.data()); hipError_t ret = hipGetLastError(); if (ret != hipSuccess) throw CUDARuntimeError(ret); return result; } template <class T, class ST> static inline Variable<T> _process_gru_cell_gates_helper( const Tensor<T>& state, const Tensor<T>& inputs, const Tensor<T>& value, const Tensor<T>& bias, ST blockCount, ST threadCount) { throw NotImplementedError(); } template <class T> static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type _process_gru_cell_candidate_helper( const Tensor<T>& state, const Tensor<T>& value, const Tensor<T>& bias, const Tensor<T>& cValue, const Tensor<T>& cBias, size_t blockCount, size_t threadCount) { unsigned batch = state.shape(0); unsigned units = state.shape(1); Variable<T> result({batch, units}); tfcc::CUDASession* session = 
static_cast<tfcc::CUDASession*>(Session::getThreadDefault()); hipLaunchKernelGGL(( _cuda_process_gru_cell_candidate), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(), batch, units, state.data(), value.data(), bias.data(), cValue.data(), cBias.data(), result.data()); hipError_t ret = hipGetLastError(); if (ret != hipSuccess) throw CUDARuntimeError(ret); return result; } template <class T, class ST> static inline Variable<T> _process_gru_cell_candidate_helper( const Tensor<T>& state, const Tensor<T>& value, const Tensor<T>& bias, const Tensor<T>& cValue, const Tensor<T>& cBias, ST blockCount, ST threadCount) { throw NotImplementedError(); } template <class T> static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type _process_pytorch_gru_cell_helper( const Tensor<T>& state, const Tensor<T>& inputValue, const Tensor<T>& stateValue, const Tensor<T>& gateBias, const Tensor<T>& candidateIBias, const Tensor<T>& candidateHBias, size_t blockCount, size_t threadCount) { unsigned batch = state.shape(0); unsigned units = state.shape(1); Variable<T> result(state.shape()); tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault()); hipLaunchKernelGGL(( _cuda_process_pytorch_gru_cell), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(), batch, units, state.data(), inputValue.data(), stateValue.data(), gateBias.data(), candidateIBias.data(), candidateHBias.data(), result.data()); hipError_t ret = hipGetLastError(); if (ret != hipSuccess) throw CUDARuntimeError(ret); return result; } template <class T, class ST> static inline Variable<T> _process_pytorch_gru_cell_helper( const Tensor<T>& state, const Tensor<T>& inputValue, const Tensor<T>& stateValue, const Tensor<T>& gateBias, const Tensor<T>& candidateIBias, const Tensor<T>& candidateHBias, ST blockCount, ST threadCount) { throw NotImplementedError(); } //class function template <class T> CUDACellInterface<T>::CUDACellInterface(const CUDADeviceProperty& property) : _property(property) { } template <class T> CUDACellInterface<T>::~CUDACellInterface() { } template <class T> std::tuple<Variable<T>, Variable<T>> CUDACellInterface<T>::processLSTMCell( const Tensor<T>& stateC, const Tensor<T>& inputValue, const Tensor<T>& stateHValue, const Tensor<T>& bias, T forget) { if (stateC.shape().size() != 2) throw InvalidArgumentError("invalid stateC"); unsigned batch = stateC.shape(0); unsigned units = stateC.shape(1); if (inputValue.shape().size() != 2 || inputValue.shape(0) != batch || inputValue.shape(1) != units * 4) throw InvalidArgumentError("invalid inputValue"); if (stateHValue.shape().size() != 2 || stateHValue.shape(0) != batch || stateHValue.shape(1) != units * 4) throw InvalidArgumentError("invalid stateHValue"); if (bias.shape().size() != 1 || bias.shape(0) != units * 4) throw InvalidArgumentError("invalid bias"); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(batch * units); return _process_lstm_cell_helper(stateC, inputValue, stateHValue, bias, forget, blockCount, threadCount); } template <class T> Variable<T> CUDACellInterface<T>::processGRUCellGates( const Tensor<T>& state, const Tensor<T>& inputs, const Tensor<T>& value, const Tensor<T>& bias) { if (state.shape().size() != 2) throw InvalidArgumentError("invalid state"); unsigned batch = state.shape(0); unsigned units = state.shape(1); if (inputs.shape().size() != 2 || inputs.shape(0) != batch) throw 
InvalidArgumentError("invalid inputs"); unsigned inputSize = inputs.shape(1); if (value.shape().size() != 2 || value.shape(0) != batch || value.shape(1) != units * 2) throw InvalidArgumentError("invalid value"); if (bias.shape().size() != 1 || bias.shape(0) != units * 2) throw InvalidArgumentError("invalid bias"); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(batch * units); return _process_gru_cell_gates_helper(state, inputs, value, bias, blockCount, threadCount); } template <class T> Variable<T> CUDACellInterface<T>::processGRUCellCandidate( const Tensor<T>& state, const Tensor<T>& value, const Tensor<T>& bias, const Tensor<T>& cValue, const Tensor<T>& cBias) { if (state.shape().size() != 2) throw InvalidArgumentError("invalid state"); unsigned batch = state.shape(0); unsigned units = state.shape(1); if (value.shape().size() != 2 || value.shape(0) != batch || value.shape(1) != units * 2) throw InvalidArgumentError("invalid value"); if (bias.shape().size() != 1 || bias.shape(0) != units * 2) throw InvalidArgumentError("invalid bias"); if (cValue.shape().size() != 2 || cValue.shape(0) != batch || cValue.shape(1) != units) throw InvalidArgumentError("invalid cValue"); if (cBias.shape().size() != 1 || cBias.shape(0) != units) throw InvalidArgumentError("invalid cBias"); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(batch * units); return _process_gru_cell_candidate_helper(state, value, bias, cValue, cBias, blockCount, threadCount); } template <class T> Variable<T> CUDACellInterface<T>::processPyTorchGRUCell( const Tensor<T>& state, const Tensor<T>& inputValue, const Tensor<T>& stateValue, const Tensor<T>& gateBias, const Tensor<T>& candidateIBias, const Tensor<T>& candidateHBias) { if (state.shape().size() != 2) throw InvalidArgumentError("invalid state"); unsigned batch = state.shape(0); unsigned units = state.shape(1); if (inputValue.shape().size() != 2 || inputValue.shape(0) != batch || inputValue.shape(1) != units * 3) throw InvalidArgumentError("invalid inputValue"); if (stateValue.shape().size() != 2 || stateValue.shape(0) != batch || stateValue.shape(1) != units * 3) throw InvalidArgumentError("invalid stateValue"); if (gateBias.shape().size() != 1 || gateBias.shape(0) != units * 2) throw InvalidArgumentError("invalid gateBias"); if (candidateIBias.shape().size() != 1 || candidateIBias.shape(0) != units) throw InvalidArgumentError("invalid candidateIBias"); if (candidateHBias.shape().size() != 1 || candidateHBias.shape(0) != units) throw InvalidArgumentError("invalid candidateHBias"); Variable<T> result(state.shape()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(batch * units); return _process_pytorch_gru_cell_helper(state, inputValue, stateValue, gateBias, candidateIBias, candidateHBias, blockCount, threadCount); } #define DEFINE_FUNC(type) template class CUDACellInterface<type>; TFCC_FOR_ALL_TYPES(DEFINE_FUNC); } // namespace tfcc
f7bc02703723d137febc380310fa08a6ee82875c.cu
#include "tfcc_cudacellinterface.h" #include <type_traits> #include "exceptions/tfcc_cudaruntimeerror.h" #include "exceptions/tfcc_cudnnruntimeerror.h" #include "exceptions/tfcc_invalidargumenterror.h" #include "exceptions/tfcc_notimplementederror.h" #include "framework/tfcc_cudasession.h" #include "framework/tfcc_session.h" #include "framework/tfcc_types.h" namespace tfcc { // cuda functionsz template <class T> static inline T __device__ sigmoid(const T value) { return static_cast<T>(1) / (static_cast<T>(1) + exp(-value)); } template <class T> static void __global__ _cuda_process_lstm_cell( unsigned batch, unsigned units, const T* stateC, const T* inputValue, const T* stateHValue, const T* bias, T forget, T* result, T* resultState) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; const unsigned total = batch * units; for (unsigned i = tid; i < total; i += skip) { unsigned row = i / units; unsigned col = i % units; const T* currentInputValue = inputValue + row * units * 4; const T* currentStateHValue = stateHValue + row * units * 4; T xi = currentInputValue[col] + currentStateHValue[col] + bias[col]; T xc = currentInputValue[col + units] + currentStateHValue[col + units] + bias[col + units]; T xf = currentInputValue[col + units * 2] + currentStateHValue[col + units * 2] + bias[col + units * 2]; T xo = currentInputValue[col + units * 3] + currentStateHValue[col + units * 3] + bias[col + units * 3]; xi = sigmoid(xi); xc = tanh(xc); xf = sigmoid(xf + forget); xo = sigmoid(xo); T cs = xc * xi + stateC[i] * xf; T rs = tanh(cs) * xo; result[i] = rs; resultState[i] = cs; } } template <class T> static void __global__ _cuda_process_gru_cell_gates( unsigned batch, unsigned units, unsigned inputSize, const T* state, const T* inputs, const T* value, const T* bias, T* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; const unsigned total = batch * units; for (unsigned i = tid; i < total; i += skip) { unsigned row = i / units; unsigned col = i % units; T r = sigmoid(value[row * units * 2 + col] + bias[col]); T s = r * state[i]; result[row * (inputSize + units) + col + inputSize] = s; } const unsigned inputTotal = batch * inputSize; for (unsigned i = tid; i < inputTotal; i += skip) { unsigned row = i / inputSize; unsigned col = i % inputSize; result[row * (inputSize + units) + col] = inputs[i]; } } template <class T> static void __global__ _cuda_process_gru_cell_candidate( unsigned batch, unsigned units, const T* state, const T* value, const T* bias, const T* cValue, const T* cBias, T* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; const unsigned total = batch * units; for (unsigned i = tid; i < total; i += skip) { unsigned row = i / units; unsigned col = i % units; T r = sigmoid(value[row * units * 2 + col] + bias[col]); T u = sigmoid(value[row * units * 2 + col + units] + bias[col + units]); T c = tanh(cValue[i] + cBias[col]); result[i] = u * state[i] + (static_cast<T>(1) - u) * c; } } template <class T> static void __global__ _cuda_process_pytorch_gru_cell( unsigned batch, unsigned units, const T* state, const T* inputValue, const T* stateValue, const T* gateBias, const T* candidateIBias, const T* candidateHBias, T* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; const unsigned total = batch * units; for (unsigned i = tid; i < total; i += skip) 
{ unsigned row = i / units; unsigned col = i % units; const T* currentInputValue = inputValue + row * units * 3; const T* currentStateValue = stateValue + row * units * 3; T r = currentInputValue[col] + currentStateValue[col] + gateBias[col]; r = sigmoid(r); T z = currentInputValue[col + units] + currentStateValue[col + units] + gateBias[col + units]; z = sigmoid(z); T ni = currentInputValue[col + units * 2] + candidateIBias[col]; T nh = currentStateValue[col + units * 2] + candidateHBias[col]; T n = tanh(ni + r * nh); result[i] = n + z * (state[i] - n); } } // helper functions template <class T> static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, std::tuple<Variable<T>, Variable<T>>>::type _process_lstm_cell_helper( const Tensor<T>& stateC, const Tensor<T>& inputValue, const Tensor<T>& stateHValue, const Tensor<T>& bias, T forget, size_t blockCount, size_t threadCount) { unsigned batch = stateC.shape(0); unsigned units = stateC.shape(1); Variable<T> result(stateC.shape()); Variable<T> resultState(stateC.shape()); tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault()); _cuda_process_lstm_cell<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>( batch, units, stateC.data(), inputValue.data(), stateHValue.data(), bias.data(), forget, result.data(), resultState.data()); cudaError_t ret = cudaGetLastError(); if (ret != cudaSuccess) throw CUDARuntimeError(ret); return std::make_tuple(std::move(result), std::move(resultState)); } template <class T, class ST> static inline std::tuple<Variable<T>, Variable<T>> _process_lstm_cell_helper( const Tensor<T>& stateC, const Tensor<T>& inputValue, const Tensor<T>& stateHValue, const Tensor<T>& bias, T forget, ST blockCount, ST threadCount) { throw NotImplementedError(); } template <class T> static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type _process_gru_cell_gates_helper( const Tensor<T>& state, const Tensor<T>& inputs, const Tensor<T>& value, const Tensor<T>& bias, size_t blockCount, size_t threadCount) { unsigned batch = state.shape(0); unsigned units = state.shape(1); unsigned inputSize = inputs.shape(1); Variable<T> result({batch, inputSize + units}); tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault()); _cuda_process_gru_cell_gates<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>( batch, units, inputSize, state.data(), inputs.data(), value.data(), bias.data(), result.data()); cudaError_t ret = cudaGetLastError(); if (ret != cudaSuccess) throw CUDARuntimeError(ret); return result; } template <class T, class ST> static inline Variable<T> _process_gru_cell_gates_helper( const Tensor<T>& state, const Tensor<T>& inputs, const Tensor<T>& value, const Tensor<T>& bias, ST blockCount, ST threadCount) { throw NotImplementedError(); } template <class T> static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type _process_gru_cell_candidate_helper( const Tensor<T>& state, const Tensor<T>& value, const Tensor<T>& bias, const Tensor<T>& cValue, const Tensor<T>& cBias, size_t blockCount, size_t threadCount) { unsigned batch = state.shape(0); unsigned units = state.shape(1); Variable<T> result({batch, units}); tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault()); _cuda_process_gru_cell_candidate<<<blockCount, threadCount, 0, 
session->getImpl()->cudaStream()>>>( batch, units, state.data(), value.data(), bias.data(), cValue.data(), cBias.data(), result.data()); cudaError_t ret = cudaGetLastError(); if (ret != cudaSuccess) throw CUDARuntimeError(ret); return result; } template <class T, class ST> static inline Variable<T> _process_gru_cell_candidate_helper( const Tensor<T>& state, const Tensor<T>& value, const Tensor<T>& bias, const Tensor<T>& cValue, const Tensor<T>& cBias, ST blockCount, ST threadCount) { throw NotImplementedError(); } template <class T> static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type _process_pytorch_gru_cell_helper( const Tensor<T>& state, const Tensor<T>& inputValue, const Tensor<T>& stateValue, const Tensor<T>& gateBias, const Tensor<T>& candidateIBias, const Tensor<T>& candidateHBias, size_t blockCount, size_t threadCount) { unsigned batch = state.shape(0); unsigned units = state.shape(1); Variable<T> result(state.shape()); tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault()); _cuda_process_pytorch_gru_cell<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>( batch, units, state.data(), inputValue.data(), stateValue.data(), gateBias.data(), candidateIBias.data(), candidateHBias.data(), result.data()); cudaError_t ret = cudaGetLastError(); if (ret != cudaSuccess) throw CUDARuntimeError(ret); return result; } template <class T, class ST> static inline Variable<T> _process_pytorch_gru_cell_helper( const Tensor<T>& state, const Tensor<T>& inputValue, const Tensor<T>& stateValue, const Tensor<T>& gateBias, const Tensor<T>& candidateIBias, const Tensor<T>& candidateHBias, ST blockCount, ST threadCount) { throw NotImplementedError(); } //class function template <class T> CUDACellInterface<T>::CUDACellInterface(const CUDADeviceProperty& property) : _property(property) { } template <class T> CUDACellInterface<T>::~CUDACellInterface() { } template <class T> std::tuple<Variable<T>, Variable<T>> CUDACellInterface<T>::processLSTMCell( const Tensor<T>& stateC, const Tensor<T>& inputValue, const Tensor<T>& stateHValue, const Tensor<T>& bias, T forget) { if (stateC.shape().size() != 2) throw InvalidArgumentError("invalid stateC"); unsigned batch = stateC.shape(0); unsigned units = stateC.shape(1); if (inputValue.shape().size() != 2 || inputValue.shape(0) != batch || inputValue.shape(1) != units * 4) throw InvalidArgumentError("invalid inputValue"); if (stateHValue.shape().size() != 2 || stateHValue.shape(0) != batch || stateHValue.shape(1) != units * 4) throw InvalidArgumentError("invalid stateHValue"); if (bias.shape().size() != 1 || bias.shape(0) != units * 4) throw InvalidArgumentError("invalid bias"); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(batch * units); return _process_lstm_cell_helper(stateC, inputValue, stateHValue, bias, forget, blockCount, threadCount); } template <class T> Variable<T> CUDACellInterface<T>::processGRUCellGates( const Tensor<T>& state, const Tensor<T>& inputs, const Tensor<T>& value, const Tensor<T>& bias) { if (state.shape().size() != 2) throw InvalidArgumentError("invalid state"); unsigned batch = state.shape(0); unsigned units = state.shape(1); if (inputs.shape().size() != 2 || inputs.shape(0) != batch) throw InvalidArgumentError("invalid inputs"); unsigned inputSize = inputs.shape(1); if (value.shape().size() != 2 || value.shape(0) != batch || value.shape(1) != units * 2) throw 
InvalidArgumentError("invalid value"); if (bias.shape().size() != 1 || bias.shape(0) != units * 2) throw InvalidArgumentError("invalid bias"); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(batch * units); return _process_gru_cell_gates_helper(state, inputs, value, bias, blockCount, threadCount); } template <class T> Variable<T> CUDACellInterface<T>::processGRUCellCandidate( const Tensor<T>& state, const Tensor<T>& value, const Tensor<T>& bias, const Tensor<T>& cValue, const Tensor<T>& cBias) { if (state.shape().size() != 2) throw InvalidArgumentError("invalid state"); unsigned batch = state.shape(0); unsigned units = state.shape(1); if (value.shape().size() != 2 || value.shape(0) != batch || value.shape(1) != units * 2) throw InvalidArgumentError("invalid value"); if (bias.shape().size() != 1 || bias.shape(0) != units * 2) throw InvalidArgumentError("invalid bias"); if (cValue.shape().size() != 2 || cValue.shape(0) != batch || cValue.shape(1) != units) throw InvalidArgumentError("invalid cValue"); if (cBias.shape().size() != 1 || cBias.shape(0) != units) throw InvalidArgumentError("invalid cBias"); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(batch * units); return _process_gru_cell_candidate_helper(state, value, bias, cValue, cBias, blockCount, threadCount); } template <class T> Variable<T> CUDACellInterface<T>::processPyTorchGRUCell( const Tensor<T>& state, const Tensor<T>& inputValue, const Tensor<T>& stateValue, const Tensor<T>& gateBias, const Tensor<T>& candidateIBias, const Tensor<T>& candidateHBias) { if (state.shape().size() != 2) throw InvalidArgumentError("invalid state"); unsigned batch = state.shape(0); unsigned units = state.shape(1); if (inputValue.shape().size() != 2 || inputValue.shape(0) != batch || inputValue.shape(1) != units * 3) throw InvalidArgumentError("invalid inputValue"); if (stateValue.shape().size() != 2 || stateValue.shape(0) != batch || stateValue.shape(1) != units * 3) throw InvalidArgumentError("invalid stateValue"); if (gateBias.shape().size() != 1 || gateBias.shape(0) != units * 2) throw InvalidArgumentError("invalid gateBias"); if (candidateIBias.shape().size() != 1 || candidateIBias.shape(0) != units) throw InvalidArgumentError("invalid candidateIBias"); if (candidateHBias.shape().size() != 1 || candidateHBias.shape(0) != units) throw InvalidArgumentError("invalid candidateHBias"); Variable<T> result(state.shape()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(batch * units); return _process_pytorch_gru_cell_helper(state, inputValue, stateValue, gateBias, candidateIBias, candidateHBias, blockCount, threadCount); } #define DEFINE_FUNC(type) template class CUDACellInterface<type>; TFCC_FOR_ALL_TYPES(DEFINE_FUNC); } // namespace tfcc
308b34cc66f6f86ba60d6a3a4792e8dd1a3ce91e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file ex_particle_OPENMP_seq.c * @author Michael Trotter & Matt Goodrum * @brief Particle filter implementation in C/OpenMP */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <math.h> #include <unistd.h> #include <fcntl.h> #include <float.h> #include <sys/time.h> #define PI acos(-1) #define BLOCK_X 16 #define BLOCK_Y 16 /** @var M value for Linear Congruential Generator (LCG); use GCC's value */ long M = INT_MAX; /** @var A value for LCG */ int A = 1103515245; /** @var C value for LCG */ int C = 12345; const int threads_per_block = 128; /***************************** *GET_TIME *returns a long int representing the time *****************************/ long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } // Returns the number of seconds elapsed between the two specified times float elapsed_time(long long start_time, long long end_time) { return (float) (end_time - start_time) / (1000 * 1000); } /***************************** * CHECK_ERROR * Checks for CUDA errors and prints them to the screen to help with * debugging of CUDA related programming *****************************/ void check_error(hipError_t e) { if (e != hipSuccess) { printf("\nCUDA error: %s\n", hipGetErrorString(e)); exit(1); } } __device__ int findIndexSeq(double * CDF, int lengthCDF, double value) { int index = -1; int x; for(x = 0; x < lengthCDF; x++) { if(CDF[x] >= value) { index = x; break; } } if(index == -1) return lengthCDF-1; return index; } __device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) { if(endIndex < beginIndex) return -1; int middleIndex; while(endIndex > beginIndex) { middleIndex = beginIndex + ((endIndex-beginIndex)/2); if(CDF[middleIndex] >= value) { if(middleIndex == 0) return middleIndex; else if(CDF[middleIndex-1] < value) return middleIndex; else if(CDF[middleIndex-1] == value) { while(CDF[middleIndex] == value && middleIndex >= 0) middleIndex--; middleIndex++; return middleIndex; } } if(CDF[middleIndex] > value) endIndex = middleIndex-1; else beginIndex = middleIndex+1; } return -1; } /***************************** * CUDA Kernel Function to replace FindIndex * param1: arrayX * param2: arrayY * param3: CDF * param4: u * param5: xj * param6: yj * param7: Nparticles *****************************/ __global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){ int block_id = blockIdx.x;// + gridDim.x * blockIdx.y; int i = blockDim.x * block_id + threadIdx.x; if(i < Nparticles){ int index = -1; int x; for(x = 0; x < Nparticles; x++){ if(CDF[x] >= u[i]){ index = x; break; } } if(index == -1){ index = Nparticles-1; } xj[i] = arrayX[index]; yj[i] = arrayY[index]; } } /** * Takes in a double and returns an integer that approximates to that double * @return if the mantissa < .5 => return value < input value; else return value > input value */ double roundDouble(double value){ int newValue = (int)(value); if(value - newValue < .5) return newValue; else return newValue++; } /** * Set values of the 3D array to a newValue if that value is equal to the testValue * @param testValue The value to be replaced * @param newValue The value to replace testValue with * @param array3D The image vector * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames */ void setIf(int 
testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue) array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue; } } } } /** * Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG) * @see http://en.wikipedia.org/wiki/Linear_congruential_generator * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a uniformly distributed number [0, 1) */ double randu(int * seed, int index) { int num = A*seed[index] + C; seed[index] = num % M; return fabs(seed[index]/((double) M)); } /** * Generates a normally distributed random number using the Box-Muller transformation * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a double representing random number generated using the Box-Muller algorithm * @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution */ double randn(int * seed, int index){ /*Box-Muller algorithm*/ double u = randu(seed, index); double v = randu(seed, index); double cosine = cos(2*PI*v); double rt = -2*log(u); return sqrt(rt)*cosine; } /** * Sets values of 3D matrix using randomly generated numbers from a normal distribution * @param array3D The video to be modified * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames * @param seed The seed array */ void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0)); } } } } /** * Fills a radius x radius matrix representing the disk * @param disk The pointer to the disk to be made * @param radius The radius of the disk to be made */ void strelDisk(int * disk, int radius) { int diameter = radius*2 - 1; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2)); if(distance < radius) disk[x*diameter + y] = 1; } } } /** * Dilates the provided video * @param matrix The video to be dilated * @param posX The x location of the pixel to be dilated * @param posY The y location of the pixel to be dilated * @param poxZ The z location of the pixel to be dilated * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames * @param error The error radius */ void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) { int startX = posX - error; while(startX < 0) startX++; int startY = posY - error; while(startY < 0) startY++; int endX = posX + error; while(endX > dimX) endX--; int endY = posY + error; while(endY > dimY) endY--; int x,y; for(x = startX; x < endX; x++){ for(y = startY; y < endY; y++){ double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) ); if(distance < error) matrix[x*dimY*dimZ + y*dimZ + posZ] = 1; } } } /** * Dilates the target matrix using the radius as a guide * @param matrix The reference matrix * @param dimX The x dimension of the video * @param dimY The y 
dimension of the video * @param dimZ The z dimension of the video * @param error The error radius to be dilated * @param newMatrix The target matrix */ void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix) { int x, y, z; for(z = 0; z < dimZ; z++){ for(x = 0; x < dimX; x++){ for(y = 0; y < dimY; y++){ if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){ dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error); } } } } } /** * Fills a 2D array describing the offsets of the disk object * @param se The disk object * @param numOnes The number of ones in the disk * @param neighbors The array that will contain the offsets * @param radius The radius used for dilation */ void getneighbors(int * se, int numOnes, double * neighbors, int radius){ int x, y; int neighY = 0; int center = radius - 1; int diameter = radius*2 -1; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(se[x*diameter + y]){ neighbors[neighY*2] = (int)(y - center); neighbors[neighY*2 + 1] = (int)(x - center); neighY++; } } } } /** * The synthetic video sequence we will work with here is composed of a * single moving object, circular in shape (fixed radius) * The motion here is a linear motion * the foreground intensity and the backgrounf intensity is known * the image is corrupted with zero mean Gaussian noise * @param I The video itself * @param IszX The x dimension of the video * @param IszY The y dimension of the video * @param Nfr The number of frames of the video * @param seed The seed array used for number generation */ void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){ int k; int max_size = IszX*IszY*Nfr; /*get object centers*/ int x0 = (int)roundDouble(IszY/2.0); int y0 = (int)roundDouble(IszX/2.0); I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1; /*move point*/ int xk, yk, pos; for(k = 1; k < Nfr; k++){ xk = abs(x0 + (k-1)); yk = abs(y0 - 2*(k-1)); pos = yk * IszY * Nfr + xk *Nfr + k; if(pos >= max_size) pos = 0; I[pos] = 1; } /*dilate matrix*/ int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix); int x, y; for(x = 0; x < IszX; x++){ for(y = 0; y < IszY; y++){ for(k = 0; k < Nfr; k++){ I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k]; } } } free(newMatrix); /*define background, add noise*/ setIf(0, 100, I, &IszX, &IszY, &Nfr); setIf(1, 228, I, &IszX, &IszY, &Nfr); /*add noise*/ addNoise(I, &IszX, &IszY, &Nfr, seed); } /** * Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100 * @param I The 3D matrix * @param ind The current ind array * @param numOnes The length of ind array * @return A double representing the sum */ double calcLikelihoodSum(int * I, int * ind, int numOnes){ double likelihoodSum = 0.0; int y; for(y = 0; y < numOnes; y++) likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0; return likelihoodSum; } /** * Finds the first element in the CDF that is greater than or equal to the provided value and returns that index * @note This function uses sequential search * @param CDF The CDF * @param lengthCDF The length of CDF * @param value The value to be found * @return The index of value in the CDF; if value is never found, returns the last index */ int findIndex(double * CDF, int lengthCDF, double value){ int index = -1; int x; for(x = 0; x < lengthCDF; x++){ if(CDF[x] >= value){ index = x; break; } } if(index == -1){ return lengthCDF-1; } return index; } /** * The implementation of the particle filter using OpenMP 
for many frames * @see http://openmp.org/wp/ * @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods * @param I The video to be run * @param IszX The x dimension of the video * @param IszY The y dimension of the video * @param Nfr The number of frames * @param seed The seed array used for random number generation * @param Nparticles The number of particles to be used */ void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){ int max_size = IszX*IszY*Nfr; long long start = get_time(); //original particle centroid double xe = roundDouble(IszY/2.0); double ye = roundDouble(IszX/2.0); //expected object locations, compared to center int radius = 5; int diameter = radius*2 - 1; int * disk = (int *)malloc(diameter*diameter*sizeof(int)); strelDisk(disk, radius); int countOnes = 0; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(disk[x*diameter + y] == 1) countOnes++; } } double * objxy = (double *)malloc(countOnes*2*sizeof(double)); getneighbors(disk, countOnes, objxy, radius); long long get_neighbors = get_time(); //printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors)); //initial weights are all equal (1/Nparticles) double * weights = (double *)malloc(sizeof(double)*Nparticles); for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); } long long get_weights = get_time(); //printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights)); //initial likelihood to 0.0 double * likelihood = (double *)malloc(sizeof(double)*Nparticles); double * arrayX = (double *)malloc(sizeof(double)*Nparticles); double * arrayY = (double *)malloc(sizeof(double)*Nparticles); double * xj = (double *)malloc(sizeof(double)*Nparticles); double * yj = (double *)malloc(sizeof(double)*Nparticles); double * CDF = (double *)malloc(sizeof(double)*Nparticles); //GPU copies of arrays double * arrayX_GPU; double * arrayY_GPU; double * xj_GPU; double * yj_GPU; double * CDF_GPU; int * ind = (int*)malloc(sizeof(int)*countOnes); double * u = (double *)malloc(sizeof(double)*Nparticles); double * u_GPU; //CUDA memory allocation check_error(hipMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &xj_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &yj_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &u_GPU, sizeof(double)*Nparticles)); for(x = 0; x < Nparticles; x++){ arrayX[x] = xe; arrayY[x] = ye; } int k; //double * Ik = (double *)malloc(sizeof(double)*IszX*IszY); int indX, indY; for(k = 1; k < Nfr; k++){ long long set_arrays = get_time(); //printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays)); //apply motion model //draws sample from motion model (random walk). 
The only prior information //is that the object moves 2x as fast as in the y direction for(x = 0; x < Nparticles; x++){ arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x); arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x); } //particle filter likelihood long long error = get_time(); //printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error)); for(x = 0; x < Nparticles; x++){ //compute the likelihood: remember our assumption is that you know // foreground and the background image intensity distribution. // Notice that we consider here a likelihood ratio, instead of // p(z|x). It is possible in this case. why? a hometask for you. //calc ind for(y = 0; y < countOnes; y++){ indX = roundDouble(arrayX[x]) + objxy[y*2 + 1]; indY = roundDouble(arrayY[x]) + objxy[y*2]; ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k); if(ind[y] >= max_size) ind[y] = 0; } likelihood[x] = calcLikelihoodSum(I, ind, countOnes); likelihood[x] = likelihood[x]/countOnes; } long long likelihood_time = get_time(); //printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time)); // update & normalize weights // using equation (63) of Arulampalam Tutorial for(x = 0; x < Nparticles; x++){ weights[x] = weights[x] * exp(likelihood[x]); } long long exponential = get_time(); //printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential)); double sumWeights = 0; for(x = 0; x < Nparticles; x++){ sumWeights += weights[x]; } long long sum_time = get_time(); //printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time)); for(x = 0; x < Nparticles; x++){ weights[x] = weights[x]/sumWeights; } long long normalize = get_time(); //printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize)); xe = 0; ye = 0; // estimate the object location by expected values for(x = 0; x < Nparticles; x++){ xe += arrayX[x] * weights[x]; ye += arrayY[x] * weights[x]; } long long move_time = get_time(); //printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time)); printf("%lf\n", xe); //XE printf("%lf\n", ye); //YE double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) ); printf("%lf\n", distance); //display(hold off for now) //pause(hold off for now) //resampling CDF[0] = weights[0]; for(x = 1; x < Nparticles; x++){ CDF[x] = weights[x] + CDF[x-1]; } long long cum_sum = get_time(); //printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum)); double u1 = (1/((double)(Nparticles)))*randu(seed, 0); for(x = 0; x < Nparticles; x++){ u[x] = u1 + x/((double)(Nparticles)); } long long u_time = get_time(); //printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time)); long long start_copy = get_time(); //CUDA memory copying from CPU memory to GPU memory hipMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(u_GPU, u, sizeof(double)*Nparticles, hipMemcpyHostToDevice); long long end_copy = get_time(); //Set number of threads int num_blocks = ceil((double) Nparticles/(double) threads_per_block); //KERNEL FUNCTION CALL hipLaunchKernelGGL(( kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, 
Nparticles); hipDeviceSynchronize(); long long start_copy_back = get_time(); //CUDA memory copying back from GPU to CPU memory hipMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost); hipMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost); long long end_copy_back = get_time(); //printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy)); //printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back)); //printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back)); long long xyj_time = get_time(); //printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time)); for(x = 0; x < Nparticles; x++){ //reassign arrayX and arrayY arrayX[x] = xj[x]; arrayY[x] = yj[x]; weights[x] = 1/((double)(Nparticles)); } long long reset = get_time(); //printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset)); } //CUDA freeing of memory hipFree(u_GPU); hipFree(CDF_GPU); hipFree(yj_GPU); hipFree(xj_GPU); hipFree(arrayY_GPU); hipFree(arrayX_GPU); //free memory free(disk); free(objxy); free(weights); free(likelihood); free(arrayX); free(arrayY); free(xj); free(yj); free(CDF); free(u); free(ind); } int main(int argc, char * argv[]){ char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>"; //check number of arguments if(argc != 9) { printf("%s\n", usage); return 0; } //check args deliminators if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) { printf( "%s\n",usage ); return 0; } int IszX, IszY, Nfr, Nparticles; //converting a string to a integer if( sscanf( argv[2], "%d", &IszX ) == EOF ) { printf("ERROR: dimX input is incorrect"); return 0; } if( IszX <= 0 ) { printf("dimX must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[4], "%d", &IszY ) == EOF ) { printf("ERROR: dimY input is incorrect"); return 0; } if( IszY <= 0 ) { printf("dimY must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[6], "%d", &Nfr ) == EOF ) { printf("ERROR: Number of frames input is incorrect"); return 0; } if( Nfr <= 0 ) { printf("number of frames must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) { printf("ERROR: Number of particles input is incorrect"); return 0; } if( Nparticles <= 0 ) { printf("Number of particles must be > 0\n"); return 0; } //establish seed int * seed = (int *)malloc(sizeof(int)*Nparticles); int i; for(i = 0; i < Nparticles; i++) //TAN: use the same seed seed[i] = i;//time(0)*i; //malloc matrix int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); long long start = get_time(); //call video sequence videoSequence(I, IszX, IszY, Nfr, seed); long long endVideoSequence = get_time(); //printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence)); //call particle filter particleFilter(I, IszX, IszY, Nfr, seed, Nparticles); long long endParticleFilter = get_time(); //printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter)); //printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter)); free(seed); free(I); return 0; }
308b34cc66f6f86ba60d6a3a4792e8dd1a3ce91e.cu
/** * @file ex_particle_OPENMP_seq.c * @author Michael Trotter & Matt Goodrum * @brief Particle filter implementation in C/OpenMP */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <math.h> #include <unistd.h> #include <fcntl.h> #include <float.h> #include <sys/time.h> #define PI acos(-1) #define BLOCK_X 16 #define BLOCK_Y 16 /** @var M value for Linear Congruential Generator (LCG); use GCC's value */ long M = INT_MAX; /** @var A value for LCG */ int A = 1103515245; /** @var C value for LCG */ int C = 12345; const int threads_per_block = 128; /***************************** *GET_TIME *returns a long int representing the time *****************************/ long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } // Returns the number of seconds elapsed between the two specified times float elapsed_time(long long start_time, long long end_time) { return (float) (end_time - start_time) / (1000 * 1000); } /***************************** * CHECK_ERROR * Checks for CUDA errors and prints them to the screen to help with * debugging of CUDA related programming *****************************/ void check_error(cudaError e) { if (e != cudaSuccess) { printf("\nCUDA error: %s\n", cudaGetErrorString(e)); exit(1); } } __device__ int findIndexSeq(double * CDF, int lengthCDF, double value) { int index = -1; int x; for(x = 0; x < lengthCDF; x++) { if(CDF[x] >= value) { index = x; break; } } if(index == -1) return lengthCDF-1; return index; } __device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) { if(endIndex < beginIndex) return -1; int middleIndex; while(endIndex > beginIndex) { middleIndex = beginIndex + ((endIndex-beginIndex)/2); if(CDF[middleIndex] >= value) { if(middleIndex == 0) return middleIndex; else if(CDF[middleIndex-1] < value) return middleIndex; else if(CDF[middleIndex-1] == value) { while(CDF[middleIndex] == value && middleIndex >= 0) middleIndex--; middleIndex++; return middleIndex; } } if(CDF[middleIndex] > value) endIndex = middleIndex-1; else beginIndex = middleIndex+1; } return -1; } /***************************** * CUDA Kernel Function to replace FindIndex * param1: arrayX * param2: arrayY * param3: CDF * param4: u * param5: xj * param6: yj * param7: Nparticles *****************************/ __global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){ int block_id = blockIdx.x;// + gridDim.x * blockIdx.y; int i = blockDim.x * block_id + threadIdx.x; if(i < Nparticles){ int index = -1; int x; for(x = 0; x < Nparticles; x++){ if(CDF[x] >= u[i]){ index = x; break; } } if(index == -1){ index = Nparticles-1; } xj[i] = arrayX[index]; yj[i] = arrayY[index]; } } /** * Takes in a double and returns an integer that approximates to that double * @return if the mantissa < .5 => return value < input value; else return value > input value */ double roundDouble(double value){ int newValue = (int)(value); if(value - newValue < .5) return newValue; else return newValue++; } /** * Set values of the 3D array to a newValue if that value is equal to the testValue * @param testValue The value to be replaced * @param newValue The value to replace testValue with * @param array3D The image vector * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames */ void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){ int x, y, z; 
for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue) array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue; } } } } /** * Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG) * @see http://en.wikipedia.org/wiki/Linear_congruential_generator * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a uniformly distributed number [0, 1) */ double randu(int * seed, int index) { int num = A*seed[index] + C; seed[index] = num % M; return fabs(seed[index]/((double) M)); } /** * Generates a normally distributed random number using the Box-Muller transformation * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a double representing random number generated using the Box-Muller algorithm * @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution */ double randn(int * seed, int index){ /*Box-Muller algorithm*/ double u = randu(seed, index); double v = randu(seed, index); double cosine = cos(2*PI*v); double rt = -2*log(u); return sqrt(rt)*cosine; } /** * Sets values of 3D matrix using randomly generated numbers from a normal distribution * @param array3D The video to be modified * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames * @param seed The seed array */ void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0)); } } } } /** * Fills a radius x radius matrix representing the disk * @param disk The pointer to the disk to be made * @param radius The radius of the disk to be made */ void strelDisk(int * disk, int radius) { int diameter = radius*2 - 1; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2)); if(distance < radius) disk[x*diameter + y] = 1; } } } /** * Dilates the provided video * @param matrix The video to be dilated * @param posX The x location of the pixel to be dilated * @param posY The y location of the pixel to be dilated * @param poxZ The z location of the pixel to be dilated * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames * @param error The error radius */ void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) { int startX = posX - error; while(startX < 0) startX++; int startY = posY - error; while(startY < 0) startY++; int endX = posX + error; while(endX > dimX) endX--; int endY = posY + error; while(endY > dimY) endY--; int x,y; for(x = startX; x < endX; x++){ for(y = startY; y < endY; y++){ double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) ); if(distance < error) matrix[x*dimY*dimZ + y*dimZ + posZ] = 1; } } } /** * Dilates the target matrix using the radius as a guide * @param matrix The reference matrix * @param dimX The x dimension of the video * @param dimY The y dimension of the video * @param dimZ The z dimension of the video * @param error The error 
radius to be dilated * @param newMatrix The target matrix */ void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix) { int x, y, z; for(z = 0; z < dimZ; z++){ for(x = 0; x < dimX; x++){ for(y = 0; y < dimY; y++){ if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){ dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error); } } } } } /** * Fills a 2D array describing the offsets of the disk object * @param se The disk object * @param numOnes The number of ones in the disk * @param neighbors The array that will contain the offsets * @param radius The radius used for dilation */ void getneighbors(int * se, int numOnes, double * neighbors, int radius){ int x, y; int neighY = 0; int center = radius - 1; int diameter = radius*2 -1; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(se[x*diameter + y]){ neighbors[neighY*2] = (int)(y - center); neighbors[neighY*2 + 1] = (int)(x - center); neighY++; } } } } /** * The synthetic video sequence we will work with here is composed of a * single moving object, circular in shape (fixed radius) * The motion here is a linear motion * the foreground intensity and the backgrounf intensity is known * the image is corrupted with zero mean Gaussian noise * @param I The video itself * @param IszX The x dimension of the video * @param IszY The y dimension of the video * @param Nfr The number of frames of the video * @param seed The seed array used for number generation */ void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){ int k; int max_size = IszX*IszY*Nfr; /*get object centers*/ int x0 = (int)roundDouble(IszY/2.0); int y0 = (int)roundDouble(IszX/2.0); I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1; /*move point*/ int xk, yk, pos; for(k = 1; k < Nfr; k++){ xk = abs(x0 + (k-1)); yk = abs(y0 - 2*(k-1)); pos = yk * IszY * Nfr + xk *Nfr + k; if(pos >= max_size) pos = 0; I[pos] = 1; } /*dilate matrix*/ int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix); int x, y; for(x = 0; x < IszX; x++){ for(y = 0; y < IszY; y++){ for(k = 0; k < Nfr; k++){ I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k]; } } } free(newMatrix); /*define background, add noise*/ setIf(0, 100, I, &IszX, &IszY, &Nfr); setIf(1, 228, I, &IszX, &IszY, &Nfr); /*add noise*/ addNoise(I, &IszX, &IszY, &Nfr, seed); } /** * Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100 * @param I The 3D matrix * @param ind The current ind array * @param numOnes The length of ind array * @return A double representing the sum */ double calcLikelihoodSum(int * I, int * ind, int numOnes){ double likelihoodSum = 0.0; int y; for(y = 0; y < numOnes; y++) likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0; return likelihoodSum; } /** * Finds the first element in the CDF that is greater than or equal to the provided value and returns that index * @note This function uses sequential search * @param CDF The CDF * @param lengthCDF The length of CDF * @param value The value to be found * @return The index of value in the CDF; if value is never found, returns the last index */ int findIndex(double * CDF, int lengthCDF, double value){ int index = -1; int x; for(x = 0; x < lengthCDF; x++){ if(CDF[x] >= value){ index = x; break; } } if(index == -1){ return lengthCDF-1; } return index; } /** * The implementation of the particle filter using OpenMP for many frames * @see http://openmp.org/wp/ * @note This function is designed to work with 
a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods * @param I The video to be run * @param IszX The x dimension of the video * @param IszY The y dimension of the video * @param Nfr The number of frames * @param seed The seed array used for random number generation * @param Nparticles The number of particles to be used */ void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){ int max_size = IszX*IszY*Nfr; long long start = get_time(); //original particle centroid double xe = roundDouble(IszY/2.0); double ye = roundDouble(IszX/2.0); //expected object locations, compared to center int radius = 5; int diameter = radius*2 - 1; int * disk = (int *)malloc(diameter*diameter*sizeof(int)); strelDisk(disk, radius); int countOnes = 0; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(disk[x*diameter + y] == 1) countOnes++; } } double * objxy = (double *)malloc(countOnes*2*sizeof(double)); getneighbors(disk, countOnes, objxy, radius); long long get_neighbors = get_time(); //printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors)); //initial weights are all equal (1/Nparticles) double * weights = (double *)malloc(sizeof(double)*Nparticles); for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); } long long get_weights = get_time(); //printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights)); //initial likelihood to 0.0 double * likelihood = (double *)malloc(sizeof(double)*Nparticles); double * arrayX = (double *)malloc(sizeof(double)*Nparticles); double * arrayY = (double *)malloc(sizeof(double)*Nparticles); double * xj = (double *)malloc(sizeof(double)*Nparticles); double * yj = (double *)malloc(sizeof(double)*Nparticles); double * CDF = (double *)malloc(sizeof(double)*Nparticles); //GPU copies of arrays double * arrayX_GPU; double * arrayY_GPU; double * xj_GPU; double * yj_GPU; double * CDF_GPU; int * ind = (int*)malloc(sizeof(int)*countOnes); double * u = (double *)malloc(sizeof(double)*Nparticles); double * u_GPU; //CUDA memory allocation check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles)); check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles)); check_error(cudaMalloc((void **) &xj_GPU, sizeof(double)*Nparticles)); check_error(cudaMalloc((void **) &yj_GPU, sizeof(double)*Nparticles)); check_error(cudaMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles)); check_error(cudaMalloc((void **) &u_GPU, sizeof(double)*Nparticles)); for(x = 0; x < Nparticles; x++){ arrayX[x] = xe; arrayY[x] = ye; } int k; //double * Ik = (double *)malloc(sizeof(double)*IszX*IszY); int indX, indY; for(k = 1; k < Nfr; k++){ long long set_arrays = get_time(); //printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays)); //apply motion model //draws sample from motion model (random walk). The only prior information //is that the object moves 2x as fast as in the y direction for(x = 0; x < Nparticles; x++){ arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x); arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x); } //particle filter likelihood long long error = get_time(); //printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error)); for(x = 0; x < Nparticles; x++){ //compute the likelihood: remember our assumption is that you know // foreground and the background image intensity distribution. 
// Notice that we consider here a likelihood ratio, instead of // p(z|x). It is possible in this case. why? a hometask for you. //calc ind for(y = 0; y < countOnes; y++){ indX = roundDouble(arrayX[x]) + objxy[y*2 + 1]; indY = roundDouble(arrayY[x]) + objxy[y*2]; ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k); if(ind[y] >= max_size) ind[y] = 0; } likelihood[x] = calcLikelihoodSum(I, ind, countOnes); likelihood[x] = likelihood[x]/countOnes; } long long likelihood_time = get_time(); //printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time)); // update & normalize weights // using equation (63) of Arulampalam Tutorial for(x = 0; x < Nparticles; x++){ weights[x] = weights[x] * exp(likelihood[x]); } long long exponential = get_time(); //printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential)); double sumWeights = 0; for(x = 0; x < Nparticles; x++){ sumWeights += weights[x]; } long long sum_time = get_time(); //printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time)); for(x = 0; x < Nparticles; x++){ weights[x] = weights[x]/sumWeights; } long long normalize = get_time(); //printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize)); xe = 0; ye = 0; // estimate the object location by expected values for(x = 0; x < Nparticles; x++){ xe += arrayX[x] * weights[x]; ye += arrayY[x] * weights[x]; } long long move_time = get_time(); //printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time)); printf("%lf\n", xe); //XE printf("%lf\n", ye); //YE double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) ); printf("%lf\n", distance); //display(hold off for now) //pause(hold off for now) //resampling CDF[0] = weights[0]; for(x = 1; x < Nparticles; x++){ CDF[x] = weights[x] + CDF[x-1]; } long long cum_sum = get_time(); //printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum)); double u1 = (1/((double)(Nparticles)))*randu(seed, 0); for(x = 0; x < Nparticles; x++){ u[x] = u1 + x/((double)(Nparticles)); } long long u_time = get_time(); //printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time)); long long start_copy = get_time(); //CUDA memory copying from CPU memory to GPU memory cudaMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(u_GPU, u, sizeof(double)*Nparticles, cudaMemcpyHostToDevice); long long end_copy = get_time(); //Set number of threads int num_blocks = ceil((double) Nparticles/(double) threads_per_block); //KERNEL FUNCTION CALL kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles); cudaThreadSynchronize(); long long start_copy_back = get_time(); //CUDA memory copying back from GPU to CPU memory cudaMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost); cudaMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost); long long end_copy_back = get_time(); //printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy)); //printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back)); //printf("SENDING BACK FROM GPU TOOK: %lf\n", 
elapsed_time(start_copy_back, end_copy_back)); long long xyj_time = get_time(); //printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time)); for(x = 0; x < Nparticles; x++){ //reassign arrayX and arrayY arrayX[x] = xj[x]; arrayY[x] = yj[x]; weights[x] = 1/((double)(Nparticles)); } long long reset = get_time(); //printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset)); } //CUDA freeing of memory cudaFree(u_GPU); cudaFree(CDF_GPU); cudaFree(yj_GPU); cudaFree(xj_GPU); cudaFree(arrayY_GPU); cudaFree(arrayX_GPU); //free memory free(disk); free(objxy); free(weights); free(likelihood); free(arrayX); free(arrayY); free(xj); free(yj); free(CDF); free(u); free(ind); } int main(int argc, char * argv[]){ char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>"; //check number of arguments if(argc != 9) { printf("%s\n", usage); return 0; } //check args deliminators if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) { printf( "%s\n",usage ); return 0; } int IszX, IszY, Nfr, Nparticles; //converting a string to a integer if( sscanf( argv[2], "%d", &IszX ) == EOF ) { printf("ERROR: dimX input is incorrect"); return 0; } if( IszX <= 0 ) { printf("dimX must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[4], "%d", &IszY ) == EOF ) { printf("ERROR: dimY input is incorrect"); return 0; } if( IszY <= 0 ) { printf("dimY must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[6], "%d", &Nfr ) == EOF ) { printf("ERROR: Number of frames input is incorrect"); return 0; } if( Nfr <= 0 ) { printf("number of frames must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) { printf("ERROR: Number of particles input is incorrect"); return 0; } if( Nparticles <= 0 ) { printf("Number of particles must be > 0\n"); return 0; } //establish seed int * seed = (int *)malloc(sizeof(int)*Nparticles); int i; for(i = 0; i < Nparticles; i++) //TAN: use the same seed seed[i] = i;//time(0)*i; //malloc matrix int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); long long start = get_time(); //call video sequence videoSequence(I, IszX, IszY, Nfr, seed); long long endVideoSequence = get_time(); //printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence)); //call particle filter particleFilter(I, IszX, IszY, Nfr, seed, Nparticles); long long endParticleFilter = get_time(); //printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter)); //printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter)); free(seed); free(I); return 0; }
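/*
 * A minimal sketch, not part of the original benchmark above: the resampling
 * kernel() scans the CDF linearly for every particle, even though a
 * device-side binary search (findIndexBin) is already defined in this file
 * and left unused. The variant below keeps kernel()'s argument layout and
 * simply swaps in that search; the name kernel_binsearch is hypothetical.
 */
__global__ void kernel_binsearch(double * arrayX, double * arrayY, double * CDF,
                                 double * u, double * xj, double * yj, int Nparticles){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if(i < Nparticles){
        // findIndexBin can return -1 when its bisection converges without a
        // match; fall back to the last particle, as the linear version does.
        int index = findIndexBin(CDF, 0, Nparticles, u[i]);
        if(index == -1){
            index = Nparticles - 1;
        }
        xj[i] = arrayX[index];
        yj[i] = arrayY[index];
    }
}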
466eeac16c406ca532eacf332947a34cc5e27a79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iomanip> #include <iostream> #include "../dtime.hpp" #include "../gpu_error.cuh" #include "../metrics.cuh" using namespace std; __global__ void initKernel(double* A, size_t N) { size_t tidx = blockDim.x * blockIdx.x + threadIdx.x; for (int idx = tidx; idx < N; idx += blockDim.x * gridDim.x) { A[idx] = 3.0; } } __global__ void heat_kernel(double* A, double* B, unsigned int* volatile tags, int width, int height) { int tidx = blockDim.x * blockIdx.x + threadIdx.x; int iters = 1000; double* dst = A; double* src = B; #pragma unroll(1) for (int i = 0; i < iters; i++) { // for (int idx = tidx + width; idx < width * (height - 2); // idx += gridDim.x * blockDim.x) { int x = tidx % width; int y = tidx / width; if (y > 0 && y < height - 1 && x > 0 && y < width) dst[tidx] = 0.25 * (src[tidx + width] + src[tidx - width] + // src[tidx + height] + src[tidx - height]); //} double* temp = dst; dst = src; src = temp; unsigned int old = atomicInc(tags, gridDim.x * blockDim.x - 1); __threadfence(); while (true) { unsigned int val = tags[0]; if (val == 0) break; } // if (tidx == 0) printf("\n"); // if (threadIdx.x == 0) printf("Iter: %d %d\n", i, blockIdx.x); atomicInc(tags + 1, gridDim.x * blockDim.x - 1); __threadfence(); while (true) { unsigned int val = tags[1]; // atomicCAS(tags + 1, 0, 0); if (val == 0) break; } } } int main(int argc, char** argv) { int width = 128; int height = 128; size_t sizeA = width * height; double* dA; double* dB; unsigned int* dTag; unsigned int val[] = {0, 0}; GPU_ERROR(hipMalloc(&dTag, 2 * sizeof(unsigned int))); GPU_ERROR(hipMalloc(&dA, sizeof(double) * sizeA)); GPU_ERROR(hipMalloc(&dB, sizeof(double) * sizeA)); GPU_ERROR( hipMemcpy(dTag, &val, 2 * sizeof(unsigned int), hipMemcpyDefault)); hipLaunchKernelGGL(( initKernel), dim3(52), dim3(256), 0, 0, dA, sizeA); hipLaunchKernelGGL(( initKernel), dim3(52), dim3(256), 0, 0, dB, sizeA); hipDeviceProp_t prop; int deviceId; GPU_ERROR(hipGetDevice(&deviceId)); GPU_ERROR(hipGetDeviceProperties(&prop, deviceId)); std::string deviceName = prop.name; int smCount = prop.multiProcessorCount; int blockSize = 256; int maxActiveBlocks = 0; GPU_ERROR(hipOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks, heat_kernel, blockSize, 0)); cout << smCount * maxActiveBlocks << " " << blockSize << "\n"; double t1 = dtime(); for (int i = 0; i < 100; i++) { hipLaunchKernelGGL(( heat_kernel), dim3(smCount * maxActiveBlocks), dim3(blockSize), 0, 0, dA, dB, dTag, width, height); GPU_ERROR(hipDeviceSynchronize()); cout << i << "\n"; } double t2 = dtime(); cout << t2 - t1 << "\n"; }
466eeac16c406ca532eacf332947a34cc5e27a79.cu
#include <iomanip> #include <iostream> #include "../dtime.hpp" #include "../gpu_error.cuh" #include "../metrics.cuh" using namespace std; __global__ void initKernel(double* A, size_t N) { size_t tidx = blockDim.x * blockIdx.x + threadIdx.x; for (int idx = tidx; idx < N; idx += blockDim.x * gridDim.x) { A[idx] = 3.0; } } __global__ void heat_kernel(double* A, double* B, unsigned int* volatile tags, int width, int height) { int tidx = blockDim.x * blockIdx.x + threadIdx.x; int iters = 1000; double* dst = A; double* src = B; #pragma unroll(1) for (int i = 0; i < iters; i++) { // for (int idx = tidx + width; idx < width * (height - 2); // idx += gridDim.x * blockDim.x) { int x = tidx % width; int y = tidx / width; if (y > 0 && y < height - 1 && x > 0 && y < width) dst[tidx] = 0.25 * (src[tidx + width] + src[tidx - width] + // src[tidx + height] + src[tidx - height]); //} double* temp = dst; dst = src; src = temp; unsigned int old = atomicInc(tags, gridDim.x * blockDim.x - 1); __threadfence(); while (true) { unsigned int val = tags[0]; if (val == 0) break; } // if (tidx == 0) printf("\n"); // if (threadIdx.x == 0) printf("Iter: %d %d\n", i, blockIdx.x); atomicInc(tags + 1, gridDim.x * blockDim.x - 1); __threadfence(); while (true) { unsigned int val = tags[1]; // atomicCAS(tags + 1, 0, 0); if (val == 0) break; } } } int main(int argc, char** argv) { int width = 128; int height = 128; size_t sizeA = width * height; double* dA; double* dB; unsigned int* dTag; unsigned int val[] = {0, 0}; GPU_ERROR(cudaMalloc(&dTag, 2 * sizeof(unsigned int))); GPU_ERROR(cudaMalloc(&dA, sizeof(double) * sizeA)); GPU_ERROR(cudaMalloc(&dB, sizeof(double) * sizeA)); GPU_ERROR( cudaMemcpy(dTag, &val, 2 * sizeof(unsigned int), cudaMemcpyDefault)); initKernel<<<52, 256>>>(dA, sizeA); initKernel<<<52, 256>>>(dB, sizeA); cudaDeviceProp prop; int deviceId; GPU_ERROR(cudaGetDevice(&deviceId)); GPU_ERROR(cudaGetDeviceProperties(&prop, deviceId)); std::string deviceName = prop.name; int smCount = prop.multiProcessorCount; int blockSize = 256; int maxActiveBlocks = 0; GPU_ERROR(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks, heat_kernel, blockSize, 0)); cout << smCount * maxActiveBlocks << " " << blockSize << "\n"; double t1 = dtime(); for (int i = 0; i < 100; i++) { heat_kernel<<<smCount * maxActiveBlocks, blockSize>>>(dA, dB, dTag, width, height); GPU_ERROR(cudaDeviceSynchronize()); cout << i << "\n"; } double t2 = dtime(); cout << t2 - t1 << "\n"; }
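/*
 * A minimal sketch, not taken from the file above: the two spin loops in
 * heat_kernel implement a grid-wide barrier out of atomicInc. atomicInc(p,
 * limit) returns the old value of *p and wraps *p back to 0 once it reaches
 * limit, so with limit = gridDim.x * blockDim.x - 1 the last thread to
 * arrive resets the counter and releases every thread spinning on "tag == 0".
 * Two counters (tags[0] and tags[1]) are alternated so a fast thread cannot
 * bump the counter of a phase that slower threads are still waiting on.
 * The helper below (grid_barrier is a hypothetical name) only factors that
 * idiom out; like the original it assumes every launched thread reaches the
 * barrier and that the whole grid is resident at once, which is why the host
 * code sizes the grid from cudaOccupancyMaxActiveBlocksPerMultiprocessor.
 */
__device__ void grid_barrier(unsigned int* tag, unsigned int nthreads) {
  atomicInc(tag, nthreads - 1);  // the last arrival wraps the counter back to 0
  __threadfence();               // make prior writes visible before waiting
  while (*((volatile unsigned int*)tag) != 0) {
    // spin until the last thread has wrapped the counter to 0
  }
}
// heat_kernel would then alternate grid_barrier(tags, gridDim.x * blockDim.x)
// and grid_barrier(tags + 1, gridDim.x * blockDim.x) between iterations.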
0709cfb9566bd3c8303802d96bdbb53c27508a30.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient_sm.h" namespace StreamCompaction { namespace EfficientSM { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernEfficientScan(int N, int *odata, int *idata){ extern __shared__ int tmp[]; int index = threadIdx.x; if (index >= N) return; int offset = 1; tmp[2 * index] = idata[2 * index]; tmp[2 * index + 1] = idata[2 * index + 1]; // up sweep for (int d = (N >> 1); d > 0; d >>= 1){ __syncthreads(); if (index < d) tmp[offset * (2 * index + 2) - 1] += tmp[offset * (2 * index + 1) - 1]; offset <<= 1; } // clear last digit if (index == 0) tmp[N - 1] = 0; // down sweep for (int d = 1; d < N; d <<= 1){ offset >>= 1; __syncthreads(); if (index < d){ int t = tmp[offset * (2 * index + 1) - 1]; tmp[offset * (2 * index + 1) - 1] = tmp[offset * (2 * index + 2) - 1]; tmp[offset * (2 * index + 2) - 1] += t; } } __syncthreads(); odata[2 * index] = tmp[2 * index]; odata[2 * index + 1] = tmp[2 * index + 1]; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { int N = 1 << ilog2ceil(n); dim3 fullBlockPerGrid((N + blockSize - 1) / blockSize); int* dev_in, *dev_out; hipMalloc((void**) &dev_in, N * sizeof(int)); checkCUDAError("hipMalloc dev_in failed"); hipMalloc((void**) &dev_out, N * sizeof(int)); checkCUDAError("hipMalloc dev_out failed"); hipMemset(dev_out, 0, sizeof(int) * N); checkCUDAError("cuda Memset failed"); hipMemcpy(dev_in, idata, n * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("hipMemcpy HostToDevice failed"); timer().startGpuTimer(); hipLaunchKernelGGL(( kernEfficientScan) , dim3(fullBlockPerGrid), dim3(blockSize), 2 * N * sizeof(int) , 0, N, dev_out, dev_in); checkCUDAError("kernNaiveScan dev_in failed"); timer().endGpuTimer(); hipMemcpy(odata, dev_out, n * sizeof(int), hipMemcpyDeviceToHost); checkCUDAError("hipMemcpy DeviceToHost failed"); hipFree(dev_in); hipFree(dev_out); } } }
0709cfb9566bd3c8303802d96bdbb53c27508a30.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient_sm.h" namespace StreamCompaction { namespace EfficientSM { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernEfficientScan(int N, int *odata, int *idata){ extern __shared__ int tmp[]; int index = threadIdx.x; if (index >= N) return; int offset = 1; tmp[2 * index] = idata[2 * index]; tmp[2 * index + 1] = idata[2 * index + 1]; // up sweep for (int d = (N >> 1); d > 0; d >>= 1){ __syncthreads(); if (index < d) tmp[offset * (2 * index + 2) - 1] += tmp[offset * (2 * index + 1) - 1]; offset <<= 1; } // clear last digit if (index == 0) tmp[N - 1] = 0; // down sweep for (int d = 1; d < N; d <<= 1){ offset >>= 1; __syncthreads(); if (index < d){ int t = tmp[offset * (2 * index + 1) - 1]; tmp[offset * (2 * index + 1) - 1] = tmp[offset * (2 * index + 2) - 1]; tmp[offset * (2 * index + 2) - 1] += t; } } __syncthreads(); odata[2 * index] = tmp[2 * index]; odata[2 * index + 1] = tmp[2 * index + 1]; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { int N = 1 << ilog2ceil(n); dim3 fullBlockPerGrid((N + blockSize - 1) / blockSize); int* dev_in, *dev_out; cudaMalloc((void**) &dev_in, N * sizeof(int)); checkCUDAError("cudaMalloc dev_in failed"); cudaMalloc((void**) &dev_out, N * sizeof(int)); checkCUDAError("cudaMalloc dev_out failed"); cudaMemset(dev_out, 0, sizeof(int) * N); checkCUDAError("cuda Memset failed"); cudaMemcpy(dev_in, idata, n * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("cudaMemcpy HostToDevice failed"); timer().startGpuTimer(); kernEfficientScan <<< fullBlockPerGrid, blockSize, 2 * N * sizeof(int) >>> (N, dev_out, dev_in); checkCUDAError("kernNaiveScan dev_in failed"); timer().endGpuTimer(); cudaMemcpy(odata, dev_out, n * sizeof(int), cudaMemcpyDeviceToHost); checkCUDAError("cudaMemcpy DeviceToHost failed"); cudaFree(dev_in); cudaFree(dev_out); } } }
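/*
 * A hedged worked example, not present in the original source above:
 * kernEfficientScan is the Blelloch work-efficient exclusive scan computed in
 * one block's shared memory (each thread owns two elements). For
 * idata = {3,1,7,0,4,1,6,3} and N = 8 the shared buffer evolves as:
 *
 *   up-sweep   d=4 (offset 1): {3,4,7,7,4,5,6,9}
 *              d=2 (offset 2): {3,4,7,11,4,5,6,14}
 *              d=1 (offset 4): {3,4,7,11,4,5,6,25}    // tmp[N-1] = total
 *   clear last element:        {3,4,7,11,4,5,6,0}
 *   down-sweep d=1 (offset 4): {3,4,7,0,4,5,6,11}
 *              d=2 (offset 2): {3,0,7,4,4,11,6,16}
 *              d=4 (offset 1): {0,3,4,11,11,15,16,22} // exclusive prefix sums
 *
 * Because the kernel indexes shared memory with threadIdx.x only, inputs
 * larger than 2 * blockSize would need an extra pass that scans per-block
 * totals; the sketch below (scan_usage_sketch is a hypothetical name) shows
 * just the single-block case.
 */
void scan_usage_sketch() {
    int in[8]  = {3, 1, 7, 0, 4, 1, 6, 3};
    int out[8] = {0};
    StreamCompaction::EfficientSM::scan(8, out, in);
    // out now holds {0, 3, 4, 11, 11, 15, 16, 22}
}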
54ed525b8ce1dd96e0092507d64e2e3abc547da0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file has been adapted from FasterTransformer file: // https://github.com/NVIDIA/FasterTransformer/blob/v4.0/fastertransformer/cuda/masked_multihead_attention.cu // We add License in the head. #include <hip/hip_fp16.h> #include <float.h> #include <hipcub/hipcub.hpp> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/fused/attention_layer_norm.h" #include "paddle/fluid/operators/fused/attn_gemm.h" #include "paddle/fluid/operators/fused/fmha_ref.h" #include "paddle/fluid/operators/fused/fused_dropout_helper.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/phi/api/include/tensor.h" #include "paddle/phi/kernels/funcs/math_function.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/distributed/collective/ProcessGroup.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { using Tensor = framework::Tensor; // for debug // #define _DEBUG_FUSED_MULTI_TRANSFORMER template <typename T> static void AllReduce(framework::Tensor &tensor, // NOLINT const int ring_id, const phi::GPUContext &ctx) { if (ring_id == -1) return; #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(ring_id)) { paddle::distributed::ProcessGroup *pg = map->get(ring_id); std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(tensor); out_tensor.push_back(tensor); paddle::distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { auto dtype = platform::ToNCCLDataType( framework::TransToProtoVarType(tensor.dtype())); int64_t numel = tensor.numel(); const void *sendbuff = tensor.data<T>(); auto place = ctx.GetPlace(); void *recvbuff = tensor.mutable_data<T>(place); auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place); auto stream = ctx.stream(); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream)); } #else PADDLE_THROW(platform::errors::Unimplemented( "PaddlePaddle should compile with NCCL or RCCL when used tensor model " "parallel op.")); #endif } namespace { namespace plat = paddle::platform; using float16 = plat::float16; #define MMHA_USE_FP32_ACUM_FOR_LOGITS #define MMHA_USE_FP32_ACUM_FOR_OUT template <typename T> struct Masked_multihead_attention_params { // output buffer, [B, 1(seq_len), num_head * dim_head] T *out; // qkv_out, [B, 1(seq_len), 3, num_head * dim_head] 
const T *qkv; // bias, [3, num_head, dim_head] const T *qkv_bias; // TODO(wangxi): optimize with input_lengths and max_input_len? // [bsz, 1, 1, time_step(cache_seq_length)+1] const T *attn_mask; // [2, B, num_head, max_seq_len(valid cache_seq_len), dim_head] // k [B, num_head, dim_head/x, max_seq_len, x], that is `seq_len` first // v [B, num_head, max_seq_len, dim_head] T *cache_kv; int batch_size; int num_head; int timestep; // cache_seq_length int max_seq_length; // 1.f / sqrt(Dh) float inv_sqrt_dh; }; struct Float8_ { float2 x; float2 y; float2 z; float2 w; }; // clang-format off template <typename T, int Dh> struct Qk_vec_ {}; template <> struct Qk_vec_<float, 32> { using Type = float; }; template <> struct Qk_vec_<float, 64> { using Type = float2; }; template <> struct Qk_vec_<float, 128> { using Type = float4; }; template <> struct Qk_vec_<float, 256> { using Type = float4; }; template <> struct Qk_vec_<float16, 32> { using Type = uint32_t; }; template <> struct Qk_vec_<float16, 64> { using Type = uint32_t; }; template <> struct Qk_vec_<float16, 128> { using Type = uint2; }; template <> struct Qk_vec_<float16, 256> { using Type = uint4; }; template <typename T, int THREADS_PER_KEY> struct K_vec_ {}; template <> struct K_vec_<float, 4> { using Type = float; }; template <> struct K_vec_<float, 2> { using Type = float2; }; template <> struct K_vec_<float, 1> { using Type = float4; }; template <> struct K_vec_<float16, 4> { using Type = uint32_t; }; template <> struct K_vec_<float16, 2> { using Type = uint2; }; template <> struct K_vec_<float16, 1> { using Type = uint4; }; template <typename T, int V_VEC_SIZE> struct V_vec_ {}; template <> struct V_vec_<float, 1> { using Type = float; }; template <> struct V_vec_<float, 2> { using Type = float2; }; template <> struct V_vec_<float, 4> { using Type = float4; }; template <> struct V_vec_<float16, 2> { using Type = uint32_t; }; template <> struct V_vec_<float16, 4> { using Type = uint2; }; template <> struct V_vec_<float16, 8> { using Type = uint4; }; #ifdef MMHA_USE_FP32_ACUM_FOR_OUT template <typename T> struct V_vec_acum_fp32_ {}; // template <> struct V_vec_acum_fp32_<float> { using Type = float; }; // template <> struct V_vec_acum_fp32_<float2> { using Type = float2; }; template <> struct V_vec_acum_fp32_<float4> { using Type = float4; }; // template <> struct V_vec_acum_fp32_<uint32_t> { using Type = float2; }; // template <> struct V_vec_acum_fp32_<uint2 > { using Type = Float4_; }; template <> struct V_vec_acum_fp32_<uint4> { using Type = Float8_; }; #endif // clang-format on inline __device__ float half_to_float(uint16_t h) { float f; asm volatile("cvt.f32.f16 %0, %1;\n" : "=f"(f) : "h"(h)); return f; } inline __device__ float2 half2_to_float2(uint32_t v) { uint16_t lo, hi; asm volatile("mov.b32 {%0, %1}, %2;\n" : "=h"(lo), "=h"(hi) : "r"(v)); return make_float2(half_to_float(lo), half_to_float(hi)); } inline __device__ uint32_t float2_to_half2(float2 f) { union { uint32_t u32; uint16_t u16[2]; } tmp; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 asm volatile("cvt.rn.f16x2.f32 %0, %1, %2;\n" : "=r"(tmp.u32) : "f"(f.y), "f"(f.x)); #else asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[0]) : "f"(f.x)); asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[1]) : "f"(f.y)); #endif return tmp.u32; } inline __device__ float add(float a, float b) { return a + b; } inline __device__ float2 add(float2 a, float2 b) { float2 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); return c; } inline __device__ float4 add(float4 a, float4 b) { 
float4 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); c.z = add(a.z, b.z); c.w = add(a.w, b.w); return c; } inline __device__ uint16_t add(uint16_t a, uint16_t b) { uint16_t c; asm volatile("add.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b)); return c; } inline __device__ uint32_t add(uint32_t a, uint32_t b) { uint32_t c; asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b)); return c; } inline __device__ uint2 add(uint2 a, uint2 b) { uint2 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); return c; } inline __device__ uint4 add(uint4 a, uint4 b) { uint4 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); c.z = add(a.z, b.z); c.w = add(a.w, b.w); return c; } inline __device__ float2 add(uint32_t a, float2 fb) { float2 fa = half2_to_float2(a); return add(fa, fb); } inline __device__ Float8_ add(uint4 a, Float8_ fb) { Float8_ fc; fc.x = add(a.x, fb.x); fc.y = add(a.y, fb.y); fc.z = add(a.z, fb.z); fc.w = add(a.w, fb.w); return fc; } template <typename Acc, typename A, typename B> inline __device__ Acc mul(A a, B b); template <> inline __device__ float mul<float, float>(float a, float b) { return a * b; } template <> inline __device__ float2 mul(float2 a, float2 b) { float2 c; c.x = a.x * b.x; c.y = a.y * b.y; return c; } template <> inline __device__ float4 mul(float4 a, float4 b) { float4 c; c.x = a.x * b.x; c.y = a.y * b.y; c.z = a.z * b.z; c.w = a.w * b.w; return c; } template <> inline __device__ uint16_t mul(uint16_t a, uint16_t b) { uint16_t c; asm volatile("mul.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b)); return c; } template <> inline __device__ uint32_t mul(uint32_t a, uint32_t b) { uint32_t c; asm volatile("mul.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b)); return c; } template <> inline __device__ uint2 mul(uint2 a, uint2 b) { uint2 c; c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x); c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y); return c; } template <> inline __device__ uint4 mul(uint4 a, uint4 b) { uint4 c; c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x); c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y); c.z = mul<uint32_t, uint32_t, uint32_t>(a.z, b.z); c.w = mul<uint32_t, uint32_t, uint32_t>(a.w, b.w); return c; } template <> inline __device__ uint32_t mul(uint32_t a, float b) { float2 tmp = half2_to_float2(a); float2 tmp_res; tmp_res.x = tmp.x * b; tmp_res.y = tmp.y * b; uint32_t res = float2_to_half2(tmp_res); return res; } template <> inline __device__ uint2 mul(uint2 a, float b) { uint2 res; res.x = mul<uint32_t, uint32_t, float>(a.x, b); res.y = mul<uint32_t, uint32_t, float>(a.y, b); return res; } template <> inline __device__ uint4 mul(uint4 a, float b) { uint4 res; res.x = mul<uint32_t, uint32_t, float>(a.x, b); res.y = mul<uint32_t, uint32_t, float>(a.y, b); res.z = mul<uint32_t, uint32_t, float>(a.z, b); res.w = mul<uint32_t, uint32_t, float>(a.w, b); return res; } template <> inline __device__ float2 mul(float2 a, float b) { float2 res; res.x = a.x * b; res.y = a.y * b; return res; } template <> inline __device__ float4 mul(float4 a, float b) { float4 res; res.x = a.x * b; res.y = a.y * b; res.z = a.z * b; res.w = a.w * b; return res; } inline __device__ float sum(float v) { return v; } inline __device__ float sum(float2 v) { return v.x + v.y; } inline __device__ float sum(float4 v) { return v.x + v.y + v.z + v.w; } inline __device__ float sum(uint16_t v) { return half_to_float(v); } inline __device__ float sum(uint32_t v) { float2 tmp = half2_to_float2(v); return tmp.x + tmp.y; } inline __device__ float sum(uint2 v) { uint32_t c = add(v.x, v.y); 
return sum(c); } inline __device__ float sum(uint4 v) { uint32_t c = add(v.x, v.y); c = add(c, v.z); c = add(c, v.w); return sum(c); } template <typename T> inline __device__ float dot(T a, T b) { return sum(mul<T, T, T>(a, b)); } template <typename A, typename T> inline __device__ float dot(T a, T b) { return sum(mul<A, T, T>(a, b)); } inline __device__ constexpr uint32_t shfl_mask(int threads) { return threads == 32 ? uint32_t(-1) : (1u << threads) - 1u; } template <typename T> inline __device__ __host__ T div_up(T m, T n) { return (m + n - 1) / n; } inline __device__ float fma(float a, float b, float c) { return a * b + c; } inline __device__ float2 fma(float2 a, float2 b, float2 c) { float2 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); return d; } inline __device__ float4 fma(float4 a, float4 b, float4 c) { float4 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); d.z = fma(a.z, b.z, c.z); d.w = fma(a.w, b.w, c.w); return d; } inline __device__ uint32_t fma(uint32_t a, uint32_t b, uint32_t c) { uint32_t d; asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(d) : "r"(a), "r"(b), "r"(c)); return d; } inline __device__ uint2 fma(uint2 a, uint2 b, uint2 c) { uint2 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); return d; } inline __device__ uint4 fma(uint4 a, uint4 b, uint4 c) { uint4 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); d.z = fma(a.z, b.z, c.z); d.w = fma(a.w, b.w, c.w); return d; } inline __device__ float2 fma(float a, float2 b, float2 c) { float2 d; d.x = fma(a, b.x, c.x); d.y = fma(a, b.y, c.y); return d; } inline __device__ float4 fma(float a, float4 b, float4 c) { float4 d; d.x = fma(a, b.x, c.x); d.y = fma(a, b.y, c.y); d.z = fma(a, b.z, c.z); d.w = fma(a, b.w, c.w); return d; } inline __device__ Float8_ fma(float a, Float8_ b, Float8_ c) { Float8_ d; d.x = fma(a, b.x, c.x); d.y = fma(a, b.y, c.y); d.z = fma(a, b.z, c.z); d.w = fma(a, b.w, c.w); return d; } inline __device__ uint32_t h0_h0(uint16_t a) { uint32_t b; asm volatile("mov.b32 %0, {%1, %1};" : "=r"(b) : "h"(a)); return b; } inline __device__ uint32_t fma(uint16_t a, uint32_t b, uint32_t c) { return fma(h0_h0(a), b, c); } inline __device__ uint2 fma(uint16_t a, uint2 b, uint2 c) { uint32_t s = h0_h0(a); uint2 d; d.x = fma(s, b.x, c.x); d.y = fma(s, b.y, c.y); return d; } inline __device__ uint4 fma(uint16_t a, uint4 b, uint4 c) { uint32_t s = h0_h0(a); uint4 d; d.x = fma(s, b.x, c.x); d.y = fma(s, b.y, c.y); d.z = fma(s, b.z, c.z); d.w = fma(s, b.w, c.w); return d; } inline __device__ float cast_to_float(float u) { return u; } inline __device__ float2 cast_to_float(float2 u) { return u; } inline __device__ float4 cast_to_float(float4 u) { return u; } inline __device__ Float8_ cast_to_float(uint4 u) { Float8_ tmp; tmp.x = half2_to_float2(u.x); tmp.y = half2_to_float2(u.y); tmp.z = half2_to_float2(u.z); tmp.w = half2_to_float2(u.w); return tmp; } template <int THREADS_PER_KEY, typename K_vec, int N> inline __device__ float qk_dot_(const K_vec (&q)[N], const K_vec (&k)[N], float inv_sqrt_dh) { K_vec inv_q = mul<K_vec, K_vec, float>(q[0], inv_sqrt_dh); K_vec qk_vec = mul<K_vec, K_vec, K_vec>(inv_q, k[0]); #pragma unroll for (int ii = 1; ii < N; ++ii) { inv_q = mul<K_vec, K_vec, float>(q[ii], inv_sqrt_dh); qk_vec = fma(inv_q, k[ii], qk_vec); } float qk = sum(qk_vec); #pragma unroll for (int mask = THREADS_PER_KEY / 2; mask >= 1; mask /= 2) { qk += __shfl_xor_sync(uint32_t(-1), qk, mask); } return qk; } template <typename T, int THREADS_PER_KEY> struct Qk_dot { template <typename K_vec, int 
N> static inline __device__ float dot(const K_vec (&q)[N], const K_vec (&k)[N], float inv_sqrt_dh) { return qk_dot_<THREADS_PER_KEY>(q, k, inv_sqrt_dh); } }; template <int WARPS_PER_BLOCK, int WARP_SIZE = 32> inline __device__ float block_sum(float *red_smem, float sum) { int warp = threadIdx.x / WARP_SIZE; int lane = threadIdx.x % WARP_SIZE; #pragma unroll for (int mask = WARP_SIZE / 2; mask >= 1; mask /= 2) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } if (lane == 0) { red_smem[warp] = sum; } __syncthreads(); if (lane < WARPS_PER_BLOCK) { sum = red_smem[lane]; } #pragma unroll for (int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } return __shfl_sync(uint32_t(-1), sum, 0); } inline __device__ void convert_from_float(float &dst, float src) { // NOLINT dst = src; } inline __device__ void convert_from_float(float4 &dst, float4 src) { // NOLINT dst = src; } inline __device__ void convert_from_float(plat::float16 &dst, // NOLINT float src) { dst = static_cast<plat::float16>(src); } inline __device__ void convert_from_float(uint4 &dst, Float8_ src) { // NOLINT dst.x = float2_to_half2(src.x); dst.y = float2_to_half2(src.y); dst.z = float2_to_half2(src.z); dst.w = float2_to_half2(src.w); } inline __device__ void zero(uint16_t &dst) { dst = uint16_t(0); } // NOLINT template <typename T> inline __device__ void zero(T &dst) { // NOLINT constexpr int WORDS = sizeof(T) / 4; union { T raw; uint32_t words[WORDS]; } tmp; #pragma unroll for (int ii = 0; ii < WORDS; ++ii) { tmp.words[ii] = 0u; } dst = tmp.raw; } template <typename T, int Dh, int Dh_MAX, int THREADS_PER_KEY, int THREADS_PER_VALUE, int THREADS_PER_BLOCK> __global__ void masked_multihead_attention_kernel( Masked_multihead_attention_params<T> params) { #if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) static_assert(Dh_MAX % THREADS_PER_KEY == 0, ""); static_assert(Dh_MAX % THREADS_PER_VALUE == 0, ""); constexpr int WARP_SIZE = 32; constexpr int WARPS_PER_BLOCK = THREADS_PER_BLOCK / WARP_SIZE; extern __shared__ char smem_[]; float *qk_smem = reinterpret_cast<float *>(smem_); char *logits_smem_ = smem_; // fp32 accum for logits float *logits_smem = reinterpret_cast<float *>(logits_smem_); T *out_smem = reinterpret_cast<T *>(smem_); __shared__ float red_smem[WARPS_PER_BLOCK * 2]; using Qk_vec = typename Qk_vec_<T, Dh_MAX>::Type; __shared__ __align__(sizeof(Qk_vec)) T q_smem[Dh_MAX]; const int bi = blockIdx.y; const int hi = blockIdx.x; const int bhi = bi * params.num_head + hi; const int tid = threadIdx.x; float qk_max = -FLT_MAX; float qk = 0; // qkv [B, S=1, 3, num_head, head_dim] int qkv_base_offset = bi * 3 * params.num_head * Dh + hi * Dh; constexpr int QK_VEC_SIZE = sizeof(Qk_vec) / sizeof(T); static_assert(Dh_MAX % QK_VEC_SIZE == 0, ""); // Use block reduction if needed // static_assert(Dh_MAX / QK_VEC_SIZE <= WARP_SIZE, ""); constexpr int QK_VECS_PER_WARP = Dh_MAX / QK_VEC_SIZE; // cache_k, [B, num_head, head_dim / x, max_seq_len, x] // x == 4/8 for FP32/FP16, 128bit, 16Byte constexpr int QK_ELTS_IN_16B = 16 / sizeof(T); constexpr int QK_VECS_IN_16B = 16 / sizeof(Qk_vec); const T *q_base = params.qkv; const T *k_base = params.qkv + params.num_head * Dh; const T *q_bias_base = params.qkv_bias; const T *k_bias_base = params.qkv_bias + params.num_head * Dh; if (tid < QK_VECS_PER_WARP) { int qk_offset = qkv_base_offset + tid * QK_VEC_SIZE; int qk_bias_offset = hi * Dh + tid * QK_VEC_SIZE; Qk_vec q; zero(q); q = (Dh == Dh_MAX || tid * QK_VEC_SIZE < Dh) ? 
*reinterpret_cast<const Qk_vec *>(&q_base[qk_offset]) : q; Qk_vec k; zero(k); k = (Dh == Dh_MAX || tid * QK_VEC_SIZE < Dh) ? *reinterpret_cast<const Qk_vec *>(&k_base[qk_offset]) : k; Qk_vec q_bias; zero(q_bias); q_bias = (Dh == Dh_MAX || tid * QK_VEC_SIZE < Dh) ? *reinterpret_cast<const Qk_vec *>(&q_bias_base[qk_bias_offset]) : q_bias; Qk_vec k_bias; zero(k_bias); k_bias = (Dh == Dh_MAX || tid * QK_VEC_SIZE < Dh) ? *reinterpret_cast<const Qk_vec *>(&k_bias_base[qk_bias_offset]) : k_bias; q = add(q, q_bias); // TODO(wangxi): See this https://github.com/microsoft/unilm/issues/510 // we may not require k_bias. k = add(k, k_bias); *reinterpret_cast<Qk_vec *>(&q_smem[tid * QK_VEC_SIZE]) = q; int co = tid / QK_VECS_IN_16B; int ci = (tid % QK_VECS_IN_16B) * QK_VEC_SIZE; int offset = bhi * params.max_seq_length * Dh + co * params.max_seq_length * QK_ELTS_IN_16B + params.timestep * QK_ELTS_IN_16B + ci; if (Dh == Dh_MAX || co < Dh / QK_ELTS_IN_16B) { *reinterpret_cast<Qk_vec *>(&params.cache_kv[offset]) = k; } qk = dot<Qk_vec, Qk_vec>(q, k); if (QK_VECS_PER_WARP <= WARP_SIZE) { #pragma unroll for (int mask = QK_VECS_PER_WARP / 2; mask >= 1; mask /= 2) { qk += __shfl_xor_sync(shfl_mask(QK_VECS_PER_WARP), qk, mask); } } } if (QK_VECS_PER_WARP > WARP_SIZE) { constexpr int WARPS_PER_RED = (QK_VECS_PER_WARP + WARP_SIZE - 1) / WARP_SIZE; qk = block_sum<WARPS_PER_RED>(&red_smem[WARPS_PER_RED], qk); } if (tid == 0) { // NOTE(wangxi): mask must be 0.0 // T mask = params.attn_mask[ // bi * (params.timestep + 1) + params.timestep]; // qk += static_cast<float>(mask); qk *= params.inv_sqrt_dh; qk_max = qk; qk_smem[params.timestep] = qk; } __syncthreads(); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER if (bi == 0 && hi == 0 && tid == 0) { printf("=======q_out=======\n"); for (int i = 0; i < Dh; ++i) printf("%f ", static_cast<float>(q_smem[i])); printf("\n"); } __syncthreads(); #endif using K_vec = typename K_vec_<T, THREADS_PER_KEY>::Type; constexpr int K_VEC_SIZE = sizeof(K_vec) / sizeof(T); static_assert(Dh_MAX % K_VEC_SIZE == 0, ""); constexpr int K_ELTS_PER_THREAD = Dh_MAX / THREADS_PER_KEY; constexpr int K_VECS_PER_THREAD = K_ELTS_PER_THREAD / K_VEC_SIZE; int ko = tid / THREADS_PER_KEY; int ki = (tid % THREADS_PER_KEY) * K_VEC_SIZE; static_assert(Dh_MAX == THREADS_PER_KEY * K_VEC_SIZE * K_VECS_PER_THREAD, ""); K_vec q[K_VECS_PER_THREAD]; #pragma unroll for (int i = 0; i < K_VECS_PER_THREAD; ++i) { q[i] = *reinterpret_cast<const K_vec *>( &q_smem[ki + i * THREADS_PER_KEY * K_VEC_SIZE]); } constexpr int K_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_KEY; constexpr int K_PER_WARP = WARP_SIZE / THREADS_PER_KEY; T *k_cache = &params.cache_kv[bhi * params.max_seq_length * Dh + ki]; int ti_end = div_up(params.timestep, K_PER_WARP) * K_PER_WARP; for (int ti = ko; ti < ti_end; ti += K_PER_ITER) { K_vec k[K_VECS_PER_THREAD]; K_vec k_vec_zero; zero(k_vec_zero); #pragma unroll for (int ii = 0; ii < K_VECS_PER_THREAD; ++ii) { int jj = ii * params.max_seq_length + ti; if (ti < params.timestep) { k[ii] = (Dh == Dh_MAX || jj * QK_ELTS_IN_16B < Dh * params.max_seq_length) ? *reinterpret_cast<const K_vec *>( &k_cache[jj * QK_ELTS_IN_16B]) : k_vec_zero; } } // NOTE(liyurui): We should multiple q with inv_sqrt_dh first, for dot(q, k) // may overflow with FP16 in large model. float qk = Qk_dot<T, THREADS_PER_KEY>::dot(q, k, params.inv_sqrt_dh); // bool is_mask = false; if (ti < params.timestep && tid % THREADS_PER_KEY == 0) { // qk_max = is_mask ? 
qk_max : fmaxf(qk_max, qk); T mask = params.attn_mask[bi * (params.timestep + 1) + ti]; qk += static_cast<float>(mask); qk_max = fmaxf(qk_max, qk); qk_smem[ti] = qk; } } #pragma unroll for (int mask = WARP_SIZE / 2; mask >= THREADS_PER_KEY; mask /= 2) { qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask)); } const int warp = tid / WARP_SIZE; const int lane = tid % WARP_SIZE; if (lane == 0) { red_smem[warp] = qk_max; } __syncthreads(); qk_max = lane < WARPS_PER_BLOCK ? red_smem[lane] : -FLT_MAX; #pragma unroll for (int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2) { qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask)); } qk_max = __shfl_sync(uint32_t(-1), qk_max, 0); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER if (bi == 0 && hi == 0 && tid == 0) { printf("=======qk_out=======\n"); for (int i = 0; i <= params.timestep; ++i) printf("%f ", qk_smem[i]); printf("qk_max=%f\n", qk_max); } __syncthreads(); #endif float sum = 0.f; for (int ti = tid; ti <= params.timestep; ti += THREADS_PER_BLOCK) { // bool is_mask = false; // float logit = is_mask ? 0.f : __expf(qk_smem[ti] - qk_max); float logit = __expf(qk_smem[ti] - qk_max); sum += logit; qk_smem[ti] = logit; } sum = block_sum<WARPS_PER_BLOCK>(&red_smem[WARPS_PER_BLOCK], sum); // FIXME(wangxi): need add 1.e-6f? float inv_sum = __fdividef(1.f, sum + 1.e-6f); for (int ti = tid; ti <= params.timestep; ti += THREADS_PER_BLOCK) { convert_from_float(logits_smem[ti], qk_smem[ti] * inv_sum); } __syncthreads(); constexpr int V_VEC_SIZE = Dh_MAX / THREADS_PER_VALUE; using V_vec = typename V_vec_<T, V_VEC_SIZE>::Type; int vo = tid / THREADS_PER_VALUE; int vi = (tid % THREADS_PER_VALUE) * V_VEC_SIZE; T *v_cache = &params.cache_kv[params.batch_size * params.num_head * params.max_seq_length * Dh + bhi * params.max_seq_length * Dh + vi]; #ifdef MMHA_USE_FP32_ACUM_FOR_OUT using V_vec_acum = typename V_vec_acum_fp32_<V_vec>::Type; #else using V_vec_acum = V_vec; #endif V_vec_acum out; zero(out); constexpr int V_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_VALUE; if (Dh == Dh_MAX || vi < Dh) { for (int ti = vo; ti < params.timestep; ti += V_PER_ITER) { V_vec v = *reinterpret_cast<const V_vec *>(&v_cache[ti * Dh]); #if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS) float logit = logits_smem[ti]; out = fma(logit, cast_to_float(v), out); #else T logit = logits_smem[ti]; // Update the partial sums. 
out = fma(logit, v, out); #endif } } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER if (bi == 0 && hi == 0 && tid == 0) { printf("======logits_out=====\n"); for (int i = 0; i <= params.timestep; ++i) printf("%f ", logits_smem[i]); printf("\n"); } __syncthreads(); #endif V_vec v_bias; zero(v_bias); if (vo == (params.timestep % V_PER_ITER) && (Dh == Dh_MAX || vi < Dh)) { V_vec v = *reinterpret_cast<const V_vec *>( &params.qkv[2 * params.num_head * Dh + qkv_base_offset + vi]); v_bias = *reinterpret_cast<const V_vec *>( &params.qkv_bias[2 * params.num_head * Dh + hi * Dh + vi]); v = add(v, v_bias); *reinterpret_cast<V_vec *>(&v_cache[params.timestep * Dh]) = v; #if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS) out = fma(logits_smem[params.timestep], cast_to_float(v), out); #else out = fma(logits_smem[params.timestep], v, out); #endif } __syncthreads(); if (Dh == Dh_MAX || vi < Dh) { #pragma unroll for (int active_groups = V_PER_ITER; active_groups >= 2; active_groups /= 2) { int midpoint = active_groups / 2; if (vo >= midpoint && vo < active_groups && (Dh == Dh_MAX || vi < Dh)) { #ifdef MMHA_USE_FP32_ACUM_FOR_OUT convert_from_float( *reinterpret_cast<V_vec *>(&out_smem[(vo - midpoint) * Dh + vi]), out); #else *reinterpret_cast<V_vec *>(&out_smem[(vo - midpoint) * Dh + vi]) = out; #endif } __syncthreads(); if (vo < midpoint && (Dh == Dh_MAX || vi < Dh)) { out = add(*reinterpret_cast<const V_vec *>(&out_smem[vo * Dh + vi]), out); } __syncthreads(); } } if (vo == 0 && (Dh == Dh_MAX || vi < Dh)) { #ifdef MMHA_USE_FP32_ACUM_FOR_OUT convert_from_float(*reinterpret_cast<V_vec *>(&params.out[bhi * Dh + vi]), out); #else *reinterpret_cast<V_vec *>(&params.out[bhi * Dh + vi]) = out; #endif } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER __syncthreads(); if (bi == 0 && hi == 0 && tid == 0) { printf("======fmha_out=====\n"); for (int i = 0; i < Dh; ++i) printf("%f ", static_cast<float>(params.out[i])); printf("\n"); } #endif #else assert(false); #endif } template <typename T> inline size_t smem_size_in_bytes( const Masked_multihead_attention_params<T> &params, int dim_head, int threads_per_value, int threads_per_block) { size_t qk_sz = div_up(params.timestep + 1, 4) * 16; size_t logits_sz = 0; #ifndef MMHA_USE_FP32_ACUM_FOR_LOGITS if (sizeof(T) != 4) { logits_sz = div_up(params.max_seq_length, 4) * 4 * sizeof(T); } #endif size_t softmax_sz = qk_sz + logits_sz; int rows_per_red = threads_per_block / threads_per_value; size_t red_sz = rows_per_red * dim_head * sizeof(T) / 2; return max(softmax_sz, red_sz); } #define MMHA_LAUNCH_KERNEL( \ T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, stream) \ size_t smem_sz = \ smem_size_in_bytes<T>(params, Dh, THDS_PER_VALUE, THDS_PER_BLOCK); \ dim3 grid(params.num_head, params.batch_size); \ hipLaunchKernelGGL(( masked_multihead_attention_kernel<T, \ Dh, \ Dh_MAX, \ THDS_PER_KEY, \ THDS_PER_VALUE, \ THDS_PER_BLOCK>) \ , dim3(grid), dim3(THDS_PER_BLOCK), smem_sz, stream, params) template <typename T, int Dh, int Dh_MAX> void fmha_launch_kernel(const Masked_multihead_attention_params<T> &params, const hipStream_t &stream) { constexpr int THREADS_PER_VALUE = Dh_MAX * sizeof(T) / 16; if (params.timestep < 32) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, stream); } else if (params.timestep < 2048) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, stream); } else { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, stream); } } template <typename T> void fmha(const phi::GPUContext &dev_ctx, const Tensor &qkv_tensor, const Tensor &qkv_bias_tensor, 
const Tensor &src_mask_tensor, Tensor *cache_kv_tensor, Tensor *out_tensor, int batch_size, int max_seq_length, int num_head, int dim_head, int timestep, float inv_sqrt_dh) { Masked_multihead_attention_params<T> params; params.out = out_tensor->data<T>(); params.qkv = qkv_tensor.data<T>(); params.qkv_bias = qkv_bias_tensor.data<T>(); params.attn_mask = src_mask_tensor.data<T>(); params.cache_kv = cache_kv_tensor->data<T>(); params.batch_size = batch_size; params.num_head = num_head; params.timestep = timestep; params.max_seq_length = max_seq_length; params.inv_sqrt_dh = inv_sqrt_dh; switch (dim_head) { case 10: fmha_launch_kernel<T, 10, 32>(params, dev_ctx.stream()); break; case 26: fmha_launch_kernel<T, 26, 32>(params, dev_ctx.stream()); break; case 32: fmha_launch_kernel<T, 32, 32>(params, dev_ctx.stream()); break; case 64: fmha_launch_kernel<T, 64, 64>(params, dev_ctx.stream()); break; case 96: fmha_launch_kernel<T, 96, 128>(params, dev_ctx.stream()); break; case 128: fmha_launch_kernel<T, 128, 128>(params, dev_ctx.stream()); break; case 192: fmha_launch_kernel<T, 192, 256>(params, dev_ctx.stream()); break; default: PADDLE_THROW(platform::errors::Unimplemented( "Dim_head = %d is unsupport!", dim_head)); } } // NOTE: simd with 16Bytes(128bit), float is 4, float16 is 8 constexpr int VEC_16B = 16; template <typename T> __global__ void write_cache_k_kernel(T *cache_k, const T *k, const int num_head, const int dim_head, const int seq_len, const int max_seq_len) { const int bi = blockIdx.y; const int hi = blockIdx.z; constexpr int X_ELEMS = VEC_16B / sizeof(T); // [bsz, num_head, seq_len, dim_head/x, x] auto k_src = reinterpret_cast<const uint4 *>( k + bi * num_head * seq_len * dim_head + hi * seq_len * dim_head); // [bsz, num_head, dim_head/x, max_seq_len, x] auto k_dst = reinterpret_cast<uint4 *>( cache_k + bi * num_head * max_seq_len * dim_head + hi * max_seq_len * dim_head); const int out_idx = blockIdx.x * blockDim.x + threadIdx.x; // vec size int dim_head_div_x = dim_head / X_ELEMS; // FIXME(wangxi): num_head is not need? 
// if (out_idx >= num_head * dim_head_div_x * max_seq_len) return; if (out_idx >= dim_head_div_x * max_seq_len) return; int idx = out_idx; const int k_seq_len_id = idx % max_seq_len; // idx = (idx - k_seq_len_id) / max_seq_len; idx = idx / max_seq_len; const int k_vec_id = idx % dim_head_div_x; if (k_seq_len_id < seq_len) { k_dst[out_idx] = k_src[k_seq_len_id * dim_head_div_x + k_vec_id]; } } template <typename T> __global__ void write_cache_v_kernel(T *cache_v, const T *v, const int num_head, const int dim_head, const int seq_len, const int max_seq_len) { const int bi = blockIdx.y; const int hi = blockIdx.z; // [bsz, num_head, seq_len, dim_head/x, x] auto v_src = reinterpret_cast<const uint4 *>( v + bi * num_head * seq_len * dim_head + hi * seq_len * dim_head); // [bsz, num_head, max_seq_len, dim_head/x, x] auto v_dst = reinterpret_cast<uint4 *>( cache_v + bi * num_head * max_seq_len * dim_head + hi * max_seq_len * dim_head); const int idx = blockIdx.x * blockDim.x + threadIdx.x; constexpr int X_ELEMS = VEC_16B / sizeof(T); const int dim_head_div_x = dim_head / X_ELEMS; if (idx >= dim_head_div_x * seq_len) return; v_dst[idx] = v_src[idx]; } template <typename T> void write_cache_kv(const phi::GPUContext &dev_ctx, T *cache_k, T *cache_v, const T *k, const T *v, const int bsz, const int num_head, const int seq_len, const int max_seq_len, const int dim_head) { constexpr int block_sz = 128; constexpr int x = VEC_16B / sizeof(T); assert(dim_head % x == 0); PADDLE_ENFORCE_EQ( dim_head % x, 0, platform::errors::PreconditionNotMet( "dim_head=%d must be divisible by vec_size=%d", dim_head, x)); int max_size = max_seq_len * dim_head / x; int size = seq_len * dim_head / x; dim3 grid(div_up(max_size, block_sz), bsz, num_head); dim3 grid_v(div_up(size, block_sz), bsz, num_head); // transpose [bsz, num_head, seq_len, dim_head/x, x]-> // [bsz, num_head, dim_head/x, max_seq_len, x] hipLaunchKernelGGL(( write_cache_k_kernel), dim3(grid), dim3(block_sz), 0, dev_ctx.stream(), cache_k, k, num_head, dim_head, seq_len, max_seq_len); // copy [bsz, num_head, seq_len, dim_head/x, x]-> // [bsz, num_head, max_seq_len, dim_head/x, x] hipLaunchKernelGGL(( write_cache_v_kernel), dim3(grid_v), dim3(block_sz), 0, dev_ctx.stream(), cache_v, v, num_head, dim_head, seq_len, max_seq_len); } } // namespace template <typename T> class FusedMultiTransformerOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; auto place = ctx.GetPlace(); auto &dev_ctx = ctx.cuda_device_context(); auto *time_step = ctx.Input<Tensor>("TimeStep"); // 0. input auto *input_x = ctx.Input<Tensor>("X"); const auto input_x_dims = input_x->dims(); int bsz = input_x_dims[0]; int seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int bsz_seq = bsz * seq_len; // 1. layer norm const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); auto ln_scales = ctx.MultiInput<Tensor>("LnScale"); auto ln_biases = ctx.MultiInput<Tensor>("LnBias"); auto ln_compute = AttnLayerNorm<T>(dev_ctx, epsilon, bsz_seq, dim_embed); Tensor ln_mean, ln_var; auto *ln_mean_data = ln_mean.mutable_data<U>({bsz_seq}, place); auto *ln_var_data = ln_var.mutable_data<U>({bsz_seq}, place); // 2. 
qkv // x: qkv's input [batch_size, seq_len, dim_embed] // y: qkv's weight: [3, num_head, dim_head, dim_embed] auto qkv_weights = ctx.MultiInput<Tensor>("QKVW"); auto qkv_biases = ctx.MultiInput<Tensor>("QKVBias"); const bool trans_qkvw = ctx.Attr<bool>("trans_qkvw"); const auto qkv_w_dims = qkv_weights[0]->dims(); int num_head = trans_qkvw ? qkv_w_dims[1] : qkv_w_dims[2]; int dim_head = trans_qkvw ? qkv_w_dims[2] : qkv_w_dims[3]; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; bool compute_bias = qkv_biases.size() > 0 && time_step == nullptr; // (transA, transB, compute_bias) = (false, trans_qkvw, false) auto qkv_compute = AttnMatMul<T>(dev_ctx, false, trans_qkvw, bsz_seq, output_size, input_size, compute_bias); Tensor qkv_out; auto *qkv_out_data = qkv_out.mutable_data<T>({bsz, seq_len, 3, num_head, dim_head}, place); // 3. fmha AttnDropoutParam attn_param( true, "upscale_in_train", 0.0, true, true, 0, nullptr); auto fmha_compute = FMHARef<T>(dev_ctx, bsz, seq_len, num_head, dim_head, attn_param); auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto cache_kvs = ctx.MultiInput<Tensor>("CacheKV"); auto cache_kv_outs = ctx.MultiOutput<Tensor>("CacheKVOut"); // auto *time_step = ctx.Input<Tensor>("TimeStep"); auto out_seq_len = seq_len; if (time_step) { PADDLE_ENFORCE_EQ(time_step->place(), platform::CPUPlace(), platform::errors::PreconditionNotMet( "The place of input(TimeStep) must be CPUPlace.")); // cache_seq_len int time_step_value = time_step->data<int>()[0]; PADDLE_ENFORCE_GT(time_step_value, 0, platform::errors::PreconditionNotMet( "The value of time_step must > 0, but now is %d", time_step_value)); PADDLE_ENFORCE_EQ( seq_len, 1, platform::errors::PreconditionNotMet( "In decode stage, the seq_len of input must be 1, but now is %d", seq_len)); out_seq_len += time_step_value; } Tensor transpose_out_2, qk_out; auto *transpose_out_2_data = transpose_out_2.mutable_data<T>( {3, bsz, num_head, seq_len, dim_head}, place); auto *qk_out_data = qk_out.mutable_data<T>({bsz, num_head, seq_len, out_seq_len}, place); Tensor softmax_out; Tensor attn_dropout_mask_out, attn_dropout_out; Tensor qktv_out, fmha_out; auto *softmax_out_data = softmax_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *attn_dropout_mask_out_data = attn_dropout_mask_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *attn_dropout_data_data = attn_dropout_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *qktv_out_data = qktv_out.mutable_data<T>({bsz, num_head, seq_len, dim_head}, place); auto *fmha_out_data = fmha_out.mutable_data<T>({bsz, seq_len, num_head, dim_head}, place); // 4. out_linear auto out_linear_weights = ctx.MultiInput<Tensor>("OutLinearW"); auto out_linear_biases = ctx.MultiInput<Tensor>("OutLinearBias"); int ring_id = ctx.Attr<int>("ring_id"); // (transA, transB, compute_bias) = (false, false, false) auto out_linear_compute = AttnMatMul<T>( dev_ctx, false, false, bsz_seq, dim_embed, hidden_size, false); // 5. 
ln(residual + bias) DropoutParam dropout_param2(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( dev_ctx, bsz_seq, dim_embed, dropout_param2, epsilon); auto ffn_ln_scales = ctx.MultiInput<Tensor>("FFNLnScale"); auto ffn_ln_biases = ctx.MultiInput<Tensor>("FFNLnBias"); Tensor bias_dropout_residual_out, dropout_mask_out; T *bias_dropout_residual_out_data = nullptr; if (pre_layer_norm) { bias_dropout_residual_out_data = bias_dropout_residual_out.mutable_data<T>({bsz, seq_len, dim_embed}, place); } auto *dropout_mask_out_data = dropout_mask_out.mutable_data<uint8_t>( {bsz, seq_len, dim_embed}, place); // 6. ffn matmul1 auto ffn1_weights = ctx.MultiInput<Tensor>("FFN1Weight"); auto ffn1_biases = ctx.MultiInput<Tensor>("FFN1Bias"); auto ffn1_weight_dim = ffn1_weights[0]->dims(); int dim_ffn = ffn1_weight_dim[1]; auto ffn1_linear_compute = AttnMatMul<T>( dev_ctx, false, false, bsz_seq, dim_ffn, dim_embed, false); Tensor ffn1_out; auto *ffn1_out_data = ffn1_out.mutable_data<T>({bsz_seq, dim_ffn}, place); // 7. ffn act + bias DropoutParam ffn1_dropout_param(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutHelper<T, uint8_t> fused_act_dropout_helper( dev_ctx, bsz_seq, dim_ffn, ffn1_dropout_param); Tensor ffn1_dropout_out, ffn1_dropout_mask; auto *ffn1_dropout_out_data = ffn1_dropout_out.mutable_data<T>({bsz_seq, dim_ffn}, place); auto *ffn1_dropout_mask_data = ffn1_dropout_mask.mutable_data<uint8_t>({bsz_seq, dim_ffn}, place); // 8. ffn2 matmul auto ffn2_weights = ctx.MultiInput<Tensor>("FFN2Weight"); auto ffn2_biases = ctx.MultiInput<Tensor>("FFN2Bias"); auto ffn2_linear_compute = AttnMatMul<T>( dev_ctx, false, false, bsz_seq, dim_embed, dim_ffn, false); // 9. ffn2 residual bias DropoutParam ffn2_dropout_param(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutLayerNormHelper<T, uint8_t> ffn2_fused_dropout_helper( dev_ctx, bsz_seq, dim_embed, ffn2_dropout_param, epsilon); // calc auto *out = ctx.Output<Tensor>("Out"); auto *from_data = out->mutable_data<T>(place); Tensor *from_tensor = out; Tensor tmp_out; auto *tmp_out_data = tmp_out.mutable_data<T>({bsz, seq_len, dim_embed}, place); auto *x_data = input_x->data<T>(); Tensor *buf0 = nullptr; Tensor *buf1 = nullptr; // step0: x --> buf1 // step1: buf1 --> buf0 // step2: buf0 --> buf1 int layers = qkv_weights.size(); if (pre_layer_norm) { if (layers & 1) { // odd, set buf1 as out buf0 = &tmp_out; buf1 = out; } else { // even, set buf0 as out buf0 = out; buf1 = &tmp_out; } } else { buf0 = &tmp_out; buf1 = out; } for (int i = 0; i < layers; ++i) { // step1. layer_norm if (i == 0 && pre_layer_norm) { auto *ln_scale_data = ln_scales[i]->data<U>(); auto *ln_bias_data = ln_biases[i]->data<U>(); // TODO(wangxi): can remove mean var in inference ln_compute.ComputeForward(x_data, ln_scale_data, ln_bias_data, buf1->data<T>(), ln_mean_data, ln_var_data); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step1"; #endif // step2. qkv const Tensor *qkv_bias = qkv_biases.size() > 0 ? qkv_biases[i] : nullptr; // NOTE: in decoder stage, bias is fused in fmha const Tensor *bias = time_step ? nullptr : qkv_bias; if (!pre_layer_norm && i == 0) { qkv_compute.ComputeForward( qkv_weights[i], input_x, bias, &qkv_out, &qkv_out); } else { qkv_compute.ComputeForward( qkv_weights[i], buf1, bias, &qkv_out, &qkv_out); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step2"; #endif // step3. fmha const Tensor *cache_kv = cache_kvs.size() > 0 ? cache_kvs[i] : nullptr; Tensor *cache_kv_out = cache_kv ? 
cache_kv_outs[i] : nullptr; if (time_step) { // generation decoder stage // [2, batch_size, num_head, max_seq_len, head_size] int max_seq_len = cache_kv->dims()[3]; fmha<T>(dev_ctx, qkv_out, *qkv_bias, *src_mask, cache_kv_out, &fmha_out, bsz, max_seq_len, num_head, dim_head, time_step->data<int>()[0], 1. / sqrt(dim_head)); } else if (cache_kv_out) { // generation context stage // TODO(wangxi): can remove dropout in inference fmha_compute.ComputeForward(qkv_out, nullptr, src_mask, &transpose_out_2, nullptr, &qk_out, nullptr, &softmax_out, &attn_dropout_mask_out, &attn_dropout_out, &qktv_out, &fmha_out); // [3, bsz, num_head, seq_len, head_dim] T *qkv_data = transpose_out_2_data; int64_t q_size = bsz * seq_len * num_head * dim_head; int64_t k_size = q_size; const T *q_ptr = qkv_data; const T *k_ptr = q_ptr + q_size; const T *v_ptr = k_ptr + k_size; // [2, bsz, num_head, max_seq_len, head_dim] int max_seq_len = cache_kv_out->dims()[3]; T *cache_kv_data = cache_kv_out->data<T>(); int64_t cache_k_size = bsz * num_head * max_seq_len * dim_head; T *cache_k_ptr = cache_kv_data; T *cache_v_ptr = cache_kv_data + cache_k_size; write_cache_kv<T>(dev_ctx, cache_k_ptr, cache_v_ptr, k_ptr, v_ptr, bsz, num_head, seq_len, max_seq_len, dim_head); } else { // not generation // TODO(wangxi): can remove dropout in inference fmha_compute.ComputeForward(qkv_out, cache_kv, src_mask, &transpose_out_2, cache_kv_out, &qk_out, nullptr, &softmax_out, &attn_dropout_mask_out, &attn_dropout_out, &qktv_out, &fmha_out); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step3"; #endif if (pre_layer_norm) { out_linear_compute.ComputeForward( out_linear_weights[i], &fmha_out, nullptr, buf1, nullptr); AllReduce<T>(*buf1, ring_id, dev_ctx); } else { out_linear_compute.ComputeForward( out_linear_weights[i], &fmha_out, nullptr, buf0, nullptr); AllReduce<T>(*buf0, ring_id, dev_ctx); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step4"; #endif // step5. ln(residual + dropout(input + bias)) if (pre_layer_norm) { auto *ln_scale_data = ffn_ln_scales[i]->data<U>(); auto *ln_bias_data = ffn_ln_biases[i]->data<U>(); auto *out_linear_bias_data = out_linear_biases[i]->data<T>(); // inplace fused_dropout_layernorm_helper.LayernormResidualDropoutBias( dev_ctx, buf1->data<T>(), x_data, out_linear_bias_data, ln_scale_data, ln_bias_data, bias_dropout_residual_out_data, dropout_mask_out_data, buf1->data<T>(), ln_mean_data, ln_var_data); } else { auto *ln_scale_data = ln_scales[i]->data<U>(); auto *ln_bias_data = ln_biases[i]->data<U>(); auto *out_linear_bias_data = out_linear_biases[i]->data<T>(); auto *residual_data = (i == 0 ? x_data : buf1->data<T>()); fused_dropout_layernorm_helper.LayernormResidualDropoutBias( dev_ctx, buf0->data<T>(), residual_data, out_linear_bias_data, ln_scale_data, ln_bias_data, buf0->data<T>(), dropout_mask_out_data, buf1->data<T>(), ln_mean_data, ln_var_data); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step5"; #endif // step6. ffn matmul1 ffn1_linear_compute.ComputeForward( ffn1_weights[i], buf1, nullptr, &ffn1_out, nullptr); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step6"; #endif // step7. act bias // TODO(wangxi): remove dropout mask in inference fused_act_dropout_helper.DropoutActBias(dev_ctx, ffn1_out_data, ffn1_biases[i]->data<T>(), "gelu", ffn1_dropout_out_data, ffn1_dropout_mask_data); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step7"; #endif // step8. 
ffn matmul2 if (pre_layer_norm) { ffn2_linear_compute.ComputeForward( ffn2_weights[i], &ffn1_dropout_out, nullptr, buf1, nullptr); } else { ffn2_linear_compute.ComputeForward( ffn2_weights[i], &ffn1_dropout_out, nullptr, buf0, nullptr); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step8.0"; #endif if (pre_layer_norm) { AllReduce<T>(*buf1, ring_id, dev_ctx); } else { AllReduce<T>(*buf0, ring_id, dev_ctx); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step8.1"; #endif // step9. residual bias if (pre_layer_norm) { // TODO(wangxi): remove dropout mask in inference if (i < layers - 1) { auto *ln_scale_data = ln_scales[i + 1]->data<U>(); auto *ln_bias_data = ln_biases[i + 1]->data<U>(); ffn2_fused_dropout_helper.LayernormResidualDropoutBias( dev_ctx, buf1->data<T>(), bias_dropout_residual_out_data, ffn2_biases[i]->data<T>(), ln_scale_data, ln_bias_data, buf1->data<T>(), dropout_mask_out_data, buf0->data<T>(), ln_mean_data, ln_var_data); } else { ffn2_fused_dropout_helper.ResidualDropoutBias( dev_ctx, buf1->data<T>(), bias_dropout_residual_out_data, ffn2_biases[i]->data<T>(), buf1->data<T>(), dropout_mask_out_data); } } else { auto *ln_scale_data = ffn_ln_scales[i]->data<U>(); auto *ln_bias_data = ffn_ln_biases[i]->data<U>(); ffn2_fused_dropout_helper.LayernormResidualDropoutBias( dev_ctx, buf0->data<T>(), buf1->data<T>(), ffn2_biases[i]->data<T>(), ln_scale_data, ln_bias_data, buf0->data<T>(), dropout_mask_out_data, buf1->data<T>(), ln_mean_data, ln_var_data); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step9"; #endif if (pre_layer_norm) { x_data = buf1->data<T>(); std::swap(buf0, buf1); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(fused_multi_transformer, ops::FusedMultiTransformerOpKernel<plat::float16>, ops::FusedMultiTransformerOpKernel<float>);
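// NOTE: hedged illustration only. The hipLaunchKernelGGL calls in this file are
// the mechanical hipify rewrite of CUDA's triple-chevron launches; the names
// `k`, `grid`, `block`, `smem_bytes`, `stream` and `args` below are placeholders,
// not symbols defined in this file.
//
//   // CUDA form (as in the .cu counterpart of this file):
//   k<<<grid, block, smem_bytes, stream>>>(args...);
//
//   // HIP form emitted by hipify:
//   hipLaunchKernelGGL((k), dim3(grid), dim3(block), smem_bytes, stream, args...);
//
// For example, write_cache_k_kernel above is launched with
// grid = (div_up(max_size, block_sz), bsz, num_head) and block = block_sz,
// exactly as in the CUDA version of this file.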
54ed525b8ce1dd96e0092507d64e2e3abc547da0.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file has been adapted from FasterTransformer file: // https://github.com/NVIDIA/FasterTransformer/blob/v4.0/fastertransformer/cuda/masked_multihead_attention.cu // We add License in the head. #include <cuda_fp16.h> #include <float.h> #include <cub/cub.cuh> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/fused/attention_layer_norm.h" #include "paddle/fluid/operators/fused/attn_gemm.h" #include "paddle/fluid/operators/fused/fmha_ref.h" #include "paddle/fluid/operators/fused/fused_dropout_helper.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/phi/api/include/tensor.h" #include "paddle/phi/kernels/funcs/math_function.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/distributed/collective/ProcessGroup.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { using Tensor = framework::Tensor; // for debug // #define _DEBUG_FUSED_MULTI_TRANSFORMER template <typename T> static void AllReduce(framework::Tensor &tensor, // NOLINT const int ring_id, const phi::GPUContext &ctx) { if (ring_id == -1) return; #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(ring_id)) { paddle::distributed::ProcessGroup *pg = map->get(ring_id); std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(tensor); out_tensor.push_back(tensor); paddle::distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { auto dtype = platform::ToNCCLDataType( framework::TransToProtoVarType(tensor.dtype())); int64_t numel = tensor.numel(); const void *sendbuff = tensor.data<T>(); auto place = ctx.GetPlace(); void *recvbuff = tensor.mutable_data<T>(place); auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place); auto stream = ctx.stream(); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream)); } #else PADDLE_THROW(platform::errors::Unimplemented( "PaddlePaddle should compile with NCCL or RCCL when used tensor model " "parallel op.")); #endif } namespace { namespace plat = paddle::platform; using float16 = plat::float16; #define MMHA_USE_FP32_ACUM_FOR_LOGITS #define MMHA_USE_FP32_ACUM_FOR_OUT template <typename T> struct Masked_multihead_attention_params { // output buffer, [B, 1(seq_len), num_head * dim_head] T *out; // qkv_out, [B, 1(seq_len), 3, num_head * dim_head] const T *qkv; // bias, [3, num_head, dim_head] const T *qkv_bias; // TODO(wangxi): optimize with 
input_lengths and max_input_len? // [bsz, 1, 1, time_step(cache_seq_length)+1] const T *attn_mask; // [2, B, num_head, max_seq_len(valid cache_seq_len), dim_head] // k [B, num_head, dim_head/x, max_seq_len, x], that is `seq_len` first // v [B, num_head, max_seq_len, dim_head] T *cache_kv; int batch_size; int num_head; int timestep; // cache_seq_length int max_seq_length; // 1.f / sqrt(Dh) float inv_sqrt_dh; }; struct Float8_ { float2 x; float2 y; float2 z; float2 w; }; // clang-format off template <typename T, int Dh> struct Qk_vec_ {}; template <> struct Qk_vec_<float, 32> { using Type = float; }; template <> struct Qk_vec_<float, 64> { using Type = float2; }; template <> struct Qk_vec_<float, 128> { using Type = float4; }; template <> struct Qk_vec_<float, 256> { using Type = float4; }; template <> struct Qk_vec_<float16, 32> { using Type = uint32_t; }; template <> struct Qk_vec_<float16, 64> { using Type = uint32_t; }; template <> struct Qk_vec_<float16, 128> { using Type = uint2; }; template <> struct Qk_vec_<float16, 256> { using Type = uint4; }; template <typename T, int THREADS_PER_KEY> struct K_vec_ {}; template <> struct K_vec_<float, 4> { using Type = float; }; template <> struct K_vec_<float, 2> { using Type = float2; }; template <> struct K_vec_<float, 1> { using Type = float4; }; template <> struct K_vec_<float16, 4> { using Type = uint32_t; }; template <> struct K_vec_<float16, 2> { using Type = uint2; }; template <> struct K_vec_<float16, 1> { using Type = uint4; }; template <typename T, int V_VEC_SIZE> struct V_vec_ {}; template <> struct V_vec_<float, 1> { using Type = float; }; template <> struct V_vec_<float, 2> { using Type = float2; }; template <> struct V_vec_<float, 4> { using Type = float4; }; template <> struct V_vec_<float16, 2> { using Type = uint32_t; }; template <> struct V_vec_<float16, 4> { using Type = uint2; }; template <> struct V_vec_<float16, 8> { using Type = uint4; }; #ifdef MMHA_USE_FP32_ACUM_FOR_OUT template <typename T> struct V_vec_acum_fp32_ {}; // template <> struct V_vec_acum_fp32_<float> { using Type = float; }; // template <> struct V_vec_acum_fp32_<float2> { using Type = float2; }; template <> struct V_vec_acum_fp32_<float4> { using Type = float4; }; // template <> struct V_vec_acum_fp32_<uint32_t> { using Type = float2; }; // template <> struct V_vec_acum_fp32_<uint2 > { using Type = Float4_; }; template <> struct V_vec_acum_fp32_<uint4> { using Type = Float8_; }; #endif // clang-format on inline __device__ float half_to_float(uint16_t h) { float f; asm volatile("cvt.f32.f16 %0, %1;\n" : "=f"(f) : "h"(h)); return f; } inline __device__ float2 half2_to_float2(uint32_t v) { uint16_t lo, hi; asm volatile("mov.b32 {%0, %1}, %2;\n" : "=h"(lo), "=h"(hi) : "r"(v)); return make_float2(half_to_float(lo), half_to_float(hi)); } inline __device__ uint32_t float2_to_half2(float2 f) { union { uint32_t u32; uint16_t u16[2]; } tmp; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 asm volatile("cvt.rn.f16x2.f32 %0, %1, %2;\n" : "=r"(tmp.u32) : "f"(f.y), "f"(f.x)); #else asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[0]) : "f"(f.x)); asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[1]) : "f"(f.y)); #endif return tmp.u32; } inline __device__ float add(float a, float b) { return a + b; } inline __device__ float2 add(float2 a, float2 b) { float2 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); return c; } inline __device__ float4 add(float4 a, float4 b) { float4 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); c.z = add(a.z, b.z); c.w = add(a.w, b.w); 
return c; } inline __device__ uint16_t add(uint16_t a, uint16_t b) { uint16_t c; asm volatile("add.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b)); return c; } inline __device__ uint32_t add(uint32_t a, uint32_t b) { uint32_t c; asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b)); return c; } inline __device__ uint2 add(uint2 a, uint2 b) { uint2 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); return c; } inline __device__ uint4 add(uint4 a, uint4 b) { uint4 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); c.z = add(a.z, b.z); c.w = add(a.w, b.w); return c; } inline __device__ float2 add(uint32_t a, float2 fb) { float2 fa = half2_to_float2(a); return add(fa, fb); } inline __device__ Float8_ add(uint4 a, Float8_ fb) { Float8_ fc; fc.x = add(a.x, fb.x); fc.y = add(a.y, fb.y); fc.z = add(a.z, fb.z); fc.w = add(a.w, fb.w); return fc; } template <typename Acc, typename A, typename B> inline __device__ Acc mul(A a, B b); template <> inline __device__ float mul<float, float>(float a, float b) { return a * b; } template <> inline __device__ float2 mul(float2 a, float2 b) { float2 c; c.x = a.x * b.x; c.y = a.y * b.y; return c; } template <> inline __device__ float4 mul(float4 a, float4 b) { float4 c; c.x = a.x * b.x; c.y = a.y * b.y; c.z = a.z * b.z; c.w = a.w * b.w; return c; } template <> inline __device__ uint16_t mul(uint16_t a, uint16_t b) { uint16_t c; asm volatile("mul.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b)); return c; } template <> inline __device__ uint32_t mul(uint32_t a, uint32_t b) { uint32_t c; asm volatile("mul.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b)); return c; } template <> inline __device__ uint2 mul(uint2 a, uint2 b) { uint2 c; c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x); c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y); return c; } template <> inline __device__ uint4 mul(uint4 a, uint4 b) { uint4 c; c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x); c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y); c.z = mul<uint32_t, uint32_t, uint32_t>(a.z, b.z); c.w = mul<uint32_t, uint32_t, uint32_t>(a.w, b.w); return c; } template <> inline __device__ uint32_t mul(uint32_t a, float b) { float2 tmp = half2_to_float2(a); float2 tmp_res; tmp_res.x = tmp.x * b; tmp_res.y = tmp.y * b; uint32_t res = float2_to_half2(tmp_res); return res; } template <> inline __device__ uint2 mul(uint2 a, float b) { uint2 res; res.x = mul<uint32_t, uint32_t, float>(a.x, b); res.y = mul<uint32_t, uint32_t, float>(a.y, b); return res; } template <> inline __device__ uint4 mul(uint4 a, float b) { uint4 res; res.x = mul<uint32_t, uint32_t, float>(a.x, b); res.y = mul<uint32_t, uint32_t, float>(a.y, b); res.z = mul<uint32_t, uint32_t, float>(a.z, b); res.w = mul<uint32_t, uint32_t, float>(a.w, b); return res; } template <> inline __device__ float2 mul(float2 a, float b) { float2 res; res.x = a.x * b; res.y = a.y * b; return res; } template <> inline __device__ float4 mul(float4 a, float b) { float4 res; res.x = a.x * b; res.y = a.y * b; res.z = a.z * b; res.w = a.w * b; return res; } inline __device__ float sum(float v) { return v; } inline __device__ float sum(float2 v) { return v.x + v.y; } inline __device__ float sum(float4 v) { return v.x + v.y + v.z + v.w; } inline __device__ float sum(uint16_t v) { return half_to_float(v); } inline __device__ float sum(uint32_t v) { float2 tmp = half2_to_float2(v); return tmp.x + tmp.y; } inline __device__ float sum(uint2 v) { uint32_t c = add(v.x, v.y); return sum(c); } inline __device__ float sum(uint4 v) { uint32_t c = add(v.x, v.y); c = add(c, 
v.z); c = add(c, v.w); return sum(c); } template <typename T> inline __device__ float dot(T a, T b) { return sum(mul<T, T, T>(a, b)); } template <typename A, typename T> inline __device__ float dot(T a, T b) { return sum(mul<A, T, T>(a, b)); } inline __device__ constexpr uint32_t shfl_mask(int threads) { return threads == 32 ? uint32_t(-1) : (1u << threads) - 1u; } template <typename T> inline __device__ __host__ T div_up(T m, T n) { return (m + n - 1) / n; } inline __device__ float fma(float a, float b, float c) { return a * b + c; } inline __device__ float2 fma(float2 a, float2 b, float2 c) { float2 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); return d; } inline __device__ float4 fma(float4 a, float4 b, float4 c) { float4 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); d.z = fma(a.z, b.z, c.z); d.w = fma(a.w, b.w, c.w); return d; } inline __device__ uint32_t fma(uint32_t a, uint32_t b, uint32_t c) { uint32_t d; asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(d) : "r"(a), "r"(b), "r"(c)); return d; } inline __device__ uint2 fma(uint2 a, uint2 b, uint2 c) { uint2 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); return d; } inline __device__ uint4 fma(uint4 a, uint4 b, uint4 c) { uint4 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); d.z = fma(a.z, b.z, c.z); d.w = fma(a.w, b.w, c.w); return d; } inline __device__ float2 fma(float a, float2 b, float2 c) { float2 d; d.x = fma(a, b.x, c.x); d.y = fma(a, b.y, c.y); return d; } inline __device__ float4 fma(float a, float4 b, float4 c) { float4 d; d.x = fma(a, b.x, c.x); d.y = fma(a, b.y, c.y); d.z = fma(a, b.z, c.z); d.w = fma(a, b.w, c.w); return d; } inline __device__ Float8_ fma(float a, Float8_ b, Float8_ c) { Float8_ d; d.x = fma(a, b.x, c.x); d.y = fma(a, b.y, c.y); d.z = fma(a, b.z, c.z); d.w = fma(a, b.w, c.w); return d; } inline __device__ uint32_t h0_h0(uint16_t a) { uint32_t b; asm volatile("mov.b32 %0, {%1, %1};" : "=r"(b) : "h"(a)); return b; } inline __device__ uint32_t fma(uint16_t a, uint32_t b, uint32_t c) { return fma(h0_h0(a), b, c); } inline __device__ uint2 fma(uint16_t a, uint2 b, uint2 c) { uint32_t s = h0_h0(a); uint2 d; d.x = fma(s, b.x, c.x); d.y = fma(s, b.y, c.y); return d; } inline __device__ uint4 fma(uint16_t a, uint4 b, uint4 c) { uint32_t s = h0_h0(a); uint4 d; d.x = fma(s, b.x, c.x); d.y = fma(s, b.y, c.y); d.z = fma(s, b.z, c.z); d.w = fma(s, b.w, c.w); return d; } inline __device__ float cast_to_float(float u) { return u; } inline __device__ float2 cast_to_float(float2 u) { return u; } inline __device__ float4 cast_to_float(float4 u) { return u; } inline __device__ Float8_ cast_to_float(uint4 u) { Float8_ tmp; tmp.x = half2_to_float2(u.x); tmp.y = half2_to_float2(u.y); tmp.z = half2_to_float2(u.z); tmp.w = half2_to_float2(u.w); return tmp; } template <int THREADS_PER_KEY, typename K_vec, int N> inline __device__ float qk_dot_(const K_vec (&q)[N], const K_vec (&k)[N], float inv_sqrt_dh) { K_vec inv_q = mul<K_vec, K_vec, float>(q[0], inv_sqrt_dh); K_vec qk_vec = mul<K_vec, K_vec, K_vec>(inv_q, k[0]); #pragma unroll for (int ii = 1; ii < N; ++ii) { inv_q = mul<K_vec, K_vec, float>(q[ii], inv_sqrt_dh); qk_vec = fma(inv_q, k[ii], qk_vec); } float qk = sum(qk_vec); #pragma unroll for (int mask = THREADS_PER_KEY / 2; mask >= 1; mask /= 2) { qk += __shfl_xor_sync(uint32_t(-1), qk, mask); } return qk; } template <typename T, int THREADS_PER_KEY> struct Qk_dot { template <typename K_vec, int N> static inline __device__ float dot(const K_vec (&q)[N], const K_vec (&k)[N], float 
inv_sqrt_dh) { return qk_dot_<THREADS_PER_KEY>(q, k, inv_sqrt_dh); } }; template <int WARPS_PER_BLOCK, int WARP_SIZE = 32> inline __device__ float block_sum(float *red_smem, float sum) { int warp = threadIdx.x / WARP_SIZE; int lane = threadIdx.x % WARP_SIZE; #pragma unroll for (int mask = WARP_SIZE / 2; mask >= 1; mask /= 2) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } if (lane == 0) { red_smem[warp] = sum; } __syncthreads(); if (lane < WARPS_PER_BLOCK) { sum = red_smem[lane]; } #pragma unroll for (int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } return __shfl_sync(uint32_t(-1), sum, 0); } inline __device__ void convert_from_float(float &dst, float src) { // NOLINT dst = src; } inline __device__ void convert_from_float(float4 &dst, float4 src) { // NOLINT dst = src; } inline __device__ void convert_from_float(plat::float16 &dst, // NOLINT float src) { dst = static_cast<plat::float16>(src); } inline __device__ void convert_from_float(uint4 &dst, Float8_ src) { // NOLINT dst.x = float2_to_half2(src.x); dst.y = float2_to_half2(src.y); dst.z = float2_to_half2(src.z); dst.w = float2_to_half2(src.w); } inline __device__ void zero(uint16_t &dst) { dst = uint16_t(0); } // NOLINT template <typename T> inline __device__ void zero(T &dst) { // NOLINT constexpr int WORDS = sizeof(T) / 4; union { T raw; uint32_t words[WORDS]; } tmp; #pragma unroll for (int ii = 0; ii < WORDS; ++ii) { tmp.words[ii] = 0u; } dst = tmp.raw; } template <typename T, int Dh, int Dh_MAX, int THREADS_PER_KEY, int THREADS_PER_VALUE, int THREADS_PER_BLOCK> __global__ void masked_multihead_attention_kernel( Masked_multihead_attention_params<T> params) { #if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) static_assert(Dh_MAX % THREADS_PER_KEY == 0, ""); static_assert(Dh_MAX % THREADS_PER_VALUE == 0, ""); constexpr int WARP_SIZE = 32; constexpr int WARPS_PER_BLOCK = THREADS_PER_BLOCK / WARP_SIZE; extern __shared__ char smem_[]; float *qk_smem = reinterpret_cast<float *>(smem_); char *logits_smem_ = smem_; // fp32 accum for logits float *logits_smem = reinterpret_cast<float *>(logits_smem_); T *out_smem = reinterpret_cast<T *>(smem_); __shared__ float red_smem[WARPS_PER_BLOCK * 2]; using Qk_vec = typename Qk_vec_<T, Dh_MAX>::Type; __shared__ __align__(sizeof(Qk_vec)) T q_smem[Dh_MAX]; const int bi = blockIdx.y; const int hi = blockIdx.x; const int bhi = bi * params.num_head + hi; const int tid = threadIdx.x; float qk_max = -FLT_MAX; float qk = 0; // qkv [B, S=1, 3, num_head, head_dim] int qkv_base_offset = bi * 3 * params.num_head * Dh + hi * Dh; constexpr int QK_VEC_SIZE = sizeof(Qk_vec) / sizeof(T); static_assert(Dh_MAX % QK_VEC_SIZE == 0, ""); // Use block reduction if needed // static_assert(Dh_MAX / QK_VEC_SIZE <= WARP_SIZE, ""); constexpr int QK_VECS_PER_WARP = Dh_MAX / QK_VEC_SIZE; // cache_k, [B, num_head, head_dim / x, max_seq_len, x] // x == 4/8 for FP32/FP16, 128bit, 16Byte constexpr int QK_ELTS_IN_16B = 16 / sizeof(T); constexpr int QK_VECS_IN_16B = 16 / sizeof(Qk_vec); const T *q_base = params.qkv; const T *k_base = params.qkv + params.num_head * Dh; const T *q_bias_base = params.qkv_bias; const T *k_bias_base = params.qkv_bias + params.num_head * Dh; if (tid < QK_VECS_PER_WARP) { int qk_offset = qkv_base_offset + tid * QK_VEC_SIZE; int qk_bias_offset = hi * Dh + tid * QK_VEC_SIZE; Qk_vec q; zero(q); q = (Dh == Dh_MAX || tid * QK_VEC_SIZE < Dh) ? 
*reinterpret_cast<const Qk_vec *>(&q_base[qk_offset]) : q; Qk_vec k; zero(k); k = (Dh == Dh_MAX || tid * QK_VEC_SIZE < Dh) ? *reinterpret_cast<const Qk_vec *>(&k_base[qk_offset]) : k; Qk_vec q_bias; zero(q_bias); q_bias = (Dh == Dh_MAX || tid * QK_VEC_SIZE < Dh) ? *reinterpret_cast<const Qk_vec *>(&q_bias_base[qk_bias_offset]) : q_bias; Qk_vec k_bias; zero(k_bias); k_bias = (Dh == Dh_MAX || tid * QK_VEC_SIZE < Dh) ? *reinterpret_cast<const Qk_vec *>(&k_bias_base[qk_bias_offset]) : k_bias; q = add(q, q_bias); // TODO(wangxi): See this https://github.com/microsoft/unilm/issues/510 // we may not require k_bias. k = add(k, k_bias); *reinterpret_cast<Qk_vec *>(&q_smem[tid * QK_VEC_SIZE]) = q; int co = tid / QK_VECS_IN_16B; int ci = (tid % QK_VECS_IN_16B) * QK_VEC_SIZE; int offset = bhi * params.max_seq_length * Dh + co * params.max_seq_length * QK_ELTS_IN_16B + params.timestep * QK_ELTS_IN_16B + ci; if (Dh == Dh_MAX || co < Dh / QK_ELTS_IN_16B) { *reinterpret_cast<Qk_vec *>(&params.cache_kv[offset]) = k; } qk = dot<Qk_vec, Qk_vec>(q, k); if (QK_VECS_PER_WARP <= WARP_SIZE) { #pragma unroll for (int mask = QK_VECS_PER_WARP / 2; mask >= 1; mask /= 2) { qk += __shfl_xor_sync(shfl_mask(QK_VECS_PER_WARP), qk, mask); } } } if (QK_VECS_PER_WARP > WARP_SIZE) { constexpr int WARPS_PER_RED = (QK_VECS_PER_WARP + WARP_SIZE - 1) / WARP_SIZE; qk = block_sum<WARPS_PER_RED>(&red_smem[WARPS_PER_RED], qk); } if (tid == 0) { // NOTE(wangxi): mask must be 0.0 // T mask = params.attn_mask[ // bi * (params.timestep + 1) + params.timestep]; // qk += static_cast<float>(mask); qk *= params.inv_sqrt_dh; qk_max = qk; qk_smem[params.timestep] = qk; } __syncthreads(); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER if (bi == 0 && hi == 0 && tid == 0) { printf("=======q_out=======\n"); for (int i = 0; i < Dh; ++i) printf("%f ", static_cast<float>(q_smem[i])); printf("\n"); } __syncthreads(); #endif using K_vec = typename K_vec_<T, THREADS_PER_KEY>::Type; constexpr int K_VEC_SIZE = sizeof(K_vec) / sizeof(T); static_assert(Dh_MAX % K_VEC_SIZE == 0, ""); constexpr int K_ELTS_PER_THREAD = Dh_MAX / THREADS_PER_KEY; constexpr int K_VECS_PER_THREAD = K_ELTS_PER_THREAD / K_VEC_SIZE; int ko = tid / THREADS_PER_KEY; int ki = (tid % THREADS_PER_KEY) * K_VEC_SIZE; static_assert(Dh_MAX == THREADS_PER_KEY * K_VEC_SIZE * K_VECS_PER_THREAD, ""); K_vec q[K_VECS_PER_THREAD]; #pragma unroll for (int i = 0; i < K_VECS_PER_THREAD; ++i) { q[i] = *reinterpret_cast<const K_vec *>( &q_smem[ki + i * THREADS_PER_KEY * K_VEC_SIZE]); } constexpr int K_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_KEY; constexpr int K_PER_WARP = WARP_SIZE / THREADS_PER_KEY; T *k_cache = &params.cache_kv[bhi * params.max_seq_length * Dh + ki]; int ti_end = div_up(params.timestep, K_PER_WARP) * K_PER_WARP; for (int ti = ko; ti < ti_end; ti += K_PER_ITER) { K_vec k[K_VECS_PER_THREAD]; K_vec k_vec_zero; zero(k_vec_zero); #pragma unroll for (int ii = 0; ii < K_VECS_PER_THREAD; ++ii) { int jj = ii * params.max_seq_length + ti; if (ti < params.timestep) { k[ii] = (Dh == Dh_MAX || jj * QK_ELTS_IN_16B < Dh * params.max_seq_length) ? *reinterpret_cast<const K_vec *>( &k_cache[jj * QK_ELTS_IN_16B]) : k_vec_zero; } } // NOTE(liyurui): We should multiple q with inv_sqrt_dh first, for dot(q, k) // may overflow with FP16 in large model. float qk = Qk_dot<T, THREADS_PER_KEY>::dot(q, k, params.inv_sqrt_dh); // bool is_mask = false; if (ti < params.timestep && tid % THREADS_PER_KEY == 0) { // qk_max = is_mask ? 
qk_max : fmaxf(qk_max, qk); T mask = params.attn_mask[bi * (params.timestep + 1) + ti]; qk += static_cast<float>(mask); qk_max = fmaxf(qk_max, qk); qk_smem[ti] = qk; } } #pragma unroll for (int mask = WARP_SIZE / 2; mask >= THREADS_PER_KEY; mask /= 2) { qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask)); } const int warp = tid / WARP_SIZE; const int lane = tid % WARP_SIZE; if (lane == 0) { red_smem[warp] = qk_max; } __syncthreads(); qk_max = lane < WARPS_PER_BLOCK ? red_smem[lane] : -FLT_MAX; #pragma unroll for (int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2) { qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask)); } qk_max = __shfl_sync(uint32_t(-1), qk_max, 0); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER if (bi == 0 && hi == 0 && tid == 0) { printf("=======qk_out=======\n"); for (int i = 0; i <= params.timestep; ++i) printf("%f ", qk_smem[i]); printf("qk_max=%f\n", qk_max); } __syncthreads(); #endif float sum = 0.f; for (int ti = tid; ti <= params.timestep; ti += THREADS_PER_BLOCK) { // bool is_mask = false; // float logit = is_mask ? 0.f : __expf(qk_smem[ti] - qk_max); float logit = __expf(qk_smem[ti] - qk_max); sum += logit; qk_smem[ti] = logit; } sum = block_sum<WARPS_PER_BLOCK>(&red_smem[WARPS_PER_BLOCK], sum); // FIXME(wangxi): need add 1.e-6f? float inv_sum = __fdividef(1.f, sum + 1.e-6f); for (int ti = tid; ti <= params.timestep; ti += THREADS_PER_BLOCK) { convert_from_float(logits_smem[ti], qk_smem[ti] * inv_sum); } __syncthreads(); constexpr int V_VEC_SIZE = Dh_MAX / THREADS_PER_VALUE; using V_vec = typename V_vec_<T, V_VEC_SIZE>::Type; int vo = tid / THREADS_PER_VALUE; int vi = (tid % THREADS_PER_VALUE) * V_VEC_SIZE; T *v_cache = &params.cache_kv[params.batch_size * params.num_head * params.max_seq_length * Dh + bhi * params.max_seq_length * Dh + vi]; #ifdef MMHA_USE_FP32_ACUM_FOR_OUT using V_vec_acum = typename V_vec_acum_fp32_<V_vec>::Type; #else using V_vec_acum = V_vec; #endif V_vec_acum out; zero(out); constexpr int V_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_VALUE; if (Dh == Dh_MAX || vi < Dh) { for (int ti = vo; ti < params.timestep; ti += V_PER_ITER) { V_vec v = *reinterpret_cast<const V_vec *>(&v_cache[ti * Dh]); #if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS) float logit = logits_smem[ti]; out = fma(logit, cast_to_float(v), out); #else T logit = logits_smem[ti]; // Update the partial sums. 
out = fma(logit, v, out); #endif } } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER if (bi == 0 && hi == 0 && tid == 0) { printf("======logits_out=====\n"); for (int i = 0; i <= params.timestep; ++i) printf("%f ", logits_smem[i]); printf("\n"); } __syncthreads(); #endif V_vec v_bias; zero(v_bias); if (vo == (params.timestep % V_PER_ITER) && (Dh == Dh_MAX || vi < Dh)) { V_vec v = *reinterpret_cast<const V_vec *>( &params.qkv[2 * params.num_head * Dh + qkv_base_offset + vi]); v_bias = *reinterpret_cast<const V_vec *>( &params.qkv_bias[2 * params.num_head * Dh + hi * Dh + vi]); v = add(v, v_bias); *reinterpret_cast<V_vec *>(&v_cache[params.timestep * Dh]) = v; #if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS) out = fma(logits_smem[params.timestep], cast_to_float(v), out); #else out = fma(logits_smem[params.timestep], v, out); #endif } __syncthreads(); if (Dh == Dh_MAX || vi < Dh) { #pragma unroll for (int active_groups = V_PER_ITER; active_groups >= 2; active_groups /= 2) { int midpoint = active_groups / 2; if (vo >= midpoint && vo < active_groups && (Dh == Dh_MAX || vi < Dh)) { #ifdef MMHA_USE_FP32_ACUM_FOR_OUT convert_from_float( *reinterpret_cast<V_vec *>(&out_smem[(vo - midpoint) * Dh + vi]), out); #else *reinterpret_cast<V_vec *>(&out_smem[(vo - midpoint) * Dh + vi]) = out; #endif } __syncthreads(); if (vo < midpoint && (Dh == Dh_MAX || vi < Dh)) { out = add(*reinterpret_cast<const V_vec *>(&out_smem[vo * Dh + vi]), out); } __syncthreads(); } } if (vo == 0 && (Dh == Dh_MAX || vi < Dh)) { #ifdef MMHA_USE_FP32_ACUM_FOR_OUT convert_from_float(*reinterpret_cast<V_vec *>(&params.out[bhi * Dh + vi]), out); #else *reinterpret_cast<V_vec *>(&params.out[bhi * Dh + vi]) = out; #endif } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER __syncthreads(); if (bi == 0 && hi == 0 && tid == 0) { printf("======fmha_out=====\n"); for (int i = 0; i < Dh; ++i) printf("%f ", static_cast<float>(params.out[i])); printf("\n"); } #endif #else assert(false); #endif } template <typename T> inline size_t smem_size_in_bytes( const Masked_multihead_attention_params<T> &params, int dim_head, int threads_per_value, int threads_per_block) { size_t qk_sz = div_up(params.timestep + 1, 4) * 16; size_t logits_sz = 0; #ifndef MMHA_USE_FP32_ACUM_FOR_LOGITS if (sizeof(T) != 4) { logits_sz = div_up(params.max_seq_length, 4) * 4 * sizeof(T); } #endif size_t softmax_sz = qk_sz + logits_sz; int rows_per_red = threads_per_block / threads_per_value; size_t red_sz = rows_per_red * dim_head * sizeof(T) / 2; return max(softmax_sz, red_sz); } #define MMHA_LAUNCH_KERNEL( \ T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, stream) \ size_t smem_sz = \ smem_size_in_bytes<T>(params, Dh, THDS_PER_VALUE, THDS_PER_BLOCK); \ dim3 grid(params.num_head, params.batch_size); \ masked_multihead_attention_kernel<T, \ Dh, \ Dh_MAX, \ THDS_PER_KEY, \ THDS_PER_VALUE, \ THDS_PER_BLOCK> \ <<<grid, THDS_PER_BLOCK, smem_sz, stream>>>(params) template <typename T, int Dh, int Dh_MAX> void fmha_launch_kernel(const Masked_multihead_attention_params<T> &params, const cudaStream_t &stream) { constexpr int THREADS_PER_VALUE = Dh_MAX * sizeof(T) / 16; if (params.timestep < 32) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, stream); } else if (params.timestep < 2048) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, stream); } else { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, stream); } } template <typename T> void fmha(const phi::GPUContext &dev_ctx, const Tensor &qkv_tensor, const Tensor &qkv_bias_tensor, const Tensor &src_mask_tensor, 
Tensor *cache_kv_tensor, Tensor *out_tensor, int batch_size, int max_seq_length, int num_head, int dim_head, int timestep, float inv_sqrt_dh) { Masked_multihead_attention_params<T> params; params.out = out_tensor->data<T>(); params.qkv = qkv_tensor.data<T>(); params.qkv_bias = qkv_bias_tensor.data<T>(); params.attn_mask = src_mask_tensor.data<T>(); params.cache_kv = cache_kv_tensor->data<T>(); params.batch_size = batch_size; params.num_head = num_head; params.timestep = timestep; params.max_seq_length = max_seq_length; params.inv_sqrt_dh = inv_sqrt_dh; switch (dim_head) { case 10: fmha_launch_kernel<T, 10, 32>(params, dev_ctx.stream()); break; case 26: fmha_launch_kernel<T, 26, 32>(params, dev_ctx.stream()); break; case 32: fmha_launch_kernel<T, 32, 32>(params, dev_ctx.stream()); break; case 64: fmha_launch_kernel<T, 64, 64>(params, dev_ctx.stream()); break; case 96: fmha_launch_kernel<T, 96, 128>(params, dev_ctx.stream()); break; case 128: fmha_launch_kernel<T, 128, 128>(params, dev_ctx.stream()); break; case 192: fmha_launch_kernel<T, 192, 256>(params, dev_ctx.stream()); break; default: PADDLE_THROW(platform::errors::Unimplemented( "Dim_head = %d is unsupport!", dim_head)); } } // NOTE: simd with 16Bytes(128bit), float is 4, float16 is 8 constexpr int VEC_16B = 16; template <typename T> __global__ void write_cache_k_kernel(T *cache_k, const T *k, const int num_head, const int dim_head, const int seq_len, const int max_seq_len) { const int bi = blockIdx.y; const int hi = blockIdx.z; constexpr int X_ELEMS = VEC_16B / sizeof(T); // [bsz, num_head, seq_len, dim_head/x, x] auto k_src = reinterpret_cast<const uint4 *>( k + bi * num_head * seq_len * dim_head + hi * seq_len * dim_head); // [bsz, num_head, dim_head/x, max_seq_len, x] auto k_dst = reinterpret_cast<uint4 *>( cache_k + bi * num_head * max_seq_len * dim_head + hi * max_seq_len * dim_head); const int out_idx = blockIdx.x * blockDim.x + threadIdx.x; // vec size int dim_head_div_x = dim_head / X_ELEMS; // FIXME(wangxi): num_head is not need? 
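  // A hedged reading of the indexing below, with illustrative numbers only:
  // the source/destination base pointers were already offset by
  // (blockIdx.y, blockIdx.z) = (batch, head) above, so the bound only needs to
  // cover one head's dim_head_div_x * max_seq_len vectorized elements -- the
  // num_head factor in the commented-out check is redundant here. For float16,
  // X_ELEMS = 8; with dim_head = 64 and max_seq_len = 128 the vector at
  // (seq = s, vec = v) is read from k_src[s * 8 + v] and written to
  // k_dst[v * 128 + s], i.e. the seq_len-major source layout becomes
  // dim_head/x-major, as the "transpose" comment in write_cache_kv states.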
// if (out_idx >= num_head * dim_head_div_x * max_seq_len) return; if (out_idx >= dim_head_div_x * max_seq_len) return; int idx = out_idx; const int k_seq_len_id = idx % max_seq_len; // idx = (idx - k_seq_len_id) / max_seq_len; idx = idx / max_seq_len; const int k_vec_id = idx % dim_head_div_x; if (k_seq_len_id < seq_len) { k_dst[out_idx] = k_src[k_seq_len_id * dim_head_div_x + k_vec_id]; } } template <typename T> __global__ void write_cache_v_kernel(T *cache_v, const T *v, const int num_head, const int dim_head, const int seq_len, const int max_seq_len) { const int bi = blockIdx.y; const int hi = blockIdx.z; // [bsz, num_head, seq_len, dim_head/x, x] auto v_src = reinterpret_cast<const uint4 *>( v + bi * num_head * seq_len * dim_head + hi * seq_len * dim_head); // [bsz, num_head, max_seq_len, dim_head/x, x] auto v_dst = reinterpret_cast<uint4 *>( cache_v + bi * num_head * max_seq_len * dim_head + hi * max_seq_len * dim_head); const int idx = blockIdx.x * blockDim.x + threadIdx.x; constexpr int X_ELEMS = VEC_16B / sizeof(T); const int dim_head_div_x = dim_head / X_ELEMS; if (idx >= dim_head_div_x * seq_len) return; v_dst[idx] = v_src[idx]; } template <typename T> void write_cache_kv(const phi::GPUContext &dev_ctx, T *cache_k, T *cache_v, const T *k, const T *v, const int bsz, const int num_head, const int seq_len, const int max_seq_len, const int dim_head) { constexpr int block_sz = 128; constexpr int x = VEC_16B / sizeof(T); assert(dim_head % x == 0); PADDLE_ENFORCE_EQ( dim_head % x, 0, platform::errors::PreconditionNotMet( "dim_head=%d must be divisible by vec_size=%d", dim_head, x)); int max_size = max_seq_len * dim_head / x; int size = seq_len * dim_head / x; dim3 grid(div_up(max_size, block_sz), bsz, num_head); dim3 grid_v(div_up(size, block_sz), bsz, num_head); // transpose [bsz, num_head, seq_len, dim_head/x, x]-> // [bsz, num_head, dim_head/x, max_seq_len, x] write_cache_k_kernel<<<grid, block_sz, 0, dev_ctx.stream()>>>( cache_k, k, num_head, dim_head, seq_len, max_seq_len); // copy [bsz, num_head, seq_len, dim_head/x, x]-> // [bsz, num_head, max_seq_len, dim_head/x, x] write_cache_v_kernel<<<grid_v, block_sz, 0, dev_ctx.stream()>>>( cache_v, v, num_head, dim_head, seq_len, max_seq_len); } } // namespace template <typename T> class FusedMultiTransformerOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; auto place = ctx.GetPlace(); auto &dev_ctx = ctx.cuda_device_context(); auto *time_step = ctx.Input<Tensor>("TimeStep"); // 0. input auto *input_x = ctx.Input<Tensor>("X"); const auto input_x_dims = input_x->dims(); int bsz = input_x_dims[0]; int seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int bsz_seq = bsz * seq_len; // 1. layer norm const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); auto ln_scales = ctx.MultiInput<Tensor>("LnScale"); auto ln_biases = ctx.MultiInput<Tensor>("LnBias"); auto ln_compute = AttnLayerNorm<T>(dev_ctx, epsilon, bsz_seq, dim_embed); Tensor ln_mean, ln_var; auto *ln_mean_data = ln_mean.mutable_data<U>({bsz_seq}, place); auto *ln_var_data = ln_var.mutable_data<U>({bsz_seq}, place); // 2. 
qkv // x: qkv's input [batch_size, seq_len, dim_embed] // y: qkv's weight: [3, num_head, dim_head, dim_embed] auto qkv_weights = ctx.MultiInput<Tensor>("QKVW"); auto qkv_biases = ctx.MultiInput<Tensor>("QKVBias"); const bool trans_qkvw = ctx.Attr<bool>("trans_qkvw"); const auto qkv_w_dims = qkv_weights[0]->dims(); int num_head = trans_qkvw ? qkv_w_dims[1] : qkv_w_dims[2]; int dim_head = trans_qkvw ? qkv_w_dims[2] : qkv_w_dims[3]; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; bool compute_bias = qkv_biases.size() > 0 && time_step == nullptr; // (transA, transB, compute_bias) = (false, trans_qkvw, false) auto qkv_compute = AttnMatMul<T>(dev_ctx, false, trans_qkvw, bsz_seq, output_size, input_size, compute_bias); Tensor qkv_out; auto *qkv_out_data = qkv_out.mutable_data<T>({bsz, seq_len, 3, num_head, dim_head}, place); // 3. fmha AttnDropoutParam attn_param( true, "upscale_in_train", 0.0, true, true, 0, nullptr); auto fmha_compute = FMHARef<T>(dev_ctx, bsz, seq_len, num_head, dim_head, attn_param); auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto cache_kvs = ctx.MultiInput<Tensor>("CacheKV"); auto cache_kv_outs = ctx.MultiOutput<Tensor>("CacheKVOut"); // auto *time_step = ctx.Input<Tensor>("TimeStep"); auto out_seq_len = seq_len; if (time_step) { PADDLE_ENFORCE_EQ(time_step->place(), platform::CPUPlace(), platform::errors::PreconditionNotMet( "The place of input(TimeStep) must be CPUPlace.")); // cache_seq_len int time_step_value = time_step->data<int>()[0]; PADDLE_ENFORCE_GT(time_step_value, 0, platform::errors::PreconditionNotMet( "The value of time_step must > 0, but now is %d", time_step_value)); PADDLE_ENFORCE_EQ( seq_len, 1, platform::errors::PreconditionNotMet( "In decode stage, the seq_len of input must be 1, but now is %d", seq_len)); out_seq_len += time_step_value; } Tensor transpose_out_2, qk_out; auto *transpose_out_2_data = transpose_out_2.mutable_data<T>( {3, bsz, num_head, seq_len, dim_head}, place); auto *qk_out_data = qk_out.mutable_data<T>({bsz, num_head, seq_len, out_seq_len}, place); Tensor softmax_out; Tensor attn_dropout_mask_out, attn_dropout_out; Tensor qktv_out, fmha_out; auto *softmax_out_data = softmax_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *attn_dropout_mask_out_data = attn_dropout_mask_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *attn_dropout_data_data = attn_dropout_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *qktv_out_data = qktv_out.mutable_data<T>({bsz, num_head, seq_len, dim_head}, place); auto *fmha_out_data = fmha_out.mutable_data<T>({bsz, seq_len, num_head, dim_head}, place); // 4. out_linear auto out_linear_weights = ctx.MultiInput<Tensor>("OutLinearW"); auto out_linear_biases = ctx.MultiInput<Tensor>("OutLinearBias"); int ring_id = ctx.Attr<int>("ring_id"); // (transA, transB, compute_bias) = (false, false, false) auto out_linear_compute = AttnMatMul<T>( dev_ctx, false, false, bsz_seq, dim_embed, hidden_size, false); // 5. 
ln(residual + bias) DropoutParam dropout_param2(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( dev_ctx, bsz_seq, dim_embed, dropout_param2, epsilon); auto ffn_ln_scales = ctx.MultiInput<Tensor>("FFNLnScale"); auto ffn_ln_biases = ctx.MultiInput<Tensor>("FFNLnBias"); Tensor bias_dropout_residual_out, dropout_mask_out; T *bias_dropout_residual_out_data = nullptr; if (pre_layer_norm) { bias_dropout_residual_out_data = bias_dropout_residual_out.mutable_data<T>({bsz, seq_len, dim_embed}, place); } auto *dropout_mask_out_data = dropout_mask_out.mutable_data<uint8_t>( {bsz, seq_len, dim_embed}, place); // 6. ffn matmul1 auto ffn1_weights = ctx.MultiInput<Tensor>("FFN1Weight"); auto ffn1_biases = ctx.MultiInput<Tensor>("FFN1Bias"); auto ffn1_weight_dim = ffn1_weights[0]->dims(); int dim_ffn = ffn1_weight_dim[1]; auto ffn1_linear_compute = AttnMatMul<T>( dev_ctx, false, false, bsz_seq, dim_ffn, dim_embed, false); Tensor ffn1_out; auto *ffn1_out_data = ffn1_out.mutable_data<T>({bsz_seq, dim_ffn}, place); // 7. ffn act + bias DropoutParam ffn1_dropout_param(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutHelper<T, uint8_t> fused_act_dropout_helper( dev_ctx, bsz_seq, dim_ffn, ffn1_dropout_param); Tensor ffn1_dropout_out, ffn1_dropout_mask; auto *ffn1_dropout_out_data = ffn1_dropout_out.mutable_data<T>({bsz_seq, dim_ffn}, place); auto *ffn1_dropout_mask_data = ffn1_dropout_mask.mutable_data<uint8_t>({bsz_seq, dim_ffn}, place); // 8. ffn2 matmul auto ffn2_weights = ctx.MultiInput<Tensor>("FFN2Weight"); auto ffn2_biases = ctx.MultiInput<Tensor>("FFN2Bias"); auto ffn2_linear_compute = AttnMatMul<T>( dev_ctx, false, false, bsz_seq, dim_embed, dim_ffn, false); // 9. ffn2 residual bias DropoutParam ffn2_dropout_param(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutLayerNormHelper<T, uint8_t> ffn2_fused_dropout_helper( dev_ctx, bsz_seq, dim_embed, ffn2_dropout_param, epsilon); // calc auto *out = ctx.Output<Tensor>("Out"); auto *from_data = out->mutable_data<T>(place); Tensor *from_tensor = out; Tensor tmp_out; auto *tmp_out_data = tmp_out.mutable_data<T>({bsz, seq_len, dim_embed}, place); auto *x_data = input_x->data<T>(); Tensor *buf0 = nullptr; Tensor *buf1 = nullptr; // step0: x --> buf1 // step1: buf1 --> buf0 // step2: buf0 --> buf1 int layers = qkv_weights.size(); if (pre_layer_norm) { if (layers & 1) { // odd, set buf1 as out buf0 = &tmp_out; buf1 = out; } else { // even, set buf0 as out buf0 = out; buf1 = &tmp_out; } } else { buf0 = &tmp_out; buf1 = out; } for (int i = 0; i < layers; ++i) { // step1. layer_norm if (i == 0 && pre_layer_norm) { auto *ln_scale_data = ln_scales[i]->data<U>(); auto *ln_bias_data = ln_biases[i]->data<U>(); // TODO(wangxi): can remove mean var in inference ln_compute.ComputeForward(x_data, ln_scale_data, ln_bias_data, buf1->data<T>(), ln_mean_data, ln_var_data); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step1"; #endif // step2. qkv const Tensor *qkv_bias = qkv_biases.size() > 0 ? qkv_biases[i] : nullptr; // NOTE: in decoder stage, bias is fused in fmha const Tensor *bias = time_step ? nullptr : qkv_bias; if (!pre_layer_norm && i == 0) { qkv_compute.ComputeForward( qkv_weights[i], input_x, bias, &qkv_out, &qkv_out); } else { qkv_compute.ComputeForward( qkv_weights[i], buf1, bias, &qkv_out, &qkv_out); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step2"; #endif // step3. fmha const Tensor *cache_kv = cache_kvs.size() > 0 ? cache_kvs[i] : nullptr; Tensor *cache_kv_out = cache_kv ? 
cache_kv_outs[i] : nullptr; if (time_step) { // generation decoder stage // [2, batch_size, num_head, max_seq_len, head_size] int max_seq_len = cache_kv->dims()[3]; fmha<T>(dev_ctx, qkv_out, *qkv_bias, *src_mask, cache_kv_out, &fmha_out, bsz, max_seq_len, num_head, dim_head, time_step->data<int>()[0], 1. / sqrt(dim_head)); } else if (cache_kv_out) { // generation context stage // TODO(wangxi): can remove dropout in inference fmha_compute.ComputeForward(qkv_out, nullptr, src_mask, &transpose_out_2, nullptr, &qk_out, nullptr, &softmax_out, &attn_dropout_mask_out, &attn_dropout_out, &qktv_out, &fmha_out); // [3, bsz, num_head, seq_len, head_dim] T *qkv_data = transpose_out_2_data; int64_t q_size = bsz * seq_len * num_head * dim_head; int64_t k_size = q_size; const T *q_ptr = qkv_data; const T *k_ptr = q_ptr + q_size; const T *v_ptr = k_ptr + k_size; // [2, bsz, num_head, max_seq_len, head_dim] int max_seq_len = cache_kv_out->dims()[3]; T *cache_kv_data = cache_kv_out->data<T>(); int64_t cache_k_size = bsz * num_head * max_seq_len * dim_head; T *cache_k_ptr = cache_kv_data; T *cache_v_ptr = cache_kv_data + cache_k_size; write_cache_kv<T>(dev_ctx, cache_k_ptr, cache_v_ptr, k_ptr, v_ptr, bsz, num_head, seq_len, max_seq_len, dim_head); } else { // not generation // TODO(wangxi): can remove dropout in inference fmha_compute.ComputeForward(qkv_out, cache_kv, src_mask, &transpose_out_2, cache_kv_out, &qk_out, nullptr, &softmax_out, &attn_dropout_mask_out, &attn_dropout_out, &qktv_out, &fmha_out); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step3"; #endif if (pre_layer_norm) { out_linear_compute.ComputeForward( out_linear_weights[i], &fmha_out, nullptr, buf1, nullptr); AllReduce<T>(*buf1, ring_id, dev_ctx); } else { out_linear_compute.ComputeForward( out_linear_weights[i], &fmha_out, nullptr, buf0, nullptr); AllReduce<T>(*buf0, ring_id, dev_ctx); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step4"; #endif // step5. ln(residual + dropout(input + bias)) if (pre_layer_norm) { auto *ln_scale_data = ffn_ln_scales[i]->data<U>(); auto *ln_bias_data = ffn_ln_biases[i]->data<U>(); auto *out_linear_bias_data = out_linear_biases[i]->data<T>(); // inplace fused_dropout_layernorm_helper.LayernormResidualDropoutBias( dev_ctx, buf1->data<T>(), x_data, out_linear_bias_data, ln_scale_data, ln_bias_data, bias_dropout_residual_out_data, dropout_mask_out_data, buf1->data<T>(), ln_mean_data, ln_var_data); } else { auto *ln_scale_data = ln_scales[i]->data<U>(); auto *ln_bias_data = ln_biases[i]->data<U>(); auto *out_linear_bias_data = out_linear_biases[i]->data<T>(); auto *residual_data = (i == 0 ? x_data : buf1->data<T>()); fused_dropout_layernorm_helper.LayernormResidualDropoutBias( dev_ctx, buf0->data<T>(), residual_data, out_linear_bias_data, ln_scale_data, ln_bias_data, buf0->data<T>(), dropout_mask_out_data, buf1->data<T>(), ln_mean_data, ln_var_data); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step5"; #endif // step6. ffn matmul1 ffn1_linear_compute.ComputeForward( ffn1_weights[i], buf1, nullptr, &ffn1_out, nullptr); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step6"; #endif // step7. act bias // TODO(wangxi): remove dropout mask in inference fused_act_dropout_helper.DropoutActBias(dev_ctx, ffn1_out_data, ffn1_biases[i]->data<T>(), "gelu", ffn1_dropout_out_data, ffn1_dropout_mask_data); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step7"; #endif // step8. 
ffn matmul2 if (pre_layer_norm) { ffn2_linear_compute.ComputeForward( ffn2_weights[i], &ffn1_dropout_out, nullptr, buf1, nullptr); } else { ffn2_linear_compute.ComputeForward( ffn2_weights[i], &ffn1_dropout_out, nullptr, buf0, nullptr); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step8.0"; #endif if (pre_layer_norm) { AllReduce<T>(*buf1, ring_id, dev_ctx); } else { AllReduce<T>(*buf0, ring_id, dev_ctx); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step8.1"; #endif // step9. residual bias if (pre_layer_norm) { // TODO(wangxi): remove dropout mask in inference if (i < layers - 1) { auto *ln_scale_data = ln_scales[i + 1]->data<U>(); auto *ln_bias_data = ln_biases[i + 1]->data<U>(); ffn2_fused_dropout_helper.LayernormResidualDropoutBias( dev_ctx, buf1->data<T>(), bias_dropout_residual_out_data, ffn2_biases[i]->data<T>(), ln_scale_data, ln_bias_data, buf1->data<T>(), dropout_mask_out_data, buf0->data<T>(), ln_mean_data, ln_var_data); } else { ffn2_fused_dropout_helper.ResidualDropoutBias( dev_ctx, buf1->data<T>(), bias_dropout_residual_out_data, ffn2_biases[i]->data<T>(), buf1->data<T>(), dropout_mask_out_data); } } else { auto *ln_scale_data = ffn_ln_scales[i]->data<U>(); auto *ln_bias_data = ffn_ln_biases[i]->data<U>(); ffn2_fused_dropout_helper.LayernormResidualDropoutBias( dev_ctx, buf0->data<T>(), buf1->data<T>(), ffn2_biases[i]->data<T>(), ln_scale_data, ln_bias_data, buf0->data<T>(), dropout_mask_out_data, buf1->data<T>(), ln_mean_data, ln_var_data); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step9"; #endif if (pre_layer_norm) { x_data = buf1->data<T>(); std::swap(buf0, buf1); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(fused_multi_transformer, ops::FusedMultiTransformerOpKernel<plat::float16>, ops::FusedMultiTransformerOpKernel<float>);
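// A hedged note on the buf0/buf1 ping-pong in Compute() above (pre_layer_norm
// path), written here as commentary only: every layer ends with
// std::swap(buf0, buf1), and the last layer writes its ResidualDropoutBias
// result into buf1, so the parity test at the top ("layers & 1") picks the
// initial assignment that makes buf1 equal the op's `Out` tensor at the final
// iteration. Illustrative trace (the layer counts are hypothetical):
//
//   layers = 3 (odd) : buf1 = Out initially; the swaps at i = 0 and i = 1
//                      leave buf1 = Out again at i = 2, which writes the output.
//   layers = 2 (even): buf0 = Out initially; the swap at i = 0 makes
//                      buf1 = Out at i = 1, which writes the output.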
61025fe6c4e28f31e1fa91f9bf058fe5c7e80281.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // MAIN FUNCTION HEADER //======================================================================================================================================================150 #include "./../main.h" // (in the main program folder) needed to recognized input parameters //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "./../util/device/device.h" // (in library path specified to compiler) needed by for device functions #include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer //======================================================================================================================================================150 // KERNEL_GPU_CUDA_WRAPPER FUNCTION HEADER //======================================================================================================================================================150 #include "./kernel_gpu_cuda_wrapper.h" // (in the current directory) //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 #include "./kernel_gpu_cuda.cu" // (in the current directory) GPU kernel, cannot include with header file because of complications with passing of constant memory variables //========================================================================================================================================================================================================200 // KERNEL_GPU_CUDA_WRAPPER FUNCTION //========================================================================================================================================================================================================200 void kernel_gpu_cuda_wrapper(par_str par_cpu, dim_str dim_cpu, box_str* box_cpu, FOUR_VECTOR* rv_cpu, fp* qv_cpu, FOUR_VECTOR* fv_cpu) { //======================================================================================================================================================150 // CPU VARIABLES //======================================================================================================================================================150 // timer long long time0; long long time1; long long time2; long long time3; long long time4; long long time5; long long time6; time0 = get_time(); 
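	// (hedged aside on the GPU setup that follows: hipDeviceSetLimit with
	//  hipLimitMallocHeapSize raises the device-side heap used by in-kernel
	//  malloc/new; the 1024*1024*800 argument is 800 MiB, and the limit has to
	//  be set before launching a kernel that allocates from the device heap.
	//  A quick confirmation sketch, not part of the original wrapper:
	//
	//    size_t heap_bytes = 0;
	//    hipDeviceGetLimit(&heap_bytes, hipLimitMallocHeapSize);
	//    printf("device malloc heap = %zu bytes\n", heap_bytes);
	//  )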
//======================================================================================================================================================150 // GPU SETUP //======================================================================================================================================================150 //====================================================================================================100 // INITIAL DRIVER OVERHEAD //====================================================================================================100 hipDeviceSetLimit(hipLimitMallocHeapSize, 1024*1024*800); //sderek // hipDeviceSynchronize(); //====================================================================================================100 // VARIABLES //====================================================================================================100 box_str* d_box_gpu; FOUR_VECTOR* d_rv_gpu; fp* d_qv_gpu; FOUR_VECTOR* d_fv_gpu; dim3 threads; dim3 blocks; //====================================================================================================100 // EXECUTION PARAMETERS //====================================================================================================100 blocks.x = dim_cpu.number_boxes; blocks.y = 1; threads.x = NUMBER_THREADS; // define the number of threads in the block threads.y = 1; time1 = get_time(); //======================================================================================================================================================150 // GPU MEMORY (MALLOC) //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes //==================================================50 hipMalloc( (void **)&d_box_gpu, dim_cpu.box_mem); //==================================================50 // rv //==================================================50 hipMalloc( (void **)&d_rv_gpu, dim_cpu.space_mem); //==================================================50 // qv //==================================================50 hipMalloc( (void **)&d_qv_gpu, dim_cpu.space_mem2); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 hipMalloc( (void **)&d_fv_gpu, dim_cpu.space_mem); time2 = get_time(); //======================================================================================================================================================150 // GPU MEMORY COPY //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes 
//==================================================50 hipMemcpy( d_box_gpu, box_cpu, dim_cpu.box_mem, hipMemcpyHostToDevice); //==================================================50 // rv //==================================================50 hipMemcpy( d_rv_gpu, rv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice); //==================================================50 // qv //==================================================50 hipMemcpy( d_qv_gpu, qv_cpu, dim_cpu.space_mem2, hipMemcpyHostToDevice); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 hipMemcpy( d_fv_gpu, fv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice); time3 = get_time(); //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 // launch kernel - all boxes hipLaunchKernelGGL(( kernel_gpu_cuda), dim3(blocks), dim3(threads), 0, 0, par_cpu, dim_cpu, d_box_gpu, d_rv_gpu, d_qv_gpu, d_fv_gpu); hipDeviceSynchronize(); time4 = get_time(); //======================================================================================================================================================150 // GPU MEMORY COPY (CONTD.) //======================================================================================================================================================150 hipMemcpy( fv_cpu, d_fv_gpu, dim_cpu.space_mem, hipMemcpyDeviceToHost); time5 = get_time(); //======================================================================================================================================================150 // GPU MEMORY DEALLOCATION //======================================================================================================================================================150 hipFree(d_rv_gpu); hipFree(d_qv_gpu); hipFree(d_fv_gpu); hipFree(d_box_gpu); time6 = get_time(); //======================================================================================================================================================150 // DISPLAY TIMING //======================================================================================================================================================150 printf("Time spent in different stages of GPU_CUDA KERNEL:\n"); printf("%15.12f s, %15.12f % : GPU: SET DEVICE / DRIVER INIT\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time6-time0) * 100); printf("%15.12f s, %15.12f % : GPU MEM: ALO\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time6-time0) * 100); printf("%15.12f s, %15.12f % : GPU MEM: COPY IN\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time6-time0) * 100); printf("%15.12f s, %15.12f % : GPU: KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time6-time0) * 100); printf("%15.12f s, %15.12f % : GPU MEM: COPY OUT\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time6-time0) * 100); printf("%15.12f s, %15.12f % : GPU MEM: FRE\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / 
(float) (time6-time0) * 100); printf("Total time:\n"); printf("%.12f s\n", (float) (time6-time0) / 1000000); fprintf(stderr, "%15.12f s, GPU: KERNEL\n", (float) (time4-time3) / 1000000); }
61025fe6c4e28f31e1fa91f9bf058fe5c7e80281.cu
//========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // MAIN FUNCTION HEADER //======================================================================================================================================================150 #include "./../main.h" // (in the main program folder) needed to recognized input parameters //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "./../util/device/device.h" // (in library path specified to compiler) needed by for device functions #include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer //======================================================================================================================================================150 // KERNEL_GPU_CUDA_WRAPPER FUNCTION HEADER //======================================================================================================================================================150 #include "./kernel_gpu_cuda_wrapper.h" // (in the current directory) //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 #include "./kernel_gpu_cuda.cu" // (in the current directory) GPU kernel, cannot include with header file because of complications with passing of constant memory variables //========================================================================================================================================================================================================200 // KERNEL_GPU_CUDA_WRAPPER FUNCTION //========================================================================================================================================================================================================200 void kernel_gpu_cuda_wrapper(par_str par_cpu, dim_str dim_cpu, box_str* box_cpu, FOUR_VECTOR* rv_cpu, fp* qv_cpu, FOUR_VECTOR* fv_cpu) { //======================================================================================================================================================150 // CPU VARIABLES //======================================================================================================================================================150 // timer long long time0; long long time1; long long time2; long long time3; long long time4; long long time5; long long time6; time0 = get_time(); //======================================================================================================================================================150 // GPU SETUP 
//======================================================================================================================================================150 //====================================================================================================100 // INITIAL DRIVER OVERHEAD //====================================================================================================100 cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1024*1024*800); //sderek // cudaThreadSynchronize(); //====================================================================================================100 // VARIABLES //====================================================================================================100 box_str* d_box_gpu; FOUR_VECTOR* d_rv_gpu; fp* d_qv_gpu; FOUR_VECTOR* d_fv_gpu; dim3 threads; dim3 blocks; //====================================================================================================100 // EXECUTION PARAMETERS //====================================================================================================100 blocks.x = dim_cpu.number_boxes; blocks.y = 1; threads.x = NUMBER_THREADS; // define the number of threads in the block threads.y = 1; time1 = get_time(); //======================================================================================================================================================150 // GPU MEMORY (MALLOC) //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes //==================================================50 cudaMalloc( (void **)&d_box_gpu, dim_cpu.box_mem); //==================================================50 // rv //==================================================50 cudaMalloc( (void **)&d_rv_gpu, dim_cpu.space_mem); //==================================================50 // qv //==================================================50 cudaMalloc( (void **)&d_qv_gpu, dim_cpu.space_mem2); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 cudaMalloc( (void **)&d_fv_gpu, dim_cpu.space_mem); time2 = get_time(); //======================================================================================================================================================150 // GPU MEMORY COPY //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes //==================================================50 cudaMemcpy( d_box_gpu, box_cpu, dim_cpu.box_mem, cudaMemcpyHostToDevice); //==================================================50 // 
rv //==================================================50 cudaMemcpy( d_rv_gpu, rv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice); //==================================================50 // qv //==================================================50 cudaMemcpy( d_qv_gpu, qv_cpu, dim_cpu.space_mem2, cudaMemcpyHostToDevice); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 cudaMemcpy( d_fv_gpu, fv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice); time3 = get_time(); //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 // launch kernel - all boxes kernel_gpu_cuda<<<blocks, threads>>>( par_cpu, dim_cpu, d_box_gpu, d_rv_gpu, d_qv_gpu, d_fv_gpu); cudaThreadSynchronize(); time4 = get_time(); //======================================================================================================================================================150 // GPU MEMORY COPY (CONTD.) //======================================================================================================================================================150 cudaMemcpy( fv_cpu, d_fv_gpu, dim_cpu.space_mem, cudaMemcpyDeviceToHost); time5 = get_time(); //======================================================================================================================================================150 // GPU MEMORY DEALLOCATION //======================================================================================================================================================150 cudaFree(d_rv_gpu); cudaFree(d_qv_gpu); cudaFree(d_fv_gpu); cudaFree(d_box_gpu); time6 = get_time(); //======================================================================================================================================================150 // DISPLAY TIMING //======================================================================================================================================================150 printf("Time spent in different stages of GPU_CUDA KERNEL:\n"); printf("%15.12f s, %15.12f % : GPU: SET DEVICE / DRIVER INIT\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time6-time0) * 100); printf("%15.12f s, %15.12f % : GPU MEM: ALO\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time6-time0) * 100); printf("%15.12f s, %15.12f % : GPU MEM: COPY IN\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time6-time0) * 100); printf("%15.12f s, %15.12f % : GPU: KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time6-time0) * 100); printf("%15.12f s, %15.12f % : GPU MEM: COPY OUT\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time6-time0) * 100); printf("%15.12f s, %15.12f % : GPU MEM: FRE\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time6-time0) * 100); printf("Total time:\n"); printf("%.12f s\n", (float) (time6-time0) / 1000000); fprintf(stderr, "%15.12f s, GPU: KERNEL\n", (float) (time4-time3) / 1000000); }
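// The wrapper above discards the return codes of its cudaMalloc/cudaMemcpy calls and of the
// kernel launch. Below is a minimal error-checking sketch, assuming only the standard CUDA
// runtime API; the macro name CHECK_CUDA and the demo buffer size are illustrative and do not
// come from the original benchmark.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
                    cudaGetErrorString(err_), __FILE__, __LINE__);            \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

int main() {
    float* d_buf = nullptr;
    CHECK_CUDA(cudaMalloc((void**)&d_buf, 1024 * sizeof(float)));   // checked allocation
    CHECK_CUDA(cudaMemset(d_buf, 0, 1024 * sizeof(float)));         // checked device write
    CHECK_CUDA(cudaGetLastError());                                 // would catch an invalid kernel launch
    CHECK_CUDA(cudaDeviceSynchronize());                            // surfaces asynchronous errors
    CHECK_CUDA(cudaFree(d_buf));
    return 0;
}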
7d5333e9c39036ad6df0f9125b0dad9c8ffef172.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include "saber/funcs/impl/cuda/saber_conv_act.h" #include "saber/saber_types.h" #include "saber/core/common.h" #include <sm_61_intrinsics.h> namespace anakin{ namespace saber{ template <bool bias_flag, bool relu_flag> __global__ void depthwise_conv_1d(const int nthreads, const float* const din, const int num, const int channels, const int hin, const int win, const int hout, const int wout, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const dout, const float* const weight, const float* const bias) { int size_channel_in = hin * win; int size_channel_out = hout * wout; int size_kernel = kernel_h * kernel_w; CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % wout; const int ph = (index / wout) % hout; const int c = (index / size_channel_out) % channels; const int n = index / size_channel_out / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = hstart + kernel_h; int wend = wstart + kernel_w; int khstart = hstart < 0 ? 0 - hstart : 0; int kwstart = wstart < 0 ? 0 - wstart : 0; hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, hin); wend = min(wend, win); float aveval = 0; const float* const bottom_slice = din + (n * channels + c) * size_channel_in; const float* const weight_slice = weight + c * size_kernel; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * win + w] * weight_slice[(khstart + h - hstart) * kernel_w + (kwstart + w - wstart)]; } } if (bias_flag) { aveval+=bias[c]; } if (relu_flag) { aveval = max(aveval, (float)0); } dout[index] = aveval; } } template <bool relu_flag> SaberStatus saber_depthwise_conv_act(const float* input, float* output, int num, int cin, int hin, int win, int hout, int wout, int kw, int kh, int stride_w, int stride_h, int pad_w, int pad_h, const float* weights, const float* bias, hipStream_t stream) { const int count = num * cin * hout * wout; if (bias != nullptr) { hipLaunchKernelGGL(( depthwise_conv_1d<true, relu_flag>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream, count, input, num, cin, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, bias); } else { hipLaunchKernelGGL(( depthwise_conv_1d<false, relu_flag>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream, count, input, num, cin, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, nullptr); } return SaberSuccess; } #define MASK3 0xff000000 #define MASK2 0x00ff0000 #define MASK1 0x0000ff00 #define MASK0 0x000000ff template <bool bias_flag, bool relu_flag> __global__ void depthwise_conv_1d_s8_s8(const int nthreads, const void* din, const int num, const int channels, const int hin, const int win, const int hout, const int wout, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, void* dout, const void* weight, const float* bias, float alpha = 1.f) { #if __CUDA_ARCH__ > 600 int size_channel_in = hin * win; int size_channel_out = hout * wout; int size_kernel = kernel_h * kernel_w; CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % wout; const int ph = (index / wout) % hout; const int c = (index / size_channel_out) % channels; const int n = index / size_channel_out / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = hstart + 
kernel_h; int wend = wstart + kernel_w; int khstart = hstart < 0 ? 0 - hstart : 0; int kwstart = wstart < 0 ? 0 - wstart : 0; hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, hin); wend = min(wend, win); int aveval0 = 0; int aveval1 = 0; int aveval2 = 0; int aveval3 = 0; const int* bottom_slice = ((const int*)din); bottom_slice += (n * channels + c) * size_channel_in; const int* weight_slice= (const int*)weight; weight_slice += c * size_kernel; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int in_data = bottom_slice[h * win + w]; int weight_data = weight_slice[(khstart + h - hstart) * kernel_w + (kwstart + w - wstart)]; int mask_weight; mask_weight = MASK0 & weight_data; aveval0 = __dp4a(in_data, mask_weight, aveval0); mask_weight = MASK1 & weight_data; aveval1 = __dp4a(in_data, mask_weight, aveval1); mask_weight = MASK2 & weight_data; aveval2 = __dp4a(in_data, mask_weight, aveval2); mask_weight = MASK3 & weight_data; aveval3 = __dp4a(in_data, mask_weight, aveval3); } } float fa0 = static_cast<float>(aveval0); float fa1 = static_cast<float>(aveval1); float fa2 = static_cast<float>(aveval2); float fa3 = static_cast<float>(aveval3); fa0 *= alpha; fa1 *= alpha; fa2 *= alpha; fa3 *= alpha; if (bias_flag) { fa0 += bias[4 * c + 0]; fa1 += bias[4 * c + 1]; fa2 += bias[4 * c + 2]; fa3 += bias[4 * c + 3]; } if (relu_flag) { fa0 = max(fa0, (float)0); fa1 = max(fa1, (float)0); fa2 = max(fa2, (float)0); fa3 = max(fa3, (float)0); } char4 res = make_char4(static_cast<char>(fa0), static_cast<char>(fa1), static_cast<char>(fa2), static_cast<char>(fa3)); char4* d = ((char4*)dout); d[index] = res; } #endif } template <bool relu_flag> SaberStatus saber_depthwise_conv_act_s8_s8(const void* input, void* output, int num, int cin, int hin, int win, int hout, int wout, int kw, int kh, int stride_w, int stride_h, int pad_w, int pad_h, float alpha, const void* weights, const float* bias, hipStream_t stream) { CHECK_EQ(cin % 4, 0); int cin_4 = cin / 4; const int count = num * cin_4 * hout * wout; if (bias != nullptr) { hipLaunchKernelGGL(( depthwise_conv_1d_s8_s8<true, relu_flag>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream, count, input, num, cin_4, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, bias, alpha); } else { hipLaunchKernelGGL(( depthwise_conv_1d_s8_s8<false, relu_flag>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream, count, input, num, cin_4, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, nullptr, alpha); } return SaberSuccess; } template <bool bias_flag, bool relu_flag> __global__ void depthwise_conv_1d_s8_f32(const int nthreads, const void* din, const int num, const int channels, const int hin, const int win, const int hout, const int wout, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, void* dout, const void* weight, const float* bias, float alpha = 1.f) { #if __CUDA_ARCH__ > 600 int size_channel_in = hin * win; int size_channel_out = hout * wout; int size_kernel = kernel_h * kernel_w; CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % wout; const int ph = (index / wout) % hout; const int c = (index / size_channel_out) % channels; const int n = index / size_channel_out / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = hstart + kernel_h; int wend = wstart + kernel_w; int khstart = hstart < 0 ? 
0 - hstart : 0; int kwstart = wstart < 0 ? 0 - wstart : 0; hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, hin); wend = min(wend, win); int aveval0 = 0; int aveval1 = 0; int aveval2 = 0; int aveval3 = 0; const int* bottom_slice = (const int*)din + (n * channels + c) * size_channel_in; const int* weight_slice = (const int*)weight + c * size_kernel; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int in_data = bottom_slice[h * win + w]; int weight_data = weight_slice[(khstart + h - hstart) * kernel_w + (kwstart + w - wstart)]; int mask_weight; mask_weight = MASK0 & weight_data; aveval0 = __dp4a(in_data, mask_weight, aveval0); mask_weight = MASK1 & weight_data; aveval1 = __dp4a(in_data, mask_weight, aveval1); mask_weight = MASK2 & weight_data; aveval2 = __dp4a(in_data, mask_weight, aveval2); mask_weight = MASK3 & weight_data; aveval3 = __dp4a(in_data, mask_weight, aveval3); } } float fa0 = static_cast<float>(aveval0); float fa1 = static_cast<float>(aveval1); float fa2 = static_cast<float>(aveval2); float fa3 = static_cast<float>(aveval3); fa0 *= alpha; fa1 *= alpha; fa2 *= alpha; fa3 *= alpha; if (bias_flag) { fa0 += bias[4 * c + 0]; fa1 += bias[4 * c + 1]; fa2 += bias[4 * c + 2]; fa3 += bias[4 * c + 3]; } if (relu_flag) { fa0 = max(fa0, (float)0); fa1 = max(fa1, (float)0); fa2 = max(fa2, (float)0); fa3 = max(fa3, (float)0); } int output_slice = hout * wout; int out_idx = (index % output_slice) + 4 * c * output_slice; ((float*)dout)[out_idx] = fa0; out_idx += output_slice; ((float*)dout)[out_idx] = fa1; out_idx += output_slice; ((float*)dout)[out_idx] = fa2; out_idx += output_slice; ((float*)dout)[out_idx] = fa3; } #endif } template <bool relu_flag> SaberStatus saber_depthwise_conv_act_s8_f32(const void* input, void* output, int num, int cin, int hin, int win, int hout, int wout, int kw, int kh, int stride_w, int stride_h, int pad_w, int pad_h, float alpha, const void* weights, const float* bias, hipStream_t stream) { CHECK_EQ(cin % 4, 0); int cin_4 = cin / 4; const int count = num * cin_4 * hout * wout; if (bias != nullptr) { hipLaunchKernelGGL(( depthwise_conv_1d_s8_f32<true, relu_flag>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream, count, input, num, cin_4, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, bias, alpha); } else { hipLaunchKernelGGL(( depthwise_conv_1d_s8_f32<false, relu_flag>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream, count, input, num, cin_4, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, nullptr, alpha); } return SaberSuccess; } #define INSTANCE_CONVACT(ifrelu) \ template \ SaberStatus saber_depthwise_conv_act<ifrelu> (const float* input, float* output, \ int num, int cin, int hin, int win, int hout, int wout, \ int kw, int kh, int stride_w, int stride_h, \ int pad_h, int pad_w, const float* weights, const float* bias, hipStream_t stream); #define INSTANCE_CONVACT_S8_S8(ifrelu) \ template \ SaberStatus saber_depthwise_conv_act_s8_s8<ifrelu>(const void* input, void* output, \ int num, int cin, int hin, int win, int hout, int wout, \ int kw, int kh, int stride_w, int stride_h, int pad_w, int pad_h, float alpha, \ const void* weights, const float* bias, hipStream_t stream); #define INSTANCE_CONVACT_S8_F32(ifrelu) \ template \ SaberStatus saber_depthwise_conv_act_s8_f32<ifrelu>(const void* input, void* output, \ int num, int cin, int hin, int win, int hout, int wout, \ int kw, int kh, int stride_w, int stride_h, int pad_w, 
int pad_h, float alpha, \ const void* weights, const float* bias, hipStream_t stream); INSTANCE_CONVACT(true); INSTANCE_CONVACT(false); INSTANCE_CONVACT_S8_S8(true); INSTANCE_CONVACT_S8_S8(false); INSTANCE_CONVACT_S8_F32(true); INSTANCE_CONVACT_S8_F32(false); } //namespace anakin } //namespace anakin
7d5333e9c39036ad6df0f9125b0dad9c8ffef172.cu
//#include "saber/funcs/impl/cuda/saber_conv_act.h" #include "saber/saber_types.h" #include "saber/core/common.h" #include <sm_61_intrinsics.h> namespace anakin{ namespace saber{ template <bool bias_flag, bool relu_flag> __global__ void depthwise_conv_1d(const int nthreads, const float* const din, const int num, const int channels, const int hin, const int win, const int hout, const int wout, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const dout, const float* const weight, const float* const bias) { int size_channel_in = hin * win; int size_channel_out = hout * wout; int size_kernel = kernel_h * kernel_w; CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % wout; const int ph = (index / wout) % hout; const int c = (index / size_channel_out) % channels; const int n = index / size_channel_out / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = hstart + kernel_h; int wend = wstart + kernel_w; int khstart = hstart < 0 ? 0 - hstart : 0; int kwstart = wstart < 0 ? 0 - wstart : 0; hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, hin); wend = min(wend, win); float aveval = 0; const float* const bottom_slice = din + (n * channels + c) * size_channel_in; const float* const weight_slice = weight + c * size_kernel; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * win + w] * weight_slice[(khstart + h - hstart) * kernel_w + (kwstart + w - wstart)]; } } if (bias_flag) { aveval+=bias[c]; } if (relu_flag) { aveval = max(aveval, (float)0); } dout[index] = aveval; } } template <bool relu_flag> SaberStatus saber_depthwise_conv_act(const float* input, float* output, int num, int cin, int hin, int win, int hout, int wout, int kw, int kh, int stride_w, int stride_h, int pad_w, int pad_h, const float* weights, const float* bias, cudaStream_t stream) { const int count = num * cin * hout * wout; if (bias != nullptr) { depthwise_conv_1d<true, relu_flag><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>( count, input, num, cin, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, bias); } else { depthwise_conv_1d<false, relu_flag><<< CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>> ( count, input, num, cin, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, nullptr); } return SaberSuccess; } #define MASK3 0xff000000 #define MASK2 0x00ff0000 #define MASK1 0x0000ff00 #define MASK0 0x000000ff template <bool bias_flag, bool relu_flag> __global__ void depthwise_conv_1d_s8_s8(const int nthreads, const void* din, const int num, const int channels, const int hin, const int win, const int hout, const int wout, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, void* dout, const void* weight, const float* bias, float alpha = 1.f) { #if __CUDA_ARCH__ > 600 int size_channel_in = hin * win; int size_channel_out = hout * wout; int size_kernel = kernel_h * kernel_w; CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % wout; const int ph = (index / wout) % hout; const int c = (index / size_channel_out) % channels; const int n = index / size_channel_out / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = hstart + kernel_h; int wend = wstart + kernel_w; int khstart = hstart < 0 ? 0 - hstart : 0; int kwstart = wstart < 0 ? 
0 - wstart : 0; hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, hin); wend = min(wend, win); int aveval0 = 0; int aveval1 = 0; int aveval2 = 0; int aveval3 = 0; const int* bottom_slice = ((const int*)din); bottom_slice += (n * channels + c) * size_channel_in; const int* weight_slice= (const int*)weight; weight_slice += c * size_kernel; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int in_data = bottom_slice[h * win + w]; int weight_data = weight_slice[(khstart + h - hstart) * kernel_w + (kwstart + w - wstart)]; int mask_weight; mask_weight = MASK0 & weight_data; aveval0 = __dp4a(in_data, mask_weight, aveval0); mask_weight = MASK1 & weight_data; aveval1 = __dp4a(in_data, mask_weight, aveval1); mask_weight = MASK2 & weight_data; aveval2 = __dp4a(in_data, mask_weight, aveval2); mask_weight = MASK3 & weight_data; aveval3 = __dp4a(in_data, mask_weight, aveval3); } } float fa0 = static_cast<float>(aveval0); float fa1 = static_cast<float>(aveval1); float fa2 = static_cast<float>(aveval2); float fa3 = static_cast<float>(aveval3); fa0 *= alpha; fa1 *= alpha; fa2 *= alpha; fa3 *= alpha; if (bias_flag) { fa0 += bias[4 * c + 0]; fa1 += bias[4 * c + 1]; fa2 += bias[4 * c + 2]; fa3 += bias[4 * c + 3]; } if (relu_flag) { fa0 = max(fa0, (float)0); fa1 = max(fa1, (float)0); fa2 = max(fa2, (float)0); fa3 = max(fa3, (float)0); } char4 res = make_char4(static_cast<char>(fa0), static_cast<char>(fa1), static_cast<char>(fa2), static_cast<char>(fa3)); char4* d = ((char4*)dout); d[index] = res; } #endif } template <bool relu_flag> SaberStatus saber_depthwise_conv_act_s8_s8(const void* input, void* output, int num, int cin, int hin, int win, int hout, int wout, int kw, int kh, int stride_w, int stride_h, int pad_w, int pad_h, float alpha, const void* weights, const float* bias, cudaStream_t stream) { CHECK_EQ(cin % 4, 0); int cin_4 = cin / 4; const int count = num * cin_4 * hout * wout; if (bias != nullptr) { depthwise_conv_1d_s8_s8<true, relu_flag><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>( count, input, num, cin_4, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, bias, alpha); } else { depthwise_conv_1d_s8_s8<false, relu_flag><<< CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>> ( count, input, num, cin_4, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, nullptr, alpha); } return SaberSuccess; } template <bool bias_flag, bool relu_flag> __global__ void depthwise_conv_1d_s8_f32(const int nthreads, const void* din, const int num, const int channels, const int hin, const int win, const int hout, const int wout, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, void* dout, const void* weight, const float* bias, float alpha = 1.f) { #if __CUDA_ARCH__ > 600 int size_channel_in = hin * win; int size_channel_out = hout * wout; int size_kernel = kernel_h * kernel_w; CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % wout; const int ph = (index / wout) % hout; const int c = (index / size_channel_out) % channels; const int n = index / size_channel_out / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = hstart + kernel_h; int wend = wstart + kernel_w; int khstart = hstart < 0 ? 0 - hstart : 0; int kwstart = wstart < 0 ? 
0 - wstart : 0; hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, hin); wend = min(wend, win); int aveval0 = 0; int aveval1 = 0; int aveval2 = 0; int aveval3 = 0; const int* bottom_slice = (const int*)din + (n * channels + c) * size_channel_in; const int* weight_slice = (const int*)weight + c * size_kernel; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int in_data = bottom_slice[h * win + w]; int weight_data = weight_slice[(khstart + h - hstart) * kernel_w + (kwstart + w - wstart)]; int mask_weight; mask_weight = MASK0 & weight_data; aveval0 = __dp4a(in_data, mask_weight, aveval0); mask_weight = MASK1 & weight_data; aveval1 = __dp4a(in_data, mask_weight, aveval1); mask_weight = MASK2 & weight_data; aveval2 = __dp4a(in_data, mask_weight, aveval2); mask_weight = MASK3 & weight_data; aveval3 = __dp4a(in_data, mask_weight, aveval3); } } float fa0 = static_cast<float>(aveval0); float fa1 = static_cast<float>(aveval1); float fa2 = static_cast<float>(aveval2); float fa3 = static_cast<float>(aveval3); fa0 *= alpha; fa1 *= alpha; fa2 *= alpha; fa3 *= alpha; if (bias_flag) { fa0 += bias[4 * c + 0]; fa1 += bias[4 * c + 1]; fa2 += bias[4 * c + 2]; fa3 += bias[4 * c + 3]; } if (relu_flag) { fa0 = max(fa0, (float)0); fa1 = max(fa1, (float)0); fa2 = max(fa2, (float)0); fa3 = max(fa3, (float)0); } int output_slice = hout * wout; int out_idx = (index % output_slice) + 4 * c * output_slice; ((float*)dout)[out_idx] = fa0; out_idx += output_slice; ((float*)dout)[out_idx] = fa1; out_idx += output_slice; ((float*)dout)[out_idx] = fa2; out_idx += output_slice; ((float*)dout)[out_idx] = fa3; } #endif } template <bool relu_flag> SaberStatus saber_depthwise_conv_act_s8_f32(const void* input, void* output, int num, int cin, int hin, int win, int hout, int wout, int kw, int kh, int stride_w, int stride_h, int pad_w, int pad_h, float alpha, const void* weights, const float* bias, cudaStream_t stream) { CHECK_EQ(cin % 4, 0); int cin_4 = cin / 4; const int count = num * cin_4 * hout * wout; if (bias != nullptr) { depthwise_conv_1d_s8_f32<true, relu_flag><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>( count, input, num, cin_4, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, bias, alpha); } else { depthwise_conv_1d_s8_f32<false, relu_flag><<< CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>> ( count, input, num, cin_4, hin, win, hout, wout, kh, kw, stride_h, stride_w, pad_h, pad_w, output, weights, nullptr, alpha); } return SaberSuccess; } #define INSTANCE_CONVACT(ifrelu) \ template \ SaberStatus saber_depthwise_conv_act<ifrelu> (const float* input, float* output, \ int num, int cin, int hin, int win, int hout, int wout, \ int kw, int kh, int stride_w, int stride_h, \ int pad_h, int pad_w, const float* weights, const float* bias, cudaStream_t stream); #define INSTANCE_CONVACT_S8_S8(ifrelu) \ template \ SaberStatus saber_depthwise_conv_act_s8_s8<ifrelu>(const void* input, void* output, \ int num, int cin, int hin, int win, int hout, int wout, \ int kw, int kh, int stride_w, int stride_h, int pad_w, int pad_h, float alpha, \ const void* weights, const float* bias, cudaStream_t stream); #define INSTANCE_CONVACT_S8_F32(ifrelu) \ template \ SaberStatus saber_depthwise_conv_act_s8_f32<ifrelu>(const void* input, void* output, \ int num, int cin, int hin, int win, int hout, int wout, \ int kw, int kh, int stride_w, int stride_h, int pad_w, int pad_h, float alpha, \ const void* weights, const float* bias, cudaStream_t stream); 
INSTANCE_CONVACT(true); INSTANCE_CONVACT(false); INSTANCE_CONVACT_S8_S8(true); INSTANCE_CONVACT_S8_S8(false); INSTANCE_CONVACT_S8_F32(true); INSTANCE_CONVACT_S8_F32(false); } //namespace anakin } //namespace anakin
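// The launchers above depend on CUDA_KERNEL_LOOP, CUDA_GET_BLOCKS and CUDA_NUM_THREADS, which
// come from saber/core/common.h and are not shown in this file. This is a sketch of the
// Caffe-style grid-stride-loop definitions they are commonly assumed to resemble; the thread
// count of 512 and the SKETCH_ names are illustrative, not confirmed by the source above.
#include <cuda_runtime.h>

#define SKETCH_NUM_THREADS 512
#define SKETCH_GET_BLOCKS(n) (((n) + SKETCH_NUM_THREADS - 1) / SKETCH_NUM_THREADS)
#define SKETCH_KERNEL_LOOP(i, n)                                    \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);    \
         i += blockDim.x * gridDim.x)

// Example: an element-wise ReLU written with the same looping pattern as the depthwise
// kernels above, so each thread covers indices i, i+stride, i+2*stride, ...
__global__ void relu_sketch(const int n, const float* in, float* out) {
    SKETCH_KERNEL_LOOP(idx, n) {
        out[idx] = in[idx] > 0.f ? in[idx] : 0.f;
    }
}

// Host-side launch mirroring saber_depthwise_conv_act: one thread per output element.
void launch_relu_sketch(const float* d_in, float* d_out, int count, cudaStream_t stream) {
    relu_sketch<<<SKETCH_GET_BLOCKS(count), SKETCH_NUM_THREADS, 0, stream>>>(count, d_in, d_out);
}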
ac0a4c63d1251cf24f2bc649324b9d713133ebfa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <stdbool.h> #include <stdint.h> #include <stdlib.h> extern "C" { #include "sha256.h" #include "utils.h" } #define DATASIZE 55 __global__ void kernel_sha256(BYTE *data, unsigned int* difficulty, Nonce_result *nr,unsigned int *multiplier); __device__ WORD hash2int(BYTE h[32]); inline void gpuAssert(hipError_t code, char *file, int line, bool abort) { if (code != hipSuccess) { fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__, true); } extern "C" bool amoveo_mine_gpu(BYTE nonce[23],unsigned int difficulty,BYTE data[55],unsigned int GDIM, unsigned int BDIM, unsigned int multiplier, unsigned int nonceRounds,double *numHashes) { //Initialize Cuda Grid variables dim3 DimGrid(GDIM,GDIM); dim3 DimBlock(BDIM,1); //Used to store a nonce if a block is mined Nonce_result h_nr; initialize_nonce_result(&h_nr); //Allocate space on Global Memory BYTE *d_data; unsigned int *d_difficulty; Nonce_result *d_nr; unsigned int *d_multiplier; CUDA_SAFE_CALL(hipMalloc((void **)&d_data, DATASIZE*sizeof(BYTE))); CUDA_SAFE_CALL(hipMalloc((void **)&d_difficulty, sizeof(unsigned int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_nr, sizeof(Nonce_result))); CUDA_SAFE_CALL(hipMalloc((void **)&d_multiplier, sizeof(unsigned int))); //Copy data to device CUDA_SAFE_CALL(hipMemcpy(d_data, (void *) data, DATASIZE*sizeof(BYTE), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_difficulty, (void *) &difficulty, sizeof(unsigned int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_nr, (void *) &h_nr, sizeof(Nonce_result), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_multiplier, (void *) &multiplier, sizeof(unsigned int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel_sha256), dim3(DimGrid), dim3(DimBlock), 0, 0, d_data,d_difficulty,d_nr,d_multiplier); //Copy nonce result back to host CUDA_SAFE_CALL(hipMemcpy((void *) &h_nr, d_nr, sizeof(Nonce_result), hipMemcpyDeviceToHost)); hipDeviceSynchronize(); //Free memory on device CUDA_SAFE_CALL(hipFree(d_data)); CUDA_SAFE_CALL(hipFree(d_difficulty)); CUDA_SAFE_CALL(hipFree(d_nr)); CUDA_SAFE_CALL(hipFree(d_multiplier)); //Copy nonce if found if(h_nr.nonce_found){ for(int i=32; i<55;i++) nonce[i-32]=data[i]; for(int i=0; i<sizeof(int64_t); i++) nonce[i] = ((BYTE*)(&h_nr.nonce))[i]; } *numHashes = ((double)GDIM)*((double)GDIM)*((double)BDIM); return h_nr.nonce_found; } //Amoveo's hash2int function to calculate difficulty __device__ WORD hash2int(BYTE h[32]) { WORD x = 0; WORD z = 0; for (int i = 0; i < 31; i++) { if (h[i] == 0) { x += 8; continue; } else if (h[i] < 2) { x += 7; z = h[i+1]; } else if (h[i] < 4) { x += 6; z = (h[i+1] / 2) + ((h[i] % 2) * 128); } else if (h[i] < 8) { x += 5; z = (h[i+1] / 4) + ((h[i] % 4) * 64); } else if (h[i] < 16) { x += 4; z = (h[i+1] / 8) + ((h[i] % 8) * 32); } else if (h[i] < 32) { x += 3; z = (h[i+1] / 16) + ((h[i] % 16) * 16); } else if (h[i] < 64) { x += 2; z = (h[i+1] / 32) + ((h[i] % 32) * 8); } else if (h[i] < 128) { x += 1; z = (h[i+1] / 64) + ((h[i] % 64) * 4); } else { z = (h[i+1] / 128) + ((h[i] % 128) * 2); } break; } WORD y[2]; y[0] = x; y[1] = z; return 256*y[0]+y[1]; } //Constants for SHA-256 __device__ static const WORD k[64] = { 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5, 
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174, 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da, 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967, 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85, 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070, 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3, 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 }; //SHA-256 functions taken from Brad Conte's implementation //https://github.com/B-Con/crypto-algorithms/blob/master/sha256.c __device__ void d_sha256_transform(SHA256_CTX *ctx, const BYTE data[]) { WORD a, b, c, d, e, f, g, h, i, j, t1, t2, m[64]; for (i = 0, j = 0; i < 16; ++i, j += 4) m[i] = (data[j] << 24) | (data[j + 1] << 16) | (data[j + 2] << 8) | (data[j + 3]); for ( ; i < 64; ++i) m[i] = SIG1(m[i - 2]) + m[i - 7] + SIG0(m[i - 15]) + m[i - 16]; a = ctx->state[0]; b = ctx->state[1]; c = ctx->state[2]; d = ctx->state[3]; e = ctx->state[4]; f = ctx->state[5]; g = ctx->state[6]; h = ctx->state[7]; for (i = 0; i < 64; ++i) { t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i]; t2 = EP0(a) + MAJ(a,b,c); h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2; } ctx->state[0] += a; ctx->state[1] += b; ctx->state[2] += c; ctx->state[3] += d; ctx->state[4] += e; ctx->state[5] += f; ctx->state[6] += g; ctx->state[7] += h; } __device__ void d_sha256_init(SHA256_CTX *ctx) { ctx->datalen = 0; ctx->bitlen = 0; ctx->state[0] = 0x6a09e667; ctx->state[1] = 0xbb67ae85; ctx->state[2] = 0x3c6ef372; ctx->state[3] = 0xa54ff53a; ctx->state[4] = 0x510e527f; ctx->state[5] = 0x9b05688c; ctx->state[6] = 0x1f83d9ab; ctx->state[7] = 0x5be0cd19; } __device__ void d_sha256_update(SHA256_CTX *ctx, const BYTE data[], size_t len) { WORD i; for (i = 0; i < len; ++i) { ctx->data[ctx->datalen] = data[i]; ctx->datalen++; if (ctx->datalen == 64) { d_sha256_transform(ctx, ctx->data); ctx->bitlen += 512; ctx->datalen = 0; } } } __device__ void d_sha256_final(SHA256_CTX *ctx, BYTE hash[]) { WORD i; i = ctx->datalen; // Pad whatever data is left in the buffer. ctx->data[i++] = 0x80; while (i < 56) ctx->data[i++] = 0x00; // Append to the padding the total message's length in bits and transform. ctx->bitlen += ctx->datalen * 8; ctx->data[63] = ctx->bitlen; ctx->data[62] = ctx->bitlen >> 8; ctx->data[61] = ctx->bitlen >> 16; ctx->data[60] = ctx->bitlen >> 24; ctx->data[59] = ctx->bitlen >> 32; ctx->data[58] = ctx->bitlen >> 40; ctx->data[57] = ctx->bitlen >> 48; ctx->data[56] = ctx->bitlen >> 56; d_sha256_transform(ctx, ctx->data); // Since this implementation uses little endian byte ordering and SHA uses big endian, // reverse all the bytes when copying the final state to the output hash. 
for (i = 0; i < 4; ++i) { hash[i] = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff; hash[i + 4] = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff; hash[i + 8] = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff; hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff; hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff; hash[i + 20] = (ctx->state[5] >> (24 - i * 8)) & 0x000000ff; hash[i + 24] = (ctx->state[6] >> (24 - i * 8)) & 0x000000ff; hash[i + 28] = (ctx->state[7] >> (24 - i * 8)) & 0x000000ff; } } #define NONCE_VAL (gridDim.x*blockDim.x*blockIdx.y + blockDim.x*blockIdx.x + threadIdx.x) __global__ void kernel_sha256(BYTE *data, unsigned int *difficulty, Nonce_result *nr, unsigned int* multiplier) { if(nr->nonce_found) return; int i; int64_t nonce = gridDim.x*gridDim.x; nonce *= blockDim.x; nonce *= *multiplier; nonce += NONCE_VAL; BYTE* byte_nonce = (BYTE *)&nonce; BYTE l_data[55]; for(i=0;i<55;i++) l_data[i] = data[i]; for(i=0;i<sizeof(int64_t);i++) l_data[32+i] = byte_nonce[i]; SHA256_CTX ctx; d_sha256_init(&ctx); d_sha256_update(&ctx,l_data,55); BYTE hash[32]; d_sha256_final(&ctx,hash); int work = hash2int(hash); if( work > *difficulty) { nr->nonce_found = true; nr->nonce = nonce; } }
ac0a4c63d1251cf24f2bc649324b9d713133ebfa.cu
#include <cstdio> #include <cstdlib> #include <stdbool.h> #include <stdint.h> #include <stdlib.h> extern "C" { #include "sha256.h" #include "utils.h" } #define DATASIZE 55 __global__ void kernel_sha256(BYTE *data, unsigned int* difficulty, Nonce_result *nr,unsigned int *multiplier); __device__ WORD hash2int(BYTE h[32]); inline void gpuAssert(cudaError_t code, char *file, int line, bool abort) { if (code != cudaSuccess) { fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__, true); } extern "C" bool amoveo_mine_gpu(BYTE nonce[23],unsigned int difficulty,BYTE data[55],unsigned int GDIM, unsigned int BDIM, unsigned int multiplier, unsigned int nonceRounds,double *numHashes) { //Initialize Cuda Grid variables dim3 DimGrid(GDIM,GDIM); dim3 DimBlock(BDIM,1); //Used to store a nonce if a block is mined Nonce_result h_nr; initialize_nonce_result(&h_nr); //Allocate space on Global Memory BYTE *d_data; unsigned int *d_difficulty; Nonce_result *d_nr; unsigned int *d_multiplier; CUDA_SAFE_CALL(cudaMalloc((void **)&d_data, DATASIZE*sizeof(BYTE))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_difficulty, sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_nr, sizeof(Nonce_result))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_multiplier, sizeof(unsigned int))); //Copy data to device CUDA_SAFE_CALL(cudaMemcpy(d_data, (void *) data, DATASIZE*sizeof(BYTE), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_difficulty, (void *) &difficulty, sizeof(unsigned int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_nr, (void *) &h_nr, sizeof(Nonce_result), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_multiplier, (void *) &multiplier, sizeof(unsigned int), cudaMemcpyHostToDevice)); kernel_sha256<<<DimGrid, DimBlock>>>(d_data,d_difficulty,d_nr,d_multiplier); //Copy nonce result back to host CUDA_SAFE_CALL(cudaMemcpy((void *) &h_nr, d_nr, sizeof(Nonce_result), cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); //Free memory on device CUDA_SAFE_CALL(cudaFree(d_data)); CUDA_SAFE_CALL(cudaFree(d_difficulty)); CUDA_SAFE_CALL(cudaFree(d_nr)); CUDA_SAFE_CALL(cudaFree(d_multiplier)); //Copy nonce if found if(h_nr.nonce_found){ for(int i=32; i<55;i++) nonce[i-32]=data[i]; for(int i=0; i<sizeof(int64_t); i++) nonce[i] = ((BYTE*)(&h_nr.nonce))[i]; } *numHashes = ((double)GDIM)*((double)GDIM)*((double)BDIM); return h_nr.nonce_found; } //Amoveo's hash2int function to calculate difficulty __device__ WORD hash2int(BYTE h[32]) { WORD x = 0; WORD z = 0; for (int i = 0; i < 31; i++) { if (h[i] == 0) { x += 8; continue; } else if (h[i] < 2) { x += 7; z = h[i+1]; } else if (h[i] < 4) { x += 6; z = (h[i+1] / 2) + ((h[i] % 2) * 128); } else if (h[i] < 8) { x += 5; z = (h[i+1] / 4) + ((h[i] % 4) * 64); } else if (h[i] < 16) { x += 4; z = (h[i+1] / 8) + ((h[i] % 8) * 32); } else if (h[i] < 32) { x += 3; z = (h[i+1] / 16) + ((h[i] % 16) * 16); } else if (h[i] < 64) { x += 2; z = (h[i+1] / 32) + ((h[i] % 32) * 8); } else if (h[i] < 128) { x += 1; z = (h[i+1] / 64) + ((h[i] % 64) * 4); } else { z = (h[i+1] / 128) + ((h[i] % 128) * 2); } break; } WORD y[2]; y[0] = x; y[1] = z; return 256*y[0]+y[1]; } //Constants for SHA-256 __device__ static const WORD k[64] = { 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5, 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174, 
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da, 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967, 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85, 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070, 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3, 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 }; //SHA-256 functions taken from Brad Conte's implementation //https://github.com/B-Con/crypto-algorithms/blob/master/sha256.c __device__ void d_sha256_transform(SHA256_CTX *ctx, const BYTE data[]) { WORD a, b, c, d, e, f, g, h, i, j, t1, t2, m[64]; for (i = 0, j = 0; i < 16; ++i, j += 4) m[i] = (data[j] << 24) | (data[j + 1] << 16) | (data[j + 2] << 8) | (data[j + 3]); for ( ; i < 64; ++i) m[i] = SIG1(m[i - 2]) + m[i - 7] + SIG0(m[i - 15]) + m[i - 16]; a = ctx->state[0]; b = ctx->state[1]; c = ctx->state[2]; d = ctx->state[3]; e = ctx->state[4]; f = ctx->state[5]; g = ctx->state[6]; h = ctx->state[7]; for (i = 0; i < 64; ++i) { t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i]; t2 = EP0(a) + MAJ(a,b,c); h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2; } ctx->state[0] += a; ctx->state[1] += b; ctx->state[2] += c; ctx->state[3] += d; ctx->state[4] += e; ctx->state[5] += f; ctx->state[6] += g; ctx->state[7] += h; } __device__ void d_sha256_init(SHA256_CTX *ctx) { ctx->datalen = 0; ctx->bitlen = 0; ctx->state[0] = 0x6a09e667; ctx->state[1] = 0xbb67ae85; ctx->state[2] = 0x3c6ef372; ctx->state[3] = 0xa54ff53a; ctx->state[4] = 0x510e527f; ctx->state[5] = 0x9b05688c; ctx->state[6] = 0x1f83d9ab; ctx->state[7] = 0x5be0cd19; } __device__ void d_sha256_update(SHA256_CTX *ctx, const BYTE data[], size_t len) { WORD i; for (i = 0; i < len; ++i) { ctx->data[ctx->datalen] = data[i]; ctx->datalen++; if (ctx->datalen == 64) { d_sha256_transform(ctx, ctx->data); ctx->bitlen += 512; ctx->datalen = 0; } } } __device__ void d_sha256_final(SHA256_CTX *ctx, BYTE hash[]) { WORD i; i = ctx->datalen; // Pad whatever data is left in the buffer. ctx->data[i++] = 0x80; while (i < 56) ctx->data[i++] = 0x00; // Append to the padding the total message's length in bits and transform. ctx->bitlen += ctx->datalen * 8; ctx->data[63] = ctx->bitlen; ctx->data[62] = ctx->bitlen >> 8; ctx->data[61] = ctx->bitlen >> 16; ctx->data[60] = ctx->bitlen >> 24; ctx->data[59] = ctx->bitlen >> 32; ctx->data[58] = ctx->bitlen >> 40; ctx->data[57] = ctx->bitlen >> 48; ctx->data[56] = ctx->bitlen >> 56; d_sha256_transform(ctx, ctx->data); // Since this implementation uses little endian byte ordering and SHA uses big endian, // reverse all the bytes when copying the final state to the output hash. 
for (i = 0; i < 4; ++i) { hash[i] = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff; hash[i + 4] = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff; hash[i + 8] = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff; hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff; hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff; hash[i + 20] = (ctx->state[5] >> (24 - i * 8)) & 0x000000ff; hash[i + 24] = (ctx->state[6] >> (24 - i * 8)) & 0x000000ff; hash[i + 28] = (ctx->state[7] >> (24 - i * 8)) & 0x000000ff; } } #define NONCE_VAL (gridDim.x*blockDim.x*blockIdx.y + blockDim.x*blockIdx.x + threadIdx.x) __global__ void kernel_sha256(BYTE *data, unsigned int *difficulty, Nonce_result *nr, unsigned int* multiplier) { if(nr->nonce_found) return; int i; int64_t nonce = gridDim.x*gridDim.x; nonce *= blockDim.x; nonce *= *multiplier; nonce += NONCE_VAL; BYTE* byte_nonce = (BYTE *)&nonce; BYTE l_data[55]; for(i=0;i<55;i++) l_data[i] = data[i]; for(i=0;i<sizeof(int64_t);i++) l_data[32+i] = byte_nonce[i]; SHA256_CTX ctx; d_sha256_init(&ctx); d_sha256_update(&ctx,l_data,55); BYTE hash[32]; d_sha256_final(&ctx,hash); int work = hash2int(hash); if( work > *difficulty) { nr->nonce_found = true; nr->nonce = nonce; } }
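// A host-side usage sketch for amoveo_mine_gpu as declared above. It assumes BYTE is the
// unsigned char typedef from sha256.h (as in the Brad Conte implementation referenced in the
// source); the grid/block sizes, difficulty value and zeroed work data are placeholders, not
// values used by the original miner.
#include <cstdio>
#include <cstring>

typedef unsigned char BYTE;  // assumption: matches the BYTE typedef in sha256.h

extern "C" bool amoveo_mine_gpu(BYTE nonce[23], unsigned int difficulty, BYTE data[55],
                                unsigned int GDIM, unsigned int BDIM,
                                unsigned int multiplier, unsigned int nonceRounds,
                                double* numHashes);

int main() {
    BYTE data[55];
    BYTE nonce[23];
    std::memset(data, 0, sizeof(data));   // placeholder work data; real input comes from the pool
    std::memset(nonce, 0, sizeof(nonce));
    double hashes = 0.0;

    // 64x64 blocks of 256 threads => one pass over 64*64*256 candidate nonces.
    bool found = amoveo_mine_gpu(nonce, /*difficulty=*/8844, data,
                                 /*GDIM=*/64, /*BDIM=*/256,
                                 /*multiplier=*/0, /*nonceRounds=*/1, &hashes);
    printf("found=%d, hashes tried=%.0f\n", (int)found, hashes);
    if (found) {
        // Bytes 0..7 of nonce hold the winning 64-bit counter; bytes 8..22 echo data[40..54].
        for (int i = 0; i < 23; ++i) printf("%02x", nonce[i]);
        printf("\n");
    }
    return 0;
}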
e0e6d58befd6431717e600c34389869fbe8651ca.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

// Hillis-Steele style inclusive scan over a single block; each thread owns one element.
__global__ void global_scan(float* d_out, float* d_in){
    int idx = threadIdx.x;
    float out = 0.00f;
    d_out[idx] = d_in[idx];
    __syncthreads(); // wait for every thread to finish the copy before continuing
    for(int interpre = 1; interpre < blockDim.x; interpre *= 2){ // blockDim.x == number of elements
        if(idx - interpre >= 0){
            out = d_out[idx] + d_out[idx - interpre];
        }
        __syncthreads();
        if(idx - interpre >= 0){
            d_out[idx] = out;
            out = 0.00f;
        }
        __syncthreads(); // make this round's writes visible before the next round reads them
    }
}

int main(int argc, char** argv){
    const int ARRAY_SIZE = 8;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // generate the input array on the host
    float h_in[ARRAY_SIZE];
    for(int i = 0; i < ARRAY_SIZE; i++){
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    // declare GPU memory pointers
    float* d_in;
    float* d_out;

    // allocate GPU memory
    hipMalloc((void**) &d_in, ARRAY_BYTES);
    hipMalloc((void**) &d_out, ARRAY_BYTES);

    // transfer the array to the GPU
    hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);

    // launch the kernel
    hipLaunchKernelGGL(global_scan, dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);

    // copy the result array back to the host
    hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);

    // print out the resulting array
    for(int i = 0; i < ARRAY_SIZE; i++){
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    // free GPU memory allocation
    hipFree(d_in);
    hipFree(d_out);

    return 0;
}
e0e6d58befd6431717e600c34389869fbe8651ca.cu
#include <stdio.h>

// Hillis-Steele style inclusive scan over a single block; each thread owns one element.
__global__ void global_scan(float* d_out, float* d_in){
    int idx = threadIdx.x;
    float out = 0.00f;
    d_out[idx] = d_in[idx];
    __syncthreads(); // wait for every thread to finish the copy before continuing
    for(int interpre = 1; interpre < blockDim.x; interpre *= 2){ // blockDim.x == number of elements
        if(idx - interpre >= 0){
            out = d_out[idx] + d_out[idx - interpre];
        }
        __syncthreads();
        if(idx - interpre >= 0){
            d_out[idx] = out;
            out = 0.00f;
        }
        __syncthreads(); // make this round's writes visible before the next round reads them
    }
}

int main(int argc, char** argv){
    const int ARRAY_SIZE = 8;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // generate the input array on the host
    float h_in[ARRAY_SIZE];
    for(int i = 0; i < ARRAY_SIZE; i++){
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    // declare GPU memory pointers
    float* d_in;
    float* d_out;

    // allocate GPU memory
    cudaMalloc((void**) &d_in, ARRAY_BYTES);
    cudaMalloc((void**) &d_out, ARRAY_BYTES);

    // transfer the array to the GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // launch the kernel
    global_scan<<<1, ARRAY_SIZE>>>(d_out, d_in);

    // copy the result array back to the host
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // print out the resulting array
    for(int i = 0; i < ARRAY_SIZE; i++){
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    // free GPU memory allocation
    cudaFree(d_in);
    cudaFree(d_out);

    return 0;
}
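// A host-side reference for the scan above: a sequential inclusive scan gives the values the
// kernel should produce (for the 0..7 input: 0 1 3 6 10 15 21 28). The function names here are
// illustrative; in the real program h_out copied back from the device would be passed to verify().
#include <cmath>
#include <cstdio>

void inclusive_scan_reference(const float* in, float* out, int n) {
    float running = 0.f;
    for (int i = 0; i < n; ++i) {
        running += in[i];        // out[i] = in[0] + ... + in[i]
        out[i] = running;
    }
}

int verify(const float* gpu, const float* ref, int n) {
    int mismatches = 0;
    for (int i = 0; i < n; ++i)
        if (std::fabs(gpu[i] - ref[i]) > 1e-5f) ++mismatches;
    return mismatches;
}

int main() {
    const int N = 8;
    float in[N], ref[N];
    for (int i = 0; i < N; ++i) in[i] = float(i);
    inclusive_scan_reference(in, ref, N);
    for (int i = 0; i < N; ++i) printf("%.0f ", ref[i]);  // 0 1 3 6 10 15 21 28
    printf("\n");
    return 0;
}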
a406a38a9840697f54a0ac7824da9bc6285d6dff.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "binaryentropy.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int lengthX = 1;
            const double *x = NULL;
            hipMalloc(&x, XSIZE*YSIZE);
            const double *y = NULL;
            hipMalloc(&y, XSIZE*YSIZE);
            double *z = NULL;
            hipMalloc(&z, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(binaryentropy, dim3(gridBlock), dim3(threadBlock), 0, 0, lengthX, x, y, z);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(binaryentropy, dim3(gridBlock), dim3(threadBlock), 0, 0, lengthX, x, y, z);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(binaryentropy, dim3(gridBlock), dim3(threadBlock), 0, 0, lengthX, x, y, z);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
a406a38a9840697f54a0ac7824da9bc6285d6dff.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "binaryentropy.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int lengthX = 1;
            const double *x = NULL;
            cudaMalloc(&x, XSIZE*YSIZE);
            const double *y = NULL;
            cudaMalloc(&y, XSIZE*YSIZE);
            double *z = NULL;
            cudaMalloc(&z, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            binaryentropy<<<gridBlock,threadBlock>>>(lengthX, x, y, z);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                binaryentropy<<<gridBlock,threadBlock>>>(lengthX, x, y, z);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                binaryentropy<<<gridBlock,threadBlock>>>(lengthX, x, y, z);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
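// The benchmark above times 1000 asynchronous launches with steady_clock but takes the end
// timestamp without synchronizing, so the measurement may not include the kernels' execution
// time. A sketch of the same measurement done with CUDA events, which bracket the work on the
// GPU itself; the kernel signature is assumed to match binaryentropy.cu, and the helper name
// and arguments are illustrative.
#include <cstdio>
#include <cuda_runtime.h>

// Assumed to match the kernel included from binaryentropy.cu above.
__global__ void binaryentropy(const int lengthX, const double* x, const double* y, double* z);

float time_binaryentropy(dim3 grid, dim3 block, int lengthX,
                         const double* x, const double* y, double* z, int iters) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);                       // enqueue start marker
    for (int i = 0; i < iters; ++i)
        binaryentropy<<<grid, block>>>(lengthX, x, y, z);
    cudaEventRecord(stop);                        // enqueue stop marker after the launches
    cudaEventSynchronize(stop);                   // wait until all timed work has finished

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);       // elapsed GPU time in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / iters;                            // average time per launch
}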
ff6cb8226f8b234fdcbf9732a94c6030fe3acf93.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zswapdblk_batched.cu normal z -> c, Fri Sep 11 18:29:22 2015 */ #include "common_magma.h" /*********************************************************/ /* * Swap diagonal blocks of two matrices. * Each thread block swaps one diagonal block. * Each thread iterates across one row of the block. */ __global__ void cswapdblk_batched_kernel( int nb, int n_mod_nb, magmaFloatComplex **dA_array, int ldda, int inca, magmaFloatComplex **dB_array, int lddb, int incb ) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int batchid = blockIdx.z; magmaFloatComplex *dA = dA_array[batchid]; magmaFloatComplex *dB = dB_array[batchid]; dA += tx + bx * nb * (ldda + inca); dB += tx + bx * nb * (lddb + incb); magmaFloatComplex tmp; if (bx < gridDim.x-1) { #pragma unroll for( int i = 0; i < nb; i++ ) { tmp = dA[i*ldda]; dA[i*ldda] = dB[i*lddb]; dB[i*lddb] = tmp; } } else { for( int i = 0; i < n_mod_nb; i++ ) { tmp = dA[i*ldda]; dA[i*ldda] = dB[i*lddb]; dB[i*lddb] = tmp; } } } /** Purpose ------- cswapdblk swaps diagonal blocks of size nb x nb between matrices dA and dB on the GPU. It swaps nblocks = ceil(n/nb) blocks. For i = 1 .. nblocks, submatrices dA( i*nb*inca, i*nb ) and dB( i*nb*incb, i*nb ) are swapped. Arguments --------- @param[in] n INTEGER The number of columns of the matrices dA and dB. N >= 0. @param[in] nb INTEGER The size of diagonal blocks. NB > 0 and NB <= maximum threads per CUDA block (512 or 1024). @param[in,out] dA_array Array of pointers, dimension (batchCount). Each is a COMPLEX array dA, dimension (ldda,n) The matrix dA. @param[in] ldda INTEGER The leading dimension of each array dA. ldda >= (nblocks - 1)*nb*inca + nb. @param[in] inca INTEGER The row increment between diagonal blocks of dA. inca >= 0. For example, inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb), inca = 0 means blocks are stored side-by-side at dA(0, i*nb). @param[in,out] dB_array Array of pointers, dimension (batchCount). Each is a COMPLEX array dB, dimension (lddb,n) The matrix dB. @param[in] lddb INTEGER The leading dimension of each array dB. lddb >= (nblocks - 1)*nb*incb + nb. @param[in] incb INTEGER The row increment between diagonal blocks of dB. incb >= 0. See inca. @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_cswapdblk_batched_q( magma_int_t n, magma_int_t nb, magmaFloatComplex **dA_array, magma_int_t ldda, magma_int_t inca, magmaFloatComplex **dB_array, magma_int_t lddb, magma_int_t incb, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t nblocks = magma_ceildiv( n, nb ); magma_int_t n_mod_nb = n % nb; magma_int_t info = 0; if (n < 0) { info = -1; } else if (nb < 1 || nb > 1024) { info = -2; } else if (ldda < (nblocks-1)*nb*inca + nb) { info = -4; } else if (inca < 0) { info = -5; } else if (lddb < (nblocks-1)*nb*incb + nb) { info = -7; } else if (incb < 0) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if (n_mod_nb == 0) nblocks += 1; // a dummy thread block for cleanup code dim3 dimGrid(nblocks, 1, batchCount); dim3 dimBlock(nb); if ( nblocks > 0 ) { hipLaunchKernelGGL(( cswapdblk_batched_kernel), dim3(dimGrid), dim3(dimBlock), 0, queue , nb, n_mod_nb, dA_array, ldda, inca, dB_array, lddb, incb ); } } /** @see magmablas_cswapdblk_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_cswapdblk_batched( magma_int_t n, magma_int_t nb, magmaFloatComplex **dA_array, magma_int_t ldda, magma_int_t inca, magmaFloatComplex **dB_array, magma_int_t lddb, magma_int_t incb, magma_int_t batchCount) { magmablas_cswapdblk_batched_q( n, nb, dA_array, ldda, inca, dB_array, lddb, incb, batchCount, magma_stream ); }
ff6cb8226f8b234fdcbf9732a94c6030fe3acf93.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zswapdblk_batched.cu normal z -> c, Fri Sep 11 18:29:22 2015 */ #include "common_magma.h" /*********************************************************/ /* * Swap diagonal blocks of two matrices. * Each thread block swaps one diagonal block. * Each thread iterates across one row of the block. */ __global__ void cswapdblk_batched_kernel( int nb, int n_mod_nb, magmaFloatComplex **dA_array, int ldda, int inca, magmaFloatComplex **dB_array, int lddb, int incb ) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int batchid = blockIdx.z; magmaFloatComplex *dA = dA_array[batchid]; magmaFloatComplex *dB = dB_array[batchid]; dA += tx + bx * nb * (ldda + inca); dB += tx + bx * nb * (lddb + incb); magmaFloatComplex tmp; if (bx < gridDim.x-1) { #pragma unroll for( int i = 0; i < nb; i++ ) { tmp = dA[i*ldda]; dA[i*ldda] = dB[i*lddb]; dB[i*lddb] = tmp; } } else { for( int i = 0; i < n_mod_nb; i++ ) { tmp = dA[i*ldda]; dA[i*ldda] = dB[i*lddb]; dB[i*lddb] = tmp; } } } /** Purpose ------- cswapdblk swaps diagonal blocks of size nb x nb between matrices dA and dB on the GPU. It swaps nblocks = ceil(n/nb) blocks. For i = 1 .. nblocks, submatrices dA( i*nb*inca, i*nb ) and dB( i*nb*incb, i*nb ) are swapped. Arguments --------- @param[in] n INTEGER The number of columns of the matrices dA and dB. N >= 0. @param[in] nb INTEGER The size of diagonal blocks. NB > 0 and NB <= maximum threads per CUDA block (512 or 1024). @param[in,out] dA_array Array of pointers, dimension (batchCount). Each is a COMPLEX array dA, dimension (ldda,n) The matrix dA. @param[in] ldda INTEGER The leading dimension of each array dA. ldda >= (nblocks - 1)*nb*inca + nb. @param[in] inca INTEGER The row increment between diagonal blocks of dA. inca >= 0. For example, inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb), inca = 0 means blocks are stored side-by-side at dA(0, i*nb). @param[in,out] dB_array Array of pointers, dimension (batchCount). Each is a COMPLEX array dB, dimension (lddb,n) The matrix dB. @param[in] lddb INTEGER The leading dimension of each array dB. lddb >= (nblocks - 1)*nb*incb + nb. @param[in] incb INTEGER The row increment between diagonal blocks of dB. incb >= 0. See inca. @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_cswapdblk_batched_q( magma_int_t n, magma_int_t nb, magmaFloatComplex **dA_array, magma_int_t ldda, magma_int_t inca, magmaFloatComplex **dB_array, magma_int_t lddb, magma_int_t incb, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t nblocks = magma_ceildiv( n, nb ); magma_int_t n_mod_nb = n % nb; magma_int_t info = 0; if (n < 0) { info = -1; } else if (nb < 1 || nb > 1024) { info = -2; } else if (ldda < (nblocks-1)*nb*inca + nb) { info = -4; } else if (inca < 0) { info = -5; } else if (lddb < (nblocks-1)*nb*incb + nb) { info = -7; } else if (incb < 0) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if (n_mod_nb == 0) nblocks += 1; // a dummy thread block for cleanup code dim3 dimGrid(nblocks, 1, batchCount); dim3 dimBlock(nb); if ( nblocks > 0 ) { cswapdblk_batched_kernel<<< dimGrid, dimBlock, 0, queue >>> ( nb, n_mod_nb, dA_array, ldda, inca, dB_array, lddb, incb ); } } /** @see magmablas_cswapdblk_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_cswapdblk_batched( magma_int_t n, magma_int_t nb, magmaFloatComplex **dA_array, magma_int_t ldda, magma_int_t inca, magmaFloatComplex **dB_array, magma_int_t lddb, magma_int_t incb, magma_int_t batchCount) { magmablas_cswapdblk_batched_q( n, nb, dA_array, ldda, inca, dB_array, lddb, incb, batchCount, magma_stream ); }
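// A host-side reference sketch (not part of MAGMA) of the operation documented above:
// for each diagonal block i, the nb-by-nb submatrices at dA(i*nb*inca, i*nb) and
// dB(i*nb*incb, i*nb) trade places, with the last block trimmed to n % nb columns just
// as the kernel's cleanup branch does. Column-major storage; float stands in for
// magmaFloatComplex, and all names here are illustrative.
#include <algorithm>
#include <utility>
static void swapdblk_reference(int n, int nb,
                               float *A, int lda, int inca,
                               float *B, int ldb, int incb) {
    int nblocks = (n + nb - 1) / nb;
    for (int i = 0; i < nblocks; ++i) {
        int cols = std::min(nb, n - i * nb);          // last block may be n % nb wide
        float *dA = A + i * nb * inca + i * nb * lda; // block start: dA(i*nb*inca, i*nb)
        float *dB = B + i * nb * incb + i * nb * ldb;
        for (int c = 0; c < cols; ++c)
            for (int r = 0; r < nb; ++r)
                std::swap(dA[r + c * lda], dB[r + c * ldb]);
    }
}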
79edeade14647ccf73cb5ca66d23a94f6d740028.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <SOIL/SOIL.h> #define DIM 500 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } }; __device__ int julia(int x, int y) { const float scale = 1; float jx = scale * (float) (DIM/2 - x)/(DIM/2); float jy = scale * (float) (DIM/2 - y)/(DIM/2); hipComplex c(-0.8,0.156); hipComplex a(jx,jy); for(int i=0; i<200;i++) { a = a * a + c; if(a.magnitude2() > 1000) return 0; } return 1; } __global__ void kernel(unsigned char *ptr) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; int julia_value = julia(x,y); ptr[offset*4+0] = 255 * julia_value; ptr[offset*4+1] = 0; ptr[offset*4+2] = 0; ptr[offset*4+3] = 255; } int main(void) { unsigned char *pixels = new unsigned char[DIM * DIM * 4]; unsigned char *dev_pixels; hipMalloc((void**)&dev_pixels, DIM*DIM*4); dim3 grid(DIM,DIM); hipLaunchKernelGGL(( kernel), dim3(grid), dim3(1), 0, 0, dev_pixels); hipMemcpy(pixels, dev_pixels, DIM * DIM * 4, hipMemcpyDeviceToHost); int err = SOIL_save_image("img.bmp", SOIL_SAVE_TYPE_BMP, DIM, DIM, 4,&pixels[0]); hipFree(dev_pixels); return 0; }
79edeade14647ccf73cb5ca66d23a94f6d740028.cu
#include <stdio.h> #include <SOIL/SOIL.h> #define DIM 500 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } }; __device__ int julia(int x, int y) { const float scale = 1; float jx = scale * (float) (DIM/2 - x)/(DIM/2); float jy = scale * (float) (DIM/2 - y)/(DIM/2); cuComplex c(-0.8,0.156); cuComplex a(jx,jy); for(int i=0; i<200;i++) { a = a * a + c; if(a.magnitude2() > 1000) return 0; } return 1; } __global__ void kernel(unsigned char *ptr) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; int julia_value = julia(x,y); ptr[offset*4+0] = 255 * julia_value; ptr[offset*4+1] = 0; ptr[offset*4+2] = 0; ptr[offset*4+3] = 255; } int main(void) { unsigned char *pixels = new unsigned char[DIM * DIM * 4]; unsigned char *dev_pixels; cudaMalloc((void**)&dev_pixels, DIM*DIM*4); dim3 grid(DIM,DIM); kernel<<<grid, 1>>>(dev_pixels); cudaMemcpy(pixels, dev_pixels, DIM * DIM * 4, cudaMemcpyDeviceToHost); int err = SOIL_save_image("img.bmp", SOIL_SAVE_TYPE_BMP, DIM, DIM, 4,&pixels[0]); cudaFree(dev_pixels); return 0; }
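// A small host-side sketch (not in the original pair) of the escape-time test that the
// julia() device function above computes: iterate a = a*a + c with c = (-0.8, 0.156) and
// keep the pixel only if |a|^2 never exceeds 1000 within 200 iterations.
#include <complex>
static int julia_reference(int x, int y, int dim) {
    float jx = (float)(dim / 2 - x) / (dim / 2);
    float jy = (float)(dim / 2 - y) / (dim / 2);
    std::complex<float> c(-0.8f, 0.156f), a(jx, jy);
    for (int i = 0; i < 200; ++i) {
        a = a * a + c;
        if (std::norm(a) > 1000.0f) return 0;  // std::norm is the squared magnitude
    }
    return 1;
}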
c87eb202cb22163f5f82cb0bb096136f3145ac25.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdlib.h> #include <string> #include <fstream> #include <sstream> #include <omp.h> #include <limits.h> #include <vector> #include <queue> // std::queue using namespace std; int *row; int *col; int *path; bool *visited; int N; __global__ void top_down_step(int *load,int *path,int dist,int *row,int *col) { unsigned int ind = threadIdx.x + blockIdx.x * blockDim.x; if(path[ind] == dist) { int start = row[ind]; int end = row[ind+1]; for(int I = start; I < end; I++) { int i = col[I]; if(path[i] == -1) { path[i] = dist+1; //*load = 1; atomicAdd(load, 1); } } } } __global__ void bottom_up_step(int *load,int *path,int dist,int *row,int *col) { unsigned int ind = threadIdx.x + blockIdx.x * blockDim.x; if(path[ind] == -1) { int start = row[ind]; int end = row[ind+1]; for(int I = start; I < end; I++) { int i = col[I]; if(path[i] == dist) { path[ind] = dist+1; atomicAdd(load, 1); break; } } } } int main(int argc, const char **argv) { if(argc != 2) { cout << "wrong input " << endl; return 0; } srand (time(NULL)); // tested on coPapersDBLP.mtx string filename = argv[1]; //"coPapersDBLP.mtx"; // edit //string filepath = "/home/cema/Documents/CS406/project/data/" + filename; ifstream file(filename.c_str()); if(file.fail()) { cout << "Path not found" << endl; return 0; } string line; getline(file,line); stringstream s(line); bool symmetric = false; string word; while(s >> word) // check for symmetric if(word == "symmetric") symmetric = true; while(file.peek() == '%')//dirty boi file.ignore(2048, '\n'); int M, edge; file >> N >> M >> edge; cout << "Graph has " << N << " nodes and " << (1+symmetric*1)*edge << " edges and symmetric " << symmetric << endl; int n; bool based0 = false; while(file >> n) { if (n == 0) { based0 = true; break; } } file.close(); if(based0) cout << "Graph is 0 based" << endl; else cout << "Graph is 1 based and is being turned in to 0 base." 
<< endl; file.open(filename.c_str()); while(file.peek() == '%') file.ignore(2048, '\n'); file >> n >> n >> n; //tmp int i,j; vector<vector<int> > v_inv(N, vector<int>(0)); vector<vector<int> > v(N, vector<int>(0)); for(int k = 0; k < edge; k++) { file >> i >> j; if(!based0) { // make 0 based; i--; j--; } v[i].push_back(j); v_inv[j].push_back(i); // inverse graph if(symmetric) v[j].push_back(i); } file.close(); edge *= 1 + (int)symmetric; int *row_inv = new int[N+1]; int *col_inv = new int[edge]; row = new int[N+1]; col = new int[edge]; row[0] = 0; int index = 0; for(int i = 0; i < N; i++) { for(int j = 0; j < v[i].size(); j++) { col[index] = v[i][j]; index++; } row[i+1] = index; } //inv graph porcessing row_inv[0] = 0; index = 0; for(int i = 0; i < N; i++) { for(int j = 0; j < v_inv[i].size(); j++) { col_inv[index] = v_inv[i][j]; index++; } row_inv[i+1] = index; } path = new int[N]; visited = new bool[N]; // int *levels = new int[N]; for(int i = 0; i < N; i++) path[i] = -1;//INT_MAX; cout << "Preprocessing complete " << endl; // algorithm starts here //************************************************* hipSetDevice(0); int root = rand() % (N-1); root = 0; path[root] = 0; int root_begin = row[root]; int root_end = row[root+1]; int root_begin_inv = row_inv[root]; int root_end_inv = row_inv[root+1]; for(int I = root_begin; I < root_end; I++){ path[col[I]] = 1; } // copy graph to device int *d_row; int *d_col; int *d_row_inv; int *d_col_inv; int *d_path; int *d_load; int size_row = (N+1)*sizeof(int); int size_col = edge*sizeof(int); int size_path = N*sizeof(int); int load = 0; // for the row check up hipMalloc((void **)&d_row,size_row); hipMalloc((void **)&d_col,size_col); hipMalloc((void **)&d_row_inv,size_row); hipMalloc((void **)&d_col_inv,size_col); // path array hipMalloc((void **)&d_path,size_path); // integer for check operation hipMalloc((void **)&d_load,sizeof(int)); cout << "Memory allocated" <<endl; // move memory hipMemcpy(d_row,row,size_row,hipMemcpyHostToDevice); hipMemcpy(d_col,col,size_col,hipMemcpyHostToDevice); hipMemcpy(d_path,path,size_path,hipMemcpyHostToDevice); hipMemcpy(d_load,&load,sizeof(int),hipMemcpyHostToDevice); hipMemcpy(d_row_inv,row_inv,size_row,hipMemcpyHostToDevice); hipMemcpy(d_col_inv,col_inv,size_col,hipMemcpyHostToDevice); hipDeviceSynchronize(); cout << "Graph copied to device" << endl; hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); unsigned int threads = prop.maxThreadsPerBlock; unsigned int threadDIM = prop.maxThreadsDim[3]; cout << "Here are the specs\n"; cout << "The threads: " << threads << "\n"; cout << "Thread DIM: " << threadDIM << "\n"; int BlockDIM = int(ceil(double(N)/threads)); // BlockDIM ->> number of blocks necessary int level = 1; cout << "Algo starts now... 
Hold on to your seats" << endl; load = 1; double start = omp_get_wtime(); int zeroman = 0; while(load != 0) { hipMemcpy(d_load,&zeroman,sizeof(int),hipMemcpyHostToDevice); if(double(load)/N <0.1) { //cout << "topdown " << load << endl; hipLaunchKernelGGL(( top_down_step), dim3(BlockDIM),dim3(threads), 0, 0, d_load,d_path,level,d_row,d_col); } else { //cout << "bottomup " << load << endl; if(symmetric) hipLaunchKernelGGL(( bottom_up_step), dim3(BlockDIM),dim3(threads), 0, 0, d_load,d_path,level,d_row,d_col); else hipLaunchKernelGGL(( bottom_up_step), dim3(BlockDIM),dim3(threads), 0, 0, d_load,d_path,level,d_row_inv,d_col_inv); } hipMemcpy(&load,d_load,sizeof(int),hipMemcpyDeviceToHost); level++; } double finish = omp_get_wtime(); hipMemcpy(path,d_path,size_path,hipMemcpyDeviceToHost); hipDeviceSynchronize(); int count = 0; for(int i = 0; i < N; i++) { if(path[i] == -1) { count++; } } if(count == 0) cout << "BFS Complete" << endl; else cout << count << " unvisited nodes" << endl; double elapsed = finish - start; cout << "Time: " << elapsed << endl; hipFree(d_row); hipFree(d_col); hipFree(d_path); hipFree(d_row_inv); hipFree(d_col_inv); delete[] col; delete[] row; delete[] col_inv; delete[] row_inv; //delete[] path; cout << "Graph deleted" << endl; ofstream ofile; ofile.open("results.txt"); for(int i = 0; i < N; i++) ofile << path[i] << "\n"; ofile.close(); delete[] path; return 0; }
c87eb202cb22163f5f82cb0bb096136f3145ac25.cu
#include <iostream> #include <stdlib.h> #include <string> #include <fstream> #include <sstream> #include <omp.h> #include <limits.h> #include <vector> #include <queue> // std::queue using namespace std; int *row; int *col; int *path; bool *visited; int N; __global__ void top_down_step(int *load,int *path,int dist,int *row,int *col) { unsigned int ind = threadIdx.x + blockIdx.x * blockDim.x; if(path[ind] == dist) { int start = row[ind]; int end = row[ind+1]; for(int I = start; I < end; I++) { int i = col[I]; if(path[i] == -1) { path[i] = dist+1; //*load = 1; atomicAdd(load, 1); } } } } __global__ void bottom_up_step(int *load,int *path,int dist,int *row,int *col) { unsigned int ind = threadIdx.x + blockIdx.x * blockDim.x; if(path[ind] == -1) { int start = row[ind]; int end = row[ind+1]; for(int I = start; I < end; I++) { int i = col[I]; if(path[i] == dist) { path[ind] = dist+1; atomicAdd(load, 1); break; } } } } int main(int argc, const char **argv) { if(argc != 2) { cout << "wrong input " << endl; return 0; } srand (time(NULL)); // tested on coPapersDBLP.mtx string filename = argv[1]; //"coPapersDBLP.mtx"; // edit //string filepath = "/home/cema/Documents/CS406/project/data/" + filename; ifstream file(filename.c_str()); if(file.fail()) { cout << "Path not found" << endl; return 0; } string line; getline(file,line); stringstream s(line); bool symmetric = false; string word; while(s >> word) // check for symmetric if(word == "symmetric") symmetric = true; while(file.peek() == '%')//dirty boi file.ignore(2048, '\n'); int M, edge; file >> N >> M >> edge; cout << "Graph has " << N << " nodes and " << (1+symmetric*1)*edge << " edges and symmetric " << symmetric << endl; int n; bool based0 = false; while(file >> n) { if (n == 0) { based0 = true; break; } } file.close(); if(based0) cout << "Graph is 0 based" << endl; else cout << "Graph is 1 based and is being turned in to 0 base." 
<< endl; file.open(filename.c_str()); while(file.peek() == '%') file.ignore(2048, '\n'); file >> n >> n >> n; //tmp int i,j; vector<vector<int> > v_inv(N, vector<int>(0)); vector<vector<int> > v(N, vector<int>(0)); for(int k = 0; k < edge; k++) { file >> i >> j; if(!based0) { // make 0 based; i--; j--; } v[i].push_back(j); v_inv[j].push_back(i); // inverse graph if(symmetric) v[j].push_back(i); } file.close(); edge *= 1 + (int)symmetric; int *row_inv = new int[N+1]; int *col_inv = new int[edge]; row = new int[N+1]; col = new int[edge]; row[0] = 0; int index = 0; for(int i = 0; i < N; i++) { for(int j = 0; j < v[i].size(); j++) { col[index] = v[i][j]; index++; } row[i+1] = index; } //inv graph porcessing row_inv[0] = 0; index = 0; for(int i = 0; i < N; i++) { for(int j = 0; j < v_inv[i].size(); j++) { col_inv[index] = v_inv[i][j]; index++; } row_inv[i+1] = index; } path = new int[N]; visited = new bool[N]; // int *levels = new int[N]; for(int i = 0; i < N; i++) path[i] = -1;//INT_MAX; cout << "Preprocessing complete " << endl; // algorithm starts here //************************************************* cudaSetDevice(0); int root = rand() % (N-1); root = 0; path[root] = 0; int root_begin = row[root]; int root_end = row[root+1]; int root_begin_inv = row_inv[root]; int root_end_inv = row_inv[root+1]; for(int I = root_begin; I < root_end; I++){ path[col[I]] = 1; } // copy graph to device int *d_row; int *d_col; int *d_row_inv; int *d_col_inv; int *d_path; int *d_load; int size_row = (N+1)*sizeof(int); int size_col = edge*sizeof(int); int size_path = N*sizeof(int); int load = 0; // for the row check up cudaMalloc((void **)&d_row,size_row); cudaMalloc((void **)&d_col,size_col); cudaMalloc((void **)&d_row_inv,size_row); cudaMalloc((void **)&d_col_inv,size_col); // path array cudaMalloc((void **)&d_path,size_path); // integer for check operation cudaMalloc((void **)&d_load,sizeof(int)); cout << "Memory allocated" <<endl; // move memory cudaMemcpy(d_row,row,size_row,cudaMemcpyHostToDevice); cudaMemcpy(d_col,col,size_col,cudaMemcpyHostToDevice); cudaMemcpy(d_path,path,size_path,cudaMemcpyHostToDevice); cudaMemcpy(d_load,&load,sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(d_row_inv,row_inv,size_row,cudaMemcpyHostToDevice); cudaMemcpy(d_col_inv,col_inv,size_col,cudaMemcpyHostToDevice); cudaDeviceSynchronize(); cout << "Graph copied to device" << endl; cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); unsigned int threads = prop.maxThreadsPerBlock; unsigned int threadDIM = prop.maxThreadsDim[3]; cout << "Here are the specs\n"; cout << "The threads: " << threads << "\n"; cout << "Thread DIM: " << threadDIM << "\n"; int BlockDIM = int(ceil(double(N)/threads)); // BlockDIM ->> number of blocks necessary int level = 1; cout << "Algo starts now... 
Hold on to your seats" << endl; load = 1; double start = omp_get_wtime(); int zeroman = 0; while(load != 0) { cudaMemcpy(d_load,&zeroman,sizeof(int),cudaMemcpyHostToDevice); if(double(load)/N <0.1) { //cout << "topdown " << load << endl; top_down_step<<<BlockDIM,threads>>>(d_load,d_path,level,d_row,d_col); } else { //cout << "bottomup " << load << endl; if(symmetric) bottom_up_step<<<BlockDIM,threads>>>(d_load,d_path,level,d_row,d_col); else bottom_up_step<<<BlockDIM,threads>>>(d_load,d_path,level,d_row_inv,d_col_inv); } cudaMemcpy(&load,d_load,sizeof(int),cudaMemcpyDeviceToHost); level++; } double finish = omp_get_wtime(); cudaMemcpy(path,d_path,size_path,cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); int count = 0; for(int i = 0; i < N; i++) { if(path[i] == -1) { count++; } } if(count == 0) cout << "BFS Complete" << endl; else cout << count << " unvisited nodes" << endl; double elapsed = finish - start; cout << "Time: " << elapsed << endl; cudaFree(d_row); cudaFree(d_col); cudaFree(d_path); cudaFree(d_row_inv); cudaFree(d_col_inv); delete[] col; delete[] row; delete[] col_inv; delete[] row_inv; //delete[] path; cout << "Graph deleted" << endl; ofstream ofile; ofile.open("results.txt"); for(int i = 0; i < N; i++) ofile << path[i] << "\n"; ofile.close(); delete[] path; return 0; }
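// A host-side sketch (not in the original program) of one top-down level over the same
// CSR arrays (row, col, path): every vertex already labelled `dist` relaxes its unvisited
// neighbours to dist + 1, and the count of newly labelled vertices is what the kernels
// above accumulate into `load` with atomicAdd to decide when to stop and when to switch
// between the top-down and bottom-up steps.
static int top_down_level_reference(int n, const int *row, const int *col,
                                    int *path, int dist) {
    int newly_visited = 0;
    for (int v = 0; v < n; ++v) {
        if (path[v] != dist) continue;
        for (int e = row[v]; e < row[v + 1]; ++e) {
            int u = col[e];
            if (path[u] == -1) {
                path[u] = dist + 1;
                ++newly_visited;
            }
        }
    }
    return newly_visited;
}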
367744aa35a9d22ef1be78188aac7e4aa9f1888a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <vector>
#include <fstream>
#include <string>
#include <stdio.h>
#include <Windows.h>
#include <time.h>
#include <thread>
#include <omp.h>
#include <vector>
#include <math.h>
#include <cmath>

using namespace std;

struct img_param {
    int size;
    int pixels_adress;
    int width;
    int height;
    short int bits_per_pixel;
};

struct px_arr {
    unsigned int *r;
    unsigned int *g;
    unsigned int *b;
};

px_arr reading(int dimensions1, int dimensions2, ifstream &file)
{
    unsigned int r = 0;
    unsigned int g = 0;
    unsigned int b = 0;
    px_arr old;
    old.r = new unsigned int[dimensions1 * dimensions2];
    old.g = new unsigned int[dimensions1 * dimensions2];
    old.b = new unsigned int[dimensions1 * dimensions2];
    for (int i = 0; i < dimensions1 * dimensions2; i++) {
        file.read((char*)&b, 1);
        file.read((char*)&g, 1);
        file.read((char*)&r, 1);
        old.r[i] = r;
        old.g[i] = g;
        old.b[i] = b;
    }
    return old;
}

px_arr small_width(px_arr old, int dimensions1, int dimensions2, int new_width)
{
    int dop = 4 - dimensions1 % 4;
    if (dop == 4) {
        dop = 0;
    }
    int null = 0;
    px_arr young;
    young.r = new unsigned int[new_width * dimensions2];
    young.g = new unsigned int[new_width * dimensions2];
    young.b = new unsigned int[new_width * dimensions2];
    for (int i = 0; i < dimensions2; i++) {
        for (int j = 0; j < new_width; j++) {
            young.r[new_width * i + j] = old.r[(int)round(((float)dimensions1 / (float)new_width) * j + dimensions1 * i)];
            young.g[new_width * i + j] = old.g[(int)round(((float)dimensions1 / (float)new_width) * j + dimensions1 * i)];
            young.b[new_width * i + j] = old.b[(int)round(((float)dimensions1 / (float)new_width) * j + dimensions1 * i)];
        }
    }
    return young;
}

px_arr small_height(px_arr old, int new_width, int dimensions2, int new_height)
{
    px_arr young;
    young.r = new unsigned int[new_height * new_width];
    young.g = new unsigned int[new_height * new_width];
    young.b = new unsigned int[new_height * new_width];
    for (int i = 0; i < new_height; i++) {
        for (int j = 0; j < new_width; j++) {
            young.r[new_width * i + j] = old.r[(int)round(((float)dimensions2 / (float)new_height) * i) * new_width + j];
            young.g[new_width * i + j] = old.g[(int)round(((float)dimensions2 / (float)new_height) * i) * new_width + j];
            young.b[new_width * i + j] = old.b[(int)round(((float)dimensions2 / (float)new_height) * i) * new_width + j];
        }
    }
    return young;
}

void writing(px_arr young, int new_height, ofstream &os, int new_width)
{
    for (int i = 0; i < new_height; i++) {
        for (int j = 0; j < new_width; j++) {
            os.write(reinterpret_cast<char*>(&young.b[new_width * i + j]), sizeof(char));
            os.write(reinterpret_cast<char*>(&young.g[new_width * i + j]), sizeof(char));
            os.write(reinterpret_cast<char*>(&young.r[new_width * i + j]), sizeof(char));
        }
    }
}

int main(int argc, char **argv)
{
    setlocale(LC_CTYPE, "rus");
    int size = 0, pixels_adress = 0, width = 0, height = 0;
    short int bits_per_pixel = 0;
    ifstream file("nature.bmp", ios::in | ios::binary);
    // Seek to byte 2
    file.seekg(2, ios::beg);
    // Read the file size
    file.read((char*)&size, sizeof(int));
    std::cout << "Size: " << size << endl;
    // Seek to byte 10
    file.seekg(10, ios::beg);
    // Read the offset where the pixel data is stored
    file.read((char*)&pixels_adress, sizeof(int));
    std::cout << "pixels_adress: " << pixels_adress << endl;
    // Seek to byte 18
    file.seekg(18, ios::beg);
    // Read the image width
    file.read((char*)&width, sizeof(int));
    std::cout << "width: " << width << endl;
    // Seek to byte 22
    file.seekg(22, ios::beg);
    // Read the image height
    file.read((char*)&height, sizeof(int));
    std::cout << "height: " << height << endl;
    // Seek to byte 28
    file.seekg(28, ios::beg);
    // Read the bits per pixel
    file.read((char*)&bits_per_pixel, sizeof(short int));
    std::cout << "bits_per_pixel: " << bits_per_pixel << endl;
    // Move to the pixel colour data
    file.seekg(pixels_adress, ios::beg);
    float new_width, new_height;
    std::cout << "new image width in pixels (smaller than the current one and divisible by 4)" << endl;
    std::cin >> new_width;
    std::cout << endl;
    std::cout << "new image height in pixels" << endl;
    std::cin >> new_height;
    std::cout << endl;
    std::ofstream os("temp_0.bmp", std::ios::binary);
    unsigned char signature[2] = { 'B', 'M' };
    unsigned int fileSize = 14 + 40 + new_width * new_height * 3;
    unsigned int reserved = 0;
    unsigned int offset = 14 + 40;
    unsigned int headerSize = 40;
    unsigned int dimensions1 = new_width;
    unsigned int dimensions2 = new_height;
    unsigned short colorPlanes = 1;
    unsigned short bpp = 24;
    unsigned int compression = 0;
    unsigned int imgSize = new_width * new_height * 3;
    unsigned int resolution[2] = { 2795, 2795 };
    unsigned int pltColors = 0;
    unsigned int impColors = 0;
    os.write(reinterpret_cast<char*>(signature), sizeof(signature));
    os.write(reinterpret_cast<char*>(&fileSize), sizeof(fileSize));
    os.write(reinterpret_cast<char*>(&reserved), sizeof(reserved));
    os.write(reinterpret_cast<char*>(&offset), sizeof(offset));
    os.write(reinterpret_cast<char*>(&headerSize), sizeof(headerSize));
    os.write(reinterpret_cast<char*>(&dimensions1), sizeof(dimensions1));
    os.write(reinterpret_cast<char*>(&dimensions2), sizeof(dimensions2));
    os.write(reinterpret_cast<char*>(&colorPlanes), sizeof(colorPlanes));
    os.write(reinterpret_cast<char*>(&bpp), sizeof(bpp));
    os.write(reinterpret_cast<char*>(&compression), sizeof(compression));
    os.write(reinterpret_cast<char*>(&imgSize), sizeof(imgSize));
    os.write(reinterpret_cast<char*>(resolution), sizeof(resolution));
    os.write(reinterpret_cast<char*>(&pltColors), sizeof(pltColors));
    os.write(reinterpret_cast<char*>(&impColors), sizeof(impColors));
    px_arr old, young_w, young_wh;
    old = reading(width, height, file);
    young_w = small_width(old, width, height, new_width);
    young_wh = small_height(young_w, new_width, height, new_height);
    writing(young_wh, new_height, os, new_width);
    os.close();
    return 0;
}
367744aa35a9d22ef1be78188aac7e4aa9f1888a.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <vector>
#include <fstream>
#include <string>
#include <stdio.h>
#include <Windows.h>
#include <time.h>
#include <thread>
#include <omp.h>
#include <vector>
#include <math.h>
#include <cmath>

using namespace std;

struct img_param {
    int size;
    int pixels_adress;
    int width;
    int height;
    short int bits_per_pixel;
};

struct px_arr {
    unsigned int *r;
    unsigned int *g;
    unsigned int *b;
};

px_arr reading(int dimensions1, int dimensions2, ifstream &file)
{
    unsigned int r = 0;
    unsigned int g = 0;
    unsigned int b = 0;
    px_arr old;
    old.r = new unsigned int[dimensions1 * dimensions2];
    old.g = new unsigned int[dimensions1 * dimensions2];
    old.b = new unsigned int[dimensions1 * dimensions2];
    for (int i = 0; i < dimensions1 * dimensions2; i++) {
        file.read((char*)&b, 1);
        file.read((char*)&g, 1);
        file.read((char*)&r, 1);
        old.r[i] = r;
        old.g[i] = g;
        old.b[i] = b;
    }
    return old;
}

px_arr small_width(px_arr old, int dimensions1, int dimensions2, int new_width)
{
    int dop = 4 - dimensions1 % 4;
    if (dop == 4) {
        dop = 0;
    }
    int null = 0;
    px_arr young;
    young.r = new unsigned int[new_width * dimensions2];
    young.g = new unsigned int[new_width * dimensions2];
    young.b = new unsigned int[new_width * dimensions2];
    for (int i = 0; i < dimensions2; i++) {
        for (int j = 0; j < new_width; j++) {
            young.r[new_width * i + j] = old.r[(int)round(((float)dimensions1 / (float)new_width) * j + dimensions1 * i)];
            young.g[new_width * i + j] = old.g[(int)round(((float)dimensions1 / (float)new_width) * j + dimensions1 * i)];
            young.b[new_width * i + j] = old.b[(int)round(((float)dimensions1 / (float)new_width) * j + dimensions1 * i)];
        }
    }
    return young;
}

px_arr small_height(px_arr old, int new_width, int dimensions2, int new_height)
{
    px_arr young;
    young.r = new unsigned int[new_height * new_width];
    young.g = new unsigned int[new_height * new_width];
    young.b = new unsigned int[new_height * new_width];
    for (int i = 0; i < new_height; i++) {
        for (int j = 0; j < new_width; j++) {
            young.r[new_width * i + j] = old.r[(int)round(((float)dimensions2 / (float)new_height) * i) * new_width + j];
            young.g[new_width * i + j] = old.g[(int)round(((float)dimensions2 / (float)new_height) * i) * new_width + j];
            young.b[new_width * i + j] = old.b[(int)round(((float)dimensions2 / (float)new_height) * i) * new_width + j];
        }
    }
    return young;
}

void writing(px_arr young, int new_height, ofstream &os, int new_width)
{
    for (int i = 0; i < new_height; i++) {
        for (int j = 0; j < new_width; j++) {
            os.write(reinterpret_cast<char*>(&young.b[new_width * i + j]), sizeof(char));
            os.write(reinterpret_cast<char*>(&young.g[new_width * i + j]), sizeof(char));
            os.write(reinterpret_cast<char*>(&young.r[new_width * i + j]), sizeof(char));
        }
    }
}

int main(int argc, char **argv)
{
    setlocale(LC_CTYPE, "rus");
    int size = 0, pixels_adress = 0, width = 0, height = 0;
    short int bits_per_pixel = 0;
    ifstream file("nature.bmp", ios::in | ios::binary);
    // Seek to byte 2
    file.seekg(2, ios::beg);
    // Read the file size
    file.read((char*)&size, sizeof(int));
    std::cout << "Size: " << size << endl;
    // Seek to byte 10
    file.seekg(10, ios::beg);
    // Read the offset where the pixel data is stored
    file.read((char*)&pixels_adress, sizeof(int));
    std::cout << "pixels_adress: " << pixels_adress << endl;
    // Seek to byte 18
    file.seekg(18, ios::beg);
    // Read the image width
    file.read((char*)&width, sizeof(int));
    std::cout << "width: " << width << endl;
    // Seek to byte 22
    file.seekg(22, ios::beg);
    // Read the image height
    file.read((char*)&height, sizeof(int));
    std::cout << "height: " << height << endl;
    // Seek to byte 28
    file.seekg(28, ios::beg);
    // Read the bits per pixel
    file.read((char*)&bits_per_pixel, sizeof(short int));
    std::cout << "bits_per_pixel: " << bits_per_pixel << endl;
    // Move to the pixel colour data
    file.seekg(pixels_adress, ios::beg);
    float new_width, new_height;
    std::cout << "new image width in pixels (smaller than the current one and divisible by 4)" << endl;
    std::cin >> new_width;
    std::cout << endl;
    std::cout << "new image height in pixels" << endl;
    std::cin >> new_height;
    std::cout << endl;
    std::ofstream os("temp_0.bmp", std::ios::binary);
    unsigned char signature[2] = { 'B', 'M' };
    unsigned int fileSize = 14 + 40 + new_width * new_height * 3;
    unsigned int reserved = 0;
    unsigned int offset = 14 + 40;
    unsigned int headerSize = 40;
    unsigned int dimensions1 = new_width;
    unsigned int dimensions2 = new_height;
    unsigned short colorPlanes = 1;
    unsigned short bpp = 24;
    unsigned int compression = 0;
    unsigned int imgSize = new_width * new_height * 3;
    unsigned int resolution[2] = { 2795, 2795 };
    unsigned int pltColors = 0;
    unsigned int impColors = 0;
    os.write(reinterpret_cast<char*>(signature), sizeof(signature));
    os.write(reinterpret_cast<char*>(&fileSize), sizeof(fileSize));
    os.write(reinterpret_cast<char*>(&reserved), sizeof(reserved));
    os.write(reinterpret_cast<char*>(&offset), sizeof(offset));
    os.write(reinterpret_cast<char*>(&headerSize), sizeof(headerSize));
    os.write(reinterpret_cast<char*>(&dimensions1), sizeof(dimensions1));
    os.write(reinterpret_cast<char*>(&dimensions2), sizeof(dimensions2));
    os.write(reinterpret_cast<char*>(&colorPlanes), sizeof(colorPlanes));
    os.write(reinterpret_cast<char*>(&bpp), sizeof(bpp));
    os.write(reinterpret_cast<char*>(&compression), sizeof(compression));
    os.write(reinterpret_cast<char*>(&imgSize), sizeof(imgSize));
    os.write(reinterpret_cast<char*>(resolution), sizeof(resolution));
    os.write(reinterpret_cast<char*>(&pltColors), sizeof(pltColors));
    os.write(reinterpret_cast<char*>(&impColors), sizeof(impColors));
    px_arr old, young_w, young_wh;
    old = reading(width, height, file);
    young_w = small_width(old, width, height, new_width);
    young_wh = small_height(young_w, new_width, height, new_height);
    writing(young_wh, new_height, os, new_width);
    os.close();
    return 0;
}
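// A minimal sketch (not from the files above) of the nearest-neighbour index mapping used
// by small_width and small_height: destination index j reads source index
// round(old_extent / new_extent * j), applied first along rows and then along columns.
#include <cmath>
static int source_index(int dst_index, int old_extent, int new_extent) {
    return (int)std::round((float)old_extent / (float)new_extent * dst_index);
}
// For example, shrinking a 1000-pixel-wide row to 500 pixels reads source columns
// 0, 2, 4, ... — every second pixel is kept.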
ad3d2fe5f21cee2246590299fd261cc4c3cdf1aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Voxel sampling GPU implementation * Author Zhaoyu SU * All Rights Reserved. Sep., 2019. */ #include <stdio.h> #include <iostream> #include <float.h> // import FLT_EPSILON __device__ int get_batch_id(int* accu_list, int batch_size, int id) { for (int b=0; b<batch_size-1; b++) { if (id >= accu_list[b]) { if(id < accu_list[b+1]) return b; } } return batch_size - 1; } __global__ void output_init_gpu_kernel(int roi_num, int voxel_num, int channels, float padding_value, float* output_features, int* output_idx) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id < roi_num * voxel_num) { int start_loc = thread_id * channels; for (int c=0; c<channels; c++) { output_features[start_loc + c] = padding_value; output_idx[start_loc + c] = -1; } } } __global__ void roi_pooling_register_gpu_kernel(int batch_size, int max_batch_input_point_num, int roi_num, int voxel_size, int pooling_size, const float* input_coors, const float* roi_attrs, const int* input_num_list, int* input_accu_list, int* roi_accu_list, int* temp_pool, int* temp_count) { const int voxel_num = voxel_size * voxel_size * voxel_size; const int half_voxel_size = (voxel_size - 1) / 2; const int center_offset = voxel_size * voxel_size * half_voxel_size + \ voxel_size * half_voxel_size + \ half_voxel_size; int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id < roi_num * max_batch_input_point_num) { int roi_id = thread_id / max_batch_input_point_num; int input_id = thread_id % max_batch_input_point_num; int batch_id = get_batch_id(roi_accu_list, batch_size, roi_id); if (input_id < input_num_list[batch_id]) { float roi_w = roi_attrs[roi_id*7 + 0]; float roi_l = roi_attrs[roi_id*7 + 1]; float roi_h = roi_attrs[roi_id*7 + 2]; float roi_x = roi_attrs[roi_id*7 + 3]; float roi_y = roi_attrs[roi_id*7 + 4]; float roi_z = roi_attrs[roi_id*7 + 5]; float roi_r = roi_attrs[roi_id*7 + 6]; float grid_length_x = roi_w / voxel_size; float grid_length_y = roi_l / voxel_size; float grid_length_z = roi_h / voxel_size; int batch_id = get_batch_id(roi_accu_list, batch_size, roi_id); float point_x = input_coors[input_accu_list[batch_id]*3 + input_id*3 + 0]; float point_y = input_coors[input_accu_list[batch_id]*3 + input_id*3 + 1]; float point_z = input_coors[input_accu_list[batch_id]*3 + input_id*3 + 2]; float rel_point_x = point_x - roi_x; float rel_point_y = point_y - roi_y; float rel_point_z = point_z - roi_z; float rot_rel_point_x = rel_point_x*cosf(-roi_r) - rel_point_y*sinf(-roi_r); float rot_rel_point_y = rel_point_x*sinf(-roi_r) + rel_point_y*cosf(-roi_r); if (abs(rot_rel_point_x)<roi_w / 2 && abs(rot_rel_point_y)<roi_l / 2 && abs(rel_point_z)<roi_h / 2) { int grid_coor_x = __float2int_rz(rot_rel_point_x / (grid_length_x + FLT_EPSILON) + 0.5 * fabsf(rot_rel_point_x) / rot_rel_point_x + FLT_EPSILON); int grid_coor_y = __float2int_rz(rot_rel_point_y / (grid_length_y + FLT_EPSILON) + 0.5 * fabsf(rot_rel_point_y) / rot_rel_point_y + FLT_EPSILON); int grid_coor_z = __float2int_rz(rel_point_z / (grid_length_z + FLT_EPSILON) + 0.5 * fabsf(rel_point_z) / rel_point_z + FLT_EPSILON); int voxel_coor = roi_id * voxel_num + center_offset + \ voxel_size * voxel_size * grid_coor_x + \ voxel_size * grid_coor_y + \ grid_coor_z; int pool_count = atomicAdd(&temp_count[voxel_coor], 1); if (pool_count < pooling_size) { temp_pool[voxel_coor * pooling_size + pool_count] = input_accu_list[batch_id] + input_id; } } } } } __global__ void 
roi_pooling_fill_gpu_kernel(int roi_num, int voxel_num, int channels, int pooling_size, const float* input_features, int* temp_count, int* temp_pool, float* output_features, int* output_idx) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id < roi_num * voxel_num) { int pool_count = min(temp_count[thread_id], pooling_size); if (pool_count > 0) { for (int c=0; c<channels; c++) { float max_feature = -1e9; for (int p=0; p<pool_count; p++) { int pool_id = thread_id * pooling_size + p; int input_id = temp_pool[pool_id]; if (input_features[input_id * channels + c] > max_feature) { max_feature = input_features[input_id * channels + c]; output_features[thread_id * channels + c] = max_feature; output_idx[thread_id * channels + c] = input_id; } } } } } } __global__ void roi_pooling_grad_gpu_kernel(int ncenters, int voxel_num, int channels, const int* output_idx, const float* output_features_grad, float* input_features_grad) { int center_id = threadIdx.x + blockIdx.x * blockDim.x; if (center_id < ncenters) { for (int i=0; i<voxel_num; i++) { int voxel_coor = center_id*voxel_num + i; for (int c=0; c<channels; c++) { int point_id = output_idx[voxel_coor * channels + c]; if (point_id != -1) { atomicAdd(&input_features_grad[point_id*channels + c], output_features_grad[voxel_coor*channels + c]); } } } } } void roi_pooling_gpu_launcher(int batch_size, int input_point_num, int channels, int roi_num, int voxel_size, int pooling_size, float padding_value, const float* input_coors, const float* input_features, const float* roi_attrs, const int* input_num_list, const int* roi_num_list, int* input_num_list_host, int* input_accu_list, int* roi_accu_list, int* temp_pool, int* temp_count, float* output_features, int* output_idx) { if (batch_size * channels <= 0) { printf("RoiPoolingOp ERROR: Invalid CUDA input dimensions.\n"); return; } if (input_point_num <= 0) return; const int voxel_num = voxel_size * voxel_size * voxel_size; int max_batch_input_point_num = 0; for (int i=0; i<batch_size; i++) max_batch_input_point_num = max(max_batch_input_point_num, input_num_list_host[i]); int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, output_init_gpu_kernel, 0, roi_num * voxel_num); gridSize = (roi_num * voxel_num + blockSize - 1) / blockSize; hipLaunchKernelGGL(( output_init_gpu_kernel), dim3(gridSize),dim3(blockSize), 0, 0, roi_num, voxel_num, channels, padding_value, output_features, output_idx); hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, roi_pooling_register_gpu_kernel, 0, roi_num * max_batch_input_point_num); gridSize = (roi_num * max_batch_input_point_num + blockSize - 1) / blockSize; hipLaunchKernelGGL(( roi_pooling_register_gpu_kernel), dim3(gridSize),dim3(blockSize), 0, 0, batch_size, max_batch_input_point_num, roi_num, voxel_size, pooling_size, input_coors, roi_attrs, input_num_list, input_accu_list, roi_accu_list, temp_pool, temp_count); hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, roi_pooling_fill_gpu_kernel, 0, roi_num * voxel_num); gridSize = (roi_num * voxel_num + blockSize - 1) / blockSize; hipLaunchKernelGGL(( roi_pooling_fill_gpu_kernel), dim3(gridSize),dim3(blockSize), 0, 0, roi_num, voxel_num, channels, pooling_size, input_features, temp_count, temp_pool, output_features, output_idx); } void 
roi_pooling_grad_gpu_launcher(int ncenters, int voxel_num, int channels, const int* output_idx, const float* output_features_grad, float* input_features_grad) { if (channels <= 0) { printf("RoiPoolingGradOp ERROR: Invalid CUDA input dimensions.\n"); return; } if (ncenters == 0) return; int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, roi_pooling_grad_gpu_kernel, 0, ncenters); gridSize = (ncenters + blockSize - 1) / blockSize; hipLaunchKernelGGL(( roi_pooling_grad_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, ncenters, voxel_num, channels, output_idx, output_features_grad, input_features_grad); }
ad3d2fe5f21cee2246590299fd261cc4c3cdf1aa.cu
/* Voxel sampling GPU implementation * Author Zhaoyu SU * All Rights Reserved. Sep., 2019. */ #include <stdio.h> #include <iostream> #include <float.h> // import FLT_EPSILON __device__ int get_batch_id(int* accu_list, int batch_size, int id) { for (int b=0; b<batch_size-1; b++) { if (id >= accu_list[b]) { if(id < accu_list[b+1]) return b; } } return batch_size - 1; } __global__ void output_init_gpu_kernel(int roi_num, int voxel_num, int channels, float padding_value, float* output_features, int* output_idx) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id < roi_num * voxel_num) { int start_loc = thread_id * channels; for (int c=0; c<channels; c++) { output_features[start_loc + c] = padding_value; output_idx[start_loc + c] = -1; } } } __global__ void roi_pooling_register_gpu_kernel(int batch_size, int max_batch_input_point_num, int roi_num, int voxel_size, int pooling_size, const float* input_coors, const float* roi_attrs, const int* input_num_list, int* input_accu_list, int* roi_accu_list, int* temp_pool, int* temp_count) { const int voxel_num = voxel_size * voxel_size * voxel_size; const int half_voxel_size = (voxel_size - 1) / 2; const int center_offset = voxel_size * voxel_size * half_voxel_size + \ voxel_size * half_voxel_size + \ half_voxel_size; int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id < roi_num * max_batch_input_point_num) { int roi_id = thread_id / max_batch_input_point_num; int input_id = thread_id % max_batch_input_point_num; int batch_id = get_batch_id(roi_accu_list, batch_size, roi_id); if (input_id < input_num_list[batch_id]) { float roi_w = roi_attrs[roi_id*7 + 0]; float roi_l = roi_attrs[roi_id*7 + 1]; float roi_h = roi_attrs[roi_id*7 + 2]; float roi_x = roi_attrs[roi_id*7 + 3]; float roi_y = roi_attrs[roi_id*7 + 4]; float roi_z = roi_attrs[roi_id*7 + 5]; float roi_r = roi_attrs[roi_id*7 + 6]; float grid_length_x = roi_w / voxel_size; float grid_length_y = roi_l / voxel_size; float grid_length_z = roi_h / voxel_size; int batch_id = get_batch_id(roi_accu_list, batch_size, roi_id); float point_x = input_coors[input_accu_list[batch_id]*3 + input_id*3 + 0]; float point_y = input_coors[input_accu_list[batch_id]*3 + input_id*3 + 1]; float point_z = input_coors[input_accu_list[batch_id]*3 + input_id*3 + 2]; float rel_point_x = point_x - roi_x; float rel_point_y = point_y - roi_y; float rel_point_z = point_z - roi_z; float rot_rel_point_x = rel_point_x*cosf(-roi_r) - rel_point_y*sinf(-roi_r); float rot_rel_point_y = rel_point_x*sinf(-roi_r) + rel_point_y*cosf(-roi_r); if (abs(rot_rel_point_x)<roi_w / 2 && abs(rot_rel_point_y)<roi_l / 2 && abs(rel_point_z)<roi_h / 2) { int grid_coor_x = __float2int_rz(rot_rel_point_x / (grid_length_x + FLT_EPSILON) + 0.5 * fabsf(rot_rel_point_x) / rot_rel_point_x + FLT_EPSILON); int grid_coor_y = __float2int_rz(rot_rel_point_y / (grid_length_y + FLT_EPSILON) + 0.5 * fabsf(rot_rel_point_y) / rot_rel_point_y + FLT_EPSILON); int grid_coor_z = __float2int_rz(rel_point_z / (grid_length_z + FLT_EPSILON) + 0.5 * fabsf(rel_point_z) / rel_point_z + FLT_EPSILON); int voxel_coor = roi_id * voxel_num + center_offset + \ voxel_size * voxel_size * grid_coor_x + \ voxel_size * grid_coor_y + \ grid_coor_z; int pool_count = atomicAdd(&temp_count[voxel_coor], 1); if (pool_count < pooling_size) { temp_pool[voxel_coor * pooling_size + pool_count] = input_accu_list[batch_id] + input_id; } } } } } __global__ void roi_pooling_fill_gpu_kernel(int roi_num, int voxel_num, int channels, int pooling_size, const float* 
input_features, int* temp_count, int* temp_pool, float* output_features, int* output_idx) { int thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id < roi_num * voxel_num) { int pool_count = min(temp_count[thread_id], pooling_size); if (pool_count > 0) { for (int c=0; c<channels; c++) { float max_feature = -1e9; for (int p=0; p<pool_count; p++) { int pool_id = thread_id * pooling_size + p; int input_id = temp_pool[pool_id]; if (input_features[input_id * channels + c] > max_feature) { max_feature = input_features[input_id * channels + c]; output_features[thread_id * channels + c] = max_feature; output_idx[thread_id * channels + c] = input_id; } } } } } } __global__ void roi_pooling_grad_gpu_kernel(int ncenters, int voxel_num, int channels, const int* output_idx, const float* output_features_grad, float* input_features_grad) { int center_id = threadIdx.x + blockIdx.x * blockDim.x; if (center_id < ncenters) { for (int i=0; i<voxel_num; i++) { int voxel_coor = center_id*voxel_num + i; for (int c=0; c<channels; c++) { int point_id = output_idx[voxel_coor * channels + c]; if (point_id != -1) { atomicAdd(&input_features_grad[point_id*channels + c], output_features_grad[voxel_coor*channels + c]); } } } } } void roi_pooling_gpu_launcher(int batch_size, int input_point_num, int channels, int roi_num, int voxel_size, int pooling_size, float padding_value, const float* input_coors, const float* input_features, const float* roi_attrs, const int* input_num_list, const int* roi_num_list, int* input_num_list_host, int* input_accu_list, int* roi_accu_list, int* temp_pool, int* temp_count, float* output_features, int* output_idx) { if (batch_size * channels <= 0) { printf("RoiPoolingOp ERROR: Invalid CUDA input dimensions.\n"); return; } if (input_point_num <= 0) return; const int voxel_num = voxel_size * voxel_size * voxel_size; int max_batch_input_point_num = 0; for (int i=0; i<batch_size; i++) max_batch_input_point_num = max(max_batch_input_point_num, input_num_list_host[i]); int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, output_init_gpu_kernel, 0, roi_num * voxel_num); gridSize = (roi_num * voxel_num + blockSize - 1) / blockSize; output_init_gpu_kernel<<<gridSize,blockSize>>>(roi_num, voxel_num, channels, padding_value, output_features, output_idx); cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, roi_pooling_register_gpu_kernel, 0, roi_num * max_batch_input_point_num); gridSize = (roi_num * max_batch_input_point_num + blockSize - 1) / blockSize; roi_pooling_register_gpu_kernel<<<gridSize,blockSize>>>(batch_size, max_batch_input_point_num, roi_num, voxel_size, pooling_size, input_coors, roi_attrs, input_num_list, input_accu_list, roi_accu_list, temp_pool, temp_count); cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, roi_pooling_fill_gpu_kernel, 0, roi_num * voxel_num); gridSize = (roi_num * voxel_num + blockSize - 1) / blockSize; roi_pooling_fill_gpu_kernel<<<gridSize,blockSize>>>(roi_num, voxel_num, channels, pooling_size, input_features, temp_count, temp_pool, output_features, output_idx); } void roi_pooling_grad_gpu_launcher(int ncenters, int voxel_num, int channels, const int* output_idx, const float* output_features_grad, float* input_features_grad) { if (channels <= 0) { printf("RoiPoolingGradOp ERROR: Invalid CUDA 
input dimensions.\n"); return; } if (ncenters == 0) return; int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, roi_pooling_grad_gpu_kernel, 0, ncenters); gridSize = (ncenters + blockSize - 1) / blockSize; roi_pooling_grad_gpu_kernel<<<gridSize, blockSize>>>(ncenters, voxel_num, channels, output_idx, output_features_grad, input_features_grad); }
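// A minimal CUDA sketch (not part of the original op) of the launch-configuration pattern
// used by the two launchers above: cudaOccupancyMaxPotentialBlockSize suggests a block
// size for the kernel, and the grid size is then derived from the amount of work with a
// ceiling division. The kernel here is a stand-in for the real ones.
#include <cuda_runtime.h>
__global__ void fill_kernel(int n, float *out) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 1.0f;
}
static void launch_fill(int n, float *out) {
    int minGridSize = 0, blockSize = 0;
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, fill_kernel, 0, n);
    int gridSize = (n + blockSize - 1) / blockSize;  // enough blocks to cover n elements
    fill_kernel<<<gridSize, blockSize>>>(n, out);
}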
0b4e8bfc8089ba0a9e1ff3945897e9e7187c0849.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zmdot.cu normal z -> d, Fri Sep 11 18:29:42 2015 @author Hartwig Anzt */ #include "common_magma.h" #define BLOCK_SIZE 256 #define PRECISION_d // initialize arrays with zero __global__ void magma_dgpumemzero( double * d, int n, int k ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i < n ){ for( int j=0; j<k; j++) d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); } } // dot product __global__ void magma_ddot_kernel( int Gs, int n, double * v, double * r, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // dot product for multiple vectors __global__ void magma_dblockdot_kernel( int Gs, int n, int k, double * v, double * r, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // k vectors v(i) if (i<n){ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = v[i+j*n] * r[i]; } else { for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 
]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for multiple vectors __global__ void magma_dblockreduce_kernel( int Gs, int n, int k, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ] : MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // accelerated reduction for one vector __global__ void magma_dreduce_kernel_fast( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? 
vtmp[ i + blockSize ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // accelerated block reduction for multiple vectors __global__ void magma_dblockreduce_kernel_fast( int Gs, int n, int k, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<k; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = 
            temp[ j*(blockSize) ];
        }
    }
}

/**
    Purpose
    -------

    Computes the scalar product of a set of vectors v_i such that

    skp = ( <v_0,r>, <v_1,r>, .. )

    Returns the vector skp.

    Arguments
    ---------

    @param[in]  n       int                 length of v_i and r
    @param[in]  k       int                 # vectors v_i
    @param[in]  v       magmaDouble_ptr     v = (v_0 .. v_i.. v_k)
    @param[in]  r       magmaDouble_ptr     r
    @param[in]  d1      magmaDouble_ptr     workspace
    @param[in]  d2      magmaDouble_ptr     workspace
    @param[out] skp     magmaDouble_ptr     vector[k] of scalar products (<v_i,r>...)
    @param[in]  queue   magma_queue_t       Queue to execute in.

    @ingroup magmasparse_dblas
    ********************************************************************/

extern "C" magma_int_t
magma_dmdotc(
    int n,
    int k,
    magmaDouble_ptr v,
    magmaDouble_ptr r,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    // set queue for old dense routines
    magma_queue_t orig_queue;
    magmablasGetKernelStream( &orig_queue );

    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    dim3 Gs_next;
    int Ms = (k)* (local_block_size) * sizeof( double ); // k vecs
    magmaDouble_ptr aux1 = d1, aux2 = d2;
    int b = 1;

    if (k>1) {
        hipLaunchKernelGGL(( magma_dblockdot_kernel), dim3(Gs), dim3(Bs), Ms, queue, Gs.x, n, k, v, r, d1 );
    }
    else {
        hipLaunchKernelGGL(( magma_ddot_kernel), dim3(Gs), dim3(Bs), Ms, queue, Gs.x, n, v, r, d1 );
    }

    /*
    // not necessary to zero GPU mem
    magma_dgpumemzero<<<Gs, Bs, 0>>>( d1, n*k,1 );
    magma_dgpumemzero<<<Gs, Bs, 0>>>( d2, n*k,1 );
    //magmablas_dlaset( MagmaUpperLower, n, k, d1, n );
    //magmablas_dlaset( MagmaUpperLower, n, k, d2, n );
    while( Gs.x > 1 ) {
        Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
        magma_dblockreduce_kernel<<< Gs_next.x, Bs.x, Ms >>> ( Gs.x, n, k, aux1, aux2 );
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else     { aux2 = d1; aux1 = d2; }
    }
    for( int j=0; j<k; j++) {
        magma_dcopyvector( 1, aux1+j*n, 1, skp+j, 1 );
    }
    */

    if ( k>1) {
        while( Gs.x > 1 ) {
            Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
            if ( Gs_next.x == 1 ) Gs_next.x = 2;
            hipLaunchKernelGGL(( magma_dblockreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue , Gs.x, n, k, aux1, aux2 );
            Gs_next.x = Gs_next.x /2;
            Gs.x = Gs_next.x;
            b = 1 - b;
            if ( b ) { aux1 = d1; aux2 = d2; }
            else     { aux2 = d1; aux1 = d2; }
        }
    }
    else {
        while( Gs.x > 1 ) {
            Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
            if ( Gs_next.x == 1 ) Gs_next.x = 2;
            hipLaunchKernelGGL(( magma_dreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue , Gs.x, n, aux1, aux2 );
            Gs_next.x = Gs_next.x /2;
            Gs.x = Gs_next.x;
            b = 1 - b;
            if ( b ) { aux1 = d1; aux2 = d2; }
            else     { aux2 = d1; aux1 = d2; }
        }
    }

    for( int j=0; j<k; j++) {
        magma_dcopyvector_async( 1, aux1+j*n, 1, skp+j, 1, queue );
    }

    magmablasSetKernelStream( orig_queue );
    return MAGMA_SUCCESS;
}

/**
    Purpose
    -------

    This is an extension of the merged dot product above by chunking
    the set of vectors v_i such that the data always fits into cache.
    It is equivalent to a matrix vector product Vr where V
    contains few rows and many columns. The computation is the same:

    skp = ( <v_0,r>, <v_1,r>, .. )

    Returns the vector skp.

    Arguments
    ---------

    @param[in]  n       int                 length of v_i and r
    @param[in]  k       int                 # vectors v_i
    @param[in]  v       magmaDouble_ptr     v = (v_0 .. v_i.. v_k)
    @param[in]  r       magmaDouble_ptr     r
    @param[in]  d1      magmaDouble_ptr     workspace
    @param[in]  d2      magmaDouble_ptr     workspace
    @param[out] skp     magmaDouble_ptr     vector[k] of scalar products (<v_i,r>...)
    @param[in]  queue   magma_queue_t       Queue to execute in.

    @ingroup magmasparse_d
    ********************************************************************/

extern "C" magma_int_t
magma_dgemvmdot(
    int n,
    int k,
    magmaDouble_ptr v,
    magmaDouble_ptr r,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int rows_left = k;
    int offset = 0;
    int chunk_size = 4;
    // process in chunks of 4 - has to be adapted to hardware and precision
    while( rows_left > (chunk_size) ) {
        magma_dmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
        offset = offset + chunk_size;
        rows_left = rows_left-chunk_size;
    }

    // process rest
    magma_dmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );

    return MAGMA_SUCCESS;
}
0b4e8bfc8089ba0a9e1ff3945897e9e7187c0849.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zmdot.cu normal z -> d, Fri Sep 11 18:29:42 2015 @author Hartwig Anzt */ #include "common_magma.h" #define BLOCK_SIZE 256 #define PRECISION_d // initialize arrays with zero __global__ void magma_dgpumemzero( double * d, int n, int k ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i < n ){ for( int j=0; j<k; j++) d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); } } // dot product __global__ void magma_ddot_kernel( int Gs, int n, double * v, double * r, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // dot product for multiple vectors __global__ void magma_dblockdot_kernel( int Gs, int n, int k, double * v, double * r, double * vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // k vectors v(i) if (i<n){ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = v[i+j*n] * r[i]; } else { for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] 
+= temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for multiple vectors __global__ void magma_dblockreduce_kernel( int Gs, int n, int k, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ] : MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // accelerated reduction for one vector __global__ void magma_dreduce_kernel_fast( int Gs, int n, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? 
vtmp[ i + blockSize ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // accelerated block reduction for multiple vectors __global__ void magma_dblockreduce_kernel_fast( int Gs, int n, int k, double * vtmp, double * vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<k; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = 
            temp[ j*(blockSize) ];
        }
    }
}

/**
    Purpose
    -------

    Computes the scalar product of a set of vectors v_i such that

    skp = ( <v_0,r>, <v_1,r>, .. )

    Returns the vector skp.

    Arguments
    ---------

    @param[in]  n       int                 length of v_i and r
    @param[in]  k       int                 # vectors v_i
    @param[in]  v       magmaDouble_ptr     v = (v_0 .. v_i.. v_k)
    @param[in]  r       magmaDouble_ptr     r
    @param[in]  d1      magmaDouble_ptr     workspace
    @param[in]  d2      magmaDouble_ptr     workspace
    @param[out] skp     magmaDouble_ptr     vector[k] of scalar products (<v_i,r>...)
    @param[in]  queue   magma_queue_t       Queue to execute in.

    @ingroup magmasparse_dblas
    ********************************************************************/

extern "C" magma_int_t
magma_dmdotc(
    int n,
    int k,
    magmaDouble_ptr v,
    magmaDouble_ptr r,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    // set queue for old dense routines
    magma_queue_t orig_queue;
    magmablasGetKernelStream( &orig_queue );

    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    dim3 Gs_next;
    int Ms = (k)* (local_block_size) * sizeof( double ); // k vecs
    magmaDouble_ptr aux1 = d1, aux2 = d2;
    int b = 1;

    if (k>1) {
        magma_dblockdot_kernel<<<Gs, Bs, Ms, queue>>>( Gs.x, n, k, v, r, d1 );
    }
    else {
        magma_ddot_kernel<<<Gs, Bs, Ms, queue>>>( Gs.x, n, v, r, d1 );
    }

    /*
    // not necessary to zero GPU mem
    magma_dgpumemzero<<<Gs, Bs, 0>>>( d1, n*k,1 );
    magma_dgpumemzero<<<Gs, Bs, 0>>>( d2, n*k,1 );
    //magmablas_dlaset( MagmaUpperLower, n, k, d1, n );
    //magmablas_dlaset( MagmaUpperLower, n, k, d2, n );
    while( Gs.x > 1 ) {
        Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
        magma_dblockreduce_kernel<<< Gs_next.x, Bs.x, Ms >>> ( Gs.x, n, k, aux1, aux2 );
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else     { aux2 = d1; aux1 = d2; }
    }
    for( int j=0; j<k; j++) {
        magma_dcopyvector( 1, aux1+j*n, 1, skp+j, 1 );
    }
    */

    if ( k>1) {
        while( Gs.x > 1 ) {
            Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
            if ( Gs_next.x == 1 ) Gs_next.x = 2;
            magma_dblockreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue >>> ( Gs.x, n, k, aux1, aux2 );
            Gs_next.x = Gs_next.x /2;
            Gs.x = Gs_next.x;
            b = 1 - b;
            if ( b ) { aux1 = d1; aux2 = d2; }
            else     { aux2 = d1; aux1 = d2; }
        }
    }
    else {
        while( Gs.x > 1 ) {
            Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
            if ( Gs_next.x == 1 ) Gs_next.x = 2;
            magma_dreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue >>> ( Gs.x, n, aux1, aux2 );
            Gs_next.x = Gs_next.x /2;
            Gs.x = Gs_next.x;
            b = 1 - b;
            if ( b ) { aux1 = d1; aux2 = d2; }
            else     { aux2 = d1; aux1 = d2; }
        }
    }

    for( int j=0; j<k; j++) {
        magma_dcopyvector_async( 1, aux1+j*n, 1, skp+j, 1, queue );
    }

    magmablasSetKernelStream( orig_queue );
    return MAGMA_SUCCESS;
}

/**
    Purpose
    -------

    This is an extension of the merged dot product above by chunking
    the set of vectors v_i such that the data always fits into cache.
    It is equivalent to a matrix vector product Vr where V
    contains few rows and many columns. The computation is the same:

    skp = ( <v_0,r>, <v_1,r>, .. )

    Returns the vector skp.

    Arguments
    ---------

    @param[in]  n       int                 length of v_i and r
    @param[in]  k       int                 # vectors v_i
    @param[in]  v       magmaDouble_ptr     v = (v_0 .. v_i.. v_k)
    @param[in]  r       magmaDouble_ptr     r
    @param[in]  d1      magmaDouble_ptr     workspace
    @param[in]  d2      magmaDouble_ptr     workspace
    @param[out] skp     magmaDouble_ptr     vector[k] of scalar products (<v_i,r>...)
    @param[in]  queue   magma_queue_t       Queue to execute in.

    @ingroup magmasparse_d
    ********************************************************************/

extern "C" magma_int_t
magma_dgemvmdot(
    int n,
    int k,
    magmaDouble_ptr v,
    magmaDouble_ptr r,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int rows_left = k;
    int offset = 0;
    int chunk_size = 4;
    // process in chunks of 4 - has to be adapted to hardware and precision
    while( rows_left > (chunk_size) ) {
        magma_dmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
        offset = offset + chunk_size;
        rows_left = rows_left-chunk_size;
    }

    // process rest
    magma_dmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );

    return MAGMA_SUCCESS;
}
1eea648cb16a15800ef65e3b8b7547895b3fa611.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017 Rory mitchell */ #include <thrust/binary_search.h> #include <thrust/count.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <hipcub/hipcub.hpp> #include <algorithm> #include <functional> #include <future> #include <numeric> #include "common_hip.cuh" #include "device_helpers_hip.cuh" #include "dmlc/timer.h" #include "gpu_hist_builder_hip.cuh" namespace xgboost { namespace tree { void DeviceGMat::Init(int device_idx, const common::GHistIndexMatrix& gmat, bst_ulong element_begin, bst_ulong element_end, bst_ulong row_begin, bst_ulong row_end, int n_bins) { dh::safe_cuda(hipSetDevice(device_idx)); CHECK(gidx_buffer.size()) << "gidx_buffer must be externally allocated"; CHECK_EQ(row_ptr.size(), (row_end - row_begin) + 1) << "row_ptr must be externally allocated"; common::CompressedBufferWriter cbw(n_bins); std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size()); cbw.Write(host_buffer.data(), gmat.index.begin() + element_begin, gmat.index.begin() + element_end); gidx_buffer = host_buffer; gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), n_bins); // row_ptr thrust::copy(gmat.row_ptr.data() + row_begin, gmat.row_ptr.data() + row_end + 1, row_ptr.tbegin()); // normalise row_ptr size_t start = gmat.row_ptr[row_begin]; thrust::transform(row_ptr.tbegin(), row_ptr.tend(), row_ptr.tbegin(), [=] __device__(size_t val) { return val - start; }); } void DeviceHist::Init(int n_bins_in) { this->n_bins = n_bins_in; CHECK(!data.empty()) << "DeviceHist must be externally allocated"; } void DeviceHist::Reset(int device_idx) { hipSetDevice(device_idx); data.fill(bst_gpair()); } bst_gpair* DeviceHist::GetLevelPtr(int depth) { return data.data() + n_nodes(depth - 1) * n_bins; } int DeviceHist::LevelSize(int depth) { return n_bins * n_nodes_level(depth); } HistBuilder DeviceHist::GetBuilder() { return HistBuilder(data.data(), n_bins); } HistBuilder::HistBuilder(bst_gpair* ptr, int n_bins) : d_hist(ptr), n_bins(n_bins) {} __device__ void HistBuilder::Add(bst_gpair gpair, int gidx, int nidx) const { int hist_idx = nidx * n_bins + gidx; atomicAdd(&(d_hist[hist_idx].grad), gpair.grad); // OPTMARK: This and below // line lead to about 3X // slowdown due to memory // dependency and access // pattern issues. atomicAdd(&(d_hist[hist_idx].hess), gpair.hess); } __device__ bst_gpair HistBuilder::Get(int gidx, int nidx) const { return d_hist[nidx * n_bins + gidx]; } GPUHistBuilder::GPUHistBuilder() : initialised(false), is_dense(false), p_last_fmat_(nullptr), prediction_cache_initialised(false) {} GPUHistBuilder::~GPUHistBuilder() { if (initialised) { for (int d_idx = 0; d_idx < n_devices; ++d_idx) { ncclCommDestroy(comms[d_idx]); dh::safe_cuda(hipSetDevice(dList[d_idx])); dh::safe_cuda(hipStreamDestroy(*(streams[d_idx]))); } for (int num_d = 1; num_d <= n_devices; ++num_d) { // loop over number of devices used for (int d_idx = 0; d_idx < n_devices; ++d_idx) { ncclCommDestroy(find_split_comms[num_d - 1][d_idx]); } } } } void GPUHistBuilder::Init(const TrainParam& param) { CHECK(param.max_depth < 16) << "Tree depth too large."; CHECK(param.grow_policy != TrainParam::kLossGuide) << "Loss guided growth policy not supported. 
Use CPU algorithm."; this->param = param; CHECK(param.n_gpus != 0) << "Must have at least one device"; int n_devices_all = dh::n_devices_all(param.n_gpus); for (int device_idx = 0; device_idx < n_devices_all; device_idx++) { if (!param.silent) { size_t free_memory = dh::available_memory(device_idx); const int mb_size = 1048576; LOG(CONSOLE) << "Device: [" << device_idx << "] " << dh::device_name(device_idx) << " with " << free_memory / mb_size << " MB available device memory."; } } } void GPUHistBuilder::InitData(const std::vector<bst_gpair>& gpair, DMatrix& fmat, // NOLINT const RegTree& tree) { // set member num_rows and n_devices for rest of GPUHistBuilder members info = &fmat.info(); num_rows = info->num_row; n_devices = dh::n_devices(param.n_gpus, num_rows); if (!initialised) { // set dList member dList.resize(n_devices); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices(); dList[d_idx] = device_idx; } // initialize nccl comms.resize(n_devices); streams.resize(n_devices); dh::safe_nccl(ncclCommInitAll(comms.data(), n_devices, dList.data())); // initialize communicator // (One communicator per // process) // printf("# NCCL: Using devices\n"); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { streams[d_idx] = reinterpret_cast<hipStream_t*>(malloc(sizeof(hipStream_t))); dh::safe_cuda(hipSetDevice(dList[d_idx])); dh::safe_cuda(hipStreamCreate(streams[d_idx])); int cudaDev; int rank; hipDeviceProp_t prop; dh::safe_nccl(ncclCommCuDevice(comms[d_idx], &cudaDev)); dh::safe_nccl(ncclCommUserRank(comms[d_idx], &rank)); dh::safe_cuda(hipGetDeviceProperties(&prop, cudaDev)); // printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev, // prop.pciBusID, prop.name); fflush(stdout); } // local find_split group of comms for each case of reduced number of GPUs // to use find_split_comms.resize( n_devices, std::vector<ncclComm_t>(n_devices)); // TODO(JCM): Excessive, but // ok, and best to do // here instead of // repeatedly for (int num_d = 1; num_d <= n_devices; ++num_d) { // loop over number of devices used dh::safe_nccl(ncclCommInitAll(find_split_comms[num_d - 1].data(), num_d, dList.data())); // initialize communicator // (One communicator per // process) } CHECK(fmat.SingleColBlock()) << "grow_gpu_hist: must have single column " "block. 
Try setting 'tree_method' " "parameter to 'exact'"; is_dense = info->num_nonzero == info->num_col * info->num_row; hmat_.Init(&fmat, param.max_bin); gmat_.cut = &hmat_; gmat_.Init(&fmat); int n_bins = hmat_.row_ptr.back(); int n_features = hmat_.row_ptr.size() - 1; // deliniate data onto multiple gpus device_row_segments.push_back(0); device_element_segments.push_back(0); bst_uint offset = 0; bst_uint shard_size = ::ceil(static_cast<double>(num_rows) / n_devices); for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; offset += shard_size; offset = ::min(offset, num_rows); device_row_segments.push_back(offset); device_element_segments.push_back(gmat_.row_ptr[offset]); } // Build feature segments std::vector<int> h_feature_segments; for (int node = 0; node < n_nodes_level(param.max_depth - 1); node++) { for (int fidx = 0; fidx < n_features; fidx++) { h_feature_segments.push_back(hmat_.row_ptr[fidx] + node * n_bins); } } h_feature_segments.push_back(n_nodes_level(param.max_depth - 1) * n_bins); // Construct feature map std::vector<int> h_gidx_feature_map(n_bins); for (int fidx = 0; fidx < n_features; fidx++) { for (int i = hmat_.row_ptr[fidx]; i < hmat_.row_ptr[fidx + 1]; i++) { h_gidx_feature_map[i] = fidx; } } int level_max_bins = n_nodes_level(param.max_depth - 1) * n_bins; // allocate unique common data that reside on master device (NOTE: None // currently) // int master_device=dList[0]; // ba.allocate(master_device, ); // allocate vectors across all devices temp_memory.resize(n_devices); hist_vec.resize(n_devices); nodes.resize(n_devices); nodes_temp.resize(n_devices); nodes_child_temp.resize(n_devices); left_child_smallest.resize(n_devices); left_child_smallest_temp.resize(n_devices); feature_flags.resize(n_devices); fidx_min_map.resize(n_devices); feature_segments.resize(n_devices); prediction_cache.resize(n_devices); position.resize(n_devices); position_tmp.resize(n_devices); device_matrix.resize(n_devices); device_gpair.resize(n_devices); gidx_feature_map.resize(n_devices); gidx_fvalue_map.resize(n_devices); int find_split_n_devices = ::pow(2, ::floor(std::log2(n_devices))); find_split_n_devices = ::min(n_nodes_level(param.max_depth), find_split_n_devices); int max_num_nodes_device = n_nodes_level(param.max_depth) / find_split_n_devices; // num_rows_segment: for sharding rows onto gpus for splitting data // num_elements_segment: for sharding rows (of elements) onto gpus for // splitting data // max_num_nodes_device: for sharding nodes onto gpus for split finding // All other variables have full copy on gpu, with copy either being // identical or just current portion (like for histogram) before AllReduce for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; bst_uint num_rows_segment = device_row_segments[d_idx + 1] - device_row_segments[d_idx]; bst_ulong num_elements_segment = device_element_segments[d_idx + 1] - device_element_segments[d_idx]; ba.allocate( device_idx, &(hist_vec[d_idx].data), n_nodes(param.max_depth - 1) * n_bins, &nodes[d_idx], n_nodes(param.max_depth), &nodes_temp[d_idx], max_num_nodes_device, &nodes_child_temp[d_idx], max_num_nodes_device, &left_child_smallest[d_idx], n_nodes(param.max_depth), &left_child_smallest_temp[d_idx], max_num_nodes_device, &feature_flags[d_idx], n_features, // may change but same on all devices &fidx_min_map[d_idx], hmat_.min_val.size(), // constant and same on all devices &feature_segments[d_idx], h_feature_segments.size(), // constant and same on all devices &prediction_cache[d_idx], 
num_rows_segment, &position[d_idx], num_rows_segment, &position_tmp[d_idx], num_rows_segment, &device_gpair[d_idx], num_rows_segment, &device_matrix[d_idx].gidx_buffer, common::CompressedBufferWriter::CalculateBufferSize( num_elements_segment, n_bins), // constant and same on all devices &device_matrix[d_idx].row_ptr, num_rows_segment + 1, &gidx_feature_map[d_idx], n_bins, // constant and same on all devices &gidx_fvalue_map[d_idx], hmat_.cut.size()); // constant and same on all devices // Copy Host to Device (assumes comes after ba.allocate that sets device) device_matrix[d_idx].Init( device_idx, gmat_, device_element_segments[d_idx], device_element_segments[d_idx + 1], device_row_segments[d_idx], device_row_segments[d_idx + 1], n_bins); gidx_feature_map[d_idx] = h_gidx_feature_map; gidx_fvalue_map[d_idx] = hmat_.cut; feature_segments[d_idx] = h_feature_segments; fidx_min_map[d_idx] = hmat_.min_val; // Initialize, no copy hist_vec[d_idx].Init(n_bins); // init host object prediction_cache[d_idx].fill(0); // init device object (assumes comes // after ba.allocate that sets device) feature_flags[d_idx].fill(1); // init device object (assumes comes after // ba.allocate that sets device) } if (!param.silent) { const int mb_size = 1048576; LOG(CONSOLE) << "Allocated " << ba.size() / mb_size << " MB"; } initialised = true; } // copy or init to do every iteration for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); nodes[d_idx].fill(Node()); nodes_temp[d_idx].fill(Node()); nodes_child_temp[d_idx].fill(Node()); position[d_idx].fill(0); device_gpair[d_idx].copy(gpair.begin() + device_row_segments[d_idx], gpair.begin() + device_row_segments[d_idx + 1]); subsample_gpair(&device_gpair[d_idx], param.subsample, device_row_segments[d_idx]); hist_vec[d_idx].Reset(device_idx); // left_child_smallest and left_child_smallest_temp don't need to be // initialized } dh::synchronize_n_devices(n_devices, dList); p_last_fmat_ = &fmat; } void GPUHistBuilder::BuildHist(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t begin = device_element_segments[d_idx]; size_t end = device_element_segments[d_idx + 1]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; auto d_gidx = device_matrix[d_idx].gidx; auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin(); auto d_position = position[d_idx].data(); auto d_gpair = device_gpair[d_idx].data(); auto d_left_child_smallest = left_child_smallest[d_idx].data(); auto hist_builder = hist_vec[d_idx].GetBuilder(); dh::TransformLbs( device_idx, &temp_memory[d_idx], end - begin, d_row_ptr, row_end - row_begin, [=] __device__(size_t local_idx, int local_ridx) { int nidx = d_position[local_ridx]; // OPTMARK: latency if (!is_active(nidx, depth)) return; // Only increment smallest node bool is_smallest = (d_left_child_smallest[parent_nidx(nidx)] && is_left_child(nidx)) || (!d_left_child_smallest[parent_nidx(nidx)] && !is_left_child(nidx)); if (!is_smallest && depth > 0) return; int gidx = d_gidx[local_idx]; bst_gpair gpair = d_gpair[local_ridx]; hist_builder.Add(gpair, gidx, nidx); // OPTMARK: This is slow, could use // shared memory or cache results // intead of writing to global // memory every time in atomic way. 
}); } dh::synchronize_n_devices(n_devices, dList); // time.printElapsed("Add Time"); // (in-place) reduce each element of histogram (for only current level) across // multiple gpus // TODO(JCM): use out of place with pre-allocated buffer, but then have to // copy // back on device // fprintf(stderr,"sizeof(bst_gpair)/sizeof(float)=%d\n",sizeof(bst_gpair)/sizeof(float)); for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); dh::safe_nccl(ncclAllReduce( reinterpret_cast<const void*>(hist_vec[d_idx].GetLevelPtr(depth)), reinterpret_cast<void*>(hist_vec[d_idx].GetLevelPtr(depth)), hist_vec[d_idx].LevelSize(depth) * sizeof(bst_gpair) / sizeof(float), ncclFloat, ncclSum, comms[d_idx], *(streams[d_idx]))); } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx]))); } // if no NCCL, then presume only 1 GPU, then already correct // time.printElapsed("Reduce-Add Time"); // Subtraction trick (applied to all devices in same way -- to avoid doing on // master and then Bcast) if (depth > 0) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); auto hist_builder = hist_vec[d_idx].GetBuilder(); auto d_left_child_smallest = left_child_smallest[d_idx].data(); int n_sub_bins = (n_nodes_level(depth) / 2) * hist_builder.n_bins; dh::launch_n(device_idx, n_sub_bins, [=] __device__(int idx) { int nidx = n_nodes(depth - 1) + ((idx / hist_builder.n_bins) * 2); bool left_smallest = d_left_child_smallest[parent_nidx(nidx)]; if (left_smallest) { nidx++; // If left is smallest switch to right child } int gidx = idx % hist_builder.n_bins; bst_gpair parent = hist_builder.Get(gidx, parent_nidx(nidx)); int other_nidx = left_smallest ? nidx - 1 : nidx + 1; bst_gpair other = hist_builder.Get(gidx, other_nidx); hist_builder.Add(parent - other, gidx, nidx); // OPTMARK: This is slow, could use shared // memory or cache results intead of writing to // global memory every time in atomic way. 
}); } dh::synchronize_n_devices(n_devices, dList); } } template <int BLOCK_THREADS> __global__ void find_split_kernel( const bst_gpair* d_level_hist, int* d_feature_segments, int depth, int n_features, int n_bins, Node* d_nodes, Node* d_nodes_temp, Node* d_nodes_child_temp, int nodes_offset_device, float* d_fidx_min_map, float* d_gidx_fvalue_map, GPUTrainingParam gpu_param, bool* d_left_child_smallest_temp, bool colsample, int* d_feature_flags) { typedef hipcub::KeyValuePair<int, float> ArgMaxT; typedef hipcub::BlockScan<bst_gpair, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT; typedef hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT; typedef hipcub::BlockReduce<bst_gpair, BLOCK_THREADS> SumReduceT; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; struct UninitializedSplit : cub::Uninitialized<Split> {}; struct UninitializedGpair : cub::Uninitialized<bst_gpair> {}; __shared__ UninitializedSplit uninitialized_split; Split& split = uninitialized_split.Alias(); __shared__ UninitializedGpair uninitialized_sum; bst_gpair& shared_sum = uninitialized_sum.Alias(); __shared__ ArgMaxT block_max; __shared__ TempStorage temp_storage; if (threadIdx.x == 0) { split = Split(); } __syncthreads(); // below two are for accessing full-sized node list stored on each device // always one block per node, BLOCK_THREADS threads per block int level_node_idx = blockIdx.x + nodes_offset_device; int node_idx = n_nodes(depth - 1) + level_node_idx; for (int fidx = 0; fidx < n_features; fidx++) { if (colsample && d_feature_flags[fidx] == 0) continue; int begin = d_feature_segments[level_node_idx * n_features + fidx]; int end = d_feature_segments[level_node_idx * n_features + fidx + 1]; int gidx = (begin - (level_node_idx * n_bins)) + threadIdx.x; bool thread_active = threadIdx.x < end - begin; bst_gpair feature_sum = bst_gpair(); for (int reduce_begin = begin; reduce_begin < end; reduce_begin += BLOCK_THREADS) { // Scan histogram bst_gpair bin = thread_active ? d_level_hist[reduce_begin + threadIdx.x] : bst_gpair(); feature_sum += SumReduceT(temp_storage.sum_reduce).Reduce(bin, hipcub::Sum()); } if (threadIdx.x == 0) { shared_sum = feature_sum; } // __syncthreads(); // no need to synch because below there is a Scan GpairCallbackOp prefix_op = GpairCallbackOp(); for (int scan_begin = begin; scan_begin < end; scan_begin += BLOCK_THREADS) { bst_gpair bin = thread_active ? d_level_hist[scan_begin + threadIdx.x] : bst_gpair(); BlockScanT(temp_storage.scan) .ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op); // Calculate gain bst_gpair parent_sum = d_nodes[node_idx].sum_gradients; float parent_gain = d_nodes[node_idx].root_gain; bst_gpair missing = parent_sum - shared_sum; bool missing_left; float gain = thread_active ? loss_chg_missing(bin, missing, parent_sum, parent_gain, gpu_param, missing_left) : -FLT_MAX; __syncthreads(); // Find thread with best gain ArgMaxT tuple(threadIdx.x, gain); ArgMaxT best = MaxReduceT(temp_storage.max_reduce).Reduce(tuple, hipcub::ArgMax()); if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { float fvalue; if (threadIdx.x == 0 && begin == scan_begin) { // check at start of first tile fvalue = d_fidx_min_map[fidx]; } else { fvalue = d_gidx_fvalue_map[gidx - 1]; } bst_gpair left = missing_left ? 
bin + missing : bin; bst_gpair right = parent_sum - left; split.Update(gain, missing_left, fvalue, fidx, left, right, gpu_param); } __syncthreads(); } // end scan } // end over features // Create node if (threadIdx.x == 0) { if (d_nodes_temp == NULL) { d_nodes[node_idx].split = split; } else { d_nodes_temp[blockIdx.x] = d_nodes[node_idx]; // first copy node values d_nodes_temp[blockIdx.x].split = split; // now assign split } // if (depth == 0) { // split.Print(); // } Node *Nodeleft, *Noderight; bool* left_child_smallest; if (d_nodes_temp == NULL) { Nodeleft = &d_nodes[left_child_nidx(node_idx)]; Noderight = &d_nodes[right_child_nidx(node_idx)]; left_child_smallest = &d_left_child_smallest_temp[node_idx]; // NOTE: not per level, even // though _temp variable name } else { Nodeleft = &d_nodes_child_temp[blockIdx.x * 2 + 0]; Noderight = &d_nodes_child_temp[blockIdx.x * 2 + 1]; left_child_smallest = &d_left_child_smallest_temp[blockIdx.x]; } *Nodeleft = Node(split.left_sum, CalcGain(gpu_param, split.left_sum.grad, split.left_sum.hess), CalcWeight(gpu_param, split.left_sum.grad, split.left_sum.hess)); *Noderight = Node(split.right_sum, CalcGain(gpu_param, split.right_sum.grad, split.right_sum.hess), CalcWeight(gpu_param, split.right_sum.grad, split.right_sum.hess)); // Record smallest node if (split.left_sum.hess <= split.right_sum.hess) { *left_child_smallest = true; } else { *left_child_smallest = false; } } } #define MIN_BLOCK_THREADS 32 #define CHUNK_BLOCK_THREADS 32 // MAX_BLOCK_THREADS of 1024 is hard-coded maximum block size due // to CUDA compatibility 35 and above requirement // for Maximum number of threads per block #define MAX_BLOCK_THREADS 1024 void GPUHistBuilder::FindSplit(int depth) { // Specialised based on max_bins this->FindSplitSpecialize<MIN_BLOCK_THREADS>(depth); } template <> void GPUHistBuilder::FindSplitSpecialize<MAX_BLOCK_THREADS>(int depth) { LaunchFindSplit<MAX_BLOCK_THREADS>(depth); } template <int BLOCK_THREADS> void GPUHistBuilder::FindSplitSpecialize(int depth) { if (param.max_bin <= BLOCK_THREADS) { LaunchFindSplit<BLOCK_THREADS>(depth); } else { this->FindSplitSpecialize<BLOCK_THREADS + CHUNK_BLOCK_THREADS>(depth); } } template <int BLOCK_THREADS> void GPUHistBuilder::LaunchFindSplit(int depth) { bool colsample = param.colsample_bylevel < 1.0 || param.colsample_bytree < 1.0; int dosimuljob = 1; int simuljob = 1; // whether to do job on single GPU and broadcast (0) or to // do same job on each GPU (1) (could make user parameter, // but too fine-grained maybe) int findsplit_shardongpus = 0; // too expensive generally, disable for now if (findsplit_shardongpus) { dosimuljob = 0; // use power of 2 for split finder because nodes are power of 2 (broadcast // result to remaining devices) int find_split_n_devices = ::pow(2, ::floor(std::log2(n_devices))); find_split_n_devices = ::min(n_nodes_level(depth), find_split_n_devices); int num_nodes_device = n_nodes_level(depth) / find_split_n_devices; int num_nodes_child_device = n_nodes_level(depth + 1) / find_split_n_devices; const int GRID_SIZE = num_nodes_device; // NOTE: No need to scatter before gather as all devices have same copy of // nodes, and within find_split_kernel() nodes_temp is given values from // nodes // for all nodes (split among devices) find best split per node for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); int nodes_offset_device = d_idx * num_nodes_device; hipLaunchKernelGGL(( find_split_kernel<BLOCK_THREADS>), 
dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0, (const bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)), feature_segments[d_idx].data(), depth, (info->num_col), (hmat_.row_ptr.back()), nodes[d_idx].data(), nodes_temp[d_idx].data(), nodes_child_temp[d_idx].data(), nodes_offset_device, fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param), left_child_smallest_temp[d_idx].data(), colsample, feature_flags[d_idx].data()); } // nccl only on devices that did split dh::synchronize_n_devices(find_split_n_devices, dList); for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); dh::safe_nccl(ncclAllGather( reinterpret_cast<const void*>(nodes_temp[d_idx].data()), num_nodes_device * sizeof(Node) / sizeof(char), ncclChar, reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)), find_split_comms[find_split_n_devices - 1][d_idx], *(streams[d_idx]))); if (depth != param.max_depth) { // don't copy over children nodes if no more nodes dh::safe_nccl(ncclAllGather( reinterpret_cast<const void*>(nodes_child_temp[d_idx].data()), num_nodes_child_device * sizeof(Node) / sizeof(char), ncclChar, reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)), find_split_comms[find_split_n_devices - 1][d_idx], *(streams[d_idx]))); // Note offset by n_nodes(depth) // for recvbuff for child nodes } dh::safe_nccl(ncclAllGather( reinterpret_cast<const void*>(left_child_smallest_temp[d_idx].data()), num_nodes_device * sizeof(bool) / sizeof(char), ncclChar, reinterpret_cast<void*>(left_child_smallest[d_idx].data() + n_nodes(depth - 1)), find_split_comms[find_split_n_devices - 1][d_idx], *(streams[d_idx]))); } for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx]))); } if (n_devices > find_split_n_devices && n_devices > 1) { // if n_devices==1, no need to Bcast // if find_split_n_devices==1, this is just a copy operation, else it // copies // from master to all nodes in case extra devices not involved in split for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); int master_device = dList[0]; dh::safe_nccl(ncclBcast( reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)), n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); if (depth != param.max_depth) { // don't copy over children nodes if no // more nodes dh::safe_nccl(ncclBcast( reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)), n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); } dh::safe_nccl(ncclBcast( reinterpret_cast<void*>(left_child_smallest[d_idx].data() + n_nodes(depth - 1)), n_nodes_level(depth) * sizeof(bool) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx]))); } } } else if (simuljob == 0) { dosimuljob = 0; int num_nodes_device = n_nodes_level(depth); const int GRID_SIZE = num_nodes_device; int d_idx = 0; int master_device = dList[d_idx]; int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); int nodes_offset_device = d_idx * num_nodes_device; hipLaunchKernelGGL(( 
find_split_kernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0, (const bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)), feature_segments[d_idx].data(), depth, (info->num_col), (hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL, nodes_offset_device, fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param), left_child_smallest[d_idx].data(), colsample, feature_flags[d_idx].data()); // broadcast result for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); dh::safe_nccl(ncclBcast( reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)), n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); if (depth != param.max_depth) { // don't copy over children nodes if no more nodes dh::safe_nccl(ncclBcast( reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)), n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); } dh::safe_nccl( ncclBcast(reinterpret_cast<void*>(left_child_smallest[d_idx].data() + n_nodes(depth - 1)), n_nodes_level(depth) * sizeof(bool) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx]))); } } else { dosimuljob = 1; } if (dosimuljob) { // if no NCCL or simuljob==1, do this int num_nodes_device = n_nodes_level(depth); const int GRID_SIZE = num_nodes_device; // all GPUs do same work for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); int nodes_offset_device = 0; hipLaunchKernelGGL(( find_split_kernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0, (const bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)), feature_segments[d_idx].data(), depth, (info->num_col), (hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL, nodes_offset_device, fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param), left_child_smallest[d_idx].data(), colsample, feature_flags[d_idx].data()); } } // NOTE: No need to syncrhonize with host as all above pure P2P ops or // on-device ops } void GPUHistBuilder::InitFirstNode(const std::vector<bst_gpair>& gpair) { // Perform asynchronous reduction on each gpu std::vector<bst_gpair> device_sums(n_devices); #pragma omp parallel for num_threads(n_devices) for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); auto begin = device_gpair[d_idx].tbegin(); auto end = device_gpair[d_idx].tend(); bst_gpair init = bst_gpair(); auto binary_op = thrust::plus<bst_gpair>(); device_sums[d_idx] = thrust::reduce(begin, end, init, binary_op); } bst_gpair sum = bst_gpair(); for (int d_idx = 0; d_idx < n_devices; d_idx++) { sum += device_sums[d_idx]; } // Setup first node so all devices have same first node (here done same on all // devices, or could have done one device and Bcast if worried about exact // precision issues) for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_nodes = nodes[d_idx].data(); auto gpu_param = GPUTrainingParam(param); dh::launch_n(device_idx, 1, [=] __device__(int idx) { bst_gpair sum_gradients = sum; d_nodes[idx] = Node(sum_gradients, CalcGain(gpu_param, sum_gradients.grad, sum_gradients.hess), 
CalcWeight(gpu_param, sum_gradients.grad, sum_gradients.hess)); }); } // synch all devices to host before moving on (No, can avoid because BuildHist // calls another kernel in default stream) // dh::synchronize_n_devices(n_devices, dList); } void GPUHistBuilder::UpdatePosition(int depth) { if (is_dense) { this->UpdatePositionDense(depth); } else { this->UpdatePositionSparse(depth); } } void GPUHistBuilder::UpdatePositionDense(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_position = position[d_idx].data(); Node* d_nodes = nodes[d_idx].data(); auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data(); auto d_gidx = device_matrix[d_idx].gidx; int n_columns = info->num_col; size_t begin = device_row_segments[d_idx]; size_t end = device_row_segments[d_idx + 1]; dh::launch_n(device_idx, end - begin, [=] __device__(size_t local_idx) { int pos = d_position[local_idx]; if (!is_active(pos, depth)) { return; } Node node = d_nodes[pos]; if (node.IsLeaf()) { return; } int gidx = d_gidx[local_idx * static_cast<size_t>(n_columns) + static_cast<size_t>(node.split.findex)]; float fvalue = d_gidx_fvalue_map[gidx]; if (fvalue <= node.split.fvalue) { d_position[local_idx] = left_child_nidx(pos); } else { d_position[local_idx] = right_child_nidx(pos); } }); } dh::synchronize_n_devices(n_devices, dList); // dh::safe_cuda(hipDeviceSynchronize()); } void GPUHistBuilder::UpdatePositionSparse(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_position = position[d_idx].data(); auto d_position_tmp = position_tmp[d_idx].data(); Node* d_nodes = nodes[d_idx].data(); auto d_gidx_feature_map = gidx_feature_map[d_idx].data(); auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data(); auto d_gidx = device_matrix[d_idx].gidx; auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin(); size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; size_t element_begin = device_element_segments[d_idx]; size_t element_end = device_element_segments[d_idx + 1]; // Update missing direction dh::launch_n(device_idx, row_end - row_begin, [=] __device__(int local_idx) { int pos = d_position[local_idx]; if (!is_active(pos, depth)) { d_position_tmp[local_idx] = pos; return; } Node node = d_nodes[pos]; if (node.IsLeaf()) { d_position_tmp[local_idx] = pos; return; } else if (node.split.missing_left) { d_position_tmp[local_idx] = pos * 2 + 1; } else { d_position_tmp[local_idx] = pos * 2 + 2; } }); // Update node based on fvalue where exists // OPTMARK: This kernel is very inefficient for both compute and memory, // dominated by memory dependency / access patterns dh::TransformLbs( device_idx, &temp_memory[d_idx], element_end - element_begin, d_row_ptr, row_end - row_begin, [=] __device__(size_t local_idx, int local_ridx) { int pos = d_position[local_ridx]; if (!is_active(pos, depth)) { return; } Node node = d_nodes[pos]; if (node.IsLeaf()) { return; } int gidx = d_gidx[local_idx]; int findex = d_gidx_feature_map[gidx]; // OPTMARK: slowest global // memory access, maybe setup // position, gidx, etc. as // combined structure? 
if (findex == node.split.findex) { float fvalue = d_gidx_fvalue_map[gidx]; if (fvalue <= node.split.fvalue) { d_position_tmp[local_ridx] = left_child_nidx(pos); } else { d_position_tmp[local_ridx] = right_child_nidx(pos); } } }); position[d_idx] = position_tmp[d_idx]; } dh::synchronize_n_devices(n_devices, dList); } void GPUHistBuilder::ColSampleTree() { if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return; feature_set_tree.resize(info->num_col); std::iota(feature_set_tree.begin(), feature_set_tree.end(), 0); feature_set_tree = col_sample(feature_set_tree, param.colsample_bytree); } void GPUHistBuilder::ColSampleLevel() { if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return; feature_set_level.resize(feature_set_tree.size()); feature_set_level = col_sample(feature_set_tree, param.colsample_bylevel); std::vector<int> h_feature_flags(info->num_col, 0); for (auto fidx : feature_set_level) { h_feature_flags[fidx] = 1; } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(hipSetDevice(device_idx)); feature_flags[d_idx] = h_feature_flags; } dh::synchronize_n_devices(n_devices, dList); } bool GPUHistBuilder::UpdatePredictionCache( const DMatrix* data, std::vector<bst_float>* p_out_preds) { std::vector<bst_float>& out_preds = *p_out_preds; if (nodes.empty() || !p_last_fmat_ || data != p_last_fmat_) { return false; } if (!prediction_cache_initialised) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; prediction_cache[d_idx].copy(out_preds.begin() + row_begin, out_preds.begin() + row_end); } prediction_cache_initialised = true; } dh::synchronize_n_devices(n_devices, dList); float eps = param.learning_rate; for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; auto d_nodes = nodes[d_idx].data(); auto d_position = position[d_idx].data(); auto d_prediction_cache = prediction_cache[d_idx].data(); dh::launch_n(device_idx, prediction_cache[d_idx].size(), [=] __device__(int local_idx) { int pos = d_position[local_idx]; d_prediction_cache[local_idx] += d_nodes[pos].weight * eps; }); thrust::copy(prediction_cache[d_idx].tbegin(), prediction_cache[d_idx].tend(), &out_preds[row_begin]); } dh::synchronize_n_devices(n_devices, dList); return true; } void GPUHistBuilder::Update(const std::vector<bst_gpair>& gpair, DMatrix* p_fmat, RegTree* p_tree) { this->InitData(gpair, *p_fmat, *p_tree); this->InitFirstNode(gpair); this->ColSampleTree(); for (int depth = 0; depth < param.max_depth; depth++) { this->ColSampleLevel(); this->BuildHist(depth); this->FindSplit(depth); this->UpdatePosition(depth); } // done with multi-GPU, pass back result from master to tree on host int master_device = dList[0]; dh::safe_cuda(hipSetDevice(master_device)); dense2sparse_tree(p_tree, nodes[0].tbegin(), nodes[0].tend(), param); } } // namespace tree } // namespace xgboost
1eea648cb16a15800ef65e3b8b7547895b3fa611.cu
/*! * Copyright 2017 Rory mitchell */ #include <thrust/binary_search.h> #include <thrust/count.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <cub/cub.cuh> #include <algorithm> #include <functional> #include <future> #include <numeric> #include "common.cuh" #include "device_helpers.cuh" #include "dmlc/timer.h" #include "gpu_hist_builder.cuh" namespace xgboost { namespace tree { void DeviceGMat::Init(int device_idx, const common::GHistIndexMatrix& gmat, bst_ulong element_begin, bst_ulong element_end, bst_ulong row_begin, bst_ulong row_end, int n_bins) { dh::safe_cuda(cudaSetDevice(device_idx)); CHECK(gidx_buffer.size()) << "gidx_buffer must be externally allocated"; CHECK_EQ(row_ptr.size(), (row_end - row_begin) + 1) << "row_ptr must be externally allocated"; common::CompressedBufferWriter cbw(n_bins); std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size()); cbw.Write(host_buffer.data(), gmat.index.begin() + element_begin, gmat.index.begin() + element_end); gidx_buffer = host_buffer; gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), n_bins); // row_ptr thrust::copy(gmat.row_ptr.data() + row_begin, gmat.row_ptr.data() + row_end + 1, row_ptr.tbegin()); // normalise row_ptr size_t start = gmat.row_ptr[row_begin]; thrust::transform(row_ptr.tbegin(), row_ptr.tend(), row_ptr.tbegin(), [=] __device__(size_t val) { return val - start; }); } void DeviceHist::Init(int n_bins_in) { this->n_bins = n_bins_in; CHECK(!data.empty()) << "DeviceHist must be externally allocated"; } void DeviceHist::Reset(int device_idx) { cudaSetDevice(device_idx); data.fill(bst_gpair()); } bst_gpair* DeviceHist::GetLevelPtr(int depth) { return data.data() + n_nodes(depth - 1) * n_bins; } int DeviceHist::LevelSize(int depth) { return n_bins * n_nodes_level(depth); } HistBuilder DeviceHist::GetBuilder() { return HistBuilder(data.data(), n_bins); } HistBuilder::HistBuilder(bst_gpair* ptr, int n_bins) : d_hist(ptr), n_bins(n_bins) {} __device__ void HistBuilder::Add(bst_gpair gpair, int gidx, int nidx) const { int hist_idx = nidx * n_bins + gidx; atomicAdd(&(d_hist[hist_idx].grad), gpair.grad); // OPTMARK: This and below // line lead to about 3X // slowdown due to memory // dependency and access // pattern issues. atomicAdd(&(d_hist[hist_idx].hess), gpair.hess); } __device__ bst_gpair HistBuilder::Get(int gidx, int nidx) const { return d_hist[nidx * n_bins + gidx]; } GPUHistBuilder::GPUHistBuilder() : initialised(false), is_dense(false), p_last_fmat_(nullptr), prediction_cache_initialised(false) {} GPUHistBuilder::~GPUHistBuilder() { if (initialised) { for (int d_idx = 0; d_idx < n_devices; ++d_idx) { ncclCommDestroy(comms[d_idx]); dh::safe_cuda(cudaSetDevice(dList[d_idx])); dh::safe_cuda(cudaStreamDestroy(*(streams[d_idx]))); } for (int num_d = 1; num_d <= n_devices; ++num_d) { // loop over number of devices used for (int d_idx = 0; d_idx < n_devices; ++d_idx) { ncclCommDestroy(find_split_comms[num_d - 1][d_idx]); } } } } void GPUHistBuilder::Init(const TrainParam& param) { CHECK(param.max_depth < 16) << "Tree depth too large."; CHECK(param.grow_policy != TrainParam::kLossGuide) << "Loss guided growth policy not supported. 
Use CPU algorithm."; this->param = param; CHECK(param.n_gpus != 0) << "Must have at least one device"; int n_devices_all = dh::n_devices_all(param.n_gpus); for (int device_idx = 0; device_idx < n_devices_all; device_idx++) { if (!param.silent) { size_t free_memory = dh::available_memory(device_idx); const int mb_size = 1048576; LOG(CONSOLE) << "Device: [" << device_idx << "] " << dh::device_name(device_idx) << " with " << free_memory / mb_size << " MB available device memory."; } } } void GPUHistBuilder::InitData(const std::vector<bst_gpair>& gpair, DMatrix& fmat, // NOLINT const RegTree& tree) { // set member num_rows and n_devices for rest of GPUHistBuilder members info = &fmat.info(); num_rows = info->num_row; n_devices = dh::n_devices(param.n_gpus, num_rows); if (!initialised) { // set dList member dList.resize(n_devices); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices(); dList[d_idx] = device_idx; } // initialize nccl comms.resize(n_devices); streams.resize(n_devices); dh::safe_nccl(ncclCommInitAll(comms.data(), n_devices, dList.data())); // initialize communicator // (One communicator per // process) // printf("# NCCL: Using devices\n"); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { streams[d_idx] = reinterpret_cast<cudaStream_t*>(malloc(sizeof(cudaStream_t))); dh::safe_cuda(cudaSetDevice(dList[d_idx])); dh::safe_cuda(cudaStreamCreate(streams[d_idx])); int cudaDev; int rank; cudaDeviceProp prop; dh::safe_nccl(ncclCommCuDevice(comms[d_idx], &cudaDev)); dh::safe_nccl(ncclCommUserRank(comms[d_idx], &rank)); dh::safe_cuda(cudaGetDeviceProperties(&prop, cudaDev)); // printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev, // prop.pciBusID, prop.name); fflush(stdout); } // local find_split group of comms for each case of reduced number of GPUs // to use find_split_comms.resize( n_devices, std::vector<ncclComm_t>(n_devices)); // TODO(JCM): Excessive, but // ok, and best to do // here instead of // repeatedly for (int num_d = 1; num_d <= n_devices; ++num_d) { // loop over number of devices used dh::safe_nccl(ncclCommInitAll(find_split_comms[num_d - 1].data(), num_d, dList.data())); // initialize communicator // (One communicator per // process) } CHECK(fmat.SingleColBlock()) << "grow_gpu_hist: must have single column " "block. 
Try setting 'tree_method' " "parameter to 'exact'"; is_dense = info->num_nonzero == info->num_col * info->num_row; hmat_.Init(&fmat, param.max_bin); gmat_.cut = &hmat_; gmat_.Init(&fmat); int n_bins = hmat_.row_ptr.back(); int n_features = hmat_.row_ptr.size() - 1; // deliniate data onto multiple gpus device_row_segments.push_back(0); device_element_segments.push_back(0); bst_uint offset = 0; bst_uint shard_size = std::ceil(static_cast<double>(num_rows) / n_devices); for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; offset += shard_size; offset = std::min(offset, num_rows); device_row_segments.push_back(offset); device_element_segments.push_back(gmat_.row_ptr[offset]); } // Build feature segments std::vector<int> h_feature_segments; for (int node = 0; node < n_nodes_level(param.max_depth - 1); node++) { for (int fidx = 0; fidx < n_features; fidx++) { h_feature_segments.push_back(hmat_.row_ptr[fidx] + node * n_bins); } } h_feature_segments.push_back(n_nodes_level(param.max_depth - 1) * n_bins); // Construct feature map std::vector<int> h_gidx_feature_map(n_bins); for (int fidx = 0; fidx < n_features; fidx++) { for (int i = hmat_.row_ptr[fidx]; i < hmat_.row_ptr[fidx + 1]; i++) { h_gidx_feature_map[i] = fidx; } } int level_max_bins = n_nodes_level(param.max_depth - 1) * n_bins; // allocate unique common data that reside on master device (NOTE: None // currently) // int master_device=dList[0]; // ba.allocate(master_device, ); // allocate vectors across all devices temp_memory.resize(n_devices); hist_vec.resize(n_devices); nodes.resize(n_devices); nodes_temp.resize(n_devices); nodes_child_temp.resize(n_devices); left_child_smallest.resize(n_devices); left_child_smallest_temp.resize(n_devices); feature_flags.resize(n_devices); fidx_min_map.resize(n_devices); feature_segments.resize(n_devices); prediction_cache.resize(n_devices); position.resize(n_devices); position_tmp.resize(n_devices); device_matrix.resize(n_devices); device_gpair.resize(n_devices); gidx_feature_map.resize(n_devices); gidx_fvalue_map.resize(n_devices); int find_split_n_devices = std::pow(2, std::floor(std::log2(n_devices))); find_split_n_devices = std::min(n_nodes_level(param.max_depth), find_split_n_devices); int max_num_nodes_device = n_nodes_level(param.max_depth) / find_split_n_devices; // num_rows_segment: for sharding rows onto gpus for splitting data // num_elements_segment: for sharding rows (of elements) onto gpus for // splitting data // max_num_nodes_device: for sharding nodes onto gpus for split finding // All other variables have full copy on gpu, with copy either being // identical or just current portion (like for histogram) before AllReduce for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; bst_uint num_rows_segment = device_row_segments[d_idx + 1] - device_row_segments[d_idx]; bst_ulong num_elements_segment = device_element_segments[d_idx + 1] - device_element_segments[d_idx]; ba.allocate( device_idx, &(hist_vec[d_idx].data), n_nodes(param.max_depth - 1) * n_bins, &nodes[d_idx], n_nodes(param.max_depth), &nodes_temp[d_idx], max_num_nodes_device, &nodes_child_temp[d_idx], max_num_nodes_device, &left_child_smallest[d_idx], n_nodes(param.max_depth), &left_child_smallest_temp[d_idx], max_num_nodes_device, &feature_flags[d_idx], n_features, // may change but same on all devices &fidx_min_map[d_idx], hmat_.min_val.size(), // constant and same on all devices &feature_segments[d_idx], h_feature_segments.size(), // constant and same on all devices 
&prediction_cache[d_idx], num_rows_segment, &position[d_idx], num_rows_segment, &position_tmp[d_idx], num_rows_segment, &device_gpair[d_idx], num_rows_segment, &device_matrix[d_idx].gidx_buffer, common::CompressedBufferWriter::CalculateBufferSize( num_elements_segment, n_bins), // constant and same on all devices &device_matrix[d_idx].row_ptr, num_rows_segment + 1, &gidx_feature_map[d_idx], n_bins, // constant and same on all devices &gidx_fvalue_map[d_idx], hmat_.cut.size()); // constant and same on all devices // Copy Host to Device (assumes comes after ba.allocate that sets device) device_matrix[d_idx].Init( device_idx, gmat_, device_element_segments[d_idx], device_element_segments[d_idx + 1], device_row_segments[d_idx], device_row_segments[d_idx + 1], n_bins); gidx_feature_map[d_idx] = h_gidx_feature_map; gidx_fvalue_map[d_idx] = hmat_.cut; feature_segments[d_idx] = h_feature_segments; fidx_min_map[d_idx] = hmat_.min_val; // Initialize, no copy hist_vec[d_idx].Init(n_bins); // init host object prediction_cache[d_idx].fill(0); // init device object (assumes comes // after ba.allocate that sets device) feature_flags[d_idx].fill(1); // init device object (assumes comes after // ba.allocate that sets device) } if (!param.silent) { const int mb_size = 1048576; LOG(CONSOLE) << "Allocated " << ba.size() / mb_size << " MB"; } initialised = true; } // copy or init to do every iteration for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); nodes[d_idx].fill(Node()); nodes_temp[d_idx].fill(Node()); nodes_child_temp[d_idx].fill(Node()); position[d_idx].fill(0); device_gpair[d_idx].copy(gpair.begin() + device_row_segments[d_idx], gpair.begin() + device_row_segments[d_idx + 1]); subsample_gpair(&device_gpair[d_idx], param.subsample, device_row_segments[d_idx]); hist_vec[d_idx].Reset(device_idx); // left_child_smallest and left_child_smallest_temp don't need to be // initialized } dh::synchronize_n_devices(n_devices, dList); p_last_fmat_ = &fmat; } void GPUHistBuilder::BuildHist(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t begin = device_element_segments[d_idx]; size_t end = device_element_segments[d_idx + 1]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; auto d_gidx = device_matrix[d_idx].gidx; auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin(); auto d_position = position[d_idx].data(); auto d_gpair = device_gpair[d_idx].data(); auto d_left_child_smallest = left_child_smallest[d_idx].data(); auto hist_builder = hist_vec[d_idx].GetBuilder(); dh::TransformLbs( device_idx, &temp_memory[d_idx], end - begin, d_row_ptr, row_end - row_begin, [=] __device__(size_t local_idx, int local_ridx) { int nidx = d_position[local_ridx]; // OPTMARK: latency if (!is_active(nidx, depth)) return; // Only increment smallest node bool is_smallest = (d_left_child_smallest[parent_nidx(nidx)] && is_left_child(nidx)) || (!d_left_child_smallest[parent_nidx(nidx)] && !is_left_child(nidx)); if (!is_smallest && depth > 0) return; int gidx = d_gidx[local_idx]; bst_gpair gpair = d_gpair[local_ridx]; hist_builder.Add(gpair, gidx, nidx); // OPTMARK: This is slow, could use // shared memory or cache results // intead of writing to global // memory every time in atomic way. 
}); } dh::synchronize_n_devices(n_devices, dList); // time.printElapsed("Add Time"); // (in-place) reduce each element of histogram (for only current level) across // multiple gpus // TODO(JCM): use out of place with pre-allocated buffer, but then have to // copy // back on device // fprintf(stderr,"sizeof(bst_gpair)/sizeof(float)=%d\n",sizeof(bst_gpair)/sizeof(float)); for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); dh::safe_nccl(ncclAllReduce( reinterpret_cast<const void*>(hist_vec[d_idx].GetLevelPtr(depth)), reinterpret_cast<void*>(hist_vec[d_idx].GetLevelPtr(depth)), hist_vec[d_idx].LevelSize(depth) * sizeof(bst_gpair) / sizeof(float), ncclFloat, ncclSum, comms[d_idx], *(streams[d_idx]))); } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx]))); } // if no NCCL, then presume only 1 GPU, then already correct // time.printElapsed("Reduce-Add Time"); // Subtraction trick (applied to all devices in same way -- to avoid doing on // master and then Bcast) if (depth > 0) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); auto hist_builder = hist_vec[d_idx].GetBuilder(); auto d_left_child_smallest = left_child_smallest[d_idx].data(); int n_sub_bins = (n_nodes_level(depth) / 2) * hist_builder.n_bins; dh::launch_n(device_idx, n_sub_bins, [=] __device__(int idx) { int nidx = n_nodes(depth - 1) + ((idx / hist_builder.n_bins) * 2); bool left_smallest = d_left_child_smallest[parent_nidx(nidx)]; if (left_smallest) { nidx++; // If left is smallest switch to right child } int gidx = idx % hist_builder.n_bins; bst_gpair parent = hist_builder.Get(gidx, parent_nidx(nidx)); int other_nidx = left_smallest ? nidx - 1 : nidx + 1; bst_gpair other = hist_builder.Get(gidx, other_nidx); hist_builder.Add(parent - other, gidx, nidx); // OPTMARK: This is slow, could use shared // memory or cache results intead of writing to // global memory every time in atomic way. 
}); } dh::synchronize_n_devices(n_devices, dList); } } template <int BLOCK_THREADS> __global__ void find_split_kernel( const bst_gpair* d_level_hist, int* d_feature_segments, int depth, int n_features, int n_bins, Node* d_nodes, Node* d_nodes_temp, Node* d_nodes_child_temp, int nodes_offset_device, float* d_fidx_min_map, float* d_gidx_fvalue_map, GPUTrainingParam gpu_param, bool* d_left_child_smallest_temp, bool colsample, int* d_feature_flags) { typedef cub::KeyValuePair<int, float> ArgMaxT; typedef cub::BlockScan<bst_gpair, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT; typedef cub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT; typedef cub::BlockReduce<bst_gpair, BLOCK_THREADS> SumReduceT; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; struct UninitializedSplit : cub::Uninitialized<Split> {}; struct UninitializedGpair : cub::Uninitialized<bst_gpair> {}; __shared__ UninitializedSplit uninitialized_split; Split& split = uninitialized_split.Alias(); __shared__ UninitializedGpair uninitialized_sum; bst_gpair& shared_sum = uninitialized_sum.Alias(); __shared__ ArgMaxT block_max; __shared__ TempStorage temp_storage; if (threadIdx.x == 0) { split = Split(); } __syncthreads(); // below two are for accessing full-sized node list stored on each device // always one block per node, BLOCK_THREADS threads per block int level_node_idx = blockIdx.x + nodes_offset_device; int node_idx = n_nodes(depth - 1) + level_node_idx; for (int fidx = 0; fidx < n_features; fidx++) { if (colsample && d_feature_flags[fidx] == 0) continue; int begin = d_feature_segments[level_node_idx * n_features + fidx]; int end = d_feature_segments[level_node_idx * n_features + fidx + 1]; int gidx = (begin - (level_node_idx * n_bins)) + threadIdx.x; bool thread_active = threadIdx.x < end - begin; bst_gpair feature_sum = bst_gpair(); for (int reduce_begin = begin; reduce_begin < end; reduce_begin += BLOCK_THREADS) { // Scan histogram bst_gpair bin = thread_active ? d_level_hist[reduce_begin + threadIdx.x] : bst_gpair(); feature_sum += SumReduceT(temp_storage.sum_reduce).Reduce(bin, cub::Sum()); } if (threadIdx.x == 0) { shared_sum = feature_sum; } // __syncthreads(); // no need to synch because below there is a Scan GpairCallbackOp prefix_op = GpairCallbackOp(); for (int scan_begin = begin; scan_begin < end; scan_begin += BLOCK_THREADS) { bst_gpair bin = thread_active ? d_level_hist[scan_begin + threadIdx.x] : bst_gpair(); BlockScanT(temp_storage.scan) .ExclusiveScan(bin, bin, cub::Sum(), prefix_op); // Calculate gain bst_gpair parent_sum = d_nodes[node_idx].sum_gradients; float parent_gain = d_nodes[node_idx].root_gain; bst_gpair missing = parent_sum - shared_sum; bool missing_left; float gain = thread_active ? loss_chg_missing(bin, missing, parent_sum, parent_gain, gpu_param, missing_left) : -FLT_MAX; __syncthreads(); // Find thread with best gain ArgMaxT tuple(threadIdx.x, gain); ArgMaxT best = MaxReduceT(temp_storage.max_reduce).Reduce(tuple, cub::ArgMax()); if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { float fvalue; if (threadIdx.x == 0 && begin == scan_begin) { // check at start of first tile fvalue = d_fidx_min_map[fidx]; } else { fvalue = d_gidx_fvalue_map[gidx - 1]; } bst_gpair left = missing_left ? 
bin + missing : bin; bst_gpair right = parent_sum - left; split.Update(gain, missing_left, fvalue, fidx, left, right, gpu_param); } __syncthreads(); } // end scan } // end over features // Create node if (threadIdx.x == 0) { if (d_nodes_temp == NULL) { d_nodes[node_idx].split = split; } else { d_nodes_temp[blockIdx.x] = d_nodes[node_idx]; // first copy node values d_nodes_temp[blockIdx.x].split = split; // now assign split } // if (depth == 0) { // split.Print(); // } Node *Nodeleft, *Noderight; bool* left_child_smallest; if (d_nodes_temp == NULL) { Nodeleft = &d_nodes[left_child_nidx(node_idx)]; Noderight = &d_nodes[right_child_nidx(node_idx)]; left_child_smallest = &d_left_child_smallest_temp[node_idx]; // NOTE: not per level, even // though _temp variable name } else { Nodeleft = &d_nodes_child_temp[blockIdx.x * 2 + 0]; Noderight = &d_nodes_child_temp[blockIdx.x * 2 + 1]; left_child_smallest = &d_left_child_smallest_temp[blockIdx.x]; } *Nodeleft = Node(split.left_sum, CalcGain(gpu_param, split.left_sum.grad, split.left_sum.hess), CalcWeight(gpu_param, split.left_sum.grad, split.left_sum.hess)); *Noderight = Node(split.right_sum, CalcGain(gpu_param, split.right_sum.grad, split.right_sum.hess), CalcWeight(gpu_param, split.right_sum.grad, split.right_sum.hess)); // Record smallest node if (split.left_sum.hess <= split.right_sum.hess) { *left_child_smallest = true; } else { *left_child_smallest = false; } } } #define MIN_BLOCK_THREADS 32 #define CHUNK_BLOCK_THREADS 32 // MAX_BLOCK_THREADS of 1024 is hard-coded maximum block size due // to CUDA compatibility 35 and above requirement // for Maximum number of threads per block #define MAX_BLOCK_THREADS 1024 void GPUHistBuilder::FindSplit(int depth) { // Specialised based on max_bins this->FindSplitSpecialize<MIN_BLOCK_THREADS>(depth); } template <> void GPUHistBuilder::FindSplitSpecialize<MAX_BLOCK_THREADS>(int depth) { LaunchFindSplit<MAX_BLOCK_THREADS>(depth); } template <int BLOCK_THREADS> void GPUHistBuilder::FindSplitSpecialize(int depth) { if (param.max_bin <= BLOCK_THREADS) { LaunchFindSplit<BLOCK_THREADS>(depth); } else { this->FindSplitSpecialize<BLOCK_THREADS + CHUNK_BLOCK_THREADS>(depth); } } template <int BLOCK_THREADS> void GPUHistBuilder::LaunchFindSplit(int depth) { bool colsample = param.colsample_bylevel < 1.0 || param.colsample_bytree < 1.0; int dosimuljob = 1; int simuljob = 1; // whether to do job on single GPU and broadcast (0) or to // do same job on each GPU (1) (could make user parameter, // but too fine-grained maybe) int findsplit_shardongpus = 0; // too expensive generally, disable for now if (findsplit_shardongpus) { dosimuljob = 0; // use power of 2 for split finder because nodes are power of 2 (broadcast // result to remaining devices) int find_split_n_devices = std::pow(2, std::floor(std::log2(n_devices))); find_split_n_devices = std::min(n_nodes_level(depth), find_split_n_devices); int num_nodes_device = n_nodes_level(depth) / find_split_n_devices; int num_nodes_child_device = n_nodes_level(depth + 1) / find_split_n_devices; const int GRID_SIZE = num_nodes_device; // NOTE: No need to scatter before gather as all devices have same copy of // nodes, and within find_split_kernel() nodes_temp is given values from // nodes // for all nodes (split among devices) find best split per node for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); int nodes_offset_device = d_idx * num_nodes_device; find_split_kernel<BLOCK_THREADS><<<GRID_SIZE, 
BLOCK_THREADS>>>( (const bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)), feature_segments[d_idx].data(), depth, (info->num_col), (hmat_.row_ptr.back()), nodes[d_idx].data(), nodes_temp[d_idx].data(), nodes_child_temp[d_idx].data(), nodes_offset_device, fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param), left_child_smallest_temp[d_idx].data(), colsample, feature_flags[d_idx].data()); } // nccl only on devices that did split dh::synchronize_n_devices(find_split_n_devices, dList); for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); dh::safe_nccl(ncclAllGather( reinterpret_cast<const void*>(nodes_temp[d_idx].data()), num_nodes_device * sizeof(Node) / sizeof(char), ncclChar, reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)), find_split_comms[find_split_n_devices - 1][d_idx], *(streams[d_idx]))); if (depth != param.max_depth) { // don't copy over children nodes if no more nodes dh::safe_nccl(ncclAllGather( reinterpret_cast<const void*>(nodes_child_temp[d_idx].data()), num_nodes_child_device * sizeof(Node) / sizeof(char), ncclChar, reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)), find_split_comms[find_split_n_devices - 1][d_idx], *(streams[d_idx]))); // Note offset by n_nodes(depth) // for recvbuff for child nodes } dh::safe_nccl(ncclAllGather( reinterpret_cast<const void*>(left_child_smallest_temp[d_idx].data()), num_nodes_device * sizeof(bool) / sizeof(char), ncclChar, reinterpret_cast<void*>(left_child_smallest[d_idx].data() + n_nodes(depth - 1)), find_split_comms[find_split_n_devices - 1][d_idx], *(streams[d_idx]))); } for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx]))); } if (n_devices > find_split_n_devices && n_devices > 1) { // if n_devices==1, no need to Bcast // if find_split_n_devices==1, this is just a copy operation, else it // copies // from master to all nodes in case extra devices not involved in split for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); int master_device = dList[0]; dh::safe_nccl(ncclBcast( reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)), n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); if (depth != param.max_depth) { // don't copy over children nodes if no // more nodes dh::safe_nccl(ncclBcast( reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)), n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); } dh::safe_nccl(ncclBcast( reinterpret_cast<void*>(left_child_smallest[d_idx].data() + n_nodes(depth - 1)), n_nodes_level(depth) * sizeof(bool) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx]))); } } } else if (simuljob == 0) { dosimuljob = 0; int num_nodes_device = n_nodes_level(depth); const int GRID_SIZE = num_nodes_device; int d_idx = 0; int master_device = dList[d_idx]; int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); int nodes_offset_device = d_idx * num_nodes_device; find_split_kernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS>>>( (const 
bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)), feature_segments[d_idx].data(), depth, (info->num_col), (hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL, nodes_offset_device, fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param), left_child_smallest[d_idx].data(), colsample, feature_flags[d_idx].data()); // broadcast result for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); dh::safe_nccl(ncclBcast( reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)), n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); if (depth != param.max_depth) { // don't copy over children nodes if no more nodes dh::safe_nccl(ncclBcast( reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)), n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); } dh::safe_nccl( ncclBcast(reinterpret_cast<void*>(left_child_smallest[d_idx].data() + n_nodes(depth - 1)), n_nodes_level(depth) * sizeof(bool) / sizeof(char), ncclChar, master_device, comms[d_idx], *(streams[d_idx]))); } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx]))); } } else { dosimuljob = 1; } if (dosimuljob) { // if no NCCL or simuljob==1, do this int num_nodes_device = n_nodes_level(depth); const int GRID_SIZE = num_nodes_device; // all GPUs do same work for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); int nodes_offset_device = 0; find_split_kernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS>>>( (const bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)), feature_segments[d_idx].data(), depth, (info->num_col), (hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL, nodes_offset_device, fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param), left_child_smallest[d_idx].data(), colsample, feature_flags[d_idx].data()); } } // NOTE: No need to syncrhonize with host as all above pure P2P ops or // on-device ops } void GPUHistBuilder::InitFirstNode(const std::vector<bst_gpair>& gpair) { // Perform asynchronous reduction on each gpu std::vector<bst_gpair> device_sums(n_devices); #pragma omp parallel for num_threads(n_devices) for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); auto begin = device_gpair[d_idx].tbegin(); auto end = device_gpair[d_idx].tend(); bst_gpair init = bst_gpair(); auto binary_op = thrust::plus<bst_gpair>(); device_sums[d_idx] = thrust::reduce(begin, end, init, binary_op); } bst_gpair sum = bst_gpair(); for (int d_idx = 0; d_idx < n_devices; d_idx++) { sum += device_sums[d_idx]; } // Setup first node so all devices have same first node (here done same on all // devices, or could have done one device and Bcast if worried about exact // precision issues) for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_nodes = nodes[d_idx].data(); auto gpu_param = GPUTrainingParam(param); dh::launch_n(device_idx, 1, [=] __device__(int idx) { bst_gpair sum_gradients = sum; d_nodes[idx] = Node(sum_gradients, CalcGain(gpu_param, sum_gradients.grad, sum_gradients.hess), CalcWeight(gpu_param, sum_gradients.grad, sum_gradients.hess)); }); } // synch all devices to host before moving on (No, can avoid 
because BuildHist // calls another kernel in default stream) // dh::synchronize_n_devices(n_devices, dList); } void GPUHistBuilder::UpdatePosition(int depth) { if (is_dense) { this->UpdatePositionDense(depth); } else { this->UpdatePositionSparse(depth); } } void GPUHistBuilder::UpdatePositionDense(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_position = position[d_idx].data(); Node* d_nodes = nodes[d_idx].data(); auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data(); auto d_gidx = device_matrix[d_idx].gidx; int n_columns = info->num_col; size_t begin = device_row_segments[d_idx]; size_t end = device_row_segments[d_idx + 1]; dh::launch_n(device_idx, end - begin, [=] __device__(size_t local_idx) { int pos = d_position[local_idx]; if (!is_active(pos, depth)) { return; } Node node = d_nodes[pos]; if (node.IsLeaf()) { return; } int gidx = d_gidx[local_idx * static_cast<size_t>(n_columns) + static_cast<size_t>(node.split.findex)]; float fvalue = d_gidx_fvalue_map[gidx]; if (fvalue <= node.split.fvalue) { d_position[local_idx] = left_child_nidx(pos); } else { d_position[local_idx] = right_child_nidx(pos); } }); } dh::synchronize_n_devices(n_devices, dList); // dh::safe_cuda(cudaDeviceSynchronize()); } void GPUHistBuilder::UpdatePositionSparse(int depth) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; auto d_position = position[d_idx].data(); auto d_position_tmp = position_tmp[d_idx].data(); Node* d_nodes = nodes[d_idx].data(); auto d_gidx_feature_map = gidx_feature_map[d_idx].data(); auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data(); auto d_gidx = device_matrix[d_idx].gidx; auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin(); size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; size_t element_begin = device_element_segments[d_idx]; size_t element_end = device_element_segments[d_idx + 1]; // Update missing direction dh::launch_n(device_idx, row_end - row_begin, [=] __device__(int local_idx) { int pos = d_position[local_idx]; if (!is_active(pos, depth)) { d_position_tmp[local_idx] = pos; return; } Node node = d_nodes[pos]; if (node.IsLeaf()) { d_position_tmp[local_idx] = pos; return; } else if (node.split.missing_left) { d_position_tmp[local_idx] = pos * 2 + 1; } else { d_position_tmp[local_idx] = pos * 2 + 2; } }); // Update node based on fvalue where exists // OPTMARK: This kernel is very inefficient for both compute and memory, // dominated by memory dependency / access patterns dh::TransformLbs( device_idx, &temp_memory[d_idx], element_end - element_begin, d_row_ptr, row_end - row_begin, [=] __device__(size_t local_idx, int local_ridx) { int pos = d_position[local_ridx]; if (!is_active(pos, depth)) { return; } Node node = d_nodes[pos]; if (node.IsLeaf()) { return; } int gidx = d_gidx[local_idx]; int findex = d_gidx_feature_map[gidx]; // OPTMARK: slowest global // memory access, maybe setup // position, gidx, etc. as // combined structure? 
if (findex == node.split.findex) { float fvalue = d_gidx_fvalue_map[gidx]; if (fvalue <= node.split.fvalue) { d_position_tmp[local_ridx] = left_child_nidx(pos); } else { d_position_tmp[local_ridx] = right_child_nidx(pos); } } }); position[d_idx] = position_tmp[d_idx]; } dh::synchronize_n_devices(n_devices, dList); } void GPUHistBuilder::ColSampleTree() { if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return; feature_set_tree.resize(info->num_col); std::iota(feature_set_tree.begin(), feature_set_tree.end(), 0); feature_set_tree = col_sample(feature_set_tree, param.colsample_bytree); } void GPUHistBuilder::ColSampleLevel() { if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return; feature_set_level.resize(feature_set_tree.size()); feature_set_level = col_sample(feature_set_tree, param.colsample_bylevel); std::vector<int> h_feature_flags(info->num_col, 0); for (auto fidx : feature_set_level) { h_feature_flags[fidx] = 1; } for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; dh::safe_cuda(cudaSetDevice(device_idx)); feature_flags[d_idx] = h_feature_flags; } dh::synchronize_n_devices(n_devices, dList); } bool GPUHistBuilder::UpdatePredictionCache( const DMatrix* data, std::vector<bst_float>* p_out_preds) { std::vector<bst_float>& out_preds = *p_out_preds; if (nodes.empty() || !p_last_fmat_ || data != p_last_fmat_) { return false; } if (!prediction_cache_initialised) { for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; prediction_cache[d_idx].copy(out_preds.begin() + row_begin, out_preds.begin() + row_end); } prediction_cache_initialised = true; } dh::synchronize_n_devices(n_devices, dList); float eps = param.learning_rate; for (int d_idx = 0; d_idx < n_devices; d_idx++) { int device_idx = dList[d_idx]; size_t row_begin = device_row_segments[d_idx]; size_t row_end = device_row_segments[d_idx + 1]; auto d_nodes = nodes[d_idx].data(); auto d_position = position[d_idx].data(); auto d_prediction_cache = prediction_cache[d_idx].data(); dh::launch_n(device_idx, prediction_cache[d_idx].size(), [=] __device__(int local_idx) { int pos = d_position[local_idx]; d_prediction_cache[local_idx] += d_nodes[pos].weight * eps; }); thrust::copy(prediction_cache[d_idx].tbegin(), prediction_cache[d_idx].tend(), &out_preds[row_begin]); } dh::synchronize_n_devices(n_devices, dList); return true; } void GPUHistBuilder::Update(const std::vector<bst_gpair>& gpair, DMatrix* p_fmat, RegTree* p_tree) { this->InitData(gpair, *p_fmat, *p_tree); this->InitFirstNode(gpair); this->ColSampleTree(); for (int depth = 0; depth < param.max_depth; depth++) { this->ColSampleLevel(); this->BuildHist(depth); this->FindSplit(depth); this->UpdatePosition(depth); } // done with multi-GPU, pass back result from master to tree on host int master_device = dList[0]; dh::safe_cuda(cudaSetDevice(master_device)); dense2sparse_tree(p_tree, nodes[0].tbegin(), nodes[0].tend(), param); } } // namespace tree } // namespace xgboost
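The builder above leans on a small set of level-order tree-indexing helpers (n_nodes, n_nodes_level, parent_nidx, left_child_nidx, right_child_nidx, is_left_child) whose definitions live elsewhere in the repository. Purely as a reading aid, the sketch below shows plausible bodies consistent with how BuildHist and UpdatePosition use them (breadth-first numbering, left child = 2*n+1, right child = 2*n+2); these bodies are assumptions, not the project's actual implementations. Compile with nvcc.

#include <cstdio>

__host__ __device__ inline int  n_nodes_level(int depth)   { return 1 << depth; }              // nodes on one level
__host__ __device__ inline int  n_nodes(int depth)         { return (1 << (depth + 1)) - 1; }  // nodes on levels 0..depth (0 when depth == -1)
__host__ __device__ inline int  parent_nidx(int nidx)      { return (nidx - 1) / 2; }
__host__ __device__ inline int  left_child_nidx(int nidx)  { return nidx * 2 + 1; }
__host__ __device__ inline int  right_child_nidx(int nidx) { return nidx * 2 + 2; }
__host__ __device__ inline bool is_left_child(int nidx)    { return (nidx % 2) != 0; }

int main() {
  // Level 2 holds nodes 3..6; node 4 is the right child of node 1.
  printf("n_nodes(1)=%d n_nodes_level(2)=%d\n", n_nodes(1), n_nodes_level(2));   // 3, 4
  printf("parent(4)=%d left(1)=%d right(1)=%d is_left_child(4)=%d\n",
         parent_nidx(4), left_child_nidx(1), right_child_nidx(1), is_left_child(4));  // 1, 3, 4, 0
  return 0;
}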
7d7ac5826a427f7fce016df15e371a8f69a7a9f4.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>

/****************************************************************************
  This program gives an example of a poor way to implement a password cracker
  in CUDA C. It is poor because it achieves this with just one thread, which
  is obviously not good given the scale of parallelism available to CUDA
  programs.

  The intentions of this program are:
    1) Demonstrate the use of __device__ and __global__ functions
    2) Enable a simulation of password cracking in the absence of a library
       with equivalent functionality to libcrypt.

  The password to be found is hardcoded into a function called is_a_match.

  Compile and run with:
    nvcc -o password_Bishal password_crack_Bishal.cu

  To Run:
    ./password_Bishal > resultpwd_cuda_Bishal.txt

  Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/

__device__ int is_a_match(char *attempt) {
  char Bishal_password1[] = "BD2057";
  char Bishal_password2[] = "BT3166";
  char Bishal_password3[] = "NT2621";
  char Bishal_password4[] = "PC6589";

  char *c = attempt;
  char *a = attempt;
  char *r = attempt;
  char *e = attempt;

  char *b1 = Bishal_password1;
  char *b2 = Bishal_password2;
  char *b3 = Bishal_password3;
  char *b4 = Bishal_password4;

  while(*c == *b1) {
    if(*c == '\0') {
      printf("Password: %s\n", Bishal_password1);
      break;
    }
    c++;
    b1++;
  }

  while(*a == *b2) {
    if(*a == '\0') {
      printf("Password: %s\n", Bishal_password2);
      break;
    }
    a++;
    b2++;
  }

  while(*r == *b3) {
    if(*r == '\0') {
      printf("Password: %s\n", Bishal_password3);
      break;
    }
    r++;
    b3++;
  }

  while(*e == *b4) {
    if(*e == '\0') {
      printf("Password: %s\n", Bishal_password4);
      return 1;
    }
    e++;
    b4++;
  }
  return 0;
}

__global__ void kernel() {
  char b, a, g, f;
  char password[7];
  password[6] = '\0';

  int i = blockIdx.x + 65;
  int j = threadIdx.x + 65;
  char firstValue = i;
  char secondValue = j;

  password[0] = firstValue;
  password[1] = secondValue;

  for(b = '0'; b <= '9'; b++){
    for(a = '0'; a <= '9'; a++){
      for(g = '0'; g <= '9'; g++){
        for(f = '0'; f <= '9'; f++){
          password[2] = b;
          password[3] = a;
          password[4] = g;
          password[5] = f;
          if(is_a_match(password)) {
            //printf("Success");
          } else {
            //printf("tried: %s\n", password);
          }
        }
      }
    }
  }
}

int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference) {
  long long int ds = finish->tv_sec - start->tv_sec;
  long long int dn = finish->tv_nsec - start->tv_nsec;

  if(dn < 0) {
    ds--;
    dn += 1000000000;
  }
  *difference = ds * 1000000000 + dn;
  return !(*difference > 0);
}

int main() {
  struct timespec start, finish;
  long long int time_elapsed;

  clock_gettime(CLOCK_MONOTONIC, &start);

  hipLaunchKernelGGL(( kernel) , dim3(26),dim3(26), 0, 0, );
  hipDeviceSynchronize();

  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
         (time_elapsed / 1.0e9));

  return 0;
}
7d7ac5826a427f7fce016df15e371a8f69a7a9f4.cu
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>

/****************************************************************************
  This program gives an example of a poor way to implement a password cracker
  in CUDA C. It is poor because it achieves this with just one thread, which
  is obviously not good given the scale of parallelism available to CUDA
  programs.

  The intentions of this program are:
    1) Demonstrate the use of __device__ and __global__ functions
    2) Enable a simulation of password cracking in the absence of a library
       with equivalent functionality to libcrypt.

  The password to be found is hardcoded into a function called is_a_match.

  Compile and run with:
    nvcc -o password_Bishal password_crack_Bishal.cu

  To Run:
    ./password_Bishal > resultpwd_cuda_Bishal.txt

  Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/

__device__ int is_a_match(char *attempt) {
  char Bishal_password1[] = "BD2057";
  char Bishal_password2[] = "BT3166";
  char Bishal_password3[] = "NT2621";
  char Bishal_password4[] = "PC6589";

  char *c = attempt;
  char *a = attempt;
  char *r = attempt;
  char *e = attempt;

  char *b1 = Bishal_password1;
  char *b2 = Bishal_password2;
  char *b3 = Bishal_password3;
  char *b4 = Bishal_password4;

  while(*c == *b1) {
    if(*c == '\0') {
      printf("Password: %s\n", Bishal_password1);
      break;
    }
    c++;
    b1++;
  }

  while(*a == *b2) {
    if(*a == '\0') {
      printf("Password: %s\n", Bishal_password2);
      break;
    }
    a++;
    b2++;
  }

  while(*r == *b3) {
    if(*r == '\0') {
      printf("Password: %s\n", Bishal_password3);
      break;
    }
    r++;
    b3++;
  }

  while(*e == *b4) {
    if(*e == '\0') {
      printf("Password: %s\n", Bishal_password4);
      return 1;
    }
    e++;
    b4++;
  }
  return 0;
}

__global__ void kernel() {
  char b, a, g, f;
  char password[7];
  password[6] = '\0';

  int i = blockIdx.x + 65;
  int j = threadIdx.x + 65;
  char firstValue = i;
  char secondValue = j;

  password[0] = firstValue;
  password[1] = secondValue;

  for(b = '0'; b <= '9'; b++){
    for(a = '0'; a <= '9'; a++){
      for(g = '0'; g <= '9'; g++){
        for(f = '0'; f <= '9'; f++){
          password[2] = b;
          password[3] = a;
          password[4] = g;
          password[5] = f;
          if(is_a_match(password)) {
            //printf("Success");
          } else {
            //printf("tried: %s\n", password);
          }
        }
      }
    }
  }
}

int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference) {
  long long int ds = finish->tv_sec - start->tv_sec;
  long long int dn = finish->tv_nsec - start->tv_nsec;

  if(dn < 0) {
    ds--;
    dn += 1000000000;
  }
  *difference = ds * 1000000000 + dn;
  return !(*difference > 0);
}

int main() {
  struct timespec start, finish;
  long long int time_elapsed;

  clock_gettime(CLOCK_MONOTONIC, &start);

  kernel <<<26,26>>>();
  cudaDeviceSynchronize();

  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
         (time_elapsed / 1.0e9));

  return 0;
}
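A side note on the launch geometry shared by both versions above: the <<<26,26>>> launch maps blockIdx.x to the first password letter and threadIdx.x to the second (both offset by 65, i.e. 'A'), and every thread then enumerates the 10^4 digit suffixes itself. The small host-only snippet below merely illustrates that mapping and the resulting candidate count; it is not part of either file.

#include <stdio.h>

int main(void) {
  const int letters = 26;                          // 'A' (65) .. 'Z' (90), matching blockIdx.x+65 and threadIdx.x+65
  const long digit_suffixes = 10L * 10 * 10 * 10;  // the four nested '0'..'9' loops each thread runs
  long candidates = (long)letters * letters * digit_suffixes;

  printf("threads launched : %d\n", letters * letters);
  printf("candidates tried : %ld total, %ld per thread\n", candidates, digit_suffixes);

  // Example: block 1, thread 3 -> prefix "BD", which is how "BD2057" gets found.
  printf("block 1, thread 3 -> prefix %c%c\n", 'A' + 1, 'A' + 3);
  return 0;
}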
8017631a5a23da837bbbd36d9883bdd7c30e8e6e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gLSTMOutputBackward.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *outCell = NULL; hipMalloc(&outCell, XSIZE*YSIZE); float *outXW = NULL; hipMalloc(&outXW, XSIZE*YSIZE); float *outSU = NULL; hipMalloc(&outSU, XSIZE*YSIZE); float *outB = NULL; hipMalloc(&outB, XSIZE*YSIZE); const float *cell = NULL; hipMalloc(&cell, XSIZE*YSIZE); const float *xW = NULL; hipMalloc(&xW, XSIZE*YSIZE); const float *sU = NULL; hipMalloc(&sU, XSIZE*YSIZE); const float *b = NULL; hipMalloc(&b, XSIZE*YSIZE); const float *adj = NULL; hipMalloc(&adj, XSIZE*YSIZE); size_t rows = XSIZE; size_t cols = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gLSTMOutputBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, outCell,outXW,outSU,outB,cell,xW,sU,b,adj,rows,cols); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gLSTMOutputBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, outCell,outXW,outSU,outB,cell,xW,sU,b,adj,rows,cols); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gLSTMOutputBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, outCell,outXW,outSU,outB,cell,xW,sU,b,adj,rows,cols); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8017631a5a23da837bbbd36d9883bdd7c30e8e6e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gLSTMOutputBackward.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *outCell = NULL; cudaMalloc(&outCell, XSIZE*YSIZE); float *outXW = NULL; cudaMalloc(&outXW, XSIZE*YSIZE); float *outSU = NULL; cudaMalloc(&outSU, XSIZE*YSIZE); float *outB = NULL; cudaMalloc(&outB, XSIZE*YSIZE); const float *cell = NULL; cudaMalloc(&cell, XSIZE*YSIZE); const float *xW = NULL; cudaMalloc(&xW, XSIZE*YSIZE); const float *sU = NULL; cudaMalloc(&sU, XSIZE*YSIZE); const float *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); const float *adj = NULL; cudaMalloc(&adj, XSIZE*YSIZE); size_t rows = XSIZE; size_t cols = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gLSTMOutputBackward<<<gridBlock,threadBlock>>>(outCell,outXW,outSU,outB,cell,xW,sU,b,adj,rows,cols); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gLSTMOutputBackward<<<gridBlock,threadBlock>>>(outCell,outXW,outSU,outB,cell,xW,sU,b,adj,rows,cols); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gLSTMOutputBackward<<<gridBlock,threadBlock>>>(outCell,outXW,outSU,outB,cell,xW,sU,b,adj,rows,cols); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
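One observation on the grid-sizing code that both benchmark versions above repeat: growing iXSIZE/iYSIZE with a while loop until it is a multiple of BLOCKX/BLOCKY and then dividing is equivalent to a ceiling division. The snippet below is a minimal sketch of that equivalence using one of the benchmarked shapes; div_up is a hypothetical helper, not something the benchmark defines. Compile with nvcc.

#include <cstdio>
#include <cuda_runtime.h>

// Ceiling division: smallest number of blocks of size `block` that cover `n` elements.
static inline int div_up(int n, int block) { return (n + block - 1) / block; }

int main() {
  int XSIZE = 1016, YSIZE = 1016, BLOCKX = 24, BLOCKY = 24;  // one of the shapes the benchmark sweeps
  dim3 gridBlock(div_up(XSIZE, BLOCKX), div_up(YSIZE, BLOCKY));  // same result as the padding loops: 43 x 43
  dim3 threadBlock(BLOCKX, BLOCKY);
  printf("grid = (%u,%u), block = (%u,%u)\n",
         gridBlock.x, gridBlock.y, threadBlock.x, threadBlock.y);
  return 0;
}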
dac3b889e0dfe1f91c33205b06188031d470f4d8.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <hiprand/hiprand.h> #include <stdio.h> #include <math.h> #include <float.h> #define NUM_THREADS 512 #define BLOCK 512 #define IDX(b,l,t,m,i,cum,L) (i+2*(m+t*(2*l+1)+cum[l]+b*cum[L+1])) #define PLUSMINUS(k) ((k%2==1) ? -1 : 1) #define LOGFACT(n,mem) ((n < 2) ? 0. : mem[n]) #define WIDX(l,tOut,tMid,i,cum,tauMids) (i+2*(tMid+tauMids[l]*tOut+cum[l])) int rounded_division(int number1, int number2) { if (number1 % number2 == 0) { return number1 / number2; } return number1 / number2 + 1; } dim3 cuda_gridsize(int n){ int k = (n - 1) / BLOCK + 1; int x = k; int y = 1; if (x > 65535){ x = ceil(sqrt(k)); y = (n - 1) / (x * BLOCK) + 1; } dim3 d(x, y, 1); return d; } namespace { __device__ __forceinline__ float _naiveCG( int l1, int l2, int l, int m1, int m2, int m, const double* mem){ int m3=-m; int t1=l2-m1-l; int t2=l1+m2-l; int t3=l1+l2-l; int t4=l1-m1; int t5=l2+m2; int tmin=max(0,max(t1,t2)); int tmax=min(t3,min(t4,t5)); double wigner=0; double logA=(log((double)2*l+1)+LOGFACT(l+l1-l2,mem)+LOGFACT(l-l1+l2,mem)+LOGFACT(l1+l2-l,mem)-LOGFACT(l1+l2+l+1,mem))/2; logA+=(LOGFACT(l-m3,mem)+LOGFACT(l+m3,mem)+LOGFACT(l1-m1,mem)+LOGFACT(l1+m1,mem)+LOGFACT(l2-m2,mem)+LOGFACT(l2+m2,mem))/2; for(int t=tmin; t<=tmax; t++){ double logB = LOGFACT(t,mem)+LOGFACT(t3-t,mem)+LOGFACT(t4-t,mem)+LOGFACT(t5-t,mem)+LOGFACT(-t1+t,mem)+LOGFACT(-t2+t,mem); wigner += PLUSMINUS(t)*exp(logA-logB); } return (float) PLUSMINUS(l1-l2-m3)*PLUSMINUS(l1-l2+m)*wigner; } __device__ __forceinline__ float naiveCG_cal_m( int l1, int l2, int l, int m1, int m2, const double* mem){ return _naiveCG(l1, l2, l, m1, m2, m1+m2, mem); } __device__ float naiveCG_cal_m1( int l1, int l2, int l, int m, int m2, const double* mem){ return _naiveCG(l1, l2, l, m - m2, m2, m, mem); } __global__ void cudaprecomputeCG_job( float* __restrict__ CG, const double* __restrict__ logfact, int Lmax, int Batch_size) { const int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; const int L1 = (Lmax + 1); const int L2 = L1*L1, L3=L1*L1*L1; if (global_threadId < L3*(2*Lmax+1)){ int m2 = global_threadId % (2*Lmax+1); int l_remainder = global_threadId / (2*Lmax+1); int l1 = l_remainder / L2; int l2 = (l_remainder / L1) % L1; int l = l_remainder % L1; if (l2 <= l1 && l1-l2 <= l && l <= l1+l2 && m2 < 2*l2+1){ int start = 0; for (int templ1=0; templ1 <= l1; templ1++){ for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){ int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? 
Lmax : templ2+templ1; for (int templ=low; templ<=high ; templ++){ start += (2*templ2+1)*(2*templ+1); } } } for (int templ = l1-l2; templ<l; templ++){ start += (2*l2+1)*(templ*2+1); } //offset m2 start += m2*(2*l+1); for (int m = 0; m < 2*l+1;m++){ int m1 = (m-l) - (m2-l2); if (-l1 <= m1 && m1 <= l1){ CG[start + m] = naiveCG_cal_m1(l1,l2,l,m-l,m2-l2,logfact); //CG[start + m] = 100*l1 + 10*l2 + l + 0.1*(m1+l1) + 0.01*m2 + 0.001*m; } } } } } //================================================================================================================== __global__ void cudaCG_forward_kernel( const float* tensor, float* out_tensor, const int* taus, const int* cum_tauIn_m, const int* cum_tauMiddle_m, const double* logfact, int Lmax, int Batch_size) { int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; int L1 = (Lmax+1); int Entry_size = L1 * (Lmax+2) * (Lmax +1) / 2; if (global_threadId < Batch_size * Entry_size){ int b = global_threadId / Entry_size; int l = global_threadId % L1; int remainder_for_l = (global_threadId % Entry_size) / L1; int l1 = 0, l2 = remainder_for_l * 2; while (l1*(l1+1) <= l2){l1++;} l1 -= 1; l2 = (l2 - l1*(l1+1))/2; if (l2 <= l1 && l1 - l2 <= l && l <= l1 + l2){ int t_offset = 0; for (int templ1 = 0; templ1<l1; templ1++){ for (int templ2 = 0; templ2<=templ1; templ2++){ if (l <= templ2 + templ1 && l >= templ1- templ2){ t_offset += taus[templ1]*taus[templ2]; } } } for (int templ2 = 0; templ2<=l2; templ2++){ if (l <= templ2 + l1 && l >= l1- templ2){ t_offset += taus[l1]*taus[templ2]; } } t_offset -= taus[l1]*taus[l2]; for (int m1 = -l1; m1 <= l1; m1++){ for (int m2 = -l2; m2 <= l2; m2++){ int m = m1 + m2; if (-l <= m && m <= l){ float CGcoef = naiveCG_cal_m(l1,l2,l,m1,m2,logfact); for (int t1 = 0; t1 < taus[l1]; t1++){ for (int t2 = 0; t2 < taus[l2]; t2++){ int t = t1 * taus[l2] + t2 + t_offset; float real1 = tensor[IDX(b,l1,t1,m1+l1,0,cum_tauIn_m,Lmax)]; float imag1 = tensor[IDX(b,l1,t1,m1+l1,1,cum_tauIn_m,Lmax)]; float real2 = tensor[IDX(b,l2,t2,m2+l2,0,cum_tauIn_m,Lmax)]; float imag2 = tensor[IDX(b,l2,t2,m2+l2,1,cum_tauIn_m,Lmax)]; out_tensor[IDX(b,l,t,m+l,0,cum_tauMiddle_m,Lmax)] += (real1 * real2 - imag1 * imag2) * CGcoef; out_tensor[IDX(b,l,t,m+l,1,cum_tauMiddle_m,Lmax)] += (real1 * imag2 + real2 * imag1) * CGcoef; //out_tensor[IDX(b,l,t,m+l,0,cum_tauMiddle_m,Lmax)] = t + 0.01 * t_offset; //out_tensor[IDX(b,l,t,m+l,1,cum_tauMiddle_m,Lmax)] = m+l+0.1 * l1 + 0.01 * l2 + 0.001*l; //return; } } } } } } } } __global__ void cudaBatchNorm_forward_job( float* mid_tensor, float* moving_std, const int* tauMiddle, const int* cum_tauMiddle_m, float cnt, float eps, int Lmax, int Batch_size, int update_std){ int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; int t_offset = 0, l=0; while(l<=Lmax){ t_offset+=tauMiddle[l]; if (t_offset <= global_threadId){ l++; } else { t_offset -= tauMiddle[l]; break; } } if (l <= Lmax){ int tmid = global_threadId - t_offset; if (update_std){ //calculate mean double N = (double) Batch_size * (2*l+1); double mean = 0.; for (int b = 0; b < Batch_size; b++){ for (int m = 0; m < 2*l+1; m++){ float realm = mid_tensor[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)]; float imagm = mid_tensor[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)]; double norm = sqrt(realm*realm+imagm*imagm); mean += norm / N; //moving_std[t_offset + tmid] = norm; //return; } } //calculate std double std = 0.; for (int b = 0; b < Batch_size; b++){ for (int m = 0; m < 2*l+1; m++){ float realm = mid_tensor[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)]; float imagm = 
mid_tensor[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)]; double norm = sqrt(realm*realm+imagm*imagm); std += (norm - mean) * (norm - mean) / N; } } std = sqrt(std); //update std //moving_std[t_offset + tmid] = std; moving_std[t_offset + tmid] *= cnt / (cnt + 1); moving_std[t_offset + tmid] += std / (cnt + 1); } //actually performing batch norm. Note eval mode only has this, not update in as in the previous code double divisor = (eps > moving_std[t_offset + tmid]) ? eps : moving_std[t_offset + tmid]; for (int b = 0; b < Batch_size; b++){ for (int m = 0; m < 2*l+1; m++){ mid_tensor[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)] /= divisor; mid_tensor[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)] /= divisor; } } } } __global__ void cudaWeightTransform_forward_job( const float* mid_tensor, const float* weight_tensor, float* out_tensor, const int* tauMiddle, const int* cum_tauMiddle_m, const int* cum_tauOut_m, const int* cumW_tauOut_m, int Lmax, int Batch_size) { int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; if (global_threadId < Batch_size*cum_tauOut_m[Lmax+1]){ //first, loop to get l int b = global_threadId / cum_tauOut_m[Lmax+1]; int ltm = global_threadId % cum_tauOut_m[Lmax+1], l=0; while (cum_tauOut_m[l] <= ltm){l++;} l--; int tout = (ltm - cum_tauOut_m[l]) / (2*l+1); int m = (ltm - cum_tauOut_m[l]) % (2*l+1); float real=0.0, imag=0.0; for (int tmid = 0; tmid < tauMiddle[l]; tmid++){ float realw = weight_tensor[WIDX(l,tout,tmid,0,cumW_tauOut_m,tauMiddle)]; float imagw = weight_tensor[WIDX(l,tout,tmid,1,cumW_tauOut_m,tauMiddle)]; float realm = mid_tensor[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)]; float imagm = mid_tensor[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)]; real += realw * realm - imagw * imagm; imag += realw * imagm + imagw * realm; } //weight_tensor[WIDX(l,tout,tmid,0,cumW_tauOut_m,tauMiddle)] = global_threadId; //weight_tensor[WIDX(l,tout,tmid,1,cumW_tauOut_m,tauMiddle)] = 300 + l + 0.1 * tout + 0.01 * tmid; out_tensor[IDX(b,l,tout,m,0,cum_tauOut_m,Lmax)] = real; out_tensor[IDX(b,l,tout,m,1,cum_tauOut_m,Lmax)] = imag; } } //==========================================================backward __global__ void cudaWeightGrad1_backward_job( const float* middle, float* grad_weight, const float* grad_out, const int* tauMiddle, const int* cum_tauMiddle_m, const int* cum_tauOut_m, const int* cumW_tauOut_m, int Lmax, int Batch_size){ int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; if (global_threadId < cumW_tauOut_m[Lmax+1]){ int l=0; while (cumW_tauOut_m[l] <= global_threadId){l++;} l--; int tout = (global_threadId - cumW_tauOut_m[l]) / tauMiddle[l]; int tmid = (global_threadId - cumW_tauOut_m[l]) % tauMiddle[l]; float real=0.0, imag=0.0; for (int b = 0; b < Batch_size; b++){ for (int m = 0; m < 2*l+1; m++){ float realo = grad_out[IDX(b,l,tout,m,0,cum_tauOut_m,Lmax)]; float imago = grad_out[IDX(b,l,tout,m,1,cum_tauOut_m,Lmax)]; float realm = middle[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)]; float imagm = middle[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)]; real += realm * realo + imagm * imago; imag += realm * imago - realo * imagm; } } grad_weight[WIDX(l,tout,tmid,0,cumW_tauOut_m,tauMiddle)] = real; grad_weight[WIDX(l,tout,tmid,1,cumW_tauOut_m,tauMiddle)] = imag; } } __global__ void cudaMiddleGrad_backward_job( float* grad_middle, const float* weight, const float* grad_out, const float* moving_std, const int* tauMiddle, const int* cum_tauMiddle_m, const int* tauOut, const int* cum_tauOut_m, const int* cumW_tauOut_m, int Lmax, int Batch_size, float eps){ int global_threadId = blockIdx.x * 
blockDim.x + threadIdx.x; if (global_threadId < cum_tauMiddle_m[Lmax+1]){ int l=0; while (cum_tauMiddle_m[l] <= global_threadId){l++;} l--; int t_offset = 0; for (int templ = 0; templ <= l; templ++){ t_offset += tauMiddle[templ]; } t_offset -= tauMiddle[l]; int tm = global_threadId - cum_tauMiddle_m[l]; int tmid = tm / (2*l+1), m = tm % (2*l+1); float divisor = (eps > moving_std[t_offset+tmid]) ? eps : moving_std[t_offset+tmid]; //divisor = divisor * divisor; for (int b = 0; b < Batch_size; b++){ float real=0.0, imag=0.0; for (int tout = 0; tout < tauOut[l]; tout++){ float realo = grad_out[IDX(b,l,tout,m,0,cum_tauOut_m,Lmax)]; float imago = grad_out[IDX(b,l,tout,m,1,cum_tauOut_m,Lmax)]; float realw = weight[WIDX(l,tout,tmid,0,cumW_tauOut_m,tauMiddle)]; float imagw = weight[WIDX(l,tout,tmid,1,cumW_tauOut_m,tauMiddle)]; real += realw * realo + imagw * imago; imag += realw * imago - realo * imagw; } //grad_middle[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)] = real; //grad_middle[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)] = imag; grad_middle[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)] = real / divisor; grad_middle[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)] = imag / divisor; //grad_middle[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)] = b + 0.1*l + 0.01*tmid+0.001*m; } //moving_std[t_offset+tmid] = t_offset + 0.1*tmid; } } __global__ void cudaCG_backward_kernel( const float* tensor, float* g_in, const float* g_out, const int* taus, const int* cum_taus, const int* cum_new_taus, const float* CG, int Lmax, int Batch_size) { int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; if (global_threadId < Batch_size * cum_taus[Lmax+1]){ int b = global_threadId / cum_taus[Lmax + 1]; int ltm1 = global_threadId % cum_taus[Lmax + 1]; int l1 = 0; while (cum_taus[l1]<=ltm1) { l1++; } l1 -= 1; int tm1 = ltm1 - cum_taus[l1]; int t1 = tm1 / (2*l1+1); int m1 = tm1 % (2*l1+1); //m1 -= l1; int l2 = 0, m2 = 0, t2 = 0; float real1=0, imag1=0; for (l2 = 0; l2 <= l1; l2++){ for (int l = l1 - l2; l <= Lmax && l <= l1 + l2; l++){ int CG_offset=0, t_offset=0; for (int templ1=0; templ1 <= l1; templ1++){ for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){ int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? Lmax : templ2+templ1; for (int templ=low; templ<=high ; templ++){ CG_offset += (2*templ2+1)*(2*templ+1); } if (l <= templ1 + templ2 && l >= templ1 - templ2){ t_offset += taus[templ1]*taus[templ2]; } } } for (int templ = l1-l2; templ<l; templ++){ CG_offset += (2*l2+1)*(templ*2+1); } for (m2 = 0; m2 < 2*l2+1; m2++){ for (int m = 0; m < 2*l+1; m++){ if (m1-l1 + m2-l2 == m-l){ float CGcoef = CG[CG_offset+(2*l+1)*m2+m]; for (t2 = 0; t2 < taus[l2]; t2++){ int t = taus[l2] * t1 + t2 + t_offset; float real = g_out[IDX(b,l,t,m,0,cum_new_taus,Lmax)]; float imag = g_out[IDX(b,l,t,m,1,cum_new_taus,Lmax)]; float real2 = tensor[IDX(b,l2,t2,m2,0,cum_taus,Lmax)]; float imag2 = tensor[IDX(b,l2,t2,m2,1,cum_taus,Lmax)]; real1 += (real * real2 + imag * imag2) * CGcoef; imag1 += (real2 * imag - real * imag2) * CGcoef; } } } } } } //Now switching to treat l1 as a "l2" l2 = l1; t2 = t1; m2 = m1; for (l1 = l2; l1 <= Lmax; l1++){ for (int l = l1 - l2; l <= Lmax && l <= l1 + l2; l++){ int CG_offset=0, t_offset=0; for (int templ1=0; templ1 <= l1; templ1++){ for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){ int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? 
Lmax : templ2+templ1; for (int templ=low; templ<=high ; templ++){ CG_offset += (2*templ2+1)*(2*templ+1); } if (l <= templ1 + templ2 && l >= templ1 - templ2){ t_offset += taus[templ1]*taus[templ2]; } } } for (int templ = l1-l2; templ<l; templ++){ CG_offset += (2*l2+1)*(templ*2+1); } for (m1 = 0; m1 < 2*l1+1; m1++){ for (int m = 0; m < 2*l+1; m++){ if (m1-l1 + m2-l2 == m-l){ float CGcoef = CG[CG_offset+(2*l+1)*m2+m]; for (t1 = 0; t1 < taus[l1]; t1++){ int t = taus[l2] * t1 + t2 + t_offset; float real = g_out[IDX(b,l,t,m,0,cum_new_taus,Lmax)]; float imag = g_out[IDX(b,l,t,m,1,cum_new_taus,Lmax)]; //This time we need to access l1 t1 and m1 float real2 = tensor[IDX(b,l1,t1,m1,0,cum_taus,Lmax)]; float imag2 = tensor[IDX(b,l1,t1,m1,1,cum_taus,Lmax)]; real1 += (real * real2 + imag * imag2) * CGcoef; imag1 += (real2 * imag - real * imag2) * CGcoef; } } } } } } g_in[global_threadId*2] = real1; g_in[global_threadId*2+1] = imag1; } } } // namespace void print_arr(int* v, int l){ printf("vector: ("); for (int i = 0; i < l; i++){ printf("%d, ", v[i]); } printf(")\n"); return; } int* _get_cum_tau(int* taus, int L){ int* cum_tau = (int*) malloc((L+2)*sizeof(int)); cum_tau[0] = 0; for (int l = 0; l <= L; l++){ cum_tau[l+1] = cum_tau[l] + (2 * l + 1) * taus[l]; } return cum_tau; } //===================================================================================== void UpBatchNorm_forward_cuda( torch::Tensor input_tensor, torch::Tensor middle_tensor, torch::Tensor output_tensor, torch::Tensor weight_tensor, int L, int B, torch::Tensor tauIn_tensor, torch::Tensor tauOut_tensor, torch::Tensor moving_std_tensor, float cnt, float eps, int update_std){ float* F = input_tensor.data<float>(); float* middle = middle_tensor.data<float>(); int* tauIn = tauIn_tensor.data<int>(); //printf("len(taus) = %d\n", taus_tensor.size(0)); int* tauMiddle = (int*) calloc(L+1, sizeof(int)); for (int l1 = 0; l1 <= L; l1++){ for (int l2 = 0; l2 <= l1; l2++){ for (int l = l1-l2; l <=L && l <= l1 + l2; l++){ tauMiddle[l] += tauIn[l1] * tauIn[l2]; } } } int* cum_tauIn_m = _get_cum_tau(tauIn, L); int* cum_tauMiddle_m = _get_cum_tau(tauMiddle, L); int* tauOut = tauOut_tensor.data<int>(); int* cum_tauOut_m = _get_cum_tau(tauOut, L); int* cumW_tauOut_m = (int*) malloc((L+2)*sizeof(int)); cumW_tauOut_m[0] = 0; for (int l = 0; l <= L; l++){ cumW_tauOut_m[l+1] = cumW_tauOut_m[l] + tauOut[l] * tauMiddle[l]; } int *cuda_tauIn, *cuda_cum_tauIn_m; int *cuda_tauMiddle, *cuda_cum_tauMiddle_m; int *cuda_tauOut, *cuda_cum_tauOut_m, *cuda_cumW_tauOut_m; hipMalloc((void**) &cuda_tauIn, (L+1)*sizeof(int)); hipMalloc((void**) &cuda_cum_tauIn_m, (L+2)*sizeof(int)); hipMalloc((void**) &cuda_tauMiddle, (L+1)*sizeof(int)); hipMalloc((void**) &cuda_cum_tauMiddle_m, (L+2)*sizeof(int)); hipMalloc((void**) &cuda_tauOut, (L+1)*sizeof(int)); hipMalloc((void**) &cuda_cum_tauOut_m, (L+2)*sizeof(int)); hipMalloc((void**) &cuda_cumW_tauOut_m, (L+2)*sizeof(int)); hipMemcpy(cuda_tauIn, tauIn, (L+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_cum_tauIn_m, cum_tauIn_m, (L+2)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_tauMiddle, tauMiddle, (L+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_cum_tauMiddle_m, cum_tauMiddle_m, (L+2)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_tauOut, tauOut, (L+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_cum_tauOut_m, cum_tauOut_m, (L+2)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_cumW_tauOut_m, cumW_tauOut_m, (L+2)*sizeof(int), hipMemcpyHostToDevice); int size1 = B * (L+1) * (L+2) * (L+1) 
/2; int LOGFACT_SIZE=5*L+20; double* logfact = (double*) calloc(LOGFACT_SIZE, sizeof(double)); for (int i = 2; i < LOGFACT_SIZE; i++){ logfact[i] = logfact[i-1] + log((double) i); } double* cuda_logfact; hipMalloc((void**) &cuda_logfact, LOGFACT_SIZE*sizeof(double)); hipMemcpy(cuda_logfact, logfact, LOGFACT_SIZE*sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cudaCG_forward_kernel), dim3(cuda_gridsize(size1)), dim3(NUM_THREADS), 0, 0, F, middle, cuda_tauIn, cuda_cum_tauIn_m, cuda_cum_tauMiddle_m, cuda_logfact, L, B); hipDeviceSynchronize(); //return; //Step 2, batch normalization int size2 = 0; for (int templ = 0; templ <= L; templ++){ size2 += tauMiddle[templ]; } //printf("Step 2 Need %d threads \n", size2); //printf("This is the %f-th update %d\n", cnt, update_std); float* moving_std = moving_std_tensor.data<float>(); hipLaunchKernelGGL(( cudaBatchNorm_forward_job), dim3(cuda_gridsize(size2)), dim3(NUM_THREADS), 0, 0, middle, moving_std, cuda_tauMiddle, cuda_cum_tauMiddle_m, cnt, eps, L, B, update_std); hipDeviceSynchronize(); //Step 3 is weight transform float* out = output_tensor.data<float>(); float* weights = weight_tensor.data<float>(); int size3 = B * cum_tauOut_m[L+1]; //printf("Step 3 Need %d threads \n", size3); hipLaunchKernelGGL(( cudaWeightTransform_forward_job), dim3(cuda_gridsize(size3)), dim3(NUM_THREADS), 0, 0, middle, weights,out, cuda_tauMiddle, cuda_cum_tauMiddle_m, cuda_cum_tauOut_m, cuda_cumW_tauOut_m, L, B); hipDeviceSynchronize(); hipFree(cuda_logfact); hipFree(cuda_tauIn); hipFree(cuda_cum_tauIn_m); hipFree(cuda_tauMiddle); hipFree(cuda_cum_tauMiddle_m); hipFree(cuda_tauOut); hipFree(cuda_cumW_tauOut_m); hipFree(cuda_cum_tauOut_m); free(cum_tauIn_m); free(tauMiddle); free(cum_tauMiddle_m); free(cum_tauOut_m); free(cumW_tauOut_m); free(logfact); } void UpBatchNorm_backward_cuda( torch::Tensor weight_tensor, torch::Tensor input_tensor, torch::Tensor grad_in_tensor, torch::Tensor grad_weight_tensor, torch::Tensor grad_middle_tensor, torch::Tensor grad_out_tensor, torch::Tensor CG_tensor, int L, int B, torch::Tensor tauIn_tensor, torch::Tensor tauOut_tensor, torch::Tensor moving_std_tensor, float eps){ float* Fin = input_tensor.data<float>(); int* tauIn = tauIn_tensor.data<int>(); int* cum_tauIn_m = _get_cum_tau(tauIn, L); int* tauMiddle = (int*) calloc(L+1, sizeof(int)); for (int l1 = 0; l1 <= L; l1++){ for (int l2 = 0; l2 <= l1; l2++){ for (int l = l1-l2; l <=L && l <= l1 + l2; l++){ tauMiddle[l] += tauIn[l1] * tauIn[l2]; } } } int* cum_tauMiddle_m = _get_cum_tau(tauMiddle, L); int* tauOut = tauOut_tensor.data<int>(); int* cum_tauOut_m = _get_cum_tau(tauOut, L); int* cumW_tauOut_m = (int*) malloc((L+2)*sizeof(int)); cumW_tauOut_m[0] = 0; for (int l = 0; l <= L; l++){ cumW_tauOut_m[l+1] = cumW_tauOut_m[l] + tauOut[l] * tauMiddle[l]; } int *cuda_tauIn, *cuda_cum_tauIn_m; int *cuda_tauMiddle, *cuda_cum_tauMiddle_m; int *cuda_tauOut, *cuda_cum_tauOut_m, *cuda_cumW_tauOut_m; hipMalloc((void**) &cuda_tauIn, (L+1)*sizeof(int)); hipMalloc((void**) &cuda_cum_tauIn_m, (L+2)*sizeof(int)); hipMalloc((void**) &cuda_tauMiddle, (L+1)*sizeof(int)); hipMalloc((void**) &cuda_cum_tauMiddle_m, (L+2)*sizeof(int)); hipMalloc((void**) &cuda_tauOut, (L+1)*sizeof(int)); hipMalloc((void**) &cuda_cum_tauOut_m, (L+2)*sizeof(int)); hipMalloc((void**) &cuda_cumW_tauOut_m, (L+2)*sizeof(int)); hipMemcpy(cuda_tauIn, tauIn, (L+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_cum_tauIn_m, cum_tauIn_m, (L+2)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_tauMiddle, tauMiddle, 
(L+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_cum_tauMiddle_m, cum_tauMiddle_m, (L+2)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_tauOut, tauOut, (L+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_cum_tauOut_m, cum_tauOut_m, (L+2)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_cumW_tauOut_m, cumW_tauOut_m, (L+2)*sizeof(int), hipMemcpyHostToDevice); float* grad_out = grad_out_tensor.data<float>(); float* grad_weight = grad_weight_tensor.data<float>(); //Prep for CG int LOGFACT_SIZE=5*L+20; double* logfact = (double*) calloc(LOGFACT_SIZE, sizeof(double)); for (int i = 2; i < LOGFACT_SIZE; i++){ logfact[i] = logfact[i-1] + log((double) i); } double* cuda_logfact; hipMalloc((void**) &cuda_logfact, LOGFACT_SIZE*sizeof(double)); hipMemcpy(cuda_logfact, logfact, LOGFACT_SIZE*sizeof(double), hipMemcpyHostToDevice); struct timeval t1, t2; double elapsedTime; float* CG = CG_tensor.data<float>(); gettimeofday(&t1, NULL); int size0 = (L+1)*(L+1)*(L+1)*(2*L+1); hipLaunchKernelGGL(( cudaprecomputeCG_job), dim3(cuda_gridsize(size0)), dim3(NUM_THREADS), 0, 0, CG, cuda_logfact, L, B); hipDeviceSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 0 Need %d threads, took %lf mili-seconds\n", size0, elapsedTime); float* grad_middle = grad_middle_tensor.data<float>(); int size1a = B * (L+1) * (L+2) * (L+1) /2; //printf("Step 1.a Need %d threads\n", size1a); gettimeofday(&t1, NULL); hipLaunchKernelGGL(( cudaCG_forward_kernel), dim3(cuda_gridsize(size1a)), dim3(NUM_THREADS), 0, 0, Fin, grad_middle, cuda_tauIn, cuda_cum_tauIn_m, cuda_cum_tauMiddle_m, cuda_logfact, L, B); hipDeviceSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 1.a Need %d threads, took %lf mili-seconds\n", size1a, elapsedTime); //Step 1.b, batch normalization //int size1b = cum_tauMiddle_m[L+1]; int size1b = 0; for (int templ = 0; templ <= L; templ++){ size1b += tauMiddle[templ]; } //printf("Step 1.b Need %d threads \n", size1b); float* moving_std = moving_std_tensor.data<float>(); gettimeofday(&t1, NULL); hipLaunchKernelGGL(( cudaBatchNorm_forward_job), dim3(cuda_gridsize(size1b)), dim3(NUM_THREADS), 0, 0, grad_middle, moving_std, cuda_tauMiddle, cuda_cum_tauMiddle_m, 1, eps, L, B, 0); hipDeviceSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 1.b Need %d threads, took %lf mili-seconds\n", size1b, elapsedTime); //Step 1.c calculate grad on weights int size1c = cumW_tauOut_m[L+1]; //printf("Step 1.c Need %d threads \n", size1c); gettimeofday(&t1, NULL); hipLaunchKernelGGL(( cudaWeightGrad1_backward_job), dim3(cuda_gridsize(size1c)), dim3(NUM_THREADS), 0, 0, grad_middle, grad_weight, grad_out, cuda_tauMiddle, cuda_cum_tauMiddle_m, cuda_cum_tauOut_m, cuda_cumW_tauOut_m, L, B); hipDeviceSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 1.c Need %d threads, took %lf mili-seconds\n", size1c, elapsedTime); //Second update the middle grad //SHOULD CLEAR grad_middle FIRST! 
float* weights = weight_tensor.data<float>(); int size2 = cum_tauMiddle_m[L+1];//This is the most balanced gettimeofday(&t1, NULL); hipLaunchKernelGGL(( cudaMiddleGrad_backward_job), dim3(cuda_gridsize(size2)), dim3(NUM_THREADS), 0, 0, grad_middle, weights, grad_out, moving_std, cuda_tauMiddle, cuda_cum_tauMiddle_m, cuda_tauOut, cuda_cum_tauOut_m, cuda_cumW_tauOut_m, L, B, eps); hipDeviceSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 2 Need %d threads, took %lf mili-seconds\n", size2, elapsedTime); //Last, update the grad on input float* grad_in = grad_in_tensor.data<float>(); int size3 = B * cum_tauIn_m[L+1]; //printf("Step 3 Need %d threads\n", size3); gettimeofday(&t1, NULL); hipLaunchKernelGGL(( cudaCG_backward_kernel), dim3(cuda_gridsize(size3)), dim3(NUM_THREADS), 0, 0, Fin, grad_in, grad_middle, cuda_tauIn, cuda_cum_tauIn_m, cuda_cum_tauMiddle_m, //cuda_logfact, CG, CG, L, B); hipDeviceSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 3 Need %d threads, took %lf mili-seconds\n", size3, elapsedTime); gettimeofday(&t1, NULL); hipFree(cuda_logfact); hipFree(cuda_tauIn); hipFree(cuda_cum_tauIn_m); hipFree(cuda_tauMiddle); hipFree(cuda_cum_tauMiddle_m); hipFree(cuda_tauOut); hipFree(cuda_cumW_tauOut_m); hipFree(cuda_cum_tauOut_m); free(logfact); free(cum_tauIn_m); free(tauMiddle); free(cum_tauMiddle_m); free(cum_tauOut_m); free(cumW_tauOut_m); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms }
dac3b889e0dfe1f91c33205b06188031d470f4d8.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <curand.h> #include <stdio.h> #include <math.h> #include <float.h> #define NUM_THREADS 512 #define BLOCK 512 #define IDX(b,l,t,m,i,cum,L) (i+2*(m+t*(2*l+1)+cum[l]+b*cum[L+1])) #define PLUSMINUS(k) ((k%2==1) ? -1 : 1) #define LOGFACT(n,mem) ((n < 2) ? 0. : mem[n]) #define WIDX(l,tOut,tMid,i,cum,tauMids) (i+2*(tMid+tauMids[l]*tOut+cum[l])) int rounded_division(int number1, int number2) { if (number1 % number2 == 0) { return number1 / number2; } return number1 / number2 + 1; } dim3 cuda_gridsize(int n){ int k = (n - 1) / BLOCK + 1; int x = k; int y = 1; if (x > 65535){ x = ceil(sqrt(k)); y = (n - 1) / (x * BLOCK) + 1; } dim3 d(x, y, 1); return d; } namespace { __device__ __forceinline__ float _naiveCG( int l1, int l2, int l, int m1, int m2, int m, const double* mem){ int m3=-m; int t1=l2-m1-l; int t2=l1+m2-l; int t3=l1+l2-l; int t4=l1-m1; int t5=l2+m2; int tmin=max(0,max(t1,t2)); int tmax=min(t3,min(t4,t5)); double wigner=0; double logA=(log((double)2*l+1)+LOGFACT(l+l1-l2,mem)+LOGFACT(l-l1+l2,mem)+LOGFACT(l1+l2-l,mem)-LOGFACT(l1+l2+l+1,mem))/2; logA+=(LOGFACT(l-m3,mem)+LOGFACT(l+m3,mem)+LOGFACT(l1-m1,mem)+LOGFACT(l1+m1,mem)+LOGFACT(l2-m2,mem)+LOGFACT(l2+m2,mem))/2; for(int t=tmin; t<=tmax; t++){ double logB = LOGFACT(t,mem)+LOGFACT(t3-t,mem)+LOGFACT(t4-t,mem)+LOGFACT(t5-t,mem)+LOGFACT(-t1+t,mem)+LOGFACT(-t2+t,mem); wigner += PLUSMINUS(t)*exp(logA-logB); } return (float) PLUSMINUS(l1-l2-m3)*PLUSMINUS(l1-l2+m)*wigner; } __device__ __forceinline__ float naiveCG_cal_m( int l1, int l2, int l, int m1, int m2, const double* mem){ return _naiveCG(l1, l2, l, m1, m2, m1+m2, mem); } __device__ float naiveCG_cal_m1( int l1, int l2, int l, int m, int m2, const double* mem){ return _naiveCG(l1, l2, l, m - m2, m2, m, mem); } __global__ void cudaprecomputeCG_job( float* __restrict__ CG, const double* __restrict__ logfact, int Lmax, int Batch_size) { const int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; const int L1 = (Lmax + 1); const int L2 = L1*L1, L3=L1*L1*L1; if (global_threadId < L3*(2*Lmax+1)){ int m2 = global_threadId % (2*Lmax+1); int l_remainder = global_threadId / (2*Lmax+1); int l1 = l_remainder / L2; int l2 = (l_remainder / L1) % L1; int l = l_remainder % L1; if (l2 <= l1 && l1-l2 <= l && l <= l1+l2 && m2 < 2*l2+1){ int start = 0; for (int templ1=0; templ1 <= l1; templ1++){ for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){ int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? 
Lmax : templ2+templ1; for (int templ=low; templ<=high ; templ++){ start += (2*templ2+1)*(2*templ+1); } } } for (int templ = l1-l2; templ<l; templ++){ start += (2*l2+1)*(templ*2+1); } //offset m2 start += m2*(2*l+1); for (int m = 0; m < 2*l+1;m++){ int m1 = (m-l) - (m2-l2); if (-l1 <= m1 && m1 <= l1){ CG[start + m] = naiveCG_cal_m1(l1,l2,l,m-l,m2-l2,logfact); //CG[start + m] = 100*l1 + 10*l2 + l + 0.1*(m1+l1) + 0.01*m2 + 0.001*m; } } } } } //================================================================================================================== __global__ void cudaCG_forward_kernel( const float* tensor, float* out_tensor, const int* taus, const int* cum_tauIn_m, const int* cum_tauMiddle_m, const double* logfact, int Lmax, int Batch_size) { int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; int L1 = (Lmax+1); int Entry_size = L1 * (Lmax+2) * (Lmax +1) / 2; if (global_threadId < Batch_size * Entry_size){ int b = global_threadId / Entry_size; int l = global_threadId % L1; int remainder_for_l = (global_threadId % Entry_size) / L1; int l1 = 0, l2 = remainder_for_l * 2; while (l1*(l1+1) <= l2){l1++;} l1 -= 1; l2 = (l2 - l1*(l1+1))/2; if (l2 <= l1 && l1 - l2 <= l && l <= l1 + l2){ int t_offset = 0; for (int templ1 = 0; templ1<l1; templ1++){ for (int templ2 = 0; templ2<=templ1; templ2++){ if (l <= templ2 + templ1 && l >= templ1- templ2){ t_offset += taus[templ1]*taus[templ2]; } } } for (int templ2 = 0; templ2<=l2; templ2++){ if (l <= templ2 + l1 && l >= l1- templ2){ t_offset += taus[l1]*taus[templ2]; } } t_offset -= taus[l1]*taus[l2]; for (int m1 = -l1; m1 <= l1; m1++){ for (int m2 = -l2; m2 <= l2; m2++){ int m = m1 + m2; if (-l <= m && m <= l){ float CGcoef = naiveCG_cal_m(l1,l2,l,m1,m2,logfact); for (int t1 = 0; t1 < taus[l1]; t1++){ for (int t2 = 0; t2 < taus[l2]; t2++){ int t = t1 * taus[l2] + t2 + t_offset; float real1 = tensor[IDX(b,l1,t1,m1+l1,0,cum_tauIn_m,Lmax)]; float imag1 = tensor[IDX(b,l1,t1,m1+l1,1,cum_tauIn_m,Lmax)]; float real2 = tensor[IDX(b,l2,t2,m2+l2,0,cum_tauIn_m,Lmax)]; float imag2 = tensor[IDX(b,l2,t2,m2+l2,1,cum_tauIn_m,Lmax)]; out_tensor[IDX(b,l,t,m+l,0,cum_tauMiddle_m,Lmax)] += (real1 * real2 - imag1 * imag2) * CGcoef; out_tensor[IDX(b,l,t,m+l,1,cum_tauMiddle_m,Lmax)] += (real1 * imag2 + real2 * imag1) * CGcoef; //out_tensor[IDX(b,l,t,m+l,0,cum_tauMiddle_m,Lmax)] = t + 0.01 * t_offset; //out_tensor[IDX(b,l,t,m+l,1,cum_tauMiddle_m,Lmax)] = m+l+0.1 * l1 + 0.01 * l2 + 0.001*l; //return; } } } } } } } } __global__ void cudaBatchNorm_forward_job( float* mid_tensor, float* moving_std, const int* tauMiddle, const int* cum_tauMiddle_m, float cnt, float eps, int Lmax, int Batch_size, int update_std){ int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; int t_offset = 0, l=0; while(l<=Lmax){ t_offset+=tauMiddle[l]; if (t_offset <= global_threadId){ l++; } else { t_offset -= tauMiddle[l]; break; } } if (l <= Lmax){ int tmid = global_threadId - t_offset; if (update_std){ //calculate mean double N = (double) Batch_size * (2*l+1); double mean = 0.; for (int b = 0; b < Batch_size; b++){ for (int m = 0; m < 2*l+1; m++){ float realm = mid_tensor[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)]; float imagm = mid_tensor[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)]; double norm = sqrt(realm*realm+imagm*imagm); mean += norm / N; //moving_std[t_offset + tmid] = norm; //return; } } //calculate std double std = 0.; for (int b = 0; b < Batch_size; b++){ for (int m = 0; m < 2*l+1; m++){ float realm = mid_tensor[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)]; float imagm = 
mid_tensor[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)]; double norm = sqrt(realm*realm+imagm*imagm); std += (norm - mean) * (norm - mean) / N; } } std = sqrt(std); //update std //moving_std[t_offset + tmid] = std; moving_std[t_offset + tmid] *= cnt / (cnt + 1); moving_std[t_offset + tmid] += std / (cnt + 1); } //actually performing batch norm. Note eval mode only has this, not update in as in the previous code double divisor = (eps > moving_std[t_offset + tmid]) ? eps : moving_std[t_offset + tmid]; for (int b = 0; b < Batch_size; b++){ for (int m = 0; m < 2*l+1; m++){ mid_tensor[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)] /= divisor; mid_tensor[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)] /= divisor; } } } } __global__ void cudaWeightTransform_forward_job( const float* mid_tensor, const float* weight_tensor, float* out_tensor, const int* tauMiddle, const int* cum_tauMiddle_m, const int* cum_tauOut_m, const int* cumW_tauOut_m, int Lmax, int Batch_size) { int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; if (global_threadId < Batch_size*cum_tauOut_m[Lmax+1]){ //first, loop to get l int b = global_threadId / cum_tauOut_m[Lmax+1]; int ltm = global_threadId % cum_tauOut_m[Lmax+1], l=0; while (cum_tauOut_m[l] <= ltm){l++;} l--; int tout = (ltm - cum_tauOut_m[l]) / (2*l+1); int m = (ltm - cum_tauOut_m[l]) % (2*l+1); float real=0.0, imag=0.0; for (int tmid = 0; tmid < tauMiddle[l]; tmid++){ float realw = weight_tensor[WIDX(l,tout,tmid,0,cumW_tauOut_m,tauMiddle)]; float imagw = weight_tensor[WIDX(l,tout,tmid,1,cumW_tauOut_m,tauMiddle)]; float realm = mid_tensor[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)]; float imagm = mid_tensor[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)]; real += realw * realm - imagw * imagm; imag += realw * imagm + imagw * realm; } //weight_tensor[WIDX(l,tout,tmid,0,cumW_tauOut_m,tauMiddle)] = global_threadId; //weight_tensor[WIDX(l,tout,tmid,1,cumW_tauOut_m,tauMiddle)] = 300 + l + 0.1 * tout + 0.01 * tmid; out_tensor[IDX(b,l,tout,m,0,cum_tauOut_m,Lmax)] = real; out_tensor[IDX(b,l,tout,m,1,cum_tauOut_m,Lmax)] = imag; } } //==========================================================backward __global__ void cudaWeightGrad1_backward_job( const float* middle, float* grad_weight, const float* grad_out, const int* tauMiddle, const int* cum_tauMiddle_m, const int* cum_tauOut_m, const int* cumW_tauOut_m, int Lmax, int Batch_size){ int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; if (global_threadId < cumW_tauOut_m[Lmax+1]){ int l=0; while (cumW_tauOut_m[l] <= global_threadId){l++;} l--; int tout = (global_threadId - cumW_tauOut_m[l]) / tauMiddle[l]; int tmid = (global_threadId - cumW_tauOut_m[l]) % tauMiddle[l]; float real=0.0, imag=0.0; for (int b = 0; b < Batch_size; b++){ for (int m = 0; m < 2*l+1; m++){ float realo = grad_out[IDX(b,l,tout,m,0,cum_tauOut_m,Lmax)]; float imago = grad_out[IDX(b,l,tout,m,1,cum_tauOut_m,Lmax)]; float realm = middle[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)]; float imagm = middle[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)]; real += realm * realo + imagm * imago; imag += realm * imago - realo * imagm; } } grad_weight[WIDX(l,tout,tmid,0,cumW_tauOut_m,tauMiddle)] = real; grad_weight[WIDX(l,tout,tmid,1,cumW_tauOut_m,tauMiddle)] = imag; } } __global__ void cudaMiddleGrad_backward_job( float* grad_middle, const float* weight, const float* grad_out, const float* moving_std, const int* tauMiddle, const int* cum_tauMiddle_m, const int* tauOut, const int* cum_tauOut_m, const int* cumW_tauOut_m, int Lmax, int Batch_size, float eps){ int global_threadId = blockIdx.x * 
blockDim.x + threadIdx.x; if (global_threadId < cum_tauMiddle_m[Lmax+1]){ int l=0; while (cum_tauMiddle_m[l] <= global_threadId){l++;} l--; int t_offset = 0; for (int templ = 0; templ <= l; templ++){ t_offset += tauMiddle[templ]; } t_offset -= tauMiddle[l]; int tm = global_threadId - cum_tauMiddle_m[l]; int tmid = tm / (2*l+1), m = tm % (2*l+1); float divisor = (eps > moving_std[t_offset+tmid]) ? eps : moving_std[t_offset+tmid]; //divisor = divisor * divisor; for (int b = 0; b < Batch_size; b++){ float real=0.0, imag=0.0; for (int tout = 0; tout < tauOut[l]; tout++){ float realo = grad_out[IDX(b,l,tout,m,0,cum_tauOut_m,Lmax)]; float imago = grad_out[IDX(b,l,tout,m,1,cum_tauOut_m,Lmax)]; float realw = weight[WIDX(l,tout,tmid,0,cumW_tauOut_m,tauMiddle)]; float imagw = weight[WIDX(l,tout,tmid,1,cumW_tauOut_m,tauMiddle)]; real += realw * realo + imagw * imago; imag += realw * imago - realo * imagw; } //grad_middle[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)] = real; //grad_middle[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)] = imag; grad_middle[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)] = real / divisor; grad_middle[IDX(b,l,tmid,m,1,cum_tauMiddle_m,Lmax)] = imag / divisor; //grad_middle[IDX(b,l,tmid,m,0,cum_tauMiddle_m,Lmax)] = b + 0.1*l + 0.01*tmid+0.001*m; } //moving_std[t_offset+tmid] = t_offset + 0.1*tmid; } } __global__ void cudaCG_backward_kernel( const float* tensor, float* g_in, const float* g_out, const int* taus, const int* cum_taus, const int* cum_new_taus, const float* CG, int Lmax, int Batch_size) { int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; if (global_threadId < Batch_size * cum_taus[Lmax+1]){ int b = global_threadId / cum_taus[Lmax + 1]; int ltm1 = global_threadId % cum_taus[Lmax + 1]; int l1 = 0; while (cum_taus[l1]<=ltm1) { l1++; } l1 -= 1; int tm1 = ltm1 - cum_taus[l1]; int t1 = tm1 / (2*l1+1); int m1 = tm1 % (2*l1+1); //m1 -= l1; int l2 = 0, m2 = 0, t2 = 0; float real1=0, imag1=0; for (l2 = 0; l2 <= l1; l2++){ for (int l = l1 - l2; l <= Lmax && l <= l1 + l2; l++){ int CG_offset=0, t_offset=0; for (int templ1=0; templ1 <= l1; templ1++){ for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){ int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? Lmax : templ2+templ1; for (int templ=low; templ<=high ; templ++){ CG_offset += (2*templ2+1)*(2*templ+1); } if (l <= templ1 + templ2 && l >= templ1 - templ2){ t_offset += taus[templ1]*taus[templ2]; } } } for (int templ = l1-l2; templ<l; templ++){ CG_offset += (2*l2+1)*(templ*2+1); } for (m2 = 0; m2 < 2*l2+1; m2++){ for (int m = 0; m < 2*l+1; m++){ if (m1-l1 + m2-l2 == m-l){ float CGcoef = CG[CG_offset+(2*l+1)*m2+m]; for (t2 = 0; t2 < taus[l2]; t2++){ int t = taus[l2] * t1 + t2 + t_offset; float real = g_out[IDX(b,l,t,m,0,cum_new_taus,Lmax)]; float imag = g_out[IDX(b,l,t,m,1,cum_new_taus,Lmax)]; float real2 = tensor[IDX(b,l2,t2,m2,0,cum_taus,Lmax)]; float imag2 = tensor[IDX(b,l2,t2,m2,1,cum_taus,Lmax)]; real1 += (real * real2 + imag * imag2) * CGcoef; imag1 += (real2 * imag - real * imag2) * CGcoef; } } } } } } //Now switching to treat l1 as a "l2" l2 = l1; t2 = t1; m2 = m1; for (l1 = l2; l1 <= Lmax; l1++){ for (int l = l1 - l2; l <= Lmax && l <= l1 + l2; l++){ int CG_offset=0, t_offset=0; for (int templ1=0; templ1 <= l1; templ1++){ for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){ int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? 
Lmax : templ2+templ1; for (int templ=low; templ<=high ; templ++){ CG_offset += (2*templ2+1)*(2*templ+1); } if (l <= templ1 + templ2 && l >= templ1 - templ2){ t_offset += taus[templ1]*taus[templ2]; } } } for (int templ = l1-l2; templ<l; templ++){ CG_offset += (2*l2+1)*(templ*2+1); } for (m1 = 0; m1 < 2*l1+1; m1++){ for (int m = 0; m < 2*l+1; m++){ if (m1-l1 + m2-l2 == m-l){ float CGcoef = CG[CG_offset+(2*l+1)*m2+m]; for (t1 = 0; t1 < taus[l1]; t1++){ int t = taus[l2] * t1 + t2 + t_offset; float real = g_out[IDX(b,l,t,m,0,cum_new_taus,Lmax)]; float imag = g_out[IDX(b,l,t,m,1,cum_new_taus,Lmax)]; //This time we need to access l1 t1 and m1 float real2 = tensor[IDX(b,l1,t1,m1,0,cum_taus,Lmax)]; float imag2 = tensor[IDX(b,l1,t1,m1,1,cum_taus,Lmax)]; real1 += (real * real2 + imag * imag2) * CGcoef; imag1 += (real2 * imag - real * imag2) * CGcoef; } } } } } } g_in[global_threadId*2] = real1; g_in[global_threadId*2+1] = imag1; } } } // namespace void print_arr(int* v, int l){ printf("vector: ("); for (int i = 0; i < l; i++){ printf("%d, ", v[i]); } printf(")\n"); return; } int* _get_cum_tau(int* taus, int L){ int* cum_tau = (int*) malloc((L+2)*sizeof(int)); cum_tau[0] = 0; for (int l = 0; l <= L; l++){ cum_tau[l+1] = cum_tau[l] + (2 * l + 1) * taus[l]; } return cum_tau; } //===================================================================================== void UpBatchNorm_forward_cuda( torch::Tensor input_tensor, torch::Tensor middle_tensor, torch::Tensor output_tensor, torch::Tensor weight_tensor, int L, int B, torch::Tensor tauIn_tensor, torch::Tensor tauOut_tensor, torch::Tensor moving_std_tensor, float cnt, float eps, int update_std){ float* F = input_tensor.data<float>(); float* middle = middle_tensor.data<float>(); int* tauIn = tauIn_tensor.data<int>(); //printf("len(taus) = %d\n", taus_tensor.size(0)); int* tauMiddle = (int*) calloc(L+1, sizeof(int)); for (int l1 = 0; l1 <= L; l1++){ for (int l2 = 0; l2 <= l1; l2++){ for (int l = l1-l2; l <=L && l <= l1 + l2; l++){ tauMiddle[l] += tauIn[l1] * tauIn[l2]; } } } int* cum_tauIn_m = _get_cum_tau(tauIn, L); int* cum_tauMiddle_m = _get_cum_tau(tauMiddle, L); int* tauOut = tauOut_tensor.data<int>(); int* cum_tauOut_m = _get_cum_tau(tauOut, L); int* cumW_tauOut_m = (int*) malloc((L+2)*sizeof(int)); cumW_tauOut_m[0] = 0; for (int l = 0; l <= L; l++){ cumW_tauOut_m[l+1] = cumW_tauOut_m[l] + tauOut[l] * tauMiddle[l]; } int *cuda_tauIn, *cuda_cum_tauIn_m; int *cuda_tauMiddle, *cuda_cum_tauMiddle_m; int *cuda_tauOut, *cuda_cum_tauOut_m, *cuda_cumW_tauOut_m; cudaMalloc((void**) &cuda_tauIn, (L+1)*sizeof(int)); cudaMalloc((void**) &cuda_cum_tauIn_m, (L+2)*sizeof(int)); cudaMalloc((void**) &cuda_tauMiddle, (L+1)*sizeof(int)); cudaMalloc((void**) &cuda_cum_tauMiddle_m, (L+2)*sizeof(int)); cudaMalloc((void**) &cuda_tauOut, (L+1)*sizeof(int)); cudaMalloc((void**) &cuda_cum_tauOut_m, (L+2)*sizeof(int)); cudaMalloc((void**) &cuda_cumW_tauOut_m, (L+2)*sizeof(int)); cudaMemcpy(cuda_tauIn, tauIn, (L+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_cum_tauIn_m, cum_tauIn_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_tauMiddle, tauMiddle, (L+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_cum_tauMiddle_m, cum_tauMiddle_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_tauOut, tauOut, (L+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_cum_tauOut_m, cum_tauOut_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_cumW_tauOut_m, cumW_tauOut_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice); int size1 = B * 
(L+1) * (L+2) * (L+1) /2; int LOGFACT_SIZE=5*L+20; double* logfact = (double*) calloc(LOGFACT_SIZE, sizeof(double)); for (int i = 2; i < LOGFACT_SIZE; i++){ logfact[i] = logfact[i-1] + log((double) i); } double* cuda_logfact; cudaMalloc((void**) &cuda_logfact, LOGFACT_SIZE*sizeof(double)); cudaMemcpy(cuda_logfact, logfact, LOGFACT_SIZE*sizeof(double), cudaMemcpyHostToDevice); cudaCG_forward_kernel<<<cuda_gridsize(size1), NUM_THREADS, 0>>>( F, middle, cuda_tauIn, cuda_cum_tauIn_m, cuda_cum_tauMiddle_m, cuda_logfact, L, B); cudaThreadSynchronize(); //return; //Step 2, batch normalization int size2 = 0; for (int templ = 0; templ <= L; templ++){ size2 += tauMiddle[templ]; } //printf("Step 2 Need %d threads \n", size2); //printf("This is the %f-th update %d\n", cnt, update_std); float* moving_std = moving_std_tensor.data<float>(); cudaBatchNorm_forward_job<<<cuda_gridsize(size2), NUM_THREADS, 0>>>( middle, moving_std, cuda_tauMiddle, cuda_cum_tauMiddle_m, cnt, eps, L, B, update_std); cudaThreadSynchronize(); //Step 3 is weight transform float* out = output_tensor.data<float>(); float* weights = weight_tensor.data<float>(); int size3 = B * cum_tauOut_m[L+1]; //printf("Step 3 Need %d threads \n", size3); cudaWeightTransform_forward_job<<<cuda_gridsize(size3), NUM_THREADS, 0>>>( middle, weights,out, cuda_tauMiddle, cuda_cum_tauMiddle_m, cuda_cum_tauOut_m, cuda_cumW_tauOut_m, L, B); cudaThreadSynchronize(); cudaFree(cuda_logfact); cudaFree(cuda_tauIn); cudaFree(cuda_cum_tauIn_m); cudaFree(cuda_tauMiddle); cudaFree(cuda_cum_tauMiddle_m); cudaFree(cuda_tauOut); cudaFree(cuda_cumW_tauOut_m); cudaFree(cuda_cum_tauOut_m); free(cum_tauIn_m); free(tauMiddle); free(cum_tauMiddle_m); free(cum_tauOut_m); free(cumW_tauOut_m); free(logfact); } void UpBatchNorm_backward_cuda( torch::Tensor weight_tensor, torch::Tensor input_tensor, torch::Tensor grad_in_tensor, torch::Tensor grad_weight_tensor, torch::Tensor grad_middle_tensor, torch::Tensor grad_out_tensor, torch::Tensor CG_tensor, int L, int B, torch::Tensor tauIn_tensor, torch::Tensor tauOut_tensor, torch::Tensor moving_std_tensor, float eps){ float* Fin = input_tensor.data<float>(); int* tauIn = tauIn_tensor.data<int>(); int* cum_tauIn_m = _get_cum_tau(tauIn, L); int* tauMiddle = (int*) calloc(L+1, sizeof(int)); for (int l1 = 0; l1 <= L; l1++){ for (int l2 = 0; l2 <= l1; l2++){ for (int l = l1-l2; l <=L && l <= l1 + l2; l++){ tauMiddle[l] += tauIn[l1] * tauIn[l2]; } } } int* cum_tauMiddle_m = _get_cum_tau(tauMiddle, L); int* tauOut = tauOut_tensor.data<int>(); int* cum_tauOut_m = _get_cum_tau(tauOut, L); int* cumW_tauOut_m = (int*) malloc((L+2)*sizeof(int)); cumW_tauOut_m[0] = 0; for (int l = 0; l <= L; l++){ cumW_tauOut_m[l+1] = cumW_tauOut_m[l] + tauOut[l] * tauMiddle[l]; } int *cuda_tauIn, *cuda_cum_tauIn_m; int *cuda_tauMiddle, *cuda_cum_tauMiddle_m; int *cuda_tauOut, *cuda_cum_tauOut_m, *cuda_cumW_tauOut_m; cudaMalloc((void**) &cuda_tauIn, (L+1)*sizeof(int)); cudaMalloc((void**) &cuda_cum_tauIn_m, (L+2)*sizeof(int)); cudaMalloc((void**) &cuda_tauMiddle, (L+1)*sizeof(int)); cudaMalloc((void**) &cuda_cum_tauMiddle_m, (L+2)*sizeof(int)); cudaMalloc((void**) &cuda_tauOut, (L+1)*sizeof(int)); cudaMalloc((void**) &cuda_cum_tauOut_m, (L+2)*sizeof(int)); cudaMalloc((void**) &cuda_cumW_tauOut_m, (L+2)*sizeof(int)); cudaMemcpy(cuda_tauIn, tauIn, (L+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_cum_tauIn_m, cum_tauIn_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_tauMiddle, tauMiddle, (L+1)*sizeof(int), cudaMemcpyHostToDevice); 
cudaMemcpy(cuda_cum_tauMiddle_m, cum_tauMiddle_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_tauOut, tauOut, (L+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_cum_tauOut_m, cum_tauOut_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_cumW_tauOut_m, cumW_tauOut_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice); float* grad_out = grad_out_tensor.data<float>(); float* grad_weight = grad_weight_tensor.data<float>(); //Prep for CG int LOGFACT_SIZE=5*L+20; double* logfact = (double*) calloc(LOGFACT_SIZE, sizeof(double)); for (int i = 2; i < LOGFACT_SIZE; i++){ logfact[i] = logfact[i-1] + log((double) i); } double* cuda_logfact; cudaMalloc((void**) &cuda_logfact, LOGFACT_SIZE*sizeof(double)); cudaMemcpy(cuda_logfact, logfact, LOGFACT_SIZE*sizeof(double), cudaMemcpyHostToDevice); struct timeval t1, t2; double elapsedTime; float* CG = CG_tensor.data<float>(); gettimeofday(&t1, NULL); int size0 = (L+1)*(L+1)*(L+1)*(2*L+1); cudaprecomputeCG_job<<<cuda_gridsize(size0), NUM_THREADS, 0>>>( CG, cuda_logfact, L, B); cudaThreadSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 0 Need %d threads, took %lf mili-seconds\n", size0, elapsedTime); float* grad_middle = grad_middle_tensor.data<float>(); int size1a = B * (L+1) * (L+2) * (L+1) /2; //printf("Step 1.a Need %d threads\n", size1a); gettimeofday(&t1, NULL); cudaCG_forward_kernel<<<cuda_gridsize(size1a), NUM_THREADS, 0>>>( Fin, grad_middle, cuda_tauIn, cuda_cum_tauIn_m, cuda_cum_tauMiddle_m, cuda_logfact, L, B); cudaThreadSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 1.a Need %d threads, took %lf mili-seconds\n", size1a, elapsedTime); //Step 1.b, batch normalization //int size1b = cum_tauMiddle_m[L+1]; int size1b = 0; for (int templ = 0; templ <= L; templ++){ size1b += tauMiddle[templ]; } //printf("Step 1.b Need %d threads \n", size1b); float* moving_std = moving_std_tensor.data<float>(); gettimeofday(&t1, NULL); cudaBatchNorm_forward_job<<<cuda_gridsize(size1b), NUM_THREADS, 0>>>( grad_middle, moving_std, cuda_tauMiddle, cuda_cum_tauMiddle_m, 1, eps, L, B, 0); cudaThreadSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 1.b Need %d threads, took %lf mili-seconds\n", size1b, elapsedTime); //Step 1.c calculate grad on weights int size1c = cumW_tauOut_m[L+1]; //printf("Step 1.c Need %d threads \n", size1c); gettimeofday(&t1, NULL); cudaWeightGrad1_backward_job<<<cuda_gridsize(size1c), NUM_THREADS, 0>>>( grad_middle, grad_weight, grad_out, cuda_tauMiddle, cuda_cum_tauMiddle_m, cuda_cum_tauOut_m, cuda_cumW_tauOut_m, L, B); cudaThreadSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 1.c Need %d threads, took %lf mili-seconds\n", size1c, elapsedTime); //Second update the middle grad //SHOULD CLEAR grad_middle FIRST! 
float* weights = weight_tensor.data<float>(); int size2 = cum_tauMiddle_m[L+1];//This is the most balanced gettimeofday(&t1, NULL); cudaMiddleGrad_backward_job<<<cuda_gridsize(size2), NUM_THREADS, 0>>>( grad_middle, weights, grad_out, moving_std, cuda_tauMiddle, cuda_cum_tauMiddle_m, cuda_tauOut, cuda_cum_tauOut_m, cuda_cumW_tauOut_m, L, B, eps); cudaThreadSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 2 Need %d threads, took %lf mili-seconds\n", size2, elapsedTime); //Last, update the grad on input float* grad_in = grad_in_tensor.data<float>(); int size3 = B * cum_tauIn_m[L+1]; //printf("Step 3 Need %d threads\n", size3); gettimeofday(&t1, NULL); cudaCG_backward_kernel<<<cuda_gridsize(size3), NUM_THREADS, 0>>>( Fin, grad_in, grad_middle, cuda_tauIn, cuda_cum_tauIn_m, cuda_cum_tauMiddle_m, //cuda_logfact, CG, CG, L, B); cudaThreadSynchronize(); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms //printf("Step 3 Need %d threads, took %lf mili-seconds\n", size3, elapsedTime); gettimeofday(&t1, NULL); cudaFree(cuda_logfact); cudaFree(cuda_tauIn); cudaFree(cuda_cum_tauIn_m); cudaFree(cuda_tauMiddle); cudaFree(cuda_cum_tauMiddle_m); cudaFree(cuda_tauOut); cudaFree(cuda_cumW_tauOut_m); cudaFree(cuda_cum_tauOut_m); free(logfact); free(cum_tauIn_m); free(tauMiddle); free(cum_tauMiddle_m); free(cum_tauOut_m); free(cumW_tauOut_m); gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms }
aa9357cfb1f18e35c5f9901d8e4cb295bcd7d9df.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "device_launch_parameters.h"
#include "cpu_bitmap.h"

#define DIM 1024
#define PI 3.1415926535897932f

__global__ void kernel(unsigned char *ptr) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;

    __shared__ float shared[16][16];

    const float period = 128.0f;
    shared[threadIdx.x][threadIdx.y] =
        255 * (sinf(x * 2.0f * PI / period) + 1.0f) *
              (sinf(y * 2.0f * PI / period) + 1.0f) / 4.0f;

    __syncthreads();

    ptr[offset * 4 + 0] = 0;
    ptr[offset * 4 + 1] = shared[15 - threadIdx.x][15 - threadIdx.y];
    ptr[offset * 4 + 2] = 0;
    ptr[offset * 4 + 3] = 255;
}

int main(void) {
    CPUBitmap bitmap(DIM, DIM);
    unsigned char *dev_bitmap;

    HANDLE_ERROR(hipMalloc((void **)&dev_bitmap, bitmap.image_size()));

    dim3 grids(DIM / 16, DIM / 16);
    dim3 threads(16, 16);
    kernel<<<grids, threads>>>(dev_bitmap);

    HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));

    bitmap.display_and_exit();

    HANDLE_ERROR(hipFree(dev_bitmap));
    return 0;
}
aa9357cfb1f18e35c5f9901d8e4cb295bcd7d9df.cu
#include "common.h" #include "device_launch_parameters.h" #include "cpu_bitmap.h" #define DIM 1024 #define PI 3.1415926535897932f __global__ void kernel(unsigned char *ptr) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; __shared__ float shared[16][16]; const float period = 128.0f; shared[threadIdx.x][threadIdx.y] = 255 * (sinf(x * 2.0f * PI / period) + 1.0f) * (sinf(y * 2.0f * PI / period) + 1.0f) / 4.0f; __syncthreads(); ptr[offset * 4 + 0] = 0; ptr[offset * 4 + 1] = shared[15 - threadIdx.x][15 - threadIdx.y]; ptr[offset * 4 + 2] = 0; ptr[offset * 4 + 3] = 255; } int main(void) { CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; HANDLE_ERROR(cudaMalloc((void **)&dev_bitmap, bitmap.image_size())); dim3 grids(DIM / 16, DIM / 16); dim3 threads(16, 16); kernel << <grids, threads >> >(dev_bitmap); HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost)); bitmap.display_and_exit(); HANDLE_ERROR(cudaFree(dev_bitmap)); return 0; }
646e2985acd95d418e0ddd59367c8e42620cdc8f.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "out_of_bounds_kernel.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( out_of_bounds_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( out_of_bounds_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( out_of_bounds_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
646e2985acd95d418e0ddd59367c8e42620cdc8f.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "out_of_bounds_kernel.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            out_of_bounds_kernel<<<gridBlock,threadBlock>>>();
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                out_of_bounds_kernel<<<gridBlock,threadBlock>>>();
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                out_of_bounds_kernel<<<gridBlock,threadBlock>>>();
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
e7b22d91af1c3638a7e66b43c57473b9987f0be6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ftl/render/render_params.hpp> #include "splatter_cuda.hpp" #include <ftl/rgbd/camera.hpp> #include <ftl/cuda_common.hpp> #include <ftl/cuda/fixed.hpp> #include <ftl/cuda/weighting.hpp> using ftl::rgbd::Camera; using ftl::cuda::TextureObject; using ftl::render::Parameters; #define T_PER_BLOCK 8 __device__ inline float length2(int dx, int dy) { return dx*dx + dy*dy; } __device__ inline float cross(const float2 &a, const float2 &b) { return a.x*b.y - a.y*b.x; } __device__ inline bool within(float x) { return 0.0f <= x <= 1.0f; } __device__ inline bool operator==(const float2 &a, const float2 &b) { return a.x == b.x && a.y == b.y; } __device__ inline bool insideTriangle(const float2 &a, const float2 &b, const float2 &c, const float2 &p) { float det = (b.y - c.y)*(a.x - c.x) + (c.x - b.x)*(a.y - c.y); float factor_alpha = (b.y - c.y)*(p.x - c.x) + (c.x - b.x)*(p.y - c.y); float factor_beta = (c.y - a.y)*(p.x - c.x) + (a.x - c.x)*(p.y - c.y); float alpha = factor_alpha / det; float beta = factor_beta / det; float gamma = 1.0 - alpha - beta; return p == a || p == b || p == c || (within(alpha) && within(beta) && within(gamma)); } __device__ inline void swap(short2 &a, short2 &b) { short2 t = a; a = b; b = t; } __device__ void drawLine(TextureObject<int> &depth_out, int y, int x1, int x2, float d) { for (int x=x1; x<=x2; ++x) { if (x < 0) continue; if (x >= depth_out.width()) return; atomicMin(&depth_out(x,y), int(d*1000.0f)); } } /* See: https://github.com/bcrusco/CUDA-Rasterizer */ /** * Calculate the signed area of a given triangle. */ __device__ static inline float calculateSignedArea(const short2 &a, const short2 &b, const short2 &c) { return 0.5f * (float(c.x - a.x) * float(b.y - a.y) - float(b.x - a.x) * float(c.y - a.y)); } /** * Helper function for calculating barycentric coordinates. */ __device__ static inline float calculateBarycentricCoordinateValue(const short2 &a, const short2 &b, const short2 &c, const short2 (&tri)[3]) { return calculateSignedArea(a,b,c) / calculateSignedArea(tri[0], tri[1], tri[2]); } /** * Calculate barycentric coordinates. * TODO: Update to handle triangles coming in and not the array */ __device__ static float3 calculateBarycentricCoordinate(const short2 (&tri)[3], const short2 &point) { float beta = calculateBarycentricCoordinateValue(tri[0], point, tri[2], tri); float gamma = calculateBarycentricCoordinateValue(tri[0], tri[1], point, tri); float alpha = 1.0f - beta - gamma; return make_float3(alpha, beta, gamma); } /** * Check if a barycentric coordinate is within the boundaries of a triangle. */ __host__ __device__ static bool isBarycentricCoordInBounds(const float3 &barycentricCoord) { return barycentricCoord.x >= -0.0001f && //barycentricCoord.x <= 1.0f && barycentricCoord.y >= -0.0001f && //barycentricCoord.y <= 1.0f && barycentricCoord.z >= -0.0001f; // &&barycentricCoord.z <= 1.0f; } /** * For a given barycentric coordinate, compute the corresponding z position * (i.e. depth) on the triangle. */ __device__ static float getZAtCoordinate(const float3 &barycentricCoord, const float (&tri)[3]) { return (barycentricCoord.x * tri[0] + barycentricCoord.y * tri[1] + barycentricCoord.z * tri[2]); } /** * Loop over rectangular region covering the triangle and test each pixel for * being inside or outside (using bary centric coordinate method). If inside * then atomically write to the depth map. 
*/ __device__ void drawTriangle(const float (&d)[3], const short2 (&v)[3], const Parameters &params, int* depth_out, int out_pitch4) { const int minX = min(v[0].x, min(v[1].x, v[2].x)); const int minY = min(v[0].y, min(v[1].y, v[2].y)); const int maxX = max(v[0].x, max(v[1].x, v[2].x)); const int maxY = max(v[0].y, max(v[1].y, v[2].y)); // Remove really large triangles if ((maxX - minX) * (maxY - minY) <= params.triangle_limit) { // TODO: Verify that < is correct, was <= before but < is faster. for (int sy=minY; sy < maxY; ++sy) { for (int sx=minX; sx < maxX; ++sx) { //if () continue; float3 baryCentricCoordinate = calculateBarycentricCoordinate(v, make_short2(sx, sy)); if (sx < params.camera.width && sx >= 0 && sy < params.camera.height && sy >= 0 && isBarycentricCoordInBounds(baryCentricCoordinate)) { float new_depth = getZAtCoordinate(baryCentricCoordinate, d); atomicMin(&depth_out[sx+sy*out_pitch4], int(new_depth*100000.0f)); } } } } } /** * This selects which triangles are drawn. It assumes that discontinuities * have already been removed such that the screen coordinate alone acts to * indicate a valid or invalid point. */ __device__ inline bool isValidTriangle(const short2 (&v)[3]) { return v[1].x < 30000 && v[2].x < 30000; } /** * Read the other two verticies into memory. The template parameters determine * which verticies to load. */ template <int A, int B> __device__ bool loadTriangle(int x, int y, float (&d)[3], short2 (&v)[3], const short* __restrict__ depth_in, const short2* __restrict__ screen, int pitch4, int pitch2) { d[1] = fixed2float<10>(depth_in[y*pitch2+x+A]); d[2] = fixed2float<10>(depth_in[(y+B)*pitch2+x]); v[1] = screen[y*pitch4+x+A]; v[2] = screen[(y+B)*pitch4+x]; return isValidTriangle(v); } /* * Convert source screen position to output screen coordinates. */ __global__ void triangle_render_kernel( const short* __restrict__ depth_in, int* depth_out, const short2* __restrict__ screen, int width, int height, int pitch2, int pitch4, int out_pitch4, Parameters params) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= 1 && x < width-1 && y >= 1 && y < height-1) { float d[3]; d[0] = fixed2float<10>(depth_in[y*pitch2+x]); short2 v[3]; v[0] = screen[y*pitch4+x]; if (v[0].x < 30000) { // Calculate discontinuity threshold. //const float threshold = (params.depthCoef / ((params.depthCoef / d[0]) - params.disconDisparities)) - d[0]; // Draw (optionally) 4 triangles as a diamond pattern around the central point. 
if (loadTriangle<1,1>(x, y, d, v, depth_in, screen, pitch4, pitch2)) drawTriangle(d, v, params, depth_out, out_pitch4); if (loadTriangle<1,-1>(x, y, d, v, depth_in, screen, pitch4, pitch2)) drawTriangle(d, v, params, depth_out, out_pitch4); if (loadTriangle<-1,1>(x, y, d, v, depth_in, screen, pitch4, pitch2)) drawTriangle(d, v, params, depth_out, out_pitch4); if (loadTriangle<-1,-1>(x, y, d, v, depth_in, screen, pitch4, pitch2)) drawTriangle(d, v, params, depth_out, out_pitch4); } } } void ftl::cuda::triangle_render1(const cv::cuda::GpuMat &depth_in, cv::cuda::GpuMat &depth_out, const cv::cuda::GpuMat &screen, const Parameters &params, hipStream_t stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth_in.cols + THREADS_X - 1)/THREADS_X, (depth_in.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); depth_out.create(params.camera.height, params.camera.width, CV_32S); hipLaunchKernelGGL(( triangle_render_kernel), dim3(gridSize), dim3(blockSize), 0, stream, depth_in.ptr<short>(), depth_out.ptr<int>(), screen.ptr<short2>(), depth_in.cols, depth_in.rows, depth_in.step1(), screen.step1()/2, depth_out.step1(), params); cudaSafeCall( hipGetLastError() ); } // ==== Merge convert =========== __global__ void merge_convert_kernel( TextureObject<int> depth_in, TextureObject<float> depth_out, float alpha) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < 0 || x >= depth_in.width() || y < 0 || y >= depth_in.height()) return; int d = depth_in.tex2D(x,y); float a = float(d)*alpha; float b = depth_out.tex2D(x,y); depth_out(x,y) = min(a,b); } void ftl::cuda::merge_convert_depth(TextureObject<int> &depth_in, TextureObject<float> &depth_out, float alpha, hipStream_t stream) { const dim3 gridSize((depth_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); hipLaunchKernelGGL(( merge_convert_kernel), dim3(gridSize), dim3(blockSize), 0, stream, depth_in, depth_out, alpha); cudaSafeCall( hipGetLastError() ); } // ==== BLENDER ======== /* * Merge two depth maps together */ __global__ void mesh_blender_simple_kernel( TextureObject<int> depth_in, TextureObject<int> depth_out, ftl::rgbd::Camera camera, float alpha) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < 0 || x >= depth_in.width() || y < 0 || y >= depth_in.height()) return; int a = depth_in.tex2D(x,y); int b = depth_out.tex2D(x,y); float mindepth = (float)min(a,b) / 100000.0f; float maxdepth = (float)max(a,b) / 100000.0f; float weight = ftl::cuda::weighting(maxdepth-mindepth, alpha); //depth_out(x,y) = (int)(((float)mindepth + (float)maxdepth*weight) / (1.0f + weight) * 100000.0f); float depth = (mindepth + maxdepth*weight) / (1.0f + weight); depth_out(x,y) = (int)(depth * 100000.0f); } __global__ void mesh_blender_kernel( TextureObject<int> depth_in, TextureObject<float> depth_out, TextureObject<short> weights_in, TextureObject<float> weights_out, ftl::render::Parameters params, ftl::rgbd::Camera camera, float4x4 transform, float alpha) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; const float d1 = float(depth_in.tex2D((int)x, (int)y)) / 100000.0f; const float d2 = depth_out.tex2D((int)x, (int)y); const float wout = weights_out.tex2D((int)x, (int)y); if (d1 > params.camera.minDepth && d1 < params.camera.maxDepth) 
{ //const uint2 rpt = convertScreen<VPMODE>(params, x, y); const float3 camPos = transform * params.camera.screenToCam(x, y, d1); if (camPos.z > camera.minDepth && camPos.z < camera.maxDepth) { const float2 screenPos = camera.camToScreen<float2>(camPos); // Not on screen so stop now... if (screenPos.x < weights_in.width() && screenPos.y < weights_in.height()) { const float win = float(weights_in.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f))); if (d1 < d2/wout - alpha || (fabsf(d2/wout - d1) < alpha && win > wout)) { depth_out(x,y) = d1 * win; weights_out(x,y) = win; } //else if (fabsf(d2/wout - d1) < alpha) { //depth_out(x,y) = d2 + d1 * win; //weights_out(x,y) = wout + win; //depth_out(x,y) = (win > wout) ? d1*win : d2; //weights_out(x,y) = (win > wout) ? win : wout; //} } } } } void ftl::cuda::mesh_blender(TextureObject<int> &depth_in, TextureObject<int> &depth_out, const ftl::rgbd::Camera &camera, float alpha, hipStream_t stream) { const dim3 gridSize((depth_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); hipLaunchKernelGGL(( mesh_blender_simple_kernel), dim3(gridSize), dim3(blockSize), 0, stream, depth_in, depth_out, camera, alpha); cudaSafeCall( hipGetLastError() ); } void ftl::cuda::mesh_blender(TextureObject<int> &depth_in, TextureObject<float> &depth_out, TextureObject<short> &weights_in, TextureObject<float> &weights_out, const ftl::render::Parameters &params, const ftl::rgbd::Camera &camera, const float4x4 &transform, float alpha, hipStream_t stream) { const dim3 gridSize((depth_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); hipLaunchKernelGGL(( mesh_blender_kernel), dim3(gridSize), dim3(blockSize), 0, stream, depth_in, depth_out, weights_in, weights_out, params, camera, transform, alpha); cudaSafeCall( hipGetLastError() ); }
e7b22d91af1c3638a7e66b43c57473b9987f0be6.cu
#include <ftl/render/render_params.hpp> #include "splatter_cuda.hpp" #include <ftl/rgbd/camera.hpp> #include <ftl/cuda_common.hpp> #include <ftl/cuda/fixed.hpp> #include <ftl/cuda/weighting.hpp> using ftl::rgbd::Camera; using ftl::cuda::TextureObject; using ftl::render::Parameters; #define T_PER_BLOCK 8 __device__ inline float length2(int dx, int dy) { return dx*dx + dy*dy; } __device__ inline float cross(const float2 &a, const float2 &b) { return a.x*b.y - a.y*b.x; } __device__ inline bool within(float x) { return 0.0f <= x <= 1.0f; } __device__ inline bool operator==(const float2 &a, const float2 &b) { return a.x == b.x && a.y == b.y; } __device__ inline bool insideTriangle(const float2 &a, const float2 &b, const float2 &c, const float2 &p) { float det = (b.y - c.y)*(a.x - c.x) + (c.x - b.x)*(a.y - c.y); float factor_alpha = (b.y - c.y)*(p.x - c.x) + (c.x - b.x)*(p.y - c.y); float factor_beta = (c.y - a.y)*(p.x - c.x) + (a.x - c.x)*(p.y - c.y); float alpha = factor_alpha / det; float beta = factor_beta / det; float gamma = 1.0 - alpha - beta; return p == a || p == b || p == c || (within(alpha) && within(beta) && within(gamma)); } __device__ inline void swap(short2 &a, short2 &b) { short2 t = a; a = b; b = t; } __device__ void drawLine(TextureObject<int> &depth_out, int y, int x1, int x2, float d) { for (int x=x1; x<=x2; ++x) { if (x < 0) continue; if (x >= depth_out.width()) return; atomicMin(&depth_out(x,y), int(d*1000.0f)); } } /* See: https://github.com/bcrusco/CUDA-Rasterizer */ /** * Calculate the signed area of a given triangle. */ __device__ static inline float calculateSignedArea(const short2 &a, const short2 &b, const short2 &c) { return 0.5f * (float(c.x - a.x) * float(b.y - a.y) - float(b.x - a.x) * float(c.y - a.y)); } /** * Helper function for calculating barycentric coordinates. */ __device__ static inline float calculateBarycentricCoordinateValue(const short2 &a, const short2 &b, const short2 &c, const short2 (&tri)[3]) { return calculateSignedArea(a,b,c) / calculateSignedArea(tri[0], tri[1], tri[2]); } /** * Calculate barycentric coordinates. * TODO: Update to handle triangles coming in and not the array */ __device__ static float3 calculateBarycentricCoordinate(const short2 (&tri)[3], const short2 &point) { float beta = calculateBarycentricCoordinateValue(tri[0], point, tri[2], tri); float gamma = calculateBarycentricCoordinateValue(tri[0], tri[1], point, tri); float alpha = 1.0f - beta - gamma; return make_float3(alpha, beta, gamma); } /** * Check if a barycentric coordinate is within the boundaries of a triangle. */ __host__ __device__ static bool isBarycentricCoordInBounds(const float3 &barycentricCoord) { return barycentricCoord.x >= -0.0001f && //barycentricCoord.x <= 1.0f && barycentricCoord.y >= -0.0001f && //barycentricCoord.y <= 1.0f && barycentricCoord.z >= -0.0001f; // &&barycentricCoord.z <= 1.0f; } /** * For a given barycentric coordinate, compute the corresponding z position * (i.e. depth) on the triangle. */ __device__ static float getZAtCoordinate(const float3 &barycentricCoord, const float (&tri)[3]) { return (barycentricCoord.x * tri[0] + barycentricCoord.y * tri[1] + barycentricCoord.z * tri[2]); } /** * Loop over rectangular region covering the triangle and test each pixel for * being inside or outside (using bary centric coordinate method). If inside * then atomically write to the depth map. 
*/ __device__ void drawTriangle(const float (&d)[3], const short2 (&v)[3], const Parameters &params, int* depth_out, int out_pitch4) { const int minX = min(v[0].x, min(v[1].x, v[2].x)); const int minY = min(v[0].y, min(v[1].y, v[2].y)); const int maxX = max(v[0].x, max(v[1].x, v[2].x)); const int maxY = max(v[0].y, max(v[1].y, v[2].y)); // Remove really large triangles if ((maxX - minX) * (maxY - minY) <= params.triangle_limit) { // TODO: Verify that < is correct, was <= before but < is faster. for (int sy=minY; sy < maxY; ++sy) { for (int sx=minX; sx < maxX; ++sx) { //if () continue; float3 baryCentricCoordinate = calculateBarycentricCoordinate(v, make_short2(sx, sy)); if (sx < params.camera.width && sx >= 0 && sy < params.camera.height && sy >= 0 && isBarycentricCoordInBounds(baryCentricCoordinate)) { float new_depth = getZAtCoordinate(baryCentricCoordinate, d); atomicMin(&depth_out[sx+sy*out_pitch4], int(new_depth*100000.0f)); } } } } } /** * This selects which triangles are drawn. It assumes that discontinuities * have already been removed such that the screen coordinate alone acts to * indicate a valid or invalid point. */ __device__ inline bool isValidTriangle(const short2 (&v)[3]) { return v[1].x < 30000 && v[2].x < 30000; } /** * Read the other two verticies into memory. The template parameters determine * which verticies to load. */ template <int A, int B> __device__ bool loadTriangle(int x, int y, float (&d)[3], short2 (&v)[3], const short* __restrict__ depth_in, const short2* __restrict__ screen, int pitch4, int pitch2) { d[1] = fixed2float<10>(depth_in[y*pitch2+x+A]); d[2] = fixed2float<10>(depth_in[(y+B)*pitch2+x]); v[1] = screen[y*pitch4+x+A]; v[2] = screen[(y+B)*pitch4+x]; return isValidTriangle(v); } /* * Convert source screen position to output screen coordinates. */ __global__ void triangle_render_kernel( const short* __restrict__ depth_in, int* depth_out, const short2* __restrict__ screen, int width, int height, int pitch2, int pitch4, int out_pitch4, Parameters params) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= 1 && x < width-1 && y >= 1 && y < height-1) { float d[3]; d[0] = fixed2float<10>(depth_in[y*pitch2+x]); short2 v[3]; v[0] = screen[y*pitch4+x]; if (v[0].x < 30000) { // Calculate discontinuity threshold. //const float threshold = (params.depthCoef / ((params.depthCoef / d[0]) - params.disconDisparities)) - d[0]; // Draw (optionally) 4 triangles as a diamond pattern around the central point. 
if (loadTriangle<1,1>(x, y, d, v, depth_in, screen, pitch4, pitch2)) drawTriangle(d, v, params, depth_out, out_pitch4); if (loadTriangle<1,-1>(x, y, d, v, depth_in, screen, pitch4, pitch2)) drawTriangle(d, v, params, depth_out, out_pitch4); if (loadTriangle<-1,1>(x, y, d, v, depth_in, screen, pitch4, pitch2)) drawTriangle(d, v, params, depth_out, out_pitch4); if (loadTriangle<-1,-1>(x, y, d, v, depth_in, screen, pitch4, pitch2)) drawTriangle(d, v, params, depth_out, out_pitch4); } } } void ftl::cuda::triangle_render1(const cv::cuda::GpuMat &depth_in, cv::cuda::GpuMat &depth_out, const cv::cuda::GpuMat &screen, const Parameters &params, cudaStream_t stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth_in.cols + THREADS_X - 1)/THREADS_X, (depth_in.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); depth_out.create(params.camera.height, params.camera.width, CV_32S); triangle_render_kernel<<<gridSize, blockSize, 0, stream>>>(depth_in.ptr<short>(), depth_out.ptr<int>(), screen.ptr<short2>(), depth_in.cols, depth_in.rows, depth_in.step1(), screen.step1()/2, depth_out.step1(), params); cudaSafeCall( cudaGetLastError() ); } // ==== Merge convert =========== __global__ void merge_convert_kernel( TextureObject<int> depth_in, TextureObject<float> depth_out, float alpha) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < 0 || x >= depth_in.width() || y < 0 || y >= depth_in.height()) return; int d = depth_in.tex2D(x,y); float a = float(d)*alpha; float b = depth_out.tex2D(x,y); depth_out(x,y) = min(a,b); } void ftl::cuda::merge_convert_depth(TextureObject<int> &depth_in, TextureObject<float> &depth_out, float alpha, cudaStream_t stream) { const dim3 gridSize((depth_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); merge_convert_kernel<<<gridSize, blockSize, 0, stream>>>(depth_in, depth_out, alpha); cudaSafeCall( cudaGetLastError() ); } // ==== BLENDER ======== /* * Merge two depth maps together */ __global__ void mesh_blender_simple_kernel( TextureObject<int> depth_in, TextureObject<int> depth_out, ftl::rgbd::Camera camera, float alpha) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < 0 || x >= depth_in.width() || y < 0 || y >= depth_in.height()) return; int a = depth_in.tex2D(x,y); int b = depth_out.tex2D(x,y); float mindepth = (float)min(a,b) / 100000.0f; float maxdepth = (float)max(a,b) / 100000.0f; float weight = ftl::cuda::weighting(maxdepth-mindepth, alpha); //depth_out(x,y) = (int)(((float)mindepth + (float)maxdepth*weight) / (1.0f + weight) * 100000.0f); float depth = (mindepth + maxdepth*weight) / (1.0f + weight); depth_out(x,y) = (int)(depth * 100000.0f); } __global__ void mesh_blender_kernel( TextureObject<int> depth_in, TextureObject<float> depth_out, TextureObject<short> weights_in, TextureObject<float> weights_out, ftl::render::Parameters params, ftl::rgbd::Camera camera, float4x4 transform, float alpha) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; const float d1 = float(depth_in.tex2D((int)x, (int)y)) / 100000.0f; const float d2 = depth_out.tex2D((int)x, (int)y); const float wout = weights_out.tex2D((int)x, (int)y); if (d1 > params.camera.minDepth && d1 < params.camera.maxDepth) { //const uint2 rpt = convertScreen<VPMODE>(params, x, y); 
const float3 camPos = transform * params.camera.screenToCam(x, y, d1); if (camPos.z > camera.minDepth && camPos.z < camera.maxDepth) { const float2 screenPos = camera.camToScreen<float2>(camPos); // Not on screen so stop now... if (screenPos.x < weights_in.width() && screenPos.y < weights_in.height()) { const float win = float(weights_in.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f))); if (d1 < d2/wout - alpha || (fabsf(d2/wout - d1) < alpha && win > wout)) { depth_out(x,y) = d1 * win; weights_out(x,y) = win; } //else if (fabsf(d2/wout - d1) < alpha) { //depth_out(x,y) = d2 + d1 * win; //weights_out(x,y) = wout + win; //depth_out(x,y) = (win > wout) ? d1*win : d2; //weights_out(x,y) = (win > wout) ? win : wout; //} } } } } void ftl::cuda::mesh_blender(TextureObject<int> &depth_in, TextureObject<int> &depth_out, const ftl::rgbd::Camera &camera, float alpha, cudaStream_t stream) { const dim3 gridSize((depth_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); mesh_blender_simple_kernel<<<gridSize, blockSize, 0, stream>>>(depth_in, depth_out, camera, alpha); cudaSafeCall( cudaGetLastError() ); } void ftl::cuda::mesh_blender(TextureObject<int> &depth_in, TextureObject<float> &depth_out, TextureObject<short> &weights_in, TextureObject<float> &weights_out, const ftl::render::Parameters &params, const ftl::rgbd::Camera &camera, const float4x4 &transform, float alpha, cudaStream_t stream) { const dim3 gridSize((depth_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); mesh_blender_kernel<<<gridSize, blockSize, 0, stream>>>(depth_in, depth_out, weights_in, weights_out, params, camera, transform, alpha); cudaSafeCall( cudaGetLastError() ); }
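The rasterisation and blending kernels in the pair above can only use atomicMin for their depth test because depths are first scaled into 32-bit integers (int(d*100000.0f)); CUDA provides no atomicMin overload for float. A minimal, self-contained sketch of that fixed-point z-buffer trick, using a hypothetical splat_min_depth kernel and made-up sizes:

#include <cstdio>
#include <cuda_runtime.h>

// Each thread proposes a depth for one pixel; the smallest (closest) depth
// wins. Depths are scaled to integers so that atomicMin, which has no float
// overload, can be used as the z-test.
__global__ void splat_min_depth(const float* depths, const int* pixels,
                                int* zbuffer, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    int fixed = int(depths[i] * 100000.0f);   // metres -> 10 micrometre units
    atomicMin(&zbuffer[pixels[i]], fixed);    // keep the closest sample
}

int main() {
    const int n = 4, npix = 2;
    float h_d[n] = {1.5f, 0.7f, 2.0f, 0.9f};
    int   h_p[n] = {0, 0, 1, 1};
    int   h_z[npix] = {0x7FFFFFFF, 0x7FFFFFFF};   // "infinitely far"

    float* d_d; int *d_p, *d_z;
    cudaMalloc(&d_d, n * sizeof(float));
    cudaMalloc(&d_p, n * sizeof(int));
    cudaMalloc(&d_z, npix * sizeof(int));
    cudaMemcpy(d_d, h_d, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_p, h_p, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_z, h_z, npix * sizeof(int), cudaMemcpyHostToDevice);

    splat_min_depth<<<1, 32>>>(d_d, d_p, d_z, n);
    cudaMemcpy(h_z, d_z, npix * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < npix; ++i)
        printf("pixel %d depth = %f\n", i, h_z[i] / 100000.0f);  // 0.7, 0.9
    cudaFree(d_d); cudaFree(d_p); cudaFree(d_z);
    return 0;
}

The scale factor fixes the depth resolution, which is why both files in the pair divide by the same constant when converting the integer buffer back to metres.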
1370cf621ee2a42260e9de33935a69bdc05e85f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2010, Paul Furgale and Chi Hay Tong All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of its contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "GpuIntegralImage_kernel.h" namespace asrl { // convert unsigned chars into floats (scaled by 255.0f) // - second function does nothing - overloaded, but __device__ functions are inline, so no overhead __device__ void convert_dev(float & out, unsigned char in){ out = (float) in / 255.0f; } __device__ void convert_dev(float & out, float in){ out = in; } // matrix transpose operation (on the GPU) template <typename T> __global__ void transpose_kernel(float *odata, size_t o_pitch, T *idata, size_t i_pitch, size_t width, size_t height) { __shared__ float block[ASRL_TRANSPOSE_BLOCK_DIM][ASRL_TRANSPOSE_BLOCK_DIM+1]; // read the matrix tile into shared memory unsigned int xBlock = __mul24(blockDim.x, blockIdx.x); unsigned int yBlock = __mul24(blockDim.y, blockIdx.y); unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; if ((xIndex < width) && (yIndex < height)) { // load block into shared memory unsigned int index_in = __mul24(i_pitch, yIndex) + xIndex; // where from in data convert_dev(block[threadIdx.y][threadIdx.x], idata[index_in]); // convert to float (if not already) } __syncthreads(); // write it back to global memory xIndex = yBlock + threadIdx.x; yIndex = xBlock + threadIdx.y; if ((xIndex < height) && (yIndex < width)) { unsigned int index_out = __mul24(o_pitch, yIndex) + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } void run_transpose_kernel_uchar(dim3 grid, dim3 block, float *odata, size_t o_pitch, unsigned char *idata, size_t i_pitch, size_t width, size_t height) { hipLaunchKernelGGL(( transpose_kernel<unsigned char>) , dim3(grid), dim3(block), 0, 0, odata, o_pitch, idata, i_pitch, width, height); } void run_transpose_kernel_float(dim3 grid, dim3 block, float *odata, size_t o_pitch, float *idata, size_t i_pitch, size_t width, size_t height) { hipLaunchKernelGGL(( transpose_kernel<float>) , dim3(grid), dim3(block), 0, 0, odata, o_pitch, idata, i_pitch, width, height); } } // namespace asrl
1370cf621ee2a42260e9de33935a69bdc05e85f1.cu
/* Copyright (c) 2010, Paul Furgale and Chi Hay Tong All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of its contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "GpuIntegralImage_kernel.h" namespace asrl { // convert unsigned chars into floats (scaled by 255.0f) // - second function does nothing - overloaded, but __device__ functions are inline, so no overhead __device__ void convert_dev(float & out, unsigned char in){ out = (float) in / 255.0f; } __device__ void convert_dev(float & out, float in){ out = in; } // matrix transpose operation (on the GPU) template <typename T> __global__ void transpose_kernel(float *odata, size_t o_pitch, T *idata, size_t i_pitch, size_t width, size_t height) { __shared__ float block[ASRL_TRANSPOSE_BLOCK_DIM][ASRL_TRANSPOSE_BLOCK_DIM+1]; // read the matrix tile into shared memory unsigned int xBlock = __mul24(blockDim.x, blockIdx.x); unsigned int yBlock = __mul24(blockDim.y, blockIdx.y); unsigned int xIndex = xBlock + threadIdx.x; unsigned int yIndex = yBlock + threadIdx.y; if ((xIndex < width) && (yIndex < height)) { // load block into shared memory unsigned int index_in = __mul24(i_pitch, yIndex) + xIndex; // where from in data convert_dev(block[threadIdx.y][threadIdx.x], idata[index_in]); // convert to float (if not already) } __syncthreads(); // write it back to global memory xIndex = yBlock + threadIdx.x; yIndex = xBlock + threadIdx.y; if ((xIndex < height) && (yIndex < width)) { unsigned int index_out = __mul24(o_pitch, yIndex) + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } void run_transpose_kernel_uchar(dim3 grid, dim3 block, float *odata, size_t o_pitch, unsigned char *idata, size_t i_pitch, size_t width, size_t height) { transpose_kernel<unsigned char> <<< grid, block, 0>>> (odata, o_pitch, idata, i_pitch, width, height); } void run_transpose_kernel_float(dim3 grid, dim3 block, float *odata, size_t o_pitch, float *idata, size_t i_pitch, size_t width, size_t height) { transpose_kernel<float> <<< grid, block, 0>>> (odata, o_pitch, idata, i_pitch, width, height); } } // namespace asrl
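Both versions of transpose_kernel above declare the shared tile as block[ASRL_TRANSPOSE_BLOCK_DIM][ASRL_TRANSPOSE_BLOCK_DIM+1]; the extra column of padding stops the column-wise reads in the write-back phase from all landing in the same shared-memory bank. A reduced sketch of the same tiled transpose, assuming square float matrices and a 32x32 tile (all names here are illustrative, not taken from the library):

#include <cstdio>
#include <cuda_runtime.h>

#define TILE 32

// Square-matrix transpose. The +1 column of padding on the shared tile
// avoids bank conflicts when a warp reads a column of the tile.
__global__ void transpose(float* out, const float* in, int n) {
    __shared__ float tile[TILE][TILE + 1];
    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    if (x < n && y < n) tile[threadIdx.y][threadIdx.x] = in[y * n + x];
    __syncthreads();
    // Swap the block coordinates so both the read and the write stay coalesced.
    x = blockIdx.y * TILE + threadIdx.x;
    y = blockIdx.x * TILE + threadIdx.y;
    if (x < n && y < n) out[y * n + x] = tile[threadIdx.x][threadIdx.y];
}

int main() {
    const int n = 64;
    float *d_in, *d_out;
    cudaMalloc(&d_in, n * n * sizeof(float));
    cudaMalloc(&d_out, n * n * sizeof(float));
    cudaMemset(d_in, 0, n * n * sizeof(float));

    dim3 block(TILE, TILE);
    dim3 grid((n + TILE - 1) / TILE, (n + TILE - 1) / TILE);
    transpose<<<grid, block>>>(d_out, d_in, n);
    cudaDeviceSynchronize();
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}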
77aa200897361442014d0057af286f08c02370a3.hip
// !!! This is a file automatically generated by hipify!!! //convOpti.cu /* nvcc convOpti.cu -o convOpti.exe nvcc -ccbin /home/seungbin/Desktop/llvm/llvm-install/bin/clang++ -Xcompiler ,\"-emit-llvm\",\"-std=c++11\", convOpti.cu -c -o convOpti.bc -D__STRICT_ANSI__ nvcc -ccbin /home/seungbin/Desktop/llvm/llvm-install/bin/clang++ -Xcompiler ,\"-emit-llvm\",\"-D__cplusplus=201103L\", convOpti.cu -c -o convOpti.bc -D__STRICT_ANSI__ -I./include -I./include/common -I./include/cudaconv2 -I./include/nvmatrix -I/home/seungbin/npu/tools/cudnn-6.5-linux-x64-v2 -I./include/reorder -I. -I/usr/local/cuda/include -I/home/seungbin/NVIDIA_GPU_Computing_SDK/C/common/inc -I/home/seungbin/NVIDIA_GPU_Computing_SDK/shared//inc clang++ -c -emit-llvm -I/usr/bin/local/cuda/include convOpti.cu -o convOpti.bc */ //#include <convOpti.cuh> //#include <nvmatrix.cuh> #include <string.h> #include <time.h> /* clock_t, clock, CLOCKS_PER_SEC */ #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <assert.h> #include <math.h> #include "hip/hip_runtime.h" #include <device_launch_parameters.h> #include <hip/hip_runtime_api.h> #include <hip/hip_vector_types.h> #include <host_defines.h> //#include <map> //#include <algorithm> #ifndef N_LIVING_NEURON #define N_LIVING_NEURON 2027 // it's just for compiler option #endif #ifndef GRID_DIM_X #define GRID_DIM_X 64 // it's just for compiler option #endif #define BLOCK_SIZE 32 #define REPEAT 1000 #define EXIT_MSG(s) \ do { \ printf ("%s\n", s); \ fflush (stdout); \ exit(1); \ } while (0) float * readMatrix(char * filename, int nRows, int nCols); void print_result(float* result, int mR, int nR, int real_mR, int real_nR, int isRowMajor); template <int T_numImages> __global__ void optiConvFilter(float* images, float* filters, float* targets, const int numImages, const int numFilters, //128(1), 64 const int imgSizeY, const int imgSizeX, //12, 12 const int filterSize, const int paddingStart, //5, -2 const int moduleStride, //1 const int numModulesY, const int numModulesX, //12, 12 const int imgStride, const int numImgColors, //128(1), 64 const float scaleTargets, const float scaleOutputs, //0.0, 1.0 const bool conv);//true int main() { hipError_t err; char filename_img[1024] = "image.test.data";//"image.one.data"; char filename_filter[1024] = "filter.test.data";//"filter.zero.data"; char filename_targetInit[1024] = "targetInit.test.data"; float *images_h = readMatrix(filename_img, 9216, 1); float *images_d = NULL; float *filters_h = readMatrix(filename_filter, 1600, 64); float *filters_d = NULL; err = hipMalloc((void**)&images_d, 9216*1*sizeof(float)); if (err != hipSuccess) EXIT_MSG("ERROR ~"); err = hipMemcpy(images_d, images_h, (size_t)(9216*1*sizeof(float)), hipMemcpyHostToDevice); if (err != hipSuccess) EXIT_MSG("ERROR ~"); err = hipMalloc((void**)&filters_d , 1600*64*sizeof(float)); if (err != hipSuccess) EXIT_MSG("ERROR ~"); err = hipMemcpy(filters_d, filters_h, (size_t)(1600*64*sizeof(float)), hipMemcpyHostToDevice); if (err != hipSuccess) EXIT_MSG("ERROR ~"); float* targets_h = readMatrix(filename_targetInit, 9216, 1); float* targets_d = NULL; err = hipMalloc((void**)&targets_d, 9216*1*sizeof(float)); if (err != hipSuccess) EXIT_MSG("ERROR ~"); err = hipMemcpy(targets_d, targets_h, (size_t)(9216*1*sizeof(float)), hipMemcpyHostToDevice); if (err != hipSuccess) EXIT_MSG("ERROR ~"); //~~~~~~~~~~~~~~~~~~~~~~~~~~ int imgSizeY = 12; int numModulesY = 12; int numModulesX = 12; int paddingStart = -2; int moduleStride = 1; int numImgColors = 64; int numGroups = 1; float 
scaleTargets = 0.0; float scaleOutput = 1.0; bool conv = true; int numFilterColors = numImgColors / numGroups; int numFilters = 64; int numModules = numModulesY * numModulesX; int numImages = 1; int imgPixels = 9216/numImgColors; int imgSizeX = imgPixels / imgSizeY; int filterModuleMult = conv ? 1 : numModules; assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 2 == 0); assert(numFilters % (16 * numGroups) == 0); assert(numImgColors % numGroups == 0); assert(9216 == imgPixels * numImgColors); assert(imgSizeY * imgSizeX == imgPixels); int numFiltersPerGroup = numFilters / numGroups; int imgStride = 1; // images does not need to be a contiguous matrix int filterPixels = 1600 / (filterModuleMult * numFilterColors); int filterSize = int(sqrt(filterPixels)); assert(filterSize * filterSize == filterPixels); assert(1600 == filterModuleMult * numFilterColors * filterPixels); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0); assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); assert(moduleStride <= filterSize); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkImgBounds = numImages % (32*imgsPerThread) != 0; if (scaleTargets == 0) { ;//targets.resize(numFilters * numModules, numImages); } else { assert(9216 == numFilters * numModules); assert(1 == numImages); } assert(imgSizeY == 12 && imgSizeX == 12 && numModulesY == 12 && numModulesX == 12); assert(filterSize == 5 && paddingStart == -2); assert(numFilters == 64); assert(numImgColors == 64); assert(numGroups == 1);//sure assert(scaleTargets == 0.0);//sure assert(scaleOutput == 1.0);//sure assert(numFiltersPerGroup == 64);//sure assert(imgsPerThread == 1 && checkImgBounds == true); if(!(numImages == 1 && imgStride == 1)) EXIT_MSG("ERROR !! this should no mini version(i.e. 
--mini=1)"); //~~~~~~~~~~~~~~~~~~~~~~~~~~ dim3 blocks (64, 1, 1); dim3 threads(144, 1, 1); //hipFuncCachePreferNone//hipFuncCachePreferShared//hipFuncCachePreferL1 for (int i = 0; i < REPEAT; ++i) { hipFuncSetCacheConfig(optiConvFilter <1>, hipFuncCachePreferShared); hipLaunchKernelGGL(( optiConvFilter <1>), dim3(blocks), dim3(threads), 0, 0, images_d, filters_d, targets_d, numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, scaleTargets, scaleOutput, conv); } err = hipMemcpy(targets_h, targets_d, (size_t)(9216*1*sizeof(float)), hipMemcpyDeviceToHost); if (err != hipSuccess) EXIT_MSG("ERROR ~"); print_result(targets_h, 9216, 1,9216, 1, 1); } template <int T_numImages> __global__ void optiConvFilter(float* images, float* filters, float* targets, const int numImages, const int numFilters, //128(1), 64 const int imgSizeY, const int imgSizeX, //12, 12 const int filterSize, const int paddingStart, //5, -2 const int moduleStride, //1 const int numModulesY, const int numModulesX, //12, 12 const int imgStride, const int numImgColors, //128(1), 64 const float scaleTargets, const float scaleOutputs, //0.0, 1.0 const bool conv)//true { const int nMaxConnPerNeuron = (filterSize*filterSize) * numImgColors; const int neuronIdx = blockIdx.x;//blockDim.x*blockIdx.x + threadIdx.x;//0-63 const int tarIdx = blockDim.x*(neuronIdx) + threadIdx.x;//blockDim.y*blockIdx.y + threadIdx.y;//0-9216 const int imgPixels = imgSizeX*imgSizeY; //144 const unsigned nLoads = nMaxConnPerNeuron; float privProd; float a=0.0; __shared__ float shW[1600]; const unsigned loc = neuronIdx;//for first weight in that neuron privProd = 0.0; for (int i = 0; i < (nMaxConnPerNeuron-1)/imgPixels + 1; ++i){ if(blockDim.x*i + threadIdx.x < nMaxConnPerNeuron) shW[blockDim.x * i + threadIdx.x] = filters[ loc + numFilters*(blockDim.x * i + threadIdx.x) ]; } __syncthreads(); int center = tarIdx % imgPixels;//img center : neuronIdx w/o color info int upperLeft = center - ((filterSize)/2) - imgSizeX*((filterSize)/2); //check padding condition // * 1 * // 2 3 // * 4 * int padding1 = 0; int padding2 = 0; int padding3 = 0; int padding4 = 0; for (int i = 0; i < filterSize/2; ++i){ for(int j = 0; j < i+1 ; j ++){ padding1 += (int)(center/imgSizeX == j); padding2 += (int)(center%imgSizeX == j); padding3 += (int)(((imgSizeX - 1) - center%imgSizeX) == j); padding4 += (int)(((imgSizeX - 1) - center/imgSizeX) == j); } } //~~~~~~~~~~~~~~~iterate for 1(T_numImages) img~~~~~~~~~~~ for (int li = 0; li < nLoads; li+= 1){ //0-1599 int actLoadIdx = li; int c = actLoadIdx/(filterSize*filterSize); //color int y = (actLoadIdx%(filterSize*filterSize))/filterSize; // y idx in 5x5 filter int x = (actLoadIdx%(filterSize*filterSize))%filterSize; // x idx in 5x5 filter //w = filters[ loc + numFilters*actLoadIdx ]; a = 0.0; if(y >= padding1 && (filterSize - 1) - y >= padding4 ){ if(x >= padding2 && (filterSize - 1) - x >= padding3 ){ a = images[(c*(imgPixels) + upperLeft + y*imgSizeX + x)*numImages + 0]; } } privProd += a * shW[actLoadIdx]; } //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /* * Store Phase */ targets[tarIdx * numImages] = privProd; } float * readMatrix(char * filename, int nRows, int nCols) { float tmp; FILE *fp; float *full; full = (float *) malloc (nRows*nCols*sizeof(full[0])); if((fp = fopen(filename, "r+")) == NULL) { printf("No such file: %s\n", filename); exit(1); } for (int i = 0; i < nRows; ++i) { for (int j = 0; j < nCols; ++j) { int ret = fscanf(fp, "%f ", 
&tmp); if(ret == 1){ full[i*nCols + j] = tmp; //printf("%.15f\n", tmp); } else if(errno != 0) { perror("scanf:"); break; } else if(ret == EOF) { //printf("finish.\n"); break; } else { printf("No match.\n"); exit(0); } } } return full;//full_dev } void print_result(float* result, int mR, int nR, int real_mR, int real_nR, int isRowMajor){ //printf("$$$$$$$$$ RESULT $$$$$$$$$$$$$$$\n"); for (int y = 0; y < mR; ++y) { for (int x = 0; x < nR; ++x) { if(x<real_nR && y<real_mR){ //if(result[nR*y + x] != -1.0) printf("%.15f ", result[nR*y + x]); } } printf("\n"); } //printf("============END==========\n"); }
77aa200897361442014d0057af286f08c02370a3.cu
//convOpti.cu /* nvcc convOpti.cu -o convOpti.exe nvcc -ccbin /home/seungbin/Desktop/llvm/llvm-install/bin/clang++ -Xcompiler ,\"-emit-llvm\",\"-std=c++11\", convOpti.cu -c -o convOpti.bc -D__STRICT_ANSI__ nvcc -ccbin /home/seungbin/Desktop/llvm/llvm-install/bin/clang++ -Xcompiler ,\"-emit-llvm\",\"-D__cplusplus=201103L\", convOpti.cu -c -o convOpti.bc -D__STRICT_ANSI__ -I./include -I./include/common -I./include/cudaconv2 -I./include/nvmatrix -I/home/seungbin/npu/tools/cudnn-6.5-linux-x64-v2 -I./include/reorder -I. -I/usr/local/cuda/include -I/home/seungbin/NVIDIA_GPU_Computing_SDK/C/common/inc -I/home/seungbin/NVIDIA_GPU_Computing_SDK/shared//inc clang++ -c -emit-llvm -I/usr/bin/local/cuda/include convOpti.cu -o convOpti.bc */ //#include <convOpti.cuh> //#include <nvmatrix.cuh> #include <string.h> #include <time.h> /* clock_t, clock, CLOCKS_PER_SEC */ #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <assert.h> #include <math.h> #include "cuda.h" #include <device_launch_parameters.h> #include <cuda_runtime_api.h> #include <vector_types.h> #include <host_defines.h> //#include <map> //#include <algorithm> #ifndef N_LIVING_NEURON #define N_LIVING_NEURON 2027 // it's just for compiler option #endif #ifndef GRID_DIM_X #define GRID_DIM_X 64 // it's just for compiler option #endif #define BLOCK_SIZE 32 #define REPEAT 1000 #define EXIT_MSG(s) \ do { \ printf ("%s\n", s); \ fflush (stdout); \ exit(1); \ } while (0) float * readMatrix(char * filename, int nRows, int nCols); void print_result(float* result, int mR, int nR, int real_mR, int real_nR, int isRowMajor); template <int T_numImages> __global__ void optiConvFilter(float* images, float* filters, float* targets, const int numImages, const int numFilters, //128(1), 64 const int imgSizeY, const int imgSizeX, //12, 12 const int filterSize, const int paddingStart, //5, -2 const int moduleStride, //1 const int numModulesY, const int numModulesX, //12, 12 const int imgStride, const int numImgColors, //128(1), 64 const float scaleTargets, const float scaleOutputs, //0.0, 1.0 const bool conv);//true int main() { cudaError_t err; char filename_img[1024] = "image.test.data";//"image.one.data"; char filename_filter[1024] = "filter.test.data";//"filter.zero.data"; char filename_targetInit[1024] = "targetInit.test.data"; float *images_h = readMatrix(filename_img, 9216, 1); float *images_d = NULL; float *filters_h = readMatrix(filename_filter, 1600, 64); float *filters_d = NULL; err = cudaMalloc((void**)&images_d, 9216*1*sizeof(float)); if (err != cudaSuccess) EXIT_MSG("ERROR ~"); err = cudaMemcpy(images_d, images_h, (size_t)(9216*1*sizeof(float)), cudaMemcpyHostToDevice); if (err != cudaSuccess) EXIT_MSG("ERROR ~"); err = cudaMalloc((void**)&filters_d , 1600*64*sizeof(float)); if (err != cudaSuccess) EXIT_MSG("ERROR ~"); err = cudaMemcpy(filters_d, filters_h, (size_t)(1600*64*sizeof(float)), cudaMemcpyHostToDevice); if (err != cudaSuccess) EXIT_MSG("ERROR ~"); float* targets_h = readMatrix(filename_targetInit, 9216, 1); float* targets_d = NULL; err = cudaMalloc((void**)&targets_d, 9216*1*sizeof(float)); if (err != cudaSuccess) EXIT_MSG("ERROR ~"); err = cudaMemcpy(targets_d, targets_h, (size_t)(9216*1*sizeof(float)), cudaMemcpyHostToDevice); if (err != cudaSuccess) EXIT_MSG("ERROR ~"); //~~~~~~~~~~~~~~~~~~~~~~~~~~ int imgSizeY = 12; int numModulesY = 12; int numModulesX = 12; int paddingStart = -2; int moduleStride = 1; int numImgColors = 64; int numGroups = 1; float scaleTargets = 0.0; float scaleOutput = 1.0; bool conv = true; int 
numFilterColors = numImgColors / numGroups; int numFilters = 64; int numModules = numModulesY * numModulesX; int numImages = 1; int imgPixels = 9216/numImgColors; int imgSizeX = imgPixels / imgSizeY; int filterModuleMult = conv ? 1 : numModules; assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 2 == 0); assert(numFilters % (16 * numGroups) == 0); assert(numImgColors % numGroups == 0); assert(9216 == imgPixels * numImgColors); assert(imgSizeY * imgSizeX == imgPixels); int numFiltersPerGroup = numFilters / numGroups; int imgStride = 1; // images does not need to be a contiguous matrix int filterPixels = 1600 / (filterModuleMult * numFilterColors); int filterSize = int(sqrt(filterPixels)); assert(filterSize * filterSize == filterPixels); assert(1600 == filterModuleMult * numFilterColors * filterPixels); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0); assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); assert(moduleStride <= filterSize); int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; bool checkImgBounds = numImages % (32*imgsPerThread) != 0; if (scaleTargets == 0) { ;//targets.resize(numFilters * numModules, numImages); } else { assert(9216 == numFilters * numModules); assert(1 == numImages); } assert(imgSizeY == 12 && imgSizeX == 12 && numModulesY == 12 && numModulesX == 12); assert(filterSize == 5 && paddingStart == -2); assert(numFilters == 64); assert(numImgColors == 64); assert(numGroups == 1);//sure assert(scaleTargets == 0.0);//sure assert(scaleOutput == 1.0);//sure assert(numFiltersPerGroup == 64);//sure assert(imgsPerThread == 1 && checkImgBounds == true); if(!(numImages == 1 && imgStride == 1)) EXIT_MSG("ERROR !! this should no mini version(i.e. 
--mini=1)"); //~~~~~~~~~~~~~~~~~~~~~~~~~~ dim3 blocks (64, 1, 1); dim3 threads(144, 1, 1); //cudaFuncCachePreferNone//cudaFuncCachePreferShared//cudaFuncCachePreferL1 for (int i = 0; i < REPEAT; ++i) { cudaFuncSetCacheConfig(optiConvFilter <1>, cudaFuncCachePreferShared); optiConvFilter <1><<<blocks, threads>>>(images_d, filters_d, targets_d, numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, scaleTargets, scaleOutput, conv); } err = cudaMemcpy(targets_h, targets_d, (size_t)(9216*1*sizeof(float)), cudaMemcpyDeviceToHost); if (err != cudaSuccess) EXIT_MSG("ERROR ~"); print_result(targets_h, 9216, 1,9216, 1, 1); } template <int T_numImages> __global__ void optiConvFilter(float* images, float* filters, float* targets, const int numImages, const int numFilters, //128(1), 64 const int imgSizeY, const int imgSizeX, //12, 12 const int filterSize, const int paddingStart, //5, -2 const int moduleStride, //1 const int numModulesY, const int numModulesX, //12, 12 const int imgStride, const int numImgColors, //128(1), 64 const float scaleTargets, const float scaleOutputs, //0.0, 1.0 const bool conv)//true { const int nMaxConnPerNeuron = (filterSize*filterSize) * numImgColors; const int neuronIdx = blockIdx.x;//blockDim.x*blockIdx.x + threadIdx.x;//0-63 const int tarIdx = blockDim.x*(neuronIdx) + threadIdx.x;//blockDim.y*blockIdx.y + threadIdx.y;//0-9216 const int imgPixels = imgSizeX*imgSizeY; //144 const unsigned nLoads = nMaxConnPerNeuron; float privProd; float a=0.0; __shared__ float shW[1600]; const unsigned loc = neuronIdx;//for first weight in that neuron privProd = 0.0; for (int i = 0; i < (nMaxConnPerNeuron-1)/imgPixels + 1; ++i){ if(blockDim.x*i + threadIdx.x < nMaxConnPerNeuron) shW[blockDim.x * i + threadIdx.x] = filters[ loc + numFilters*(blockDim.x * i + threadIdx.x) ]; } __syncthreads(); int center = tarIdx % imgPixels;//img center : neuronIdx w/o color info int upperLeft = center - ((filterSize)/2) - imgSizeX*((filterSize)/2); //check padding condition // * 1 * // 2 3 // * 4 * int padding1 = 0; int padding2 = 0; int padding3 = 0; int padding4 = 0; for (int i = 0; i < filterSize/2; ++i){ for(int j = 0; j < i+1 ; j ++){ padding1 += (int)(center/imgSizeX == j); padding2 += (int)(center%imgSizeX == j); padding3 += (int)(((imgSizeX - 1) - center%imgSizeX) == j); padding4 += (int)(((imgSizeX - 1) - center/imgSizeX) == j); } } //~~~~~~~~~~~~~~~iterate for 1(T_numImages) img~~~~~~~~~~~ for (int li = 0; li < nLoads; li+= 1){ //0-1599 int actLoadIdx = li; int c = actLoadIdx/(filterSize*filterSize); //color int y = (actLoadIdx%(filterSize*filterSize))/filterSize; // y idx in 5x5 filter int x = (actLoadIdx%(filterSize*filterSize))%filterSize; // x idx in 5x5 filter //w = filters[ loc + numFilters*actLoadIdx ]; a = 0.0; if(y >= padding1 && (filterSize - 1) - y >= padding4 ){ if(x >= padding2 && (filterSize - 1) - x >= padding3 ){ a = images[(c*(imgPixels) + upperLeft + y*imgSizeX + x)*numImages + 0]; } } privProd += a * shW[actLoadIdx]; } //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /* * Store Phase */ targets[tarIdx * numImages] = privProd; } float * readMatrix(char * filename, int nRows, int nCols) { float tmp; FILE *fp; float *full; full = (float *) malloc (nRows*nCols*sizeof(full[0])); if((fp = fopen(filename, "r+")) == NULL) { printf("No such file: %s\n", filename); exit(1); } for (int i = 0; i < nRows; ++i) { for (int j = 0; j < nCols; ++j) { int ret = fscanf(fp, "%f ", &tmp); if(ret == 1){ 
full[i*nCols + j] = tmp; //printf("%.15f\n", tmp); } else if(errno != 0) { perror("scanf:"); break; } else if(ret == EOF) { //printf("finish.\n"); break; } else { printf("No match.\n"); exit(0); } } } return full;//full_dev } void print_result(float* result, int mR, int nR, int real_mR, int real_nR, int isRowMajor){ //printf("$$$$$$$$$ RESULT $$$$$$$$$$$$$$$\n"); for (int y = 0; y < mR; ++y) { for (int x = 0; x < nR; ++x) { if(x<real_nR && y<real_mR){ //if(result[nR*y + x] != -1.0) printf("%.15f ", result[nR*y + x]); } } printf("\n"); } //printf("============END==========\n"); }
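optiConvFilter above stages one output neuron's 1600 filter weights into shared memory with a strided cooperative load, synchronises, and only then lets each thread run its dot product; the host also calls cudaFuncSetCacheConfig(..., cudaFuncCachePreferShared) to favour shared memory over L1. A stripped-down sketch of that staging pattern under simplified assumptions (one weight vector per block, one independent input window per thread; all names and sizes are invented for illustration):

#include <cuda_runtime.h>

#define N_WEIGHTS 1600   // one neuron's weights, reused by every thread in a block

// One block per output neuron: stage that neuron's weights into shared
// memory with a strided cooperative load, then let each thread compute a
// dot product against its own input window.
__global__ void dot_with_staged_weights(const float* weights,
                                        const float* inputs,
                                        float* out) {
    __shared__ float w[N_WEIGHTS];
    const float* wsrc = weights + blockIdx.x * N_WEIGHTS;
    for (int i = threadIdx.x; i < N_WEIGHTS; i += blockDim.x)
        w[i] = wsrc[i];          // thread t loads elements t, t+blockDim.x, ...
    __syncthreads();

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const float* x = inputs + (size_t)tid * N_WEIGHTS;
    float acc = 0.0f;
    for (int i = 0; i < N_WEIGHTS; ++i)
        acc += w[i] * x[i];
    out[tid] = acc;
}

int main() {
    const int blocks = 4, threads = 128;
    float *d_w, *d_x, *d_out;
    cudaMalloc(&d_w, (size_t)blocks * N_WEIGHTS * sizeof(float));
    cudaMalloc(&d_x, (size_t)blocks * threads * N_WEIGHTS * sizeof(float));
    cudaMalloc(&d_out, blocks * threads * sizeof(float));
    cudaMemset(d_w, 0, (size_t)blocks * N_WEIGHTS * sizeof(float));
    cudaMemset(d_x, 0, (size_t)blocks * threads * N_WEIGHTS * sizeof(float));

    // Favour shared memory over L1 for this kernel, as the benchmark above does.
    cudaFuncSetCacheConfig(dot_with_staged_weights, cudaFuncCachePreferShared);
    dot_with_staged_weights<<<blocks, threads>>>(d_w, d_x, d_out);
    cudaDeviceSynchronize();
    cudaFree(d_w); cudaFree(d_x); cudaFree(d_out);
    return 0;
}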
d40e342b3b26274309bbeea14d715811ab911b58.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <exception> #include <fstream> #include <vector> #include <glm/vec3.hpp> #include <hiprand/hiprand.h> #include "utils/cuda_memory.h" #include "utils/logger.h" #include "utils/png.h" #include "Display.h" #include "RayTracer.h" #include "Sphere.h" using namespace std; using namespace glm; constexpr int display_width = 1920; constexpr int display_height = 1080; ostream& operator<<(ostream& output, const vec3& vector) { return output << vector.x << " " << vector.y << " " << vector.z; } CUDA::unique_ptr<float> GenerateRandomNumbers(const size_t size) { hiprandGenerator_t generator; hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(generator, 123); CUDA::unique_ptr<float> device_random_numbers(size); hiprandGenerateUniform( generator, device_random_numbers.get_device_pointer().get(), device_random_numbers.get_device_pointer().size()); return move(device_random_numbers); } CUDA::unique_ptr<Sphere> CreateWorld() { vector<Sphere> spheres = { Sphere{ 0.3, { 0, 0, -1} , Material::Scatter({1,1,1})}, // big red sphere Sphere{ 20, { 0, -20.30, -1} , Material::Scatter({0.1,1,0.1})}, // floor Sphere{ 0.2, { 0.6, 0, -1.1} , Material::Metal({1,1,0}, 0.1)}, Sphere{ 0.06, { 0.35,-0.2, -1.1} , Material::Light({1,1,1})}, Sphere{ 0.02, { 0.35,-0.25, -0.8} , Material::Light({1,0,1})}, Sphere{ 0.05, { 0.25,-0.25, -0.9} , Material::Metal({1,1,1}, 1)}, Sphere{ 0.04, { 0.15,-0.25, -0.8} , Material::Light({0,1,1})}, Sphere{ 0.1, { 0.5, -0.3, -0.9} , Material::Metal({1,1,1}, 0.9)}, Sphere{ 0.04, { 0.3, -0.27, -0.7} , Material::Dielectric(1.5f)}, }; CUDA::unique_ptr<Sphere> device_spheres(spheres.size()); device_spheres.copy_from(spheres.data()); return move(device_spheres); } int main() { try { Display display{display_width, display_height}; const auto world = CreateWorld(); const auto random_numbers = GenerateRandomNumbers(display_width * display_height); dim3 threads(16, 16); dim3 blocks(display_width / threads.x + 1, display_height / threads.y + 1); Logger::info() << "Rendering" << endl; // RENDER hipLaunchKernelGGL(( RayTracer::RenderScreen), dim3(blocks), dim3(threads), 0, 0, world.get_device_pointer(), random_numbers.get_device_pointer(), display.GetDisplay(), display_width, display_height ); const auto image = display.GetImage(); Logger::info() << "Exporting image to PNG" << endl; PNG::WriteImage("output.png", display_width, display_height, image); Logger::info() << "Done!" << endl; } catch (const exception& e) { Logger::fatal() << e.what() << endl; } return 0; }
d40e342b3b26274309bbeea14d715811ab911b58.cu
#include <iostream> #include <exception> #include <fstream> #include <vector> #include <glm/vec3.hpp> #include <curand.h> #include "utils/cuda_memory.h" #include "utils/logger.h" #include "utils/png.h" #include "Display.h" #include "RayTracer.h" #include "Sphere.h" using namespace std; using namespace glm; constexpr int display_width = 1920; constexpr int display_height = 1080; ostream& operator<<(ostream& output, const vec3& vector) { return output << vector.x << " " << vector.y << " " << vector.z; } CUDA::unique_ptr<float> GenerateRandomNumbers(const size_t size) { curandGenerator_t generator; curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(generator, 123); CUDA::unique_ptr<float> device_random_numbers(size); curandGenerateUniform( generator, device_random_numbers.get_device_pointer().get(), device_random_numbers.get_device_pointer().size()); return move(device_random_numbers); } CUDA::unique_ptr<Sphere> CreateWorld() { vector<Sphere> spheres = { Sphere{ 0.3, { 0, 0, -1} , Material::Scatter({1,1,1})}, // big red sphere Sphere{ 20, { 0, -20.30, -1} , Material::Scatter({0.1,1,0.1})}, // floor Sphere{ 0.2, { 0.6, 0, -1.1} , Material::Metal({1,1,0}, 0.1)}, Sphere{ 0.06, { 0.35,-0.2, -1.1} , Material::Light({1,1,1})}, Sphere{ 0.02, { 0.35,-0.25, -0.8} , Material::Light({1,0,1})}, Sphere{ 0.05, { 0.25,-0.25, -0.9} , Material::Metal({1,1,1}, 1)}, Sphere{ 0.04, { 0.15,-0.25, -0.8} , Material::Light({0,1,1})}, Sphere{ 0.1, { 0.5, -0.3, -0.9} , Material::Metal({1,1,1}, 0.9)}, Sphere{ 0.04, { 0.3, -0.27, -0.7} , Material::Dielectric(1.5f)}, }; CUDA::unique_ptr<Sphere> device_spheres(spheres.size()); device_spheres.copy_from(spheres.data()); return move(device_spheres); } int main() { try { Display display{display_width, display_height}; const auto world = CreateWorld(); const auto random_numbers = GenerateRandomNumbers(display_width * display_height); dim3 threads(16, 16); dim3 blocks(display_width / threads.x + 1, display_height / threads.y + 1); Logger::info() << "Rendering" << endl; // RENDER RayTracer::RenderScreen<<<blocks, threads>>>( world.get_device_pointer(), random_numbers.get_device_pointer(), display.GetDisplay(), display_width, display_height ); const auto image = display.GetImage(); Logger::info() << "Exporting image to PNG" << endl; PNG::WriteImage("output.png", display_width, display_height, image); Logger::info() << "Done!" << endl; } catch (const exception& e) { Logger::fatal() << e.what() << endl; } return 0; }
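The ray-tracer pair above also shows that hipify renames the cuRAND host API one-for-one: curandCreateGenerator becomes hiprandCreateGenerator, curandGenerateUniform becomes hiprandGenerateUniform, and CURAND_RNG_PSEUDO_DEFAULT becomes HIPRAND_RNG_PSEUDO_DEFAULT. A minimal CUDA-side sketch of that host-API usage on its own, filling a device buffer with uniform floats (build with nvcc and link -lcurand):

#include <cstdio>
#include <cuda_runtime.h>
#include <curand.h>

int main() {
    const size_t n = 8;
    float* d_rand;
    cudaMalloc(&d_rand, n * sizeof(float));

    // Host-side generator that writes directly into device memory.
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 123ULL);
    curandGenerateUniform(gen, d_rand, n);   // uniform floats in (0, 1]

    float h_rand[n];
    cudaMemcpy(h_rand, d_rand, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (size_t i = 0; i < n; ++i) printf("%f\n", h_rand[i]);

    curandDestroyGenerator(gen);
    cudaFree(d_rand);
    return 0;
}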
47989a8aa9414fb76507d7786f61e9f52732e1a2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // include files // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "helper_cuda.h" // // kernel routine // __global__ void my_first_kernel(float *x) { int tid = threadIdx.x + blockDim.x*blockIdx.x; x[tid] = (float) threadIdx.x; } // // main code // int main(int argc, const char **argv) { float *h_x, *d_x; int nblocks, nthreads, nsize, n; // initialise card findCudaDevice(argc, argv); // set number of blocks, and threads per block nblocks = 2; nthreads = 8; nsize = nblocks*nthreads ; // allocate memory for array h_x = (float *)malloc(nsize*sizeof(float)); checkCudaErrors(hipMalloc((void **)&d_x, nsize*sizeof(float))); // execute kernel hipLaunchKernelGGL(( my_first_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_x); getLastCudaError("my_first_kernel execution failed\n"); // copy back results and print them out checkCudaErrors( hipMemcpy(h_x,d_x,nsize*sizeof(float), hipMemcpyDeviceToHost) ); for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]); // free memory checkCudaErrors(hipFree(d_x)); free(h_x); // CUDA exit -- needed to flush printf write buffer hipDeviceReset(); return 0; }
47989a8aa9414fb76507d7786f61e9f52732e1a2.cu
// // include files // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "helper_cuda.h" // // kernel routine // __global__ void my_first_kernel(float *x) { int tid = threadIdx.x + blockDim.x*blockIdx.x; x[tid] = (float) threadIdx.x; } // // main code // int main(int argc, const char **argv) { float *h_x, *d_x; int nblocks, nthreads, nsize, n; // initialise card findCudaDevice(argc, argv); // set number of blocks, and threads per block nblocks = 2; nthreads = 8; nsize = nblocks*nthreads ; // allocate memory for array h_x = (float *)malloc(nsize*sizeof(float)); checkCudaErrors(cudaMalloc((void **)&d_x, nsize*sizeof(float))); // execute kernel my_first_kernel<<<nblocks,nthreads>>>(d_x); getLastCudaError("my_first_kernel execution failed\n"); // copy back results and print them out checkCudaErrors( cudaMemcpy(h_x,d_x,nsize*sizeof(float), cudaMemcpyDeviceToHost) ); for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]); // free memory checkCudaErrors(cudaFree(d_x)); free(h_x); // CUDA exit -- needed to flush printf write buffer cudaDeviceReset(); return 0; }
a77a9fec0e756f31bfa61aaf98e55647772b4b80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "DropOut.h" namespace cytonLib { Variable* DropOut::init(string tag_, Variable* x, Precision dropout) { tag=tag_; this->x=x; this->dropout=dropout; this->active=dropout>1e-6; this->localTestMode=testMode; if(active && ! localTestMode) { y.resize(*x); y.enlarge=false; } else { y.set(*x); } return &y; } __global__ void dropOut_forward_kernel(Precision* src, Precision* mask, Precision* des, Precision dropOut, int len) { int i=blockDim.x*blockIdx.x+threadIdx.x; if(i<len) { if(mask[i]<dropOut) { des[i]=0; } else { des[i]=src[i]/(1-dropOut); } } } __global__ void dropOut_backward_kernel(Precision* src, Precision* mask, Precision* des, Precision dropOut, int len) { int i=blockDim.x*blockIdx.x+threadIdx.x; if(i<len) { if(mask[i]<dropOut) { des[i]=0; } else { des[i]=src[i]/(1-dropOut); } } } void DropOut::forward() { if(active && !localTestMode) { if(!testMode) { mask.resize(x->ni, x->nj); mask.initRandomCurand(); y.resize(*x); int len=y.length(); assert(mask.length()==len); hipLaunchKernelGGL(( dropOut_forward_kernel), dim3(ceil(len,blockSize)),dim3(blockSize), 0, 0, x->data, mask.data, y.data, dropout, len); } else { y.copyFrom(*x); } } else { y.set(*x); } } void DropOut::backward() { if(active) { assert(!testMode); int len=x->length(); assert(mask.length()==len); hipLaunchKernelGGL(( dropOut_backward_kernel), dim3(ceil(len,blockSize)),dim3(blockSize), 0, 0, y.grad.data, mask.data, x->grad.data, dropout, len); } } } /* namespace cytonLib */
a77a9fec0e756f31bfa61aaf98e55647772b4b80.cu
/* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "DropOut.h" namespace cytonLib { Variable* DropOut::init(string tag_, Variable* x, Precision dropout) { tag=tag_; this->x=x; this->dropout=dropout; this->active=dropout>1e-6; this->localTestMode=testMode; if(active && ! localTestMode) { y.resize(*x); y.enlarge=false; } else { y.set(*x); } return &y; } __global__ void dropOut_forward_kernel(Precision* src, Precision* mask, Precision* des, Precision dropOut, int len) { int i=blockDim.x*blockIdx.x+threadIdx.x; if(i<len) { if(mask[i]<dropOut) { des[i]=0; } else { des[i]=src[i]/(1-dropOut); } } } __global__ void dropOut_backward_kernel(Precision* src, Precision* mask, Precision* des, Precision dropOut, int len) { int i=blockDim.x*blockIdx.x+threadIdx.x; if(i<len) { if(mask[i]<dropOut) { des[i]=0; } else { des[i]=src[i]/(1-dropOut); } } } void DropOut::forward() { if(active && !localTestMode) { if(!testMode) { mask.resize(x->ni, x->nj); mask.initRandomCurand(); y.resize(*x); int len=y.length(); assert(mask.length()==len); dropOut_forward_kernel<<<ceil(len,blockSize),blockSize>>>(x->data, mask.data, y.data, dropout, len); } else { y.copyFrom(*x); } } else { y.set(*x); } } void DropOut::backward() { if(active) { assert(!testMode); int len=x->length(); assert(mask.length()==len); dropOut_backward_kernel<<<ceil(len,blockSize),blockSize>>>(y.grad.data, mask.data, x->grad.data, dropout, len); } } } /* namespace cytonLib */
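DropOut above implements inverted dropout: an element is zeroed when its uniform mask value falls below the dropout rate p, and the survivors are scaled by 1/(1-p) so the expected activation is unchanged and no rescaling is needed at test time. A self-contained sketch of just that element-wise rule, taking the mask as an input instead of drawing it with cuRAND as the class does:

#include <cstdio>
#include <cuda_runtime.h>

// Inverted dropout: zero where mask < p, scale survivors by 1/(1-p).
__global__ void inverted_dropout(const float* x, const float* mask,
                                 float* y, float p, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = (mask[i] < p) ? 0.0f : x[i] / (1.0f - p);
}

int main() {
    const int n = 4;
    float h_x[n]    = {1.0f, 2.0f, 3.0f, 4.0f};
    float h_mask[n] = {0.1f, 0.9f, 0.4f, 0.7f};   // stand-in for uniform draws
    float h_y[n];

    float *d_x, *d_mask, *d_y;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMalloc(&d_mask, n * sizeof(float));
    cudaMalloc(&d_y, n * sizeof(float));
    cudaMemcpy(d_x, h_x, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mask, h_mask, n * sizeof(float), cudaMemcpyHostToDevice);

    inverted_dropout<<<1, 32>>>(d_x, d_mask, d_y, 0.5f, n);
    cudaMemcpy(h_y, d_y, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%f\n", h_y[i]);  // 0, 4, 0, 8

    cudaFree(d_x); cudaFree(d_mask); cudaFree(d_y);
    return 0;
}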
acc69eb5f21d3fdac7f328af01ce5da223d5f300.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "flexflow/ops/kernels/softmax_kernels.h" #include "flexflow/utils/cuda_helper.h" #include "flexflow/utils/hash_utils.h" namespace FlexFlow { // declare Legion names using Legion::Domain; SoftmaxMeta::SoftmaxMeta(FFHandler handler, Softmax const *softmax, Domain const &input_domain) : OpMeta(handler) { checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnSetTensorDescriptorFromDomain4SoftMax( inputTensor, input_domain, softmax->data_type)); dim = softmax->dim; profiling = softmax->profiling; std::strcpy(op_name, softmax->name); } namespace Kernels { namespace Softmax { template <typename DT> void forward_kernel_wrapper(SoftmaxMeta const *m, DT const *input_ptr, DT *output_ptr) { hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } Internal::forward_kernel(m, input_ptr, output_ptr, stream); if (m->profiling) { hipEventRecord(t_end, stream); checkCUDA(hipEventSynchronize(t_end)); // print_tensor<float>(acc_input.ptr, acc_input.rect.volume(), // "[Softmax:forward:input]"); print_tensor<float>(acc_output.ptr, // acc_output.rect.volume(), "[Softmax:forward:output]"); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); log_measure.debug( "%s [Softmax] forward time = %.2fms\n", m->op_name, elapsed); } } template <typename DT> void backward_kernel_wrapper(SoftmaxMeta const *m, DT *input_grad_ptr, DT const *output_grad_ptr, size_t num_elements) { hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } Internal::backward_kernel( input_grad_ptr, output_grad_ptr, num_elements, stream); if (m->profiling) { hipEventRecord(t_end, stream); checkCUDA(hipEventSynchronize(t_end)); // print_tensor<float>(acc_output_grad.ptr, acc_output_grad.rect.volume(), // "[Softmax:backward:output_grad]"); // print_tensor<float>(acc_input_grad.ptr, acc_input_grad.rect.volume(), // "[Softmax:backward:input_grad]"); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); log_measure.debug("Softmax backward time = %.2fms\n", elapsed); } } template void forward_kernel_wrapper<float>(SoftmaxMeta const *m, float const *input_ptr, float *output_ptr); template void forward_kernel_wrapper<half>(SoftmaxMeta const *m, half const *input_ptr, half *output_ptr); template void backward_kernel_wrapper<float>(SoftmaxMeta const *m, float *input_grad_ptr, float const *output_grad_ptr, size_t num_elements); template void backward_kernel_wrapper<half>(SoftmaxMeta const *m, half *input_grad_ptr, half const *output_grad_ptr, size_t 
num_elements); namespace Internal { template <typename DT> void forward_kernel(SoftmaxMeta const *m, DT const *input_ptr, DT *output_ptr, hipStream_t stream) { checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); float alpha = 1.0f, beta = 0.0f; checkCUDNN(cudnnSoftmaxForward(m->handle.dnn, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, m->inputTensor, input_ptr, &beta, m->inputTensor, output_ptr)); } template <typename DT> void backward_kernel(DT *input_grad_ptr, DT const *output_grad_ptr, size_t num_elements, hipStream_t stream) { checkCUDA(hipMemcpyAsync(input_grad_ptr, output_grad_ptr, num_elements * sizeof(DT), hipMemcpyDeviceToDevice, stream)); } } // namespace Internal } // namespace Softmax } // namespace Kernels } // namespace FlexFlow
acc69eb5f21d3fdac7f328af01ce5da223d5f300.cu
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "flexflow/ops/kernels/softmax_kernels.h" #include "flexflow/utils/cuda_helper.h" #include "flexflow/utils/hash_utils.h" namespace FlexFlow { // declare Legion names using Legion::Domain; SoftmaxMeta::SoftmaxMeta(FFHandler handler, Softmax const *softmax, Domain const &input_domain) : OpMeta(handler) { checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnSetTensorDescriptorFromDomain4SoftMax( inputTensor, input_domain, softmax->data_type)); dim = softmax->dim; profiling = softmax->profiling; std::strcpy(op_name, softmax->name); } namespace Kernels { namespace Softmax { template <typename DT> void forward_kernel_wrapper(SoftmaxMeta const *m, DT const *input_ptr, DT *output_ptr) { cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } Internal::forward_kernel(m, input_ptr, output_ptr, stream); if (m->profiling) { cudaEventRecord(t_end, stream); checkCUDA(cudaEventSynchronize(t_end)); // print_tensor<float>(acc_input.ptr, acc_input.rect.volume(), // "[Softmax:forward:input]"); print_tensor<float>(acc_output.ptr, // acc_output.rect.volume(), "[Softmax:forward:output]"); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); log_measure.debug( "%s [Softmax] forward time = %.2fms\n", m->op_name, elapsed); } } template <typename DT> void backward_kernel_wrapper(SoftmaxMeta const *m, DT *input_grad_ptr, DT const *output_grad_ptr, size_t num_elements) { cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } Internal::backward_kernel( input_grad_ptr, output_grad_ptr, num_elements, stream); if (m->profiling) { cudaEventRecord(t_end, stream); checkCUDA(cudaEventSynchronize(t_end)); // print_tensor<float>(acc_output_grad.ptr, acc_output_grad.rect.volume(), // "[Softmax:backward:output_grad]"); // print_tensor<float>(acc_input_grad.ptr, acc_input_grad.rect.volume(), // "[Softmax:backward:input_grad]"); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); log_measure.debug("Softmax backward time = %.2fms\n", elapsed); } } template void forward_kernel_wrapper<float>(SoftmaxMeta const *m, float const *input_ptr, float *output_ptr); template void forward_kernel_wrapper<half>(SoftmaxMeta const *m, half const *input_ptr, half *output_ptr); template void backward_kernel_wrapper<float>(SoftmaxMeta const *m, float *input_grad_ptr, float const *output_grad_ptr, size_t num_elements); template void backward_kernel_wrapper<half>(SoftmaxMeta const *m, half *input_grad_ptr, half const *output_grad_ptr, size_t num_elements); namespace Internal { template 
<typename DT> void forward_kernel(SoftmaxMeta const *m, DT const *input_ptr, DT *output_ptr, cudaStream_t stream) { checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); float alpha = 1.0f, beta = 0.0f; checkCUDNN(cudnnSoftmaxForward(m->handle.dnn, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, m->inputTensor, input_ptr, &beta, m->inputTensor, output_ptr)); } template <typename DT> void backward_kernel(DT *input_grad_ptr, DT const *output_grad_ptr, size_t num_elements, cudaStream_t stream) { checkCUDA(cudaMemcpyAsync(input_grad_ptr, output_grad_ptr, num_elements * sizeof(DT), cudaMemcpyDeviceToDevice, stream)); } } // namespace Internal } // namespace Softmax } // namespace Kernels } // namespace FlexFlow
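The profiling branch in forward_kernel_wrapper and backward_kernel_wrapper above brackets each launch with a pair of cudaEvent_t records and reads the elapsed time after synchronising on the stop event. A minimal sketch of that event-timing pattern on the default stream, with a throwaway kernel standing in for the real softmax call:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy_work(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    float* d_x;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    // Events are recorded into the stream, so the measured interval covers
    // exactly the work enqueued between them.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    busy_work<<<(n + 255) / 256, 256>>>(d_x, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // wait until the stop event has completed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time = %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}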
b885e18d538d01155120654131fdb0288c2ca250.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> using namespace std; __global__ void add(int *a, const int *b){ int i = blockIdx.x; a[i] += b[i]; } int main(){ const int N = 10; // number of elements int *a, *b, *temp, i; // malloc HOST memory for temp temp = new int [N]; // malloc DEVICE memory for a, b hipMalloc(&a, N*sizeof(int)); hipMalloc(&b, N*sizeof(int)); // set a's values: a[i] = i for(i=0;i<N;i++) temp[i] = i; hipMemcpy(a, temp, N*sizeof(int), hipMemcpyHostToDevice); // set b's values: b[i] = 2*i for(i=0;i<N;i++) temp[i] = 2*i; hipMemcpy(b, temp, N*sizeof(int), hipMemcpyHostToDevice); // calculate a[i] += b[i] in GPU hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, a, b); // show a's values hipMemcpy(temp, a, N*sizeof(int), hipMemcpyDeviceToHost); for(i=0;i<N;i++){ cout << temp[i] << endl; } // free HOST & DEVICE memory delete [] temp; hipFree(a); hipFree(b); }
b885e18d538d01155120654131fdb0288c2ca250.cu
#include <iostream> #include <cuda.h> using namespace std; __global__ void add(int *a, const int *b){ int i = blockIdx.x; a[i] += b[i]; } int main(){ const int N = 10; // number of elements int *a, *b, *temp, i; // malloc HOST memory for temp temp = new int [N]; // malloc DEVICE memory for a, b cudaMalloc(&a, N*sizeof(int)); cudaMalloc(&b, N*sizeof(int)); // set a's values: a[i] = i for(i=0;i<N;i++) temp[i] = i; cudaMemcpy(a, temp, N*sizeof(int), cudaMemcpyHostToDevice); // set b's values: b[i] = 2*i for(i=0;i<N;i++) temp[i] = 2*i; cudaMemcpy(b, temp, N*sizeof(int), cudaMemcpyHostToDevice); // calculate a[i] += b[i] in GPU add<<<N,1>>>(a, b); // show a's values cudaMemcpy(temp, a, N*sizeof(int), cudaMemcpyDeviceToHost); for(i=0;i<N;i++){ cout << temp[i] << endl; } // free HOST & DEVICE memory delete [] temp; cudaFree(a); cudaFree(b); }
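For kernel launches, the rewrite hipify applies in every pair here is purely syntactic: add<<<N,1>>>(a, b) in the .cu file becomes hipLaunchKernelGGL(add, dim3(N), dim3(1), 0, 0, a, b) in the .hip file, with the shared-memory size and stream spelled out as the third and fourth arguments. A worked instance of the same mapping with a small hypothetical kernel (the HIP form is shown in a comment):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float* x, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

int main() {
    const int n = 8;
    float h[n] = {0, 1, 2, 3, 4, 5, 6, 7};
    float* d;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);

    // CUDA launch syntax; hipify rewrites this line roughly as
    //   hipLaunchKernelGGL(scale, dim3(1), dim3(32), 0, 0, d, 3.0f, n);
    scale<<<1, 32>>>(d, 3.0f, n);

    cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%f\n", h[i]);   // 0, 3, 6, ..., 21
    cudaFree(d);
    return 0;
}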
a855338b62acfb17aa7fca4e289256b99be3641a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "file1.h" #include "file2.h" result_type __device__ file1_func(int x); result_type_dynamic __device__ file2_func(int x); static __global__ void file3_kernel(result_type* r, int x) { *r = file1_func(x); result_type_dynamic rd = file2_func(x); } int file3_launch_kernel(int x) { result_type* r; hipError_t err = hipMallocManaged(&r, sizeof(result_type)); if(err != hipSuccess) { std::cerr << "file3_launch_kernel: hipMallocManaged failed: " << hipGetErrorString(err) << std::endl; return x; } hipLaunchKernelGGL(( file3_kernel) , dim3(1),dim3(1), 0, 0, r,x); err = hipGetLastError(); if(err != hipSuccess) { std::cerr << "file3_kernel [SYNC] failed: " << hipGetErrorString(err) << std::endl; return x; } err = hipDeviceSynchronize(); if(err != hipSuccess) { std::cerr << "file3_kernel [ASYNC] failed: " << hipGetErrorString(hipGetLastError()) << std::endl; return x; } int result = r->sum; err = hipFree(r); if(err != hipSuccess) { std::cerr << "file3_launch_kernel: hipFree failed: " << hipGetErrorString(err) << std::endl; return x; } return result; }
a855338b62acfb17aa7fca4e289256b99be3641a.cu
#include <iostream> #include "file1.h" #include "file2.h" result_type __device__ file1_func(int x); result_type_dynamic __device__ file2_func(int x); static __global__ void file3_kernel(result_type* r, int x) { *r = file1_func(x); result_type_dynamic rd = file2_func(x); } int file3_launch_kernel(int x) { result_type* r; cudaError_t err = cudaMallocManaged(&r, sizeof(result_type)); if(err != cudaSuccess) { std::cerr << "file3_launch_kernel: cudaMallocManaged failed: " << cudaGetErrorString(err) << std::endl; return x; } file3_kernel <<<1,1>>> (r,x); err = cudaGetLastError(); if(err != cudaSuccess) { std::cerr << "file3_kernel [SYNC] failed: " << cudaGetErrorString(err) << std::endl; return x; } err = cudaDeviceSynchronize(); if(err != cudaSuccess) { std::cerr << "file3_kernel [ASYNC] failed: " << cudaGetErrorString(cudaGetLastError()) << std::endl; return x; } int result = r->sum; err = cudaFree(r); if(err != cudaSuccess) { std::cerr << "file3_launch_kernel: cudaFree failed: " << cudaGetErrorString(err) << std::endl; return x; } return result; }
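// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file pair above): file3 calls __device__
// functions (file1_func, file2_func) that are only declared here and defined in
// other translation units, which requires relocatable device code, e.g.
// building with "nvcc -rdc=true". The standalone program below reproduces the
// unified-memory round trip from file3_launch_kernel in one file so it compiles
// on its own; result_type here is a stand-in struct, not the project's type.
// ---------------------------------------------------------------------------
#include <iostream>
#include <cuda_runtime.h>

struct result_type { int sum; };               // placeholder for the real type

__device__ result_type make_result(int x) {    // stand-in for file1_func
  result_type r;
  r.sum = x + x;
  return r;
}

__global__ void demo_kernel(result_type *r, int x) {
  *r = make_result(x);
}

int main() {
  result_type *r = nullptr;
  if (cudaMallocManaged(&r, sizeof(result_type)) != cudaSuccess) return 1;
  demo_kernel<<<1, 1>>>(r, 21);
  if (cudaDeviceSynchronize() != cudaSuccess) return 1;  // sync before host access
  std::cout << "sum = " << r->sum << std::endl;          // host reads managed memory
  cudaFree(r);
  return 0;
}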
3d9fb2adbf4235ec6a82a12357295aebe75aab77.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE #define FILENAME(line) \ FILENAME_FOR_EXCEPTIONS_CUDA( \ "src/cuda-kernels/awkward_reduce_min.cu", line) #include "standard_parallel_algorithms.h" #include "awkward/kernels.h" template <typename OUT, typename IN> __global__ void awkward_reduce_min_kernel(OUT* toptr, const IN* fromptr, const int64_t* parents, int64_t lenparents) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < lenparents) { IN x = fromptr[thread_id]; toptr[parents[thread_id]] = (x < toptr[parents[thread_id]] ? x : toptr[parents[thread_id]]); } } template <typename OUT> __global__ void awkward_reduce_min_initialize_toptr(OUT* toptr, OUT identity, int64_t outlength) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < outlength) { toptr[thread_id] = identity; } } template <typename OUT, typename IN> ERROR awkward_reduce_min(OUT* toptr, const IN* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, OUT identity) { dim3 blocks_per_grid = blocks(outlength); dim3 threads_per_block = threads(outlength); hipLaunchKernelGGL(( awkward_reduce_min_initialize_toptr), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, toptr, identity, outlength); blocks_per_grid = blocks(lenparents); threads_per_block = threads(lenparents); hipLaunchKernelGGL(( awkward_reduce_min_kernel<OUT, IN>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, toptr, fromptr, parents, lenparents); return success(); } ERROR awkward_reduce_min_int8_int8_64(int8_t* toptr, const int8_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, int8_t identity) { return awkward_reduce_min<int8_t, int8_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_uint8_uint8_64(uint8_t* toptr, const uint8_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, uint8_t identity) { return awkward_reduce_min<uint8_t, uint8_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_int16_int16_64(int16_t* toptr, const int16_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, int16_t identity) { return awkward_reduce_min<int16_t, int16_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_uint16_uint16_64(uint16_t* toptr, const uint16_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, uint16_t identity) { return awkward_reduce_min<uint16_t, uint16_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_int32_int32_64(int32_t* toptr, const int32_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, int32_t identity) { return awkward_reduce_min<int32_t, int32_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_uint32_uint32_64(uint32_t* toptr, const uint32_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, uint32_t identity) { return awkward_reduce_min<uint32_t, uint32_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_int64_int64_64(int64_t* toptr, const int64_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, int64_t identity) { return awkward_reduce_min<int64_t, int64_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR 
awkward_reduce_min_uint64_uint64_64(uint64_t* toptr, const uint64_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, uint64_t identity) { return awkward_reduce_min<uint64_t, uint64_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_float32_float32_64(float* toptr, const float* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, float identity) { return awkward_reduce_min<float, float>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_float64_float64_64(double* toptr, const double* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, double identity) { return awkward_reduce_min<double, double>( toptr, fromptr, parents, lenparents, outlength, identity); }
3d9fb2adbf4235ec6a82a12357295aebe75aab77.cu
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE #define FILENAME(line) \ FILENAME_FOR_EXCEPTIONS_CUDA( \ "src/cuda-kernels/awkward_reduce_min.cu", line) #include "standard_parallel_algorithms.h" #include "awkward/kernels.h" template <typename OUT, typename IN> __global__ void awkward_reduce_min_kernel(OUT* toptr, const IN* fromptr, const int64_t* parents, int64_t lenparents) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < lenparents) { IN x = fromptr[thread_id]; toptr[parents[thread_id]] = (x < toptr[parents[thread_id]] ? x : toptr[parents[thread_id]]); } } template <typename OUT> __global__ void awkward_reduce_min_initialize_toptr(OUT* toptr, OUT identity, int64_t outlength) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < outlength) { toptr[thread_id] = identity; } } template <typename OUT, typename IN> ERROR awkward_reduce_min(OUT* toptr, const IN* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, OUT identity) { dim3 blocks_per_grid = blocks(outlength); dim3 threads_per_block = threads(outlength); awkward_reduce_min_initialize_toptr<<<blocks_per_grid, threads_per_block>>>( toptr, identity, outlength); blocks_per_grid = blocks(lenparents); threads_per_block = threads(lenparents); awkward_reduce_min_kernel<OUT, IN><<<blocks_per_grid, threads_per_block>>>( toptr, fromptr, parents, lenparents); return success(); } ERROR awkward_reduce_min_int8_int8_64(int8_t* toptr, const int8_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, int8_t identity) { return awkward_reduce_min<int8_t, int8_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_uint8_uint8_64(uint8_t* toptr, const uint8_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, uint8_t identity) { return awkward_reduce_min<uint8_t, uint8_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_int16_int16_64(int16_t* toptr, const int16_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, int16_t identity) { return awkward_reduce_min<int16_t, int16_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_uint16_uint16_64(uint16_t* toptr, const uint16_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, uint16_t identity) { return awkward_reduce_min<uint16_t, uint16_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_int32_int32_64(int32_t* toptr, const int32_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, int32_t identity) { return awkward_reduce_min<int32_t, int32_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_uint32_uint32_64(uint32_t* toptr, const uint32_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, uint32_t identity) { return awkward_reduce_min<uint32_t, uint32_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_int64_int64_64(int64_t* toptr, const int64_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, int64_t identity) { return awkward_reduce_min<int64_t, int64_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_uint64_uint64_64(uint64_t* toptr, const uint64_t* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, uint64_t identity) { return awkward_reduce_min<uint64_t, 
uint64_t>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_float32_float32_64(float* toptr, const float* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, float identity) { return awkward_reduce_min<float, float>( toptr, fromptr, parents, lenparents, outlength, identity); } ERROR awkward_reduce_min_float64_float64_64(double* toptr, const double* fromptr, const int64_t* parents, int64_t lenparents, int64_t outlength, double identity) { return awkward_reduce_min<double, double>( toptr, fromptr, parents, lenparents, outlength, identity); }
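// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file pair above): in
// awkward_reduce_min_kernel, several threads can share the same parent and
// update toptr[parents[i]] with a plain read-modify-write, which is not atomic.
// If the caller does not otherwise serialize those updates, an atomic
// formulation is one option for the integer instantiations; the sketch below is
// an illustration only (atomicMin has no float/double overload, so those types
// would need a compare-and-swap loop instead).
// ---------------------------------------------------------------------------
#include <cstdint>

__global__ void segmented_min_atomic_sketch(int *toptr,
                                            const int *fromptr,
                                            const int64_t *parents,
                                            int64_t lenparents) {
  int64_t i = blockIdx.x * (int64_t)blockDim.x + threadIdx.x;
  if (i < lenparents) {
    // toptr must be pre-filled with the identity (e.g. INT_MAX), as the
    // initialize kernel above already does.
    atomicMin(&toptr[parents[i]], fromptr[i]);
  }
}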
f1fc3ccc1f732a516bad439c3f1c8bd50aa6ca2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* This script is a mockup of the fuctionality to be parallelized in the Monte Carlo Simulation. It calculates "energy" among pairs of "atoms" in a system, and compares serial and parallel performance. The command line arguments are as follows: first argument (optional) - integer representing number of atoms - defaults to 100 - input -1 to run benchmarking suite for 10000 <= N <= 40000 and specified thread block size second argument (optional) - integer <= 1024 representing thread block size - input -1 to run benchmarking suite for 64 <= BS <= 1024 and specified N value For example, -1 512 will run all N with block size = 512 -1 or -1 -1 will run all N for all block sizes 20000 -1 will run N = 20000 for all block sizes Each simulation adds a line into RunLog.log with data about the run. */ //Given two indices in an array (representing atoms), //calculate their product (potential energy), //and store in energies array. //Parallel //Given two indices in an array (representing atoms), //calculate their product (potential energy), //and store in energies array. //Serial __global__ void calcEnergyParallel(int *atoms, int numAtoms, int *energies, int numEnergies) { int atom1 = blockIdx.x, atom2 = blockIdx.y * blockDim.x + threadIdx.x, energyIdx; if (atom2 < numAtoms && atom2 > atom1) { energyIdx = gridDim.x * atom1 + atom2 - (blockIdx.x + 1) * (blockIdx.x + 2) / 2; energies[energyIdx] = atoms[atom1] * atoms[atom2]; } }
f1fc3ccc1f732a516bad439c3f1c8bd50aa6ca2b.cu
#include "includes.h" /* This script is a mockup of the fuctionality to be parallelized in the Monte Carlo Simulation. It calculates "energy" among pairs of "atoms" in a system, and compares serial and parallel performance. The command line arguments are as follows: first argument (optional) - integer representing number of atoms - defaults to 100 - input -1 to run benchmarking suite for 10000 <= N <= 40000 and specified thread block size second argument (optional) - integer <= 1024 representing thread block size - input -1 to run benchmarking suite for 64 <= BS <= 1024 and specified N value For example, -1 512 will run all N with block size = 512 -1 or -1 -1 will run all N for all block sizes 20000 -1 will run N = 20000 for all block sizes Each simulation adds a line into RunLog.log with data about the run. */ //Given two indices in an array (representing atoms), //calculate their product (potential energy), //and store in energies array. //Parallel //Given two indices in an array (representing atoms), //calculate their product (potential energy), //and store in energies array. //Serial __global__ void calcEnergyParallel(int *atoms, int numAtoms, int *energies, int numEnergies) { int atom1 = blockIdx.x, atom2 = blockIdx.y * blockDim.x + threadIdx.x, energyIdx; if (atom2 < numAtoms && atom2 > atom1) { energyIdx = gridDim.x * atom1 + atom2 - (blockIdx.x + 1) * (blockIdx.x + 2) / 2; energies[energyIdx] = atoms[atom1] * atoms[atom2]; } }
b80cb17df9abfa186cc1838651ede87e91be873d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <cmath> #include <stdio.h> #include <iostream> #include <chrono> hipError_t addWithCuda(int *inter,double *f, int size); __global__ void addKernel(int *inter, double *fact) { __shared__ double fact_gpu[4*3]; int i = threadIdx.x; int num = int(i/4); int i1 = i%4; fact_gpu[i]=1; for(int j=inter[num*5+i1]; j<inter[num*5+i1+1]; j++){ fact_gpu[i]*=j; } __syncthreads(); fact[i]=fact_gpu[i]; } void calculate_gpu(int n, int k, int arraySize) { int intervals[5*3]; //5 n, 5 k, 5 n-k double facts[(5-1)*3]; intervals[0] = 2; // intervals[2] = int((2 + n) / 2); // intervals[1] = int((2 + intervals[2]) / 2); intervals[3] = int((intervals[2] + n) / 2); intervals[4] = n+1;// intervals[0+arraySize] = 2; // intervals[2+arraySize] = int((2 + k) / 2); // intervals[1+arraySize] = int((2 + intervals[2+arraySize]) / 2); intervals[3+arraySize] = int((intervals[2+arraySize] + k) / 2); intervals[4+arraySize] = k+1;// intervals[0+2*arraySize] = 2; // intervals[2+2*arraySize] = int((2 + n-k) / 2); // intervals[1+2*arraySize] = int((2 + intervals[2+2*arraySize]) / 2); intervals[3+2*arraySize] = int((intervals[2+2*arraySize] + n-k) / 2); intervals[4+2*arraySize] = n-k+1;// auto begin = std::chrono::steady_clock::now(); hipError_t cudaStatus = addWithCuda(intervals, facts, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); } auto end = std::chrono::steady_clock::now(); auto elapsed_ms = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin); printf("Time= %i nanoseconds \n", elapsed_ms.count()); double nf = facts[0]*facts[1]*facts[2]*facts[3]; double kf = facts[0+4]*facts[1+4]*facts[2+4]*facts[3+4]; double nkf= facts[0+8]*facts[1+8]*facts[2+8]*facts[3+8]; double ans = nf/(kf*nkf); printf("With GPU C (n=%i, k=%i) = %g \n", n, k, ans); } double fact_cpu(int n) { double f=1; for (int i = 2; i <= n; i++) { f *= (double)i; } return f; } void calculate_cpu(int n, int k) { auto begin = std::chrono::steady_clock::now(); double ans=fact_cpu(n)/(fact_cpu(k)*fact_cpu(n-k)); auto end = std::chrono::steady_clock::now(); auto elapsed_ms = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin); printf("Time= %i nanoseconds \n", elapsed_ms.count()); printf("With CPU C (n=%i, k=%i) = %g \n", n, k, ans); } int main() { hipError_t cudaStatus; const int arraySize=5; int n = 12; int k = 4; for (int j = 0; j < 7; j++) { calculate_cpu(n,k); printf("\n"); calculate_gpu(n, k, arraySize); printf("\n"); n*=2; k*=2; } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *inter,double *f, int size) { int *dev_inter = 0; double *dev_fact = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . 
cudaStatus = hipMalloc((void**)&dev_inter, size* 3 * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_fact, (size-1)*3*sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_inter, inter, size*3*sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_fact, f, (size-1)* 3 * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel) , dim3(1), dim3((size-1)*3) , 0, 0, dev_inter, dev_fact); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(f, dev_fact, (size-1)* 3 * sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_inter); hipFree(dev_fact); return cudaStatus; }
b80cb17df9abfa186cc1838651ede87e91be873d.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cmath> #include <stdio.h> #include <iostream> #include <chrono> cudaError_t addWithCuda(int *inter,double *f, int size); __global__ void addKernel(int *inter, double *fact) { __shared__ double fact_gpu[4*3]; int i = threadIdx.x; int num = int(i/4); int i1 = i%4; fact_gpu[i]=1; for(int j=inter[num*5+i1]; j<inter[num*5+i1+1]; j++){ fact_gpu[i]*=j; } __syncthreads(); fact[i]=fact_gpu[i]; } void calculate_gpu(int n, int k, int arraySize) { int intervals[5*3]; //5 интервала для n, 5 для k, 5 для n-k double facts[(5-1)*3]; intervals[0] = 2; //начало intervals[2] = int((2 + n) / 2); //середина intervals[1] = int((2 + intervals[2]) / 2); intervals[3] = int((intervals[2] + n) / 2); intervals[4] = n+1;//конец intervals[0+arraySize] = 2; //начало intervals[2+arraySize] = int((2 + k) / 2); //середина intervals[1+arraySize] = int((2 + intervals[2+arraySize]) / 2); intervals[3+arraySize] = int((intervals[2+arraySize] + k) / 2); intervals[4+arraySize] = k+1;//конец intervals[0+2*arraySize] = 2; //начало intervals[2+2*arraySize] = int((2 + n-k) / 2); //середина intervals[1+2*arraySize] = int((2 + intervals[2+2*arraySize]) / 2); intervals[3+2*arraySize] = int((intervals[2+2*arraySize] + n-k) / 2); intervals[4+2*arraySize] = n-k+1;//конец auto begin = std::chrono::steady_clock::now(); cudaError_t cudaStatus = addWithCuda(intervals, facts, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); } auto end = std::chrono::steady_clock::now(); auto elapsed_ms = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin); printf("Time= %i nanoseconds \n", elapsed_ms.count()); double nf = facts[0]*facts[1]*facts[2]*facts[3]; double kf = facts[0+4]*facts[1+4]*facts[2+4]*facts[3+4]; double nkf= facts[0+8]*facts[1+8]*facts[2+8]*facts[3+8]; double ans = nf/(kf*nkf); printf("With GPU C (n=%i, k=%i) = %g \n", n, k, ans); } double fact_cpu(int n) { double f=1; for (int i = 2; i <= n; i++) { f *= (double)i; } return f; } void calculate_cpu(int n, int k) { auto begin = std::chrono::steady_clock::now(); double ans=fact_cpu(n)/(fact_cpu(k)*fact_cpu(n-k)); auto end = std::chrono::steady_clock::now(); auto elapsed_ms = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin); printf("Time= %i nanoseconds \n", elapsed_ms.count()); printf("With CPU C (n=%i, k=%i) = %g \n", n, k, ans); } int main() { cudaError_t cudaStatus; const int arraySize=5; int n = 12; int k = 4; for (int j = 0; j < 7; j++) { calculate_cpu(n,k); printf("\n"); calculate_gpu(n, k, arraySize); printf("\n"); n*=2; k*=2; } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *inter,double *f, int size) { int *dev_inter = 0; double *dev_fact = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . 
cudaStatus = cudaMalloc((void**)&dev_inter, size* 3 * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_fact, (size-1)*3*sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_inter, inter, size*3*sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_fact, f, (size-1)* 3 * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel <<<1, (size-1)*3 >>> (dev_inter, dev_fact); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(f, dev_fact, (size-1)* 3 * sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_inter); cudaFree(dev_fact); return cudaStatus; }
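// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file pair above): both versions print
// the std::chrono::nanoseconds count with "%i", but nanoseconds::rep is a
// signed integer of at least 64 bits, so the format specifier does not match
// the argument. One portable fix is an explicit cast, as in this helper
// (the function name is an assumption for the example):
// ---------------------------------------------------------------------------
#include <chrono>
#include <cstdio>

void print_elapsed(std::chrono::nanoseconds elapsed) {
  printf("Time= %lld nanoseconds\n",
         static_cast<long long>(elapsed.count()));  // cast pins down the width
}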
31eb24d7223143ea347765a91e79013097ce007e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define _NTHREAD 512 #define _NBLOCK 65535 #include<cuda.h> __global__ void _AFFINE_KERNEL(int* ,int ,int ,int* ,int ,int ,int ,int ,int ,int ,int ,int ,int ,int ); #include<stdio.h> #include<stdlib.h> int main() { int x[20][20]; int y[20][20]; int a[20],i,j,k; int n=20; for(i=0;i<20;i++) for(j=0;j<20;j++) { x[i][j]=i+j; y[i][j]=2*i+j; a[i]=2*i; } int _SZ_y_2 = 20; int _SZ_y_1 = 20; int _SZ_x_2 = 20; int _SZ_x_1 = 20; int *_DEV_y; hipMalloc((void**) &_DEV_y, sizeof(int)*_SZ_y_2*_SZ_y_1); hipMemcpy(_DEV_y, y, sizeof(int)*_SZ_y_2*_SZ_y_1, hipMemcpyHostToDevice); int *_DEV_x; hipMalloc((void**) &_DEV_x, sizeof(int)*_SZ_x_2*_SZ_x_1); hipMemcpy(_DEV_x, x, sizeof(int)*_SZ_x_2*_SZ_x_1, hipMemcpyHostToDevice); float _NUM_THREADS = 400,_NUM_BLOCKS=1; int _NUM_TILE=1; dim3 _THREADS(512); dim3 _BLOCKS(1); if(_NUM_THREADS < _NTHREAD) { _THREADS.x=20; _THREADS.y=20; } else { _NUM_BLOCKS=_NUM_THREADS/256; _BLOCKS.x=_BLOCKS.y=ceil(sqrt(_NUM_BLOCKS)); _THREADS.x=_THREADS.y=ceil(sqrt(400.0/(_BLOCKS.x*_BLOCKS.y))); int temp=_NUM_BLOCKS; if(_NUM_BLOCKS>_NBLOCK) _NUM_TILE=(temp % _NBLOCK == 0)?(_NUM_BLOCKS/_NBLOCK):((_NUM_BLOCKS/_NBLOCK)+1); } int _CUDA_TILE; for(i=0;i<=20;i+=3) for(j=0;j<=15;j+=2) for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++) hipLaunchKernelGGL(( _AFFINE_KERNEL), dim3(_BLOCKS),dim3(_THREADS), 0, 0, _DEV_y, _SZ_y_2, _SZ_y_1, _DEV_x, _SZ_x_2, _SZ_x_1, 2, i, j, 0, 20, 0, 15, _CUDA_TILE); hipDeviceSynchronize(); hipMemcpy(y, _DEV_y, sizeof(int)*_SZ_y_2*_SZ_y_1, hipMemcpyDeviceToHost); hipMemcpy(x, _DEV_x, sizeof(int)*_SZ_x_2*_SZ_x_1, hipMemcpyDeviceToHost); for(i=0;i<20;i++) for(j=0;j<20;j++) printf("%d\t%d\n",x[i][j],y[i][j]); return 0; } __global__ void _AFFINE_KERNEL(int* y,int _SZ_y_2,int _SZ_y_1,int* x,int _SZ_x_2,int _SZ_x_1,int phi_count, int CUDA_i, int CUDA_j, int CUDA_L_i,int CUDA_U_i, int CUDA_L_j,int CUDA_U_j, int _CUDA_TILE) { int i = gridDim.x*blockDim.x*_CUDA_TILE + blockDim.x*blockIdx.x + threadIdx.x; int j = gridDim.y*blockDim.y*_CUDA_TILE + blockDim.y*blockIdx.y + threadIdx.y; if((CUDA_i<=i)&&(i<(CUDA_i+3))&&(i<=CUDA_U_i)){ if((CUDA_j<=j)&&(j<(CUDA_j+2))&&(j<=CUDA_U_j)){ x[(3+i+5)*_SZ_x_1+5+j+1]=y[(3+i-1)*_SZ_y_1+5+j-2]+x[(3+i)*_SZ_x_1+5+j-1]; y[(3+i+2)*_SZ_y_1+5+j+2]=x[(3+i+2)*_SZ_x_1+5+j-1]; }}}
31eb24d7223143ea347765a91e79013097ce007e.cu
#define _NTHREAD 512 #define _NBLOCK 65535 #include<cuda.h> __global__ void _AFFINE_KERNEL(int* ,int ,int ,int* ,int ,int ,int ,int ,int ,int ,int ,int ,int ,int ); #include<stdio.h> #include<stdlib.h> int main() { int x[20][20]; int y[20][20]; int a[20],i,j,k; int n=20; for(i=0;i<20;i++) for(j=0;j<20;j++) { x[i][j]=i+j; y[i][j]=2*i+j; a[i]=2*i; } int _SZ_y_2 = 20; int _SZ_y_1 = 20; int _SZ_x_2 = 20; int _SZ_x_1 = 20; int *_DEV_y; cudaMalloc((void**) &_DEV_y, sizeof(int)*_SZ_y_2*_SZ_y_1); cudaMemcpy(_DEV_y, y, sizeof(int)*_SZ_y_2*_SZ_y_1, cudaMemcpyHostToDevice); int *_DEV_x; cudaMalloc((void**) &_DEV_x, sizeof(int)*_SZ_x_2*_SZ_x_1); cudaMemcpy(_DEV_x, x, sizeof(int)*_SZ_x_2*_SZ_x_1, cudaMemcpyHostToDevice); float _NUM_THREADS = 400,_NUM_BLOCKS=1; int _NUM_TILE=1; dim3 _THREADS(512); dim3 _BLOCKS(1); if(_NUM_THREADS < _NTHREAD) { _THREADS.x=20; _THREADS.y=20; } else { _NUM_BLOCKS=_NUM_THREADS/256; _BLOCKS.x=_BLOCKS.y=ceil(sqrt(_NUM_BLOCKS)); _THREADS.x=_THREADS.y=ceil(sqrt(400.0/(_BLOCKS.x*_BLOCKS.y))); int temp=_NUM_BLOCKS; if(_NUM_BLOCKS>_NBLOCK) _NUM_TILE=(temp % _NBLOCK == 0)?(_NUM_BLOCKS/_NBLOCK):((_NUM_BLOCKS/_NBLOCK)+1); } int _CUDA_TILE; for(i=0;i<=20;i+=3) for(j=0;j<=15;j+=2) for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++) _AFFINE_KERNEL<<<_BLOCKS,_THREADS>>>(_DEV_y, _SZ_y_2, _SZ_y_1, _DEV_x, _SZ_x_2, _SZ_x_1, 2, i, j, 0, 20, 0, 15, _CUDA_TILE); cudaDeviceSynchronize(); cudaMemcpy(y, _DEV_y, sizeof(int)*_SZ_y_2*_SZ_y_1, cudaMemcpyDeviceToHost); cudaMemcpy(x, _DEV_x, sizeof(int)*_SZ_x_2*_SZ_x_1, cudaMemcpyDeviceToHost); for(i=0;i<20;i++) for(j=0;j<20;j++) printf("%d\t%d\n",x[i][j],y[i][j]); return 0; } __global__ void _AFFINE_KERNEL(int* y,int _SZ_y_2,int _SZ_y_1,int* x,int _SZ_x_2,int _SZ_x_1,int phi_count, int CUDA_i, int CUDA_j, int CUDA_L_i,int CUDA_U_i, int CUDA_L_j,int CUDA_U_j, int _CUDA_TILE) { int i = gridDim.x*blockDim.x*_CUDA_TILE + blockDim.x*blockIdx.x + threadIdx.x; int j = gridDim.y*blockDim.y*_CUDA_TILE + blockDim.y*blockIdx.y + threadIdx.y; if((CUDA_i<=i)&&(i<(CUDA_i+3))&&(i<=CUDA_U_i)){ if((CUDA_j<=j)&&(j<(CUDA_j+2))&&(j<=CUDA_U_j)){ x[(3+i+5)*_SZ_x_1+5+j+1]=y[(3+i-1)*_SZ_y_1+5+j-2]+x[(3+i)*_SZ_x_1+5+j-1]; y[(3+i+2)*_SZ_y_1+5+j+2]=x[(3+i+2)*_SZ_x_1+5+j-1]; }}}
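// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file pair above): the launch
// configuration above derives 2D block and grid shapes from floating-point
// sqrt/ceil arithmetic. A more common way to cover an nx-by-ny iteration space
// is plain integer ceiling division, sketched below with an assumed 16x16 tile;
// this is an alternative illustration, not a drop-in replacement for the tiling
// scheme used above.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

inline unsigned divUp(unsigned a, unsigned b) { return (a + b - 1) / b; }

void configure2D(unsigned nx, unsigned ny, dim3 &grid, dim3 &block) {
  block = dim3(16, 16);                   // 256 threads per block (assumed tile)
  grid = dim3(divUp(nx, block.x),         // enough blocks to cover nx columns
              divUp(ny, block.y));        // and ny rows
}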
1836610021b28dc0508fd773d24ec76460ed9dea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } /* * In the current application, `N` is larger than the grid. * Refactor this kernel to use a grid-stride loop in order that * each parallel thread work on more than one element of the array. */ __global__ void doubleElements(int *a, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; i < N; i += stride) { a[i] *= 2; } } bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { /* * `N` is greater than the size of the grid (see below). */ int N = 10000; int *a; size_t size = N * sizeof(int); hipMallocManaged(&a, size); init(a, N); /* * The size of this grid is 256*32 = 8192. */ size_t threads_per_block = 256; size_t number_of_blocks = 32; hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N); hipDeviceSynchronize(); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); hipFree(a); }
1836610021b28dc0508fd773d24ec76460ed9dea.cu
#include <stdio.h> void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } /* * In the current application, `N` is larger than the grid. * Refactor this kernel to use a grid-stride loop in order that * each parallel thread work on more than one element of the array. */ __global__ void doubleElements(int *a, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; i < N; i += stride) { a[i] *= 2; } } bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { /* * `N` is greater than the size of the grid (see below). */ int N = 10000; int *a; size_t size = N * sizeof(int); cudaMallocManaged(&a, size); init(a, N); /* * The size of this grid is 256*32 = 8192. */ size_t threads_per_block = 256; size_t number_of_blocks = 32; doubleElements<<<number_of_blocks, threads_per_block>>>(a, N); cudaDeviceSynchronize(); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); cudaFree(a); }
49daa166f7a1d249e35135f8303187f206286f72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "helpers.h" #include "FixDihedralOPLS.h" #include "FixHelpers.h" #include "cutils_func.h" #define EPSILON 0.00001f namespace py = boost::python; using namespace std; const std::string dihedralOPLSType = "DihedralOPLS"; __global__ void compute_cu(int nAtoms, float4 *xs, float4 *forces, hipTextureObject_t idToIdxs, DihedralOPLSGPU *dihedrals, int *startstops, BoundsGPU bounds) { int idx = GETIDX(); extern __shared__ DihedralOPLSGPU dihedrals_shr[]; int idxBeginCopy = startstops[blockDim.x*blockIdx.x]; int idxEndCopy = startstops[min(nAtoms, blockDim.x*(blockIdx.x+1))]; copyToShared<DihedralOPLSGPU>(dihedrals + idxBeginCopy, dihedrals_shr, idxEndCopy - idxBeginCopy); __syncthreads(); if (idx < nAtoms) { // printf("going to compute %d\n", idx); int startIdx = startstops[idx]; int endIdx = startstops[idx+1]; //so start/end is the index within the entire bond list. //startIdx - idxBeginCopy gives my index in shared memory int shr_idx = startIdx - idxBeginCopy; int n = endIdx - startIdx; if (n) { int idSelf = dihedrals_shr[shr_idx].ids[dihedrals_shr[shr_idx].myIdx]; int idxSelf = tex2D<int>(idToIdxs, XIDX(idSelf, sizeof(int)), YIDX(idSelf, sizeof(int))); float3 pos = make_float3(xs[idxSelf]); // printf("I am idx %d and I am evaluating atom with pos %f %f %f\n", idx, pos.x, pos.y, pos.z); float3 forceSum = make_float3(0, 0, 0); for (int i=0; i<n; i++) { DihedralOPLSGPU dihedral = dihedrals_shr[shr_idx + i]; float3 positions[4]; positions[dihedral.myIdx] = pos; int toGet[3]; if (dihedral.myIdx==0) { toGet[0] = 1; toGet[1] = 2; toGet[2] = 3; } else if (dihedral.myIdx==1) { toGet[0] = 0; toGet[1] = 2; toGet[2] = 3; } else if (dihedral.myIdx==2) { toGet[0] = 0; toGet[1] = 1; toGet[2] = 3; } else if (dihedral.myIdx==3) { toGet[0] = 0; toGet[1] = 1; toGet[2] = 2; } for (int i=0; i<3; i++) { positions[toGet[i]] = make_float3(perAtomFromId(idToIdxs, xs, dihedral.ids[toGet[i]])); } for (int i=1; i<3; i++) { positions[i] = positions[0] + bounds.minImage(positions[i]-positions[0]); } float3 directors[3]; //vb_xyz in lammps float lenSqrs[3]; //bnmag2 in lammps float lens[3]; //bnmag in lammps float invLenSqrs[3]; //sb in lammps float invLens[3]; directors[0] = positions[0] - positions[1]; directors[1] = positions[2] - positions[1]; directors[2] = positions[3] - positions[2]; for (int i=0; i<3; i++) { //printf("directors %d is %f %f %f\n", i, directors[i].x, directors[i].y, directors[i].z); lenSqrs[i] = lengthSqr(directors[i]); lens[i] = sqrtf(lenSqrs[i]); invLenSqrs[i] = 1.0f / lenSqrs[i]; invLens[i] = 1.0f / lens[i]; // printf("inv len sqrs %d is %f\n", i, invLenSqrs[i]); } float c0 = dot(directors[0], directors[2]) * invLens[0] * invLens[2]; // printf("c0 is %f\n", c0); float c12Mags[2]; float invMagProds[2]; //r12c1, 2 in lammps for (int i=0; i<2; i++) { float dotProd = dot(directors[i+1], directors[i]); if (i==1) { dotProd *= -1; } // printf("ctmp is %f\n", dotProd); invMagProds[i] = invLens[i] * invLens[i+1]; c12Mags[i] = dotProd * invMagProds[i]; //lammps variable names are opaque // printf("c12 mag %d %f\n", i, c12Mags[i]); } float scValues[3]; //???, is s1, s2, s12 in lammps for (int i=0; i<2; i++) { float x = max(1 - c12Mags[i]*c12Mags[i], 0.0f); float sqrtVal = max(sqrtf(x), EPSILON); scValues[i] = 1.0 / sqrtVal; } scValues[2] = scValues[0] * scValues[1]; for (int i=0; i<2; i++) { scValues[i] *= scValues[i]; } // printf("sc values %f %f %f\n", scValues[0], scValues[1], scValues[2]); float c = (c0 + 
c12Mags[0]*c12Mags[1]) * scValues[2]; float3 cVector; cVector.x = directors[0].y*directors[1].z - directors[0].z*directors[1].y; cVector.y = directors[0].z*directors[1].x - directors[0].x*directors[1].z; cVector.z = directors[0].x*directors[1].y - directors[0].y*directors[1].x; float cVectorLen = length(cVector); float dx = dot(cVector, directors[2]) * invLens[2] / cVectorLen; // printf("c is %f\n", c); if (c > 1.0f) { c = 1.0f; } else if (c < -1.0f) { c = -1.0f; } float phi = acosf(c); //printf("phi is %f\n", phi); if (dx < 0) { phi = -phi; } // printf("phi is %f\n", phi); float sinPhi = sinf(phi); float absSinPhi = sinPhi < 0 ? -sinPhi : sinPhi; if (absSinPhi < EPSILON) { sinPhi = EPSILON; } float invSinPhi = 1.0f / sinPhi; float derivOfPotential = 0.5 * ( dihedral.coefs[0] - 2.0f * dihedral.coefs[1] * sinf(2.0f*phi) * invSinPhi + 3.0f * dihedral.coefs[2] * sinf(3.0f*phi) * invSinPhi - 4.0f * dihedral.coefs[3] * sinf(4.0f*phi) * invSinPhi ) ; // printf("deriv is %f\n", derivOfPotential); // printf("coefs %f %f %f %f\n", dihedral.coefs[0],dihedral.coefs[1] , dihedral.coefs[2] , dihedral.coefs[3]); c *= derivOfPotential; scValues[2] *= derivOfPotential; float a11 = c * invLenSqrs[0] * scValues[0]; float a22 = -invLenSqrs[1] * (2.0f*c0*scValues[2] - c*(scValues[0]+scValues[1])); float a33 = c*invLenSqrs[2]*scValues[1]; float a12 = -invMagProds[0] * (c12Mags[0] * c * scValues[0] + c12Mags[1] * scValues[2]); float a13 = -invLens[0] * invLens[2] * scValues[2]; float a23 = invMagProds[1] * (c12Mags[1]*c*scValues[1] + c12Mags[0]*scValues[2]); float3 myForce = make_float3(0, 0, 0); float3 sFloat3 = make_float3( a12*directors[0].x + a22*directors[1].x + a23*directors[2].x , a12*directors[0].y + a22*directors[1].y + a23*directors[2].y , a12*directors[0].z + a22*directors[1].z + a23*directors[2].z ); //printf("ssomething valyes %f %f %f\n", sFloat3.x, sFloat3.y, sFloat3.z); //printf("comps %f %f %f %f %f %f\n", a12, directors[0].x, a22, directors[1].x, a23, directors[2].x); if (dihedral.myIdx <= 1) { float3 a11Dir1 = directors[0] * a11; float3 a12Dir2 = directors[1] * a12; float3 a13Dir3 = directors[2] * a13; myForce.x += a11Dir1.x + a12Dir2.x + a13Dir3.x; myForce.y += a11Dir1.y + a12Dir2.y + a13Dir3.y; myForce.z += a11Dir1.z + a12Dir2.z + a13Dir3.z; if (dihedral.myIdx == 1) { myForce = -sFloat3 - myForce; // printf("dihedral idx 1 gets force %f %f %f\n", myForce.x, myForce.y, myForce.z); } //} else { // printf("dihedral idx 0 gets force %f %f %f\n", myForce.x, myForce.y, myForce.z); //} } else { float3 a13Dir1 = directors[0] * a13; float3 a23Dir2 = directors[1] * a23; float3 a33Dir3 = directors[2] * a33; myForce.x += a13Dir1.x + a23Dir2.x + a33Dir3.x; myForce.y += a13Dir1.y + a23Dir2.y + a33Dir3.y; myForce.z += a13Dir1.z + a23Dir2.z + a33Dir3.z; if (dihedral.myIdx == 2) { myForce = sFloat3 - myForce; // printf("dihedral idx 2 gets force %f %f %f\n", myForce.x, myForce.y, myForce.z); } // } else { // printf("dihedral idx 3 gets force %f %f %f\n", myForce.x, myForce.y, myForce.z); //} } forceSum += myForce; } forces[idxSelf] += forceSum; } } } FixDihedralOPLS::FixDihedralOPLS(SHARED(State) state_, string handle) : FixPotentialMultiAtom (state_, handle, dihedralOPLSType, true), pyListInterface(&forcers, &pyForcers) {} void FixDihedralOPLS::compute(bool computeVirials) { int nAtoms = state->atoms.size(); int activeIdx = state->gpd.activeIdx(); //cout << "max forcers " << maxForcersPerBlock << endl; hipLaunchKernelGGL(( compute_cu), dim3(NBLOCK(nAtoms)), dim3(PERBLOCK), sizeof(DihedralOPLSGPU) * 
maxForcersPerBlock, 0, nAtoms, state->gpd.xs(activeIdx), state->gpd.fs(activeIdx), state->gpd.idToIdxs.getTex(), forcersGPU.data(), forcerIdxs.data(), state->boundsGPU); } void FixDihedralOPLS::createDihedral(Atom *a, Atom *b, Atom *c, Atom *d, double v1, double v2, double v3, double v4, int type) { double vs[4] = {v1, v2, v3, v4}; if (type==-1) { for (int i=0; i<4; i++) { assert(vs[i] != COEF_DEFAULT); } } forcers.push_back(DihedralOPLS(a, b, c, d, vs, type)); pyListInterface.updateAppendedMember(); } void FixDihedralOPLS::createDihedralPy(Atom *a, Atom *b, Atom *c, Atom *d, py::list coefs, int type) { double coefs_c[4]; if (type!=-1) { createDihedral(a, b, c, d, COEF_DEFAULT, COEF_DEFAULT, COEF_DEFAULT, COEF_DEFAULT, type); } else { assert(len(coefs) == 4); for (int i=0; i<4; i++) { py::extract<double> coef(coefs[i]); assert(coef.check()); coefs_c[i] = coef; } createDihedral(a, b, c, d, coefs_c[0], coefs_c[1], coefs_c[2], coefs_c[3], type); } } void FixDihedralOPLS::setDihedralTypeCoefs(int type, py::list coefs) { assert(len(coefs)==4); double coefs_c[4]; for (int i=0; i<4; i++) { py::extract<double> coef(coefs[i]); assert(coef.check()); coefs_c[i] = coef; } DihedralOPLS dummy(coefs_c, type); setForcerType(type, dummy); } string FixDihedralOPLS::restartChunk(string format) { stringstream ss; return ss.str(); } void export_FixDihedralOPLS() { py::class_<FixDihedralOPLS, SHARED(FixDihedralOPLS), py::bases<Fix, TypedItemHolder> > ( "FixDihedralOPLS", py::init<SHARED(State), string> ( py::args("state", "handle") ) ) .def("createDihedral", &FixDihedralOPLS::createDihedralPy, (py::arg("coefs")=py::list(), py::arg("type")=-1) ) .def("setDihedralTypeCoefs", &FixDihedralOPLS::setDihedralTypeCoefs, (py::arg("type"), py::arg("coefs")) ) .def_readonly("dihedrals", &FixDihedralOPLS::pyForcers) ; }
49daa166f7a1d249e35135f8303187f206286f72.cu
#include "helpers.h" #include "FixDihedralOPLS.h" #include "FixHelpers.h" #include "cutils_func.h" #define EPSILON 0.00001f namespace py = boost::python; using namespace std; const std::string dihedralOPLSType = "DihedralOPLS"; __global__ void compute_cu(int nAtoms, float4 *xs, float4 *forces, cudaTextureObject_t idToIdxs, DihedralOPLSGPU *dihedrals, int *startstops, BoundsGPU bounds) { int idx = GETIDX(); extern __shared__ DihedralOPLSGPU dihedrals_shr[]; int idxBeginCopy = startstops[blockDim.x*blockIdx.x]; int idxEndCopy = startstops[min(nAtoms, blockDim.x*(blockIdx.x+1))]; copyToShared<DihedralOPLSGPU>(dihedrals + idxBeginCopy, dihedrals_shr, idxEndCopy - idxBeginCopy); __syncthreads(); if (idx < nAtoms) { // printf("going to compute %d\n", idx); int startIdx = startstops[idx]; int endIdx = startstops[idx+1]; //so start/end is the index within the entire bond list. //startIdx - idxBeginCopy gives my index in shared memory int shr_idx = startIdx - idxBeginCopy; int n = endIdx - startIdx; if (n) { int idSelf = dihedrals_shr[shr_idx].ids[dihedrals_shr[shr_idx].myIdx]; int idxSelf = tex2D<int>(idToIdxs, XIDX(idSelf, sizeof(int)), YIDX(idSelf, sizeof(int))); float3 pos = make_float3(xs[idxSelf]); // printf("I am idx %d and I am evaluating atom with pos %f %f %f\n", idx, pos.x, pos.y, pos.z); float3 forceSum = make_float3(0, 0, 0); for (int i=0; i<n; i++) { DihedralOPLSGPU dihedral = dihedrals_shr[shr_idx + i]; float3 positions[4]; positions[dihedral.myIdx] = pos; int toGet[3]; if (dihedral.myIdx==0) { toGet[0] = 1; toGet[1] = 2; toGet[2] = 3; } else if (dihedral.myIdx==1) { toGet[0] = 0; toGet[1] = 2; toGet[2] = 3; } else if (dihedral.myIdx==2) { toGet[0] = 0; toGet[1] = 1; toGet[2] = 3; } else if (dihedral.myIdx==3) { toGet[0] = 0; toGet[1] = 1; toGet[2] = 2; } for (int i=0; i<3; i++) { positions[toGet[i]] = make_float3(perAtomFromId(idToIdxs, xs, dihedral.ids[toGet[i]])); } for (int i=1; i<3; i++) { positions[i] = positions[0] + bounds.minImage(positions[i]-positions[0]); } float3 directors[3]; //vb_xyz in lammps float lenSqrs[3]; //bnmag2 in lammps float lens[3]; //bnmag in lammps float invLenSqrs[3]; //sb in lammps float invLens[3]; directors[0] = positions[0] - positions[1]; directors[1] = positions[2] - positions[1]; directors[2] = positions[3] - positions[2]; for (int i=0; i<3; i++) { //printf("directors %d is %f %f %f\n", i, directors[i].x, directors[i].y, directors[i].z); lenSqrs[i] = lengthSqr(directors[i]); lens[i] = sqrtf(lenSqrs[i]); invLenSqrs[i] = 1.0f / lenSqrs[i]; invLens[i] = 1.0f / lens[i]; // printf("inv len sqrs %d is %f\n", i, invLenSqrs[i]); } float c0 = dot(directors[0], directors[2]) * invLens[0] * invLens[2]; // printf("c0 is %f\n", c0); float c12Mags[2]; float invMagProds[2]; //r12c1, 2 in lammps for (int i=0; i<2; i++) { float dotProd = dot(directors[i+1], directors[i]); if (i==1) { dotProd *= -1; } // printf("ctmp is %f\n", dotProd); invMagProds[i] = invLens[i] * invLens[i+1]; c12Mags[i] = dotProd * invMagProds[i]; //lammps variable names are opaque // printf("c12 mag %d %f\n", i, c12Mags[i]); } float scValues[3]; //???, is s1, s2, s12 in lammps for (int i=0; i<2; i++) { float x = max(1 - c12Mags[i]*c12Mags[i], 0.0f); float sqrtVal = max(sqrtf(x), EPSILON); scValues[i] = 1.0 / sqrtVal; } scValues[2] = scValues[0] * scValues[1]; for (int i=0; i<2; i++) { scValues[i] *= scValues[i]; } // printf("sc values %f %f %f\n", scValues[0], scValues[1], scValues[2]); float c = (c0 + c12Mags[0]*c12Mags[1]) * scValues[2]; float3 cVector; cVector.x = 
directors[0].y*directors[1].z - directors[0].z*directors[1].y; cVector.y = directors[0].z*directors[1].x - directors[0].x*directors[1].z; cVector.z = directors[0].x*directors[1].y - directors[0].y*directors[1].x; float cVectorLen = length(cVector); float dx = dot(cVector, directors[2]) * invLens[2] / cVectorLen; // printf("c is %f\n", c); if (c > 1.0f) { c = 1.0f; } else if (c < -1.0f) { c = -1.0f; } float phi = acosf(c); //printf("phi is %f\n", phi); if (dx < 0) { phi = -phi; } // printf("phi is %f\n", phi); float sinPhi = sinf(phi); float absSinPhi = sinPhi < 0 ? -sinPhi : sinPhi; if (absSinPhi < EPSILON) { sinPhi = EPSILON; } float invSinPhi = 1.0f / sinPhi; float derivOfPotential = 0.5 * ( dihedral.coefs[0] - 2.0f * dihedral.coefs[1] * sinf(2.0f*phi) * invSinPhi + 3.0f * dihedral.coefs[2] * sinf(3.0f*phi) * invSinPhi - 4.0f * dihedral.coefs[3] * sinf(4.0f*phi) * invSinPhi ) ; // printf("deriv is %f\n", derivOfPotential); // printf("coefs %f %f %f %f\n", dihedral.coefs[0],dihedral.coefs[1] , dihedral.coefs[2] , dihedral.coefs[3]); c *= derivOfPotential; scValues[2] *= derivOfPotential; float a11 = c * invLenSqrs[0] * scValues[0]; float a22 = -invLenSqrs[1] * (2.0f*c0*scValues[2] - c*(scValues[0]+scValues[1])); float a33 = c*invLenSqrs[2]*scValues[1]; float a12 = -invMagProds[0] * (c12Mags[0] * c * scValues[0] + c12Mags[1] * scValues[2]); float a13 = -invLens[0] * invLens[2] * scValues[2]; float a23 = invMagProds[1] * (c12Mags[1]*c*scValues[1] + c12Mags[0]*scValues[2]); float3 myForce = make_float3(0, 0, 0); float3 sFloat3 = make_float3( a12*directors[0].x + a22*directors[1].x + a23*directors[2].x , a12*directors[0].y + a22*directors[1].y + a23*directors[2].y , a12*directors[0].z + a22*directors[1].z + a23*directors[2].z ); //printf("ssomething valyes %f %f %f\n", sFloat3.x, sFloat3.y, sFloat3.z); //printf("comps %f %f %f %f %f %f\n", a12, directors[0].x, a22, directors[1].x, a23, directors[2].x); if (dihedral.myIdx <= 1) { float3 a11Dir1 = directors[0] * a11; float3 a12Dir2 = directors[1] * a12; float3 a13Dir3 = directors[2] * a13; myForce.x += a11Dir1.x + a12Dir2.x + a13Dir3.x; myForce.y += a11Dir1.y + a12Dir2.y + a13Dir3.y; myForce.z += a11Dir1.z + a12Dir2.z + a13Dir3.z; if (dihedral.myIdx == 1) { myForce = -sFloat3 - myForce; // printf("dihedral idx 1 gets force %f %f %f\n", myForce.x, myForce.y, myForce.z); } //} else { // printf("dihedral idx 0 gets force %f %f %f\n", myForce.x, myForce.y, myForce.z); //} } else { float3 a13Dir1 = directors[0] * a13; float3 a23Dir2 = directors[1] * a23; float3 a33Dir3 = directors[2] * a33; myForce.x += a13Dir1.x + a23Dir2.x + a33Dir3.x; myForce.y += a13Dir1.y + a23Dir2.y + a33Dir3.y; myForce.z += a13Dir1.z + a23Dir2.z + a33Dir3.z; if (dihedral.myIdx == 2) { myForce = sFloat3 - myForce; // printf("dihedral idx 2 gets force %f %f %f\n", myForce.x, myForce.y, myForce.z); } // } else { // printf("dihedral idx 3 gets force %f %f %f\n", myForce.x, myForce.y, myForce.z); //} } forceSum += myForce; } forces[idxSelf] += forceSum; } } } FixDihedralOPLS::FixDihedralOPLS(SHARED(State) state_, string handle) : FixPotentialMultiAtom (state_, handle, dihedralOPLSType, true), pyListInterface(&forcers, &pyForcers) {} void FixDihedralOPLS::compute(bool computeVirials) { int nAtoms = state->atoms.size(); int activeIdx = state->gpd.activeIdx(); //cout << "max forcers " << maxForcersPerBlock << endl; compute_cu<<<NBLOCK(nAtoms), PERBLOCK, sizeof(DihedralOPLSGPU) * maxForcersPerBlock>>>(nAtoms, state->gpd.xs(activeIdx), state->gpd.fs(activeIdx), 
state->gpd.idToIdxs.getTex(), forcersGPU.data(), forcerIdxs.data(), state->boundsGPU); } void FixDihedralOPLS::createDihedral(Atom *a, Atom *b, Atom *c, Atom *d, double v1, double v2, double v3, double v4, int type) { double vs[4] = {v1, v2, v3, v4}; if (type==-1) { for (int i=0; i<4; i++) { assert(vs[i] != COEF_DEFAULT); } } forcers.push_back(DihedralOPLS(a, b, c, d, vs, type)); pyListInterface.updateAppendedMember(); } void FixDihedralOPLS::createDihedralPy(Atom *a, Atom *b, Atom *c, Atom *d, py::list coefs, int type) { double coefs_c[4]; if (type!=-1) { createDihedral(a, b, c, d, COEF_DEFAULT, COEF_DEFAULT, COEF_DEFAULT, COEF_DEFAULT, type); } else { assert(len(coefs) == 4); for (int i=0; i<4; i++) { py::extract<double> coef(coefs[i]); assert(coef.check()); coefs_c[i] = coef; } createDihedral(a, b, c, d, coefs_c[0], coefs_c[1], coefs_c[2], coefs_c[3], type); } } void FixDihedralOPLS::setDihedralTypeCoefs(int type, py::list coefs) { assert(len(coefs)==4); double coefs_c[4]; for (int i=0; i<4; i++) { py::extract<double> coef(coefs[i]); assert(coef.check()); coefs_c[i] = coef; } DihedralOPLS dummy(coefs_c, type); setForcerType(type, dummy); } string FixDihedralOPLS::restartChunk(string format) { stringstream ss; return ss.str(); } void export_FixDihedralOPLS() { py::class_<FixDihedralOPLS, SHARED(FixDihedralOPLS), py::bases<Fix, TypedItemHolder> > ( "FixDihedralOPLS", py::init<SHARED(State), string> ( py::args("state", "handle") ) ) .def("createDihedral", &FixDihedralOPLS::createDihedralPy, (py::arg("coefs")=py::list(), py::arg("type")=-1) ) .def("setDihedralTypeCoefs", &FixDihedralOPLS::setDihedralTypeCoefs, (py::arg("type"), py::arg("coefs")) ) .def_readonly("dihedrals", &FixDihedralOPLS::pyForcers) ; }
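// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file pair above): compute_cu stages a
// per-block slice of the dihedral list into dynamic shared memory
// (extern __shared__) through a copyToShared helper before any thread reads it.
// That helper lives elsewhere in the codebase; the sketch below shows the
// generic cooperative-copy pattern such a helper typically follows, with
// assumed names, not the project's actual implementation.
// ---------------------------------------------------------------------------
template <class T>
__device__ void copy_to_shared_sketch(const T *src, T *dst, int n) {
  // every thread in the block copies a strided subset of the n elements
  for (int i = threadIdx.x; i < n; i += blockDim.x) {
    dst[i] = src[i];
  }
  // callers still need __syncthreads() before reading dst, as compute_cu does
}

__global__ void staging_example_kernel(const float *global_data, int countForThisBlock) {
  extern __shared__ float tile[];   // sized via the third launch parameter,
                                    // like sizeof(DihedralOPLSGPU) * maxForcersPerBlock above
  copy_to_shared_sketch(global_data, tile, countForThisBlock);
  __syncthreads();                  // make the staged data visible to all threads
  // ... per-thread work on tile[] would go here ...
}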
108006da8dffe3b7d66680b0f32292afe127bad8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime_api.h" #include <stdio.h> hipError_t transpose8WithCuda(unsigned char* input, unsigned char* output, int xSize, int ySize); int verify(unsigned char* input, unsigned char* output, int xSize, int ySize); int verify(unsigned char* GoldInput, unsigned char* CudaInput, int xSize, int ySize) { for (int i = 0; i < xSize * ySize; i++) { if (GoldInput[i] != CudaInput[i]) { printf("GoldInput[%d] = %d CInput[%d]=%d \n", i, GoldInput[i], i, CudaInput[i]); return(1); } } return(0); } void transpose8WithC(unsigned char* in, unsigned char* out, int x_size, int y_size) { size_t x, y; for (y = 0; y < y_size; y += 1) { for (x = 0; x < x_size; x += 1) { size_t in_pos = y * x_size + x; size_t out_pos = x * y_size + y; *(out + out_pos) = *(in + in_pos); } } } __global__ void kernelTranspose8(unsigned char* input, unsigned char* output, int size) { int xWidth = blockDim.x * gridDim.x; int xLoc = (blockIdx.x * blockDim.x + threadIdx.x); int yLoc = blockIdx.y * blockDim.y + threadIdx.y; int index = xLoc + yLoc * xWidth; if (index < (size)) { output[yLoc * xWidth + xLoc] = input[index]; } } int main() { unsigned char* input, * CudaOutput, * GoldOutput; int xSize, ySize; xSize = 512; ySize = 512; input = new unsigned char[xSize * ySize]; CudaOutput = new unsigned char[xSize * ySize]; GoldOutput = new unsigned char[xSize * ySize]; int i, j; printf("xSize=%d ySize=%d \n", xSize, ySize); FILE* fp; //fp = fopen("barbara_gray.raw", "rb"); //fread(input, xSize, ySize, fp); for (int i = 0; i < ySize; i++) for (int j = 0; j < xSize; j++) input[i * xSize + j] = (i * j) % 255; transpose8WithC(input, GoldOutput, xSize, ySize); // Add vectors in parallel. hipError_t cudaStatus = transpose8WithCuda(input, CudaOutput, xSize, ySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "invert8WithCuda failed!"); return 1; } int error = verify(GoldOutput, CudaOutput, xSize, ySize); if (error != 0) printf("Verify Failed \n"); else printf("Verify Successful \n"); fp = fopen("COutput.raw", "wb"); fwrite(GoldOutput, xSize, ySize, fp); fclose(fp); fp = fopen("CudaOutput.raw", "wb"); fwrite(CudaOutput, xSize, ySize, fp); fclose(fp); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } delete[] CudaOutput; delete[] GoldOutput; delete[] input; return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t transpose8WithCuda(unsigned char* input, unsigned char* output, int xSize, int ySize) { unsigned char* dev_input = 0; unsigned char* dev_output = 0; // hipProfilerInitialize(); unsigned int xysize = xSize * ySize; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. 
hipDeviceProp_t prop; int count; dim3 blocks, threads; threads.x = 512; threads.y = 1; //512x512 : along X 512/8 = 64 thread blocks Alon gY 64 blocks blocks.x = (xSize + threads.x - 1) / (threads.x); //1 blocks.y = (ySize + threads.y - 1) / (threads.y); //512 printf("blocks.x = %d blocks.y=%d \n", blocks.x, blocks.y); printf("threads.x = %d threads.y=%d \n", threads.x, threads.y); hipGetDeviceCount(&count); printf("Count = %d\n", count); cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } hipEventRecord(start, 0); // Allocate GPU buffers for two input . cudaStatus = hipMalloc((void**)&dev_input, xysize * sizeof(char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_output, xysize * sizeof(char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_input, input, xysize, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } hipProfilerStart(); // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( kernelTranspose8), dim3(blocks), dim3(threads), 0, 0, dev_input, dev_output, xysize); hipProfilerStop(); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching invert8Kernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(output, dev_output, xysize * sizeof(char), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } hipEventRecord(stop, 0); hipEventSynchronize(stop); float cudaElapsedTime; hipEventElapsedTime(&cudaElapsedTime, start, stop); printf("Time for execution = %3.1f ms \n", cudaElapsedTime); hipEventDestroy(start); hipEventDestroy(stop); Error: hipFree(dev_input); hipFree(dev_output); return cudaStatus; }
108006da8dffe3b7d66680b0f32292afe127bad8.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda_profiler_api.h" #include <stdio.h> cudaError_t transpose8WithCuda(unsigned char* input, unsigned char* output, int xSize, int ySize); int verify(unsigned char* input, unsigned char* output, int xSize, int ySize); int verify(unsigned char* GoldInput, unsigned char* CudaInput, int xSize, int ySize) { for (int i = 0; i < xSize * ySize; i++) { if (GoldInput[i] != CudaInput[i]) { printf("GoldInput[%d] = %d CInput[%d]=%d \n", i, GoldInput[i], i, CudaInput[i]); return(1); } } return(0); } void transpose8WithC(unsigned char* in, unsigned char* out, int x_size, int y_size) { size_t x, y; for (y = 0; y < y_size; y += 1) { for (x = 0; x < x_size; x += 1) { size_t in_pos = y * x_size + x; size_t out_pos = x * y_size + y; *(out + out_pos) = *(in + in_pos); } } } __global__ void kernelTranspose8(unsigned char* input, unsigned char* output, int size) { int xWidth = blockDim.x * gridDim.x; int xLoc = (blockIdx.x * blockDim.x + threadIdx.x); int yLoc = blockIdx.y * blockDim.y + threadIdx.y; int index = xLoc + yLoc * xWidth; if (index < (size)) { output[yLoc * xWidth + xLoc] = input[index]; } } int main() { unsigned char* input, * CudaOutput, * GoldOutput; int xSize, ySize; xSize = 512; ySize = 512; input = new unsigned char[xSize * ySize]; CudaOutput = new unsigned char[xSize * ySize]; GoldOutput = new unsigned char[xSize * ySize]; int i, j; printf("xSize=%d ySize=%d \n", xSize, ySize); FILE* fp; //fp = fopen("barbara_gray.raw", "rb"); //fread(input, xSize, ySize, fp); for (int i = 0; i < ySize; i++) for (int j = 0; j < xSize; j++) input[i * xSize + j] = (i * j) % 255; transpose8WithC(input, GoldOutput, xSize, ySize); // Add vectors in parallel. cudaError_t cudaStatus = transpose8WithCuda(input, CudaOutput, xSize, ySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "invert8WithCuda failed!"); return 1; } int error = verify(GoldOutput, CudaOutput, xSize, ySize); if (error != 0) printf("Verify Failed \n"); else printf("Verify Successful \n"); fp = fopen("COutput.raw", "wb"); fwrite(GoldOutput, xSize, ySize, fp); fclose(fp); fp = fopen("CudaOutput.raw", "wb"); fwrite(CudaOutput, xSize, ySize, fp); fclose(fp); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } delete[] CudaOutput; delete[] GoldOutput; delete[] input; return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t transpose8WithCuda(unsigned char* input, unsigned char* output, int xSize, int ySize) { unsigned char* dev_input = 0; unsigned char* dev_output = 0; // cudaProfilerInitialize(); unsigned int xysize = xSize * ySize; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaDeviceProp prop; int count; dim3 blocks, threads; threads.x = 512; threads.y = 1; //512x512 : along X 512/8 = 64 thread blocks Alon gY 64 blocks blocks.x = (xSize + threads.x - 1) / (threads.x); //1 blocks.y = (ySize + threads.y - 1) / (threads.y); //512 printf("blocks.x = %d blocks.y=%d \n", blocks.x, blocks.y); printf("threads.x = %d threads.y=%d \n", threads.x, threads.y); cudaGetDeviceCount(&count); printf("Count = %d\n", count); cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); goto Error; } cudaEventRecord(start, 0); // Allocate GPU buffers for the input and output images. cudaStatus = cudaMalloc((void**)&dev_input, xysize * sizeof(char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_output, xysize * sizeof(char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_input, input, xysize, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaProfilerStart(); // Launch a kernel on the GPU with one thread for each element. kernelTranspose8<<<blocks, threads>>>(dev_input, dev_output, xysize); cudaProfilerStop(); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching kernelTranspose8!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(output, dev_output, xysize * sizeof(char), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float cudaElapsedTime; cudaEventElapsedTime(&cudaElapsedTime, start, stop); printf("Time for execution = %3.1f ms \n", cudaElapsedTime); cudaEventDestroy(start); cudaEventDestroy(stop); Error: cudaFree(dev_input); cudaFree(dev_output); return cudaStatus; }
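A side note on kernelTranspose8 above: it writes output[yLoc * xWidth + xLoc] = input[index] with index equal to yLoc * xWidth + xLoc, so it performs an element-wise copy rather than a transpose; the verify step still passes only because the generated test pattern (i * j) % 255 on a square 512x512 image is symmetric. A minimal sketch of an actual tiled transpose kernel (a hypothetical alternative, not code from this file), assuming a dim3(32, 32) block and a grid of dim3((xSize + 31) / 32, (ySize + 31) / 32):

__global__ void kernelTransposeTiled(const unsigned char* in, unsigned char* out, int xSize, int ySize)
{
    __shared__ unsigned char tile[32][33];           // padded column avoids shared-memory bank conflicts
    int x = blockIdx.x * 32 + threadIdx.x;           // column in the input image
    int y = blockIdx.y * 32 + threadIdx.y;           // row in the input image
    if (x < xSize && y < ySize)
        tile[threadIdx.y][threadIdx.x] = in[y * xSize + x];   // stage one 32x32 tile in shared memory
    __syncthreads();
    int tx = blockIdx.y * 32 + threadIdx.x;          // column in the output (output width is ySize)
    int ty = blockIdx.x * 32 + threadIdx.y;          // row in the output (output height is xSize)
    if (tx < ySize && ty < xSize)
        out[ty * ySize + tx] = tile[threadIdx.x][threadIdx.y];  // write the tile back with x/y swapped
}

Launched as kernelTransposeTiled<<<dim3((xSize + 31) / 32, (ySize + 31) / 32), dim3(32, 32)>>>(dev_input, dev_output, xSize, ySize), this would match the CPU reference transpose8WithC on arbitrary (non-symmetric) images as well.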
e1a9193dc5e7de09e63d54b1bb236de4f993b810.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ // Parts of this code sourced from SnopyDogy // https://gist.github.com/SnopyDogy/a9a22497a893ec86aa3e #if defined(WITH_GRAPHICS) #include <interopManager.hpp> #include <err_cuda.hpp> #include <cstdio> namespace cuda { void InteropManager::destroyResources() { int n = getActiveDeviceId(); for(iter_t iter = interop_maps[n].begin(); iter != interop_maps[n].end(); iter++) { CUDA_CHECK(hipGraphicsUnregisterResource(iter->second)); } } InteropManager::~InteropManager() { try { for(int i = 0; i < getDeviceCount(); i++) { setDevice(i); destroyResources(); } } catch (AfError &ex) { const char* perr = getenv("AF_PRINT_ERRORS"); if(perr && perr[0] != '0') { fprintf(stderr, "%s\n", ex.what()); } } } InteropManager& InteropManager::getInstance() { static InteropManager my_instance; return my_instance; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Image* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; if(interop_maps[device].find(key_value) == interop_maps[device].end()) { cudaGraphicsResource *cudaPBOResource; // Register PBO with CUDA CUDA_CHECK(hipGraphicsGLRegisterBuffer(&cudaPBOResource, key->pbo(), hipGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaPBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Plot* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(interop_maps[device].find(key_value) == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(hipGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), hipGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Histogram* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(interop_maps[device].find(key_value) == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(hipGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), hipGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } } #endif
e1a9193dc5e7de09e63d54b1bb236de4f993b810.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ // Parts of this code sourced from SnopyDogy // https://gist.github.com/SnopyDogy/a9a22497a893ec86aa3e #if defined(WITH_GRAPHICS) #include <interopManager.hpp> #include <err_cuda.hpp> #include <cstdio> namespace cuda { void InteropManager::destroyResources() { int n = getActiveDeviceId(); for(iter_t iter = interop_maps[n].begin(); iter != interop_maps[n].end(); iter++) { CUDA_CHECK(cudaGraphicsUnregisterResource(iter->second)); } } InteropManager::~InteropManager() { try { for(int i = 0; i < getDeviceCount(); i++) { setDevice(i); destroyResources(); } } catch (AfError &ex) { const char* perr = getenv("AF_PRINT_ERRORS"); if(perr && perr[0] != '0') { fprintf(stderr, "%s\n", ex.what()); } } } InteropManager& InteropManager::getInstance() { static InteropManager my_instance; return my_instance; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Image* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; if(interop_maps[device].find(key_value) == interop_maps[device].end()) { cudaGraphicsResource *cudaPBOResource; // Register PBO with CUDA CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&cudaPBOResource, key->pbo(), cudaGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaPBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Plot* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(interop_maps[device].find(key_value) == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), cudaGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Histogram* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(interop_maps[device].find(key_value) == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), cudaGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } } #endif
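For context around the two interop files above: registering a PBO/VBO is only half of the interop flow; writing into it from CUDA also requires mapping the resource, querying its device pointer, and unmapping it again before OpenGL draws. A minimal sketch of that pattern, reusing the CUDA_CHECK macro from err_cuda.hpp and a caller-supplied fill step (both assumptions for illustration, not ArrayFire's actual code):

#include <cuda_runtime.h>
#include <cuda_gl_interop.h>

template <typename FillFn>
void writeToMappedResource(cudaGraphicsResource* resource, FillFn fill)
{
    void*  devPtr   = nullptr;
    size_t numBytes = 0;
    CUDA_CHECK(cudaGraphicsMapResources(1, &resource));                            // make the GL buffer visible to CUDA
    CUDA_CHECK(cudaGraphicsResourceGetMappedPointer(&devPtr, &numBytes, resource)); // fetch the device pointer and size
    fill(devPtr, numBytes);                                                         // e.g. launch a kernel or cudaMemcpy into the buffer
    CUDA_CHECK(cudaGraphicsUnmapResources(1, &resource));                           // hand the buffer back to OpenGL
}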
1f42a300dd05395b31afda0f84023e52913cf824.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "bitmap_image.hpp" #include <iostream> #include <vector> #include <thread> #include <chrono> using namespace std; void callThreadsAndMeasureTimeOnCPU(bitmap_image& image, bitmap_image& outputImage, double timeTable[]); void embossFiltering(int whichOneThread, int threadsAmount, bitmap_image& image, bitmap_image& outputImage); void calculateNewPixelColorValues(bitmap_image& image, unsigned int x, unsigned int y, bitmap_image& outputImage); void prepareDataForGPUAndMeasureTime(unsigned char* imageRGBValues, unsigned char* outputImageRGBValues, int width, int height, int size, int row, float& timeGPU); __global__ void embossFilteringOnGPU(unsigned char* imageRGBValues, unsigned char* outputImageRGBValues, int height, int width, int size, int row); __device__ void calculateNewPixelColorValuesOnGPU(unsigned char* imageRGBValues, int position, int width, int height, unsigned char* outputImageRGBValues); int main() { bitmap_image image("11846x9945.bmp"); bitmap_image outputImageCPU(image.width(), image.height()); bitmap_image outputImageGPU(image.width(), image.height()); double timeTable[4]; float timeGPU = 0.0; callThreadsAndMeasureTimeOnCPU(image, outputImageCPU, timeTable); outputImageCPU.save_image("outputImageCPU.bmp"); prepareDataForGPUAndMeasureTime(image.data(), outputImageGPU.data(), image.width(), image.height(), image.width() * image.height() * image.bytes_per_pixel() * sizeof(unsigned char), image.width() * image.bytes_per_pixel(), timeGPU); outputImageGPU.save_image("outputImageGPU.bmp"); cout << "Czas wykonywania: " << endl; cout << "1 watek - " << timeTable[0] << "s" << endl; cout << "4 watki - " << timeTable[1] << "s" << endl; cout << "8 watkow - " << timeTable[2] << "s" << endl; cout << "12 watkow - " << timeTable[3] << "s" << endl; cout << "GPU - " << timeGPU << "s"; return 0; } void callThreadsAndMeasureTimeOnCPU(bitmap_image& image, bitmap_image& outputImage, double timeTable[]) { int counter = 0; vector<thread> threads; for (int i = 0; i <= 12; i += 4) { if (i == 0) i++; if (!threads.empty()) threads.clear(); auto begin = chrono::high_resolution_clock::now(); for (int j = 0; j < i; j++) threads.push_back(thread(&embossFiltering, j, i, ref(image), ref(outputImage))); for (auto& t : threads) t.join(); auto end = chrono::high_resolution_clock::now(); chrono::duration<double> diff = end - begin; timeTable[counter] = chrono::duration<double>(diff).count(); counter++; if (i == 1) i--; } } void embossFiltering(int whichOneThread, int threadsAmount, bitmap_image& image, bitmap_image& outputImage) { unsigned int height = (unsigned int)(image.height() / threadsAmount); unsigned int width = image.width(); for (unsigned int y = whichOneThread * height; y < whichOneThread * height + height; y++) { for (unsigned int x = 0; x < width; x++) { rgb_t color; image.get_pixel(x, y, color); calculateNewPixelColorValues(image, x, y, outputImage); } } } //emboss kernel = -2 -1 0 // -1 1 1 // 0 1 2 void calculateNewPixelColorValues(bitmap_image& image, unsigned int x, unsigned int y, bitmap_image& outputImage) { int xPosition[] = { -1, 0, 1 }; int yPosition[] = { -1, 0, 1 }; int mask[][3] = { {-2, -1, 0}, {-1, 1, 1}, {0, 1, 2} }; rgb_t color; unsigned int xTemp, yTemp; int rSum = 0, bSum = 0, gSum = 0; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { xTemp = x + xPosition[i]; yTemp = y + yPosition[j]; if (xTemp >= 0 && xTemp <= image.width() - 1 && yTemp 
>= 0 && yTemp <= image.height() - 1) { image.get_pixel(xTemp, yTemp, color); rSum += color.red * mask[i][j]; gSum += color.green * mask[i][j]; bSum += color.blue * mask[i][j]; } } } if (rSum > 255) rSum = 255; if (rSum < 0) rSum = 0; if (gSum > 255) gSum = 255; if (gSum < 0) gSum = 0; if (bSum > 255) bSum = 255; if (bSum < 0) bSum = 0; outputImage.set_pixel(x, y, rSum, gSum, bSum); } void prepareDataForGPUAndMeasureTime(unsigned char* imageRGBValues, unsigned char* outputImageRGBValues, int width, int height, int size, int row, float& timeGPU) { unsigned char* imageRGBValuesGPU, * outputImageRGBValuesGPU; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipMalloc((void**)&imageRGBValuesGPU, size); hipMalloc((void**)&outputImageRGBValuesGPU, size); hipMemcpy(imageRGBValuesGPU, imageRGBValues, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( embossFilteringOnGPU), dim3((width * height + 255) / 256), dim3(256), 0, 0, imageRGBValuesGPU, outputImageRGBValuesGPU, width, height, size, row); hipMemcpy(outputImageRGBValues, outputImageRGBValuesGPU, size, hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&timeGPU, start, stop); timeGPU /= 1000.0; hipEventDestroy(start); hipEventDestroy(stop); } __global__ void embossFilteringOnGPU(unsigned char* imageRGBValues, unsigned char* outputImageRGBValues, int width, int height, int size, int row) { int i = blockDim.x * blockIdx.x + threadIdx.x; i *= 3; if (i > size) return; int y = i / row; int x = (i - y * row) / 3; if (y < 0 || y > height) return; if (x < 0 || x > row) return; int pixelPosistion = y * row + 3 * x; calculateNewPixelColorValuesOnGPU(imageRGBValues, pixelPosistion, width, height, outputImageRGBValues); } __device__ void calculateNewPixelColorValuesOnGPU(unsigned char* imageRGBValues, int position, int width, int height, unsigned char* outputImageRGBValues) { int pos[] = { 3 * -width + 3 * -1, 3 * -width, 3 * -width + 3 * 1, 3 * -1, 0, 3 * 1, 3 * width + 3 * -1, 3 * width, 3 * width + 3 * 1 }; int mask[] = { -2, -1, 0, -1, 1, 1, 0, 1, 2 }; int rSum = 0, bSum = 0, gSum = 0; for (int i = 0; i < 9; i++) { int positionTemp = position + pos[i]; int x = positionTemp % (3 * width); int xMiddle = position % (3 * width); if (positionTemp >= 0 && positionTemp < width * 3 * (height - 1) + 3 * (width - 1) && abs(x - xMiddle) < 3 * 2) { rSum += imageRGBValues[positionTemp + 2] * mask[i]; gSum += imageRGBValues[positionTemp + 1] * mask[i]; bSum += imageRGBValues[positionTemp] * mask[i]; } } if (rSum > 255) rSum = 255; if (rSum < 0) rSum = 0; if (gSum > 255) gSum = 255; if (gSum < 0) gSum = 0; if (bSum > 255) bSum = 255; if (bSum < 0) bSum = 0; outputImageRGBValues[position + 2] = rSum; outputImageRGBValues[position + 1] = gSum; outputImageRGBValues[position] = bSum; }
1f42a300dd05395b31afda0f84023e52913cf824.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "bitmap_image.hpp" #include <iostream> #include <vector> #include <thread> #include <chrono> using namespace std; void callThreadsAndMeasureTimeOnCPU(bitmap_image& image, bitmap_image& outputImage, double timeTable[]); void embossFiltering(int whichOneThread, int threadsAmount, bitmap_image& image, bitmap_image& outputImage); void calculateNewPixelColorValues(bitmap_image& image, unsigned int x, unsigned int y, bitmap_image& outputImage); void prepareDataForGPUAndMeasureTime(unsigned char* imageRGBValues, unsigned char* outputImageRGBValues, int width, int height, int size, int row, float& timeGPU); __global__ void embossFilteringOnGPU(unsigned char* imageRGBValues, unsigned char* outputImageRGBValues, int height, int width, int size, int row); __device__ void calculateNewPixelColorValuesOnGPU(unsigned char* imageRGBValues, int position, int width, int height, unsigned char* outputImageRGBValues); int main() { bitmap_image image("11846x9945.bmp"); bitmap_image outputImageCPU(image.width(), image.height()); bitmap_image outputImageGPU(image.width(), image.height()); double timeTable[4]; float timeGPU = 0.0; callThreadsAndMeasureTimeOnCPU(image, outputImageCPU, timeTable); outputImageCPU.save_image("outputImageCPU.bmp"); prepareDataForGPUAndMeasureTime(image.data(), outputImageGPU.data(), image.width(), image.height(), image.width() * image.height() * image.bytes_per_pixel() * sizeof(unsigned char), image.width() * image.bytes_per_pixel(), timeGPU); outputImageGPU.save_image("outputImageGPU.bmp"); cout << "Czas wykonywania: " << endl; cout << "1 watek - " << timeTable[0] << "s" << endl; cout << "4 watki - " << timeTable[1] << "s" << endl; cout << "8 watkow - " << timeTable[2] << "s" << endl; cout << "12 watkow - " << timeTable[3] << "s" << endl; cout << "GPU - " << timeGPU << "s"; return 0; } void callThreadsAndMeasureTimeOnCPU(bitmap_image& image, bitmap_image& outputImage, double timeTable[]) { int counter = 0; vector<thread> threads; for (int i = 0; i <= 12; i += 4) { if (i == 0) i++; if (!threads.empty()) threads.clear(); auto begin = chrono::high_resolution_clock::now(); for (int j = 0; j < i; j++) threads.push_back(thread(&embossFiltering, j, i, ref(image), ref(outputImage))); for (auto& t : threads) t.join(); auto end = chrono::high_resolution_clock::now(); chrono::duration<double> diff = end - begin; timeTable[counter] = chrono::duration<double>(diff).count(); counter++; if (i == 1) i--; } } void embossFiltering(int whichOneThread, int threadsAmount, bitmap_image& image, bitmap_image& outputImage) { unsigned int height = (unsigned int)(image.height() / threadsAmount); unsigned int width = image.width(); for (unsigned int y = whichOneThread * height; y < whichOneThread * height + height; y++) { for (unsigned int x = 0; x < width; x++) { rgb_t color; image.get_pixel(x, y, color); calculateNewPixelColorValues(image, x, y, outputImage); } } } //emboss kernel = -2 -1 0 // -1 1 1 // 0 1 2 void calculateNewPixelColorValues(bitmap_image& image, unsigned int x, unsigned int y, bitmap_image& outputImage) { int xPosition[] = { -1, 0, 1 }; int yPosition[] = { -1, 0, 1 }; int mask[][3] = { {-2, -1, 0}, {-1, 1, 1}, {0, 1, 2} }; rgb_t color; unsigned int xTemp, yTemp; int rSum = 0, bSum = 0, gSum = 0; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { xTemp = x + xPosition[i]; yTemp = y + yPosition[j]; if (xTemp >= 0 && xTemp <= image.width() - 1 && yTemp >= 0 && yTemp <= image.height() - 1) { image.get_pixel(xTemp, 
yTemp, color); rSum += color.red * mask[i][j]; gSum += color.green * mask[i][j]; bSum += color.blue * mask[i][j]; } } } if (rSum > 255) rSum = 255; if (rSum < 0) rSum = 0; if (gSum > 255) gSum = 255; if (gSum < 0) gSum = 0; if (bSum > 255) bSum = 255; if (bSum < 0) bSum = 0; outputImage.set_pixel(x, y, rSum, gSum, bSum); } void prepareDataForGPUAndMeasureTime(unsigned char* imageRGBValues, unsigned char* outputImageRGBValues, int width, int height, int size, int row, float& timeGPU) { unsigned char* imageRGBValuesGPU, * outputImageRGBValuesGPU; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaMalloc((void**)&imageRGBValuesGPU, size); cudaMalloc((void**)&outputImageRGBValuesGPU, size); cudaMemcpy(imageRGBValuesGPU, imageRGBValues, size, cudaMemcpyHostToDevice); embossFilteringOnGPU<<<(width * height + 255) / 256, 256>>>(imageRGBValuesGPU, outputImageRGBValuesGPU, width, height, size, row); cudaMemcpy(outputImageRGBValues, outputImageRGBValuesGPU, size, cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&timeGPU, start, stop); timeGPU /= 1000.0; cudaEventDestroy(start); cudaEventDestroy(stop); } __global__ void embossFilteringOnGPU(unsigned char* imageRGBValues, unsigned char* outputImageRGBValues, int width, int height, int size, int row) { int i = blockDim.x * blockIdx.x + threadIdx.x; i *= 3; if (i > size) return; int y = i / row; int x = (i - y * row) / 3; if (y < 0 || y > height) return; if (x < 0 || x > row) return; int pixelPosistion = y * row + 3 * x; calculateNewPixelColorValuesOnGPU(imageRGBValues, pixelPosistion, width, height, outputImageRGBValues); } __device__ void calculateNewPixelColorValuesOnGPU(unsigned char* imageRGBValues, int position, int width, int height, unsigned char* outputImageRGBValues) { int pos[] = { 3 * -width + 3 * -1, 3 * -width, 3 * -width + 3 * 1, 3 * -1, 0, 3 * 1, 3 * width + 3 * -1, 3 * width, 3 * width + 3 * 1 }; int mask[] = { -2, -1, 0, -1, 1, 1, 0, 1, 2 }; int rSum = 0, bSum = 0, gSum = 0; for (int i = 0; i < 9; i++) { int positionTemp = position + pos[i]; int x = positionTemp % (3 * width); int xMiddle = position % (3 * width); if (positionTemp >= 0 && positionTemp < width * 3 * (height - 1) + 3 * (width - 1) && abs(x - xMiddle) < 3 * 2) { rSum += imageRGBValues[positionTemp + 2] * mask[i]; gSum += imageRGBValues[positionTemp + 1] * mask[i]; bSum += imageRGBValues[positionTemp] * mask[i]; } } if (rSum > 255) rSum = 255; if (rSum < 0) rSum = 0; if (gSum > 255) gSum = 255; if (gSum < 0) gSum = 0; if (bSum > 255) bSum = 255; if (bSum < 0) bSum = 0; outputImageRGBValues[position + 2] = rSum; outputImageRGBValues[position + 1] = gSum; outputImageRGBValues[position] = bSum; }
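One gap in prepareDataForGPUAndMeasureTime above (in both the .hip and .cu variants): the kernel launch result is never checked and the two device buffers are never freed. A few lines one might append before the function returns, sketched under the assumption that the local names imageRGBValuesGPU and outputImageRGBValuesGPU are still in scope:

cudaError_t err = cudaGetLastError();                 // catches invalid launch configurations
if (err != cudaSuccess)
    cerr << "emboss kernel launch failed: " << cudaGetErrorString(err) << endl;
err = cudaDeviceSynchronize();                        // surfaces asynchronous execution errors
if (err != cudaSuccess)
    cerr << "emboss kernel execution failed: " << cudaGetErrorString(err) << endl;
cudaFree(imageRGBValuesGPU);                          // release the device copy of the input image
cudaFree(outputImageRGBValuesGPU);                    // release the device copy of the output image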
687ab810d79850bfc7e4d259cd3bc92a7118d593.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void sgemm_nn_128x128( float* param_C, const float* param_A, const float* param_B, float param_alpha, float param_beta, int param_flags, int param_lda, int param_ldb, int param_ldc, int param_m, int param_n, int param_k, int param_ldaz, int param_ldbz, int param_ldcz, int param_batch_loops ) { __shared__ float share[128*8*2 + 128*8*2 + 4]; *param_C = share[0]; }
687ab810d79850bfc7e4d259cd3bc92a7118d593.cu
extern "C" __global__ void sgemm_nn_128x128( float* param_C, const float* param_A, const float* param_B, float param_alpha, float param_beta, int param_flags, int param_lda, int param_ldb, int param_ldc, int param_m, int param_n, int param_k, int param_ldaz, int param_ldbz, int param_ldcz, int param_batch_loops ) { __shared__ float share[128*8*2 + 128*8*2 + 4]; *param_C = share[0]; }
d7f8c340fb5490cc69e9eeefc7512729b5c1ab18.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <stdlib.h> #include <algorithm> typedef struct { int width, height; float* elements; }Matrix; #define BLOCK_SIZE 16 __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width= A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); hipError_t err = hipMalloc(&d_A.elements, size); printf("CUDA Malloc A: %s\n", hipGetErrorString(err)); err = hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice); printf("Copy A to device: %s\n", hipGetErrorString(err)); Matrix d_B; d_B.width= B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); err = hipMalloc(&d_B.elements, size); printf("CUDA Malloc B: %s\n", hipGetErrorString(err)); err = hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice); printf("Copy B to device: %s\n", hipGetErrorString(err)); Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); err = hipMalloc(&d_C.elements, size); printf("CUDA Malloc C: %s\n", hipGetErrorString(err)); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x, (A.height + dimBlock.y - 1) / dimBlock.y); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C); hipEventRecord(stop); err = hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Run kernel: %s\n", hipGetErrorString(err)); printf("Timing: %.2f ms\n", milliseconds); // Read C from device memory err = hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost); printf("Copy C off of device: %s\n",hipGetErrorString(err)); // Free device memory hipFree(d_A.elements); hipFree(d_B.elements); // hipFree(d_C.elements); } __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0.0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row >= A.height -1 || col >= B.width - 1) return ; for(int e = 0; e < A.width; ++e) { Cvalue = A.elements[row * A.width + e] * B.elements[e * B.width + col]; } C.elements[row * C.width + col] = Cvalue; } int main(int argc, char **argv) { Matrix A, B, C; int a1, a2, b1, b2; // Read some values from the commandline a1 = atoi(argv[1]); /* Height of A */ a2 = atoi(argv[2]); /* Width of A */ b1 = a2; /* Height of B */ b2 = atoi(argv[3]); /* Width of B */ A.height = a1; A.width = a2; A.elements = (float*)malloc(A.width * A.height * sizeof(float)); B.height = b1; B.width = b2; B.elements = (float*)malloc(B.width * B.height * sizeof(float)); C.height = A.height; C.width = B.width; C.elements = (float*)malloc(C.width * C.height * sizeof(float)); for(int i = 0; i < A.height; i++) for(int j = 0; j < A.width; j++) A.elements[i*A.width + j] = (float)(rand() % 3); for(int i = 0; i < B.height; i++) for(int j = 0; j < B.width; j++) B.elements[i*B.width + j] = (float)(rand() % 2); MatMul(A, B, C); // Print up to a 10x10 portion of the three matrices for(int i = 0; i < min(10, A.height); i++){ for(int j = 0; j < min(10, A.width); j++) printf("%f ", A.elements[i*A.width + j]); printf("\n"); } 
printf("\n"); for(int i = 0; i < min(10, B.height); i++){ for(int j = 0; j < min(10, B.width); j++) printf("%f ", B.elements[i*B.width + j]); printf("\n"); } printf("\n"); for(int i = 0; i < min(10, C.height); i++){ for(int j = 0; j < min(10, C.width); j++) printf("%f ", C.elements[i*C.width + j]); printf("\n"); } printf("\n"); return 0; }
d7f8c340fb5490cc69e9eeefc7512729b5c1ab18.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <stdlib.h> #include <algorithm> typedef struct { int width, height; float* elements; }Matrix; #define BLOCK_SIZE 16 __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width= A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); cudaError_t err = cudaMalloc(&d_A.elements, size); printf("CUDA Malloc A: %s\n", cudaGetErrorString(err)); err = cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); printf("Copy A to device: %s\n", cudaGetErrorString(err)); Matrix d_B; d_B.width= B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); err = cudaMalloc(&d_B.elements, size); printf("CUDA Malloc B: %s\n", cudaGetErrorString(err)); err = cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); printf("Copy B to device: %s\n", cudaGetErrorString(err)); Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); err = cudaMalloc(&d_C.elements, size); printf("CUDA Malloc C: %s\n", cudaGetErrorString(err)); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x, (A.height + dimBlock.y - 1) / dimBlock.y); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); cudaEventRecord(stop); err = cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Run kernel: %s\n", cudaGetErrorString(err)); printf("Timing: %.2f ms\n", milliseconds); // Read C from device memory err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); printf("Copy C off of device: %s\n",cudaGetErrorString(err)); // Free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); // cudaFree(d_C.elements); } __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0.0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row >= A.height -1 || col >= B.width - 1) return ; for(int e = 0; e < A.width; ++e) { Cvalue = A.elements[row * A.width + e] * B.elements[e * B.width + col]; } C.elements[row * C.width + col] = Cvalue; } int main(int argc, char **argv) { Matrix A, B, C; int a1, a2, b1, b2; // Read some values from the commandline a1 = atoi(argv[1]); /* Height of A */ a2 = atoi(argv[2]); /* Width of A */ b1 = a2; /* Height of B */ b2 = atoi(argv[3]); /* Width of B */ A.height = a1; A.width = a2; A.elements = (float*)malloc(A.width * A.height * sizeof(float)); B.height = b1; B.width = b2; B.elements = (float*)malloc(B.width * B.height * sizeof(float)); C.height = A.height; C.width = B.width; C.elements = (float*)malloc(C.width * C.height * sizeof(float)); for(int i = 0; i < A.height; i++) for(int j = 0; j < A.width; j++) A.elements[i*A.width + j] = (float)(rand() % 3); for(int i = 0; i < B.height; i++) for(int j = 0; j < B.width; j++) B.elements[i*B.width + j] = (float)(rand() % 2); MatMul(A, B, C); // Print up to a 10x10 portion of the three matrices for(int i = 0; i < min(10, A.height); i++){ for(int j = 0; j < min(10, A.width); j++) printf("%f ", A.elements[i*A.width + j]); printf("\n"); } printf("\n"); for(int i = 0; i < min(10, B.height); i++){ for(int j = 0; 
j < min(10, B.width); j++) printf("%f ", B.elements[i*B.width + j]); printf("\n"); } printf("\n"); for(int i = 0; i < min(10, C.height); i++){ for(int j = 0; j < min(10, C.width); j++) printf("%f ", C.elements[i*C.width + j]); printf("\n"); } printf("\n"); return 0; }
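A note on MatMulKernel in the pair above: the comment promises accumulation into Cvalue, but the loop body assigns (so each output element holds only the last product), and the early-return test row >= A.height - 1 || col >= B.width - 1 also skips the last row and column. A corrected sketch of the kernel, reflecting what the comment describes rather than what the file ships:

__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    float Cvalue = 0.0f;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= A.height || col >= B.width) return;     // keep the last row and column
    for (int e = 0; e < A.width; ++e)
        Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];   // accumulate the dot product
    C.elements[row * C.width + col] = Cvalue;
}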
4886f7151f95471305ee11357c344e3eedc4524c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // collisions.cu // CUDADSMC // // Created by Christopher Watkins on 19/08/2014. // Copyright (c) 2014 WIJ. All rights reserved. // #include <stdio.h> #include <hiprand/hiprand_kernel.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include "declareInitialSystemParameters.cuh" #include "deviceSystemParameters.cuh" #include "collisions.cuh" #include "cudaHelpers.cuh" #pragma mark - Indexing double indexAtoms( double3 *d_pos, int *d_cellID, int *d_atomID, int3 cellsPerDimension, int numberOfAtoms ) { double *d_radius; cudaCalloc( (void **)&d_radius, numberOfAtoms, sizeof(double) ); h_calculateRadius(d_pos, d_radius, d_atomID, numberOfAtoms ); double medianR = findMedian( d_radius, numberOfAtoms ); printf("The median radius is %f\n", medianR ); h_findAtomIndex(d_pos, d_cellID, d_atomID, medianR, numberOfAtoms, cellsPerDimension ); hipFree( d_radius ); return medianR; } void h_calculateRadius( double3 *d_pos, double *d_radius, int *d_atomID, int numberOfAtoms ) { int blockSize; int gridSize; #ifdef CUDA65 int minGridSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (const void *) calculateRadius, 0, numberOfAtoms ); gridSize = (numberOfAtoms + blockSize - 1) / blockSize; #else int device; hipGetDevice ( &device ); int numSMs; hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, device); gridSize = 256*numSMs; blockSize = NUM_THREADS; #endif hipLaunchKernelGGL(( calculateRadius), dim3(gridSize),dim3(blockSize), 0, 0, d_pos, d_radius, d_atomID, numberOfAtoms ); return; } __global__ void calculateRadius( double3 *pos, double *radius, int *atomID, int numberOfAtoms ) { for (int atom = blockIdx.x * blockDim.x + threadIdx.x; atom < numberOfAtoms; atom += blockDim.x * gridDim.x) { radius[atom] = length( pos[atomID[atom]] ); } return; } double findMedian( double *v, int N ) { thrust::device_ptr<double> ptr = thrust::device_pointer_cast( v ); thrust::sort( ptr, ptr + N ); double *d_median; cudaCalloc( (void **)&d_median, 1, sizeof(double) ); hipLaunchKernelGGL(( getMedian), dim3(1),dim3(1), 0, 0, v, d_median, N ); double h_median; hipMemcpy( (void *)&h_median, d_median, 1*sizeof(double), hipMemcpyDeviceToHost ); hipFree( d_median ); return h_median; } __global__ void getMedian( double *v, double *median, int N) { if (N % 2 == 0) { median[0] = 0.5*(v[N/2-1] + v[N/2]); } else { median[0] = v[(N-1)/2]; } return; } void h_findAtomIndex( double3 *d_pos, int *d_cellID, int *d_atomID, double medianR, int numberOfAtoms, int3 cellsPerDimension ) { int blockSize; int gridSize; #ifdef CUDA65 int minGridSize; hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, (const void *) calculateRadius, 0, numberOfAtoms ); gridSize = (numberOfAtoms + blockSize - 1) / blockSize; #else int device; hipGetDevice ( &device ); int numSMs; hipDeviceGetAttribute( &numSMs, hipDeviceAttributeMultiprocessorCount, device); gridSize = 256*numSMs; blockSize = NUM_THREADS; #endif hipLaunchKernelGGL(( findAtomIndex), dim3(gridSize),dim3(blockSize), 0, 0, d_pos, d_cellID, d_atomID, medianR, numberOfAtoms, cellsPerDimension ); return; } __global__ void findAtomIndex( double3 *pos, int *cellID, int *atomID, double medianR, int numberOfAtoms, int3 cellsPerDimension ) { for (int atom = blockIdx.x * blockDim.x + threadIdx.x; atom < numberOfAtoms; atom += blockDim.x * gridDim.x) { double3 l_pos = pos[atomID[atom]]; double3 gridMin = getGridMin( medianR ); double3 cellLength = getCellLength( medianR, 
cellsPerDimension ); int3 cellIndices = getCellIndices(l_pos, gridMin, cellLength ); cellID[atomID[atom]] = getCellID( cellIndices, cellsPerDimension ); } return; } __device__ double3 getCellLength( double medianR, int3 cellsPerDimension ) { double3 cellLength = 2.0 * d_maxGridWidth / cellsPerDimension; return cellLength; } __device__ int3 getCellIndices( double3 pos, double3 gridMin, double3 cellLength ) { int3 index = { 0, 0, 0 }; index.x = __double2int_rd ( (pos.x - gridMin.x) / cellLength.x ); index.y = __double2int_rd ( (pos.y - gridMin.y) / cellLength.y ); index.z = __double2int_rd ( (pos.z - gridMin.z) / cellLength.z ); // printf("pos.x = %f, gridMin.x = %f, cellLength.x = %f, index.x = %i\n", pos.x, gridMin.x, cellLength.x, index.x ); return index; } __device__ int getCellID( int3 index, int3 cellsPerDimension ) { int cellID = 0; if (index.x > -1 && index.x < cellsPerDimension.x && index.y > -1 && index.y < cellsPerDimension.y && index.z > -1 && index.z < cellsPerDimension.z) { cellID = index.z*cellsPerDimension.x*cellsPerDimension.y + index.y*cellsPerDimension.x + index.x; } else { cellID = cellsPerDimension.x * cellsPerDimension.y * cellsPerDimension.z; } return cellID; } __device__ double3 getGridMin( double medianR ) { double3 gridMin = -1.0 * d_maxGridWidth; return gridMin; } __global__ void cellStartandEndKernel( int *cellID, int *atomID, int2 *cellStartEnd, int initialNumberOfAtoms, int numberOfAtoms ) { for (int atom = blockIdx.x * blockDim.x + threadIdx.x; atom < numberOfAtoms; atom += blockDim.x * gridDim.x) { // Find the beginning of the cell if (atom == 0) { cellStartEnd[cellID[atom]].x = 0; } else if (cellID[atom] != cellID[atom-1]) { cellStartEnd[cellID[atom]].x = atom; } // Find the end of the cell if (atom == numberOfAtoms - 1) { cellStartEnd[cellID[atom]].y = numberOfAtoms-1; } else if (cellID[atom] != cellID[atom+1]) { cellStartEnd[cellID[atom]].y = atom; } } return; } __global__ void findNumberOfAtomsInCell( int2 *cellStartEnd, int *numberOfAtomsInCell, int numberOfCells ) { for ( int cell = blockIdx.x * blockDim.x + threadIdx.x; cell < numberOfCells+1; cell += blockDim.x * gridDim.x) { if (cellStartEnd[cell].x == -1) { numberOfAtomsInCell[cell] = 0; } else { numberOfAtomsInCell[cell] = cellStartEnd[cell].y - cellStartEnd[cell].x + 1; } } return; } #pragma mark - Sorting void sortArrays(int *d_cellID, int *d_atomID , int numberOfAtoms ) { thrust::device_ptr<int> th_cellID = thrust::device_pointer_cast( d_cellID ); thrust::device_ptr<int> th_atomID = thrust::device_pointer_cast( d_atomID ); thrust::sort_by_key(th_cellID, th_cellID + numberOfAtoms, th_atomID ); return; } #pragma mark - Collisions __global__ void collide( double3 *vel, double *sigvrmax, int *prefixScanNumberOfAtomsInCell, int *collisionCount, double medianR, double alpha, int3 cellsPerDimension, int numberOfCells, hiprandState_t *rngState, int *atomID ) { for ( int cell = blockIdx.x * blockDim.x + threadIdx.x; cell < numberOfCells; cell += blockDim.x * gridDim.x) { int numberOfAtomsInCell = prefixScanNumberOfAtomsInCell[cell+1] - prefixScanNumberOfAtomsInCell[cell]; if (numberOfAtomsInCell > 1) { double3 cellLength = getCellLength( medianR, cellsPerDimension ); double crossSection = 8.*d_pi*d_a*d_a; d_dt = 1.0e-7; double tau = 128.*sqrt( d_mRb*pow( d_pi, 3 )*pow( d_kB*d_Temp,5 ) ) / ( crossSection*d_N*pow( d_gs*d_muB*2.16, 3 ) ); // d_loopsPerCollision = ceil( 0.1*tau / d_dt ); d_loopsPerCollision = 10; double cellVolume = cellLength.x * cellLength.y * cellLength.z; double Mc = 0.5 * 
(numberOfAtomsInCell - 1) * numberOfAtomsInCell; double lambda = ceil( Mc * alpha * d_loopsPerCollision * d_dt * sigvrmax[cell] / cellVolume ) / Mc; int Ncol = Mc*lambda; if (numberOfAtomsInCell ==1) { Ncol = 1; } double3 velcm, newVel, pointOnSphere; double magVrel; double ProbCol; hiprandState_t l_rngState = rngState[cell]; collisionCount[cell] = 0; for ( int l_collision = 0; l_collision < Ncol; l_collision++ ) { int2 collidingAtoms = {0,0}; // collidingAtoms = chooseCollidingAtoms( numberOfAtomsInCell, // prefixScanNumberOfAtomsInCell, // cellsPerDimension, // &l_rngState, // cell ); if (numberOfAtomsInCell == 1) { collidingAtoms.x = prefixScanNumberOfAtomsInCell[cell] + 0; int3 cellIndices = extractCellIndices( cell, cellsPerDimension ); int3 newCellIndices = make_int3( 0, 0, 0 ); int newCell = 0; int newCellPopulation = 0; int numberOfTries = 0; while (newCellPopulation == 0 && numberOfTries < 15) { newCellIndices.x = cellIndices.x + 2 * ( round(hiprand_uniform_double( &l_rngState )) - 0.5 ); newCellIndices.y = cellIndices.y + 2 * ( round(hiprand_uniform_double( &l_rngState )) - 0.5 ); newCellIndices.z = cellIndices.z + 2 * ( round(hiprand_uniform_double( &l_rngState )) - 0.5 ); if (newCellIndices.x < 0) { newCellIndices.x = 0; } if (newCellIndices.y < 0) { newCellIndices.y = 0; } if (newCellIndices.z < 0) { newCellIndices.z = 0; } if (newCellIndices.x > cellsPerDimension.x-1) { newCellIndices.x = cellsPerDimension.x-1; } if (newCellIndices.y > cellsPerDimension.y-1) { newCellIndices.y = cellsPerDimension.y-1; } if (newCellIndices.z > cellsPerDimension.z-1) { newCellIndices.z = cellsPerDimension.z-1; } newCell = getCellID( newCellIndices, cellsPerDimension ); newCellPopulation = prefixScanNumberOfAtomsInCell[newCell+1] - prefixScanNumberOfAtomsInCell[newCell]; numberOfTries++; } // Randomly choose particles in this cell to collide. collidingAtoms.y = prefixScanNumberOfAtomsInCell[newCell] + (int)floor( hiprand_uniform_double ( &l_rngState ) * (newCellPopulation-1) ); } else if (numberOfAtomsInCell == 2) { collidingAtoms.x = atomID[prefixScanNumberOfAtomsInCell[cell] + 0]; collidingAtoms.y = atomID[prefixScanNumberOfAtomsInCell[cell] + 1]; } else { // Randomly choose particles in this cell to collide. while (collidingAtoms.x == collidingAtoms.y) { collidingAtoms = double2Toint2_rd( make_double2( hiprand_uniform_double ( &l_rngState ), hiprand_uniform_double ( &l_rngState ) ) * (numberOfAtomsInCell-1) ); } // collidingAtoms = prefixScanNumberOfAtomsInCell[cell] + collidingAtoms; collidingAtoms.x = atomID[prefixScanNumberOfAtomsInCell[cell] + collidingAtoms.x]; collidingAtoms.y = atomID[prefixScanNumberOfAtomsInCell[cell] + collidingAtoms.y]; } magVrel = calculateRelativeVelocity( vel, collidingAtoms ); // Check if this is the more probable than current most probable. if (magVrel*crossSection > sigvrmax[cell]) { sigvrmax[cell] = magVrel * crossSection; } ProbCol = alpha * d_loopsPerCollision * d_dt / cellVolume * magVrel * crossSection / lambda; // Collide with the collision probability. if ( ProbCol > hiprand_uniform_double ( &l_rngState ) ) { // Find centre of mass velocities. velcm = 0.5*(vel[collidingAtoms.x] + vel[collidingAtoms.y]); // Generate a random velocity on the unit sphere. 
pointOnSphere = getRandomPointOnSphere( &l_rngState ); newVel = magVrel * pointOnSphere; vel[collidingAtoms.x] = velcm - 0.5 * newVel; vel[collidingAtoms.y] = velcm + 0.5 * newVel; collisionCount[cell]++; } } rngState[cell] = l_rngState; } } return; } __device__ int2 chooseCollidingAtoms( int numberOfAtomsInCell, int *prefixScanNumberOfAtomsInCell, int3 cellsPerDimension, hiprandState_t *rngState, int cell ) { int2 collidingAtoms = { 0, 0 }; if (numberOfAtomsInCell == 0) { collidingAtoms.x = prefixScanNumberOfAtomsInCell[cell] + 0; int3 cellIndices = extractCellIndices( cell, cellsPerDimension ); int3 newCellIndices = make_int3( 0, 0, 0 ); int newCell = 0; int newCellPopulation = 0; int numberOfTries = 0; while (newCellPopulation == 0 && numberOfTries < 15) { newCellIndices.x = cellIndices.x + 2 * ( round(hiprand_uniform_double( rngState )) - 0.5 ); newCellIndices.y = cellIndices.y + 2 * ( round(hiprand_uniform_double( rngState )) - 0.5 ); newCellIndices.z = cellIndices.z + 2 * ( round(hiprand_uniform_double( rngState )) - 0.5 ); if (newCellIndices.x < 0) { newCellIndices.x = 0; } if (newCellIndices.y < 0) { newCellIndices.y = 0; } if (newCellIndices.z < 0) { newCellIndices.z = 0; } if (newCellIndices.x > cellsPerDimension.x-1) { newCellIndices.x = cellsPerDimension.x-1; } if (newCellIndices.y > cellsPerDimension.y-1) { newCellIndices.y = cellsPerDimension.y-1; } if (newCellIndices.z > cellsPerDimension.z-1) { newCellIndices.z = cellsPerDimension.z-1; } newCell = getCellID( newCellIndices, cellsPerDimension ); newCellPopulation = prefixScanNumberOfAtomsInCell[newCell+1] - prefixScanNumberOfAtomsInCell[newCell]; numberOfTries++; } // Randomly choose particles in this cell to collide. collidingAtoms.y = prefixScanNumberOfAtomsInCell[newCell] + (int)floor( hiprand_uniform_double ( rngState ) * (newCellPopulation-1) ); } else if (numberOfAtomsInCell == 2) { collidingAtoms.x = prefixScanNumberOfAtomsInCell[cell] + 0; collidingAtoms.y = prefixScanNumberOfAtomsInCell[cell] + 1; } else { // Randomly choose particles in this cell to collide. while (collidingAtoms.x == collidingAtoms.y) { collidingAtoms = double2Toint2_rd( make_double2( hiprand_uniform_double ( rngState ), hiprand_uniform_double ( rngState ) ) * (numberOfAtomsInCell-1) ); } collidingAtoms = prefixScanNumberOfAtomsInCell[cell] + collidingAtoms; } return collidingAtoms; } __device__ int3 extractCellIndices( int cell, int3 cellsPerDimension ) { int3 cellIndices = make_int3( 0, 0, 0 ); cellIndices.z = cell / (cellsPerDimension.x*cellsPerDimension.y); cellIndices.y = (cell - cellIndices.z*cellsPerDimension.x*cellsPerDimension.y) / cellsPerDimension.x; cellIndices.x = cell - cellIndices.z*cellsPerDimension.x*cellsPerDimension.y - cellIndices.y*cellsPerDimension.x; return cellIndices; } __device__ double calculateRelativeVelocity( double3 *vel, int2 collidingAtoms ) { double3 vRel = vel[collidingAtoms.x] - vel[collidingAtoms.y]; double magVrel = sqrt(vRel.x*vRel.x + vRel.y*vRel.y + vRel.z*vRel.z); return magVrel; } __device__ double3 getRandomPointOnSphere( hiprandState_t *rngState ) { double r1 = hiprand_normal_double ( rngState ); double r2 = hiprand_normal_double ( rngState ); double r3 = hiprand_normal_double ( rngState ); double3 pointOnSphere = make_double3( r1, r2, r3 ) * rsqrt( r1*r1 + r2*r2 + r3*r3 ); return pointOnSphere; }
4886f7151f95471305ee11357c344e3eedc4524c.cu
// // collisions.cu // CUDADSMC // // Created by Christopher Watkins on 19/08/2014. // Copyright (c) 2014 WIJ. All rights reserved. // #include <stdio.h> #include <curand_kernel.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include "declareInitialSystemParameters.cuh" #include "deviceSystemParameters.cuh" #include "collisions.cuh" #include "cudaHelpers.cuh" #pragma mark - Indexing double indexAtoms( double3 *d_pos, int *d_cellID, int *d_atomID, int3 cellsPerDimension, int numberOfAtoms ) { double *d_radius; cudaCalloc( (void **)&d_radius, numberOfAtoms, sizeof(double) ); h_calculateRadius(d_pos, d_radius, d_atomID, numberOfAtoms ); double medianR = findMedian( d_radius, numberOfAtoms ); printf("The median radius is %f\n", medianR ); h_findAtomIndex(d_pos, d_cellID, d_atomID, medianR, numberOfAtoms, cellsPerDimension ); cudaFree( d_radius ); return medianR; } void h_calculateRadius( double3 *d_pos, double *d_radius, int *d_atomID, int numberOfAtoms ) { int blockSize; int gridSize; #ifdef CUDA65 int minGridSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (const void *) calculateRadius, 0, numberOfAtoms ); gridSize = (numberOfAtoms + blockSize - 1) / blockSize; #else int device; cudaGetDevice ( &device ); int numSMs; cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, device); gridSize = 256*numSMs; blockSize = NUM_THREADS; #endif calculateRadius<<<gridSize,blockSize>>>(d_pos, d_radius, d_atomID, numberOfAtoms ); return; } __global__ void calculateRadius( double3 *pos, double *radius, int *atomID, int numberOfAtoms ) { for (int atom = blockIdx.x * blockDim.x + threadIdx.x; atom < numberOfAtoms; atom += blockDim.x * gridDim.x) { radius[atom] = length( pos[atomID[atom]] ); } return; } double findMedian( double *v, int N ) { thrust::device_ptr<double> ptr = thrust::device_pointer_cast( v ); thrust::sort( ptr, ptr + N ); double *d_median; cudaCalloc( (void **)&d_median, 1, sizeof(double) ); getMedian<<<1,1>>>( v, d_median, N ); double h_median; cudaMemcpy( (void *)&h_median, d_median, 1*sizeof(double), cudaMemcpyDeviceToHost ); cudaFree( d_median ); return h_median; } __global__ void getMedian( double *v, double *median, int N) { if (N % 2 == 0) { median[0] = 0.5*(v[N/2-1] + v[N/2]); } else { median[0] = v[(N-1)/2]; } return; } void h_findAtomIndex( double3 *d_pos, int *d_cellID, int *d_atomID, double medianR, int numberOfAtoms, int3 cellsPerDimension ) { int blockSize; int gridSize; #ifdef CUDA65 int minGridSize; cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, (const void *) calculateRadius, 0, numberOfAtoms ); gridSize = (numberOfAtoms + blockSize - 1) / blockSize; #else int device; cudaGetDevice ( &device ); int numSMs; cudaDeviceGetAttribute( &numSMs, cudaDevAttrMultiProcessorCount, device); gridSize = 256*numSMs; blockSize = NUM_THREADS; #endif findAtomIndex<<<gridSize,blockSize>>>(d_pos, d_cellID, d_atomID, medianR, numberOfAtoms, cellsPerDimension ); return; } __global__ void findAtomIndex( double3 *pos, int *cellID, int *atomID, double medianR, int numberOfAtoms, int3 cellsPerDimension ) { for (int atom = blockIdx.x * blockDim.x + threadIdx.x; atom < numberOfAtoms; atom += blockDim.x * gridDim.x) { double3 l_pos = pos[atomID[atom]]; double3 gridMin = getGridMin( medianR ); double3 cellLength = getCellLength( medianR, cellsPerDimension ); int3 cellIndices = getCellIndices(l_pos, gridMin, cellLength ); cellID[atomID[atom]] = getCellID( cellIndices, cellsPerDimension ); } return; } __device__ double3 getCellLength( double medianR, 
int3 cellsPerDimension ) { double3 cellLength = 2.0 * d_maxGridWidth / cellsPerDimension; return cellLength; } __device__ int3 getCellIndices( double3 pos, double3 gridMin, double3 cellLength ) { int3 index = { 0, 0, 0 }; index.x = __double2int_rd ( (pos.x - gridMin.x) / cellLength.x ); index.y = __double2int_rd ( (pos.y - gridMin.y) / cellLength.y ); index.z = __double2int_rd ( (pos.z - gridMin.z) / cellLength.z ); // printf("pos.x = %f, gridMin.x = %f, cellLength.x = %f, index.x = %i\n", pos.x, gridMin.x, cellLength.x, index.x ); return index; } __device__ int getCellID( int3 index, int3 cellsPerDimension ) { int cellID = 0; if (index.x > -1 && index.x < cellsPerDimension.x && index.y > -1 && index.y < cellsPerDimension.y && index.z > -1 && index.z < cellsPerDimension.z) { cellID = index.z*cellsPerDimension.x*cellsPerDimension.y + index.y*cellsPerDimension.x + index.x; } else { cellID = cellsPerDimension.x * cellsPerDimension.y * cellsPerDimension.z; } return cellID; } __device__ double3 getGridMin( double medianR ) { double3 gridMin = -1.0 * d_maxGridWidth; return gridMin; } __global__ void cellStartandEndKernel( int *cellID, int *atomID, int2 *cellStartEnd, int initialNumberOfAtoms, int numberOfAtoms ) { for (int atom = blockIdx.x * blockDim.x + threadIdx.x; atom < numberOfAtoms; atom += blockDim.x * gridDim.x) { // Find the beginning of the cell if (atom == 0) { cellStartEnd[cellID[atom]].x = 0; } else if (cellID[atom] != cellID[atom-1]) { cellStartEnd[cellID[atom]].x = atom; } // Find the end of the cell if (atom == numberOfAtoms - 1) { cellStartEnd[cellID[atom]].y = numberOfAtoms-1; } else if (cellID[atom] != cellID[atom+1]) { cellStartEnd[cellID[atom]].y = atom; } } return; } __global__ void findNumberOfAtomsInCell( int2 *cellStartEnd, int *numberOfAtomsInCell, int numberOfCells ) { for ( int cell = blockIdx.x * blockDim.x + threadIdx.x; cell < numberOfCells+1; cell += blockDim.x * gridDim.x) { if (cellStartEnd[cell].x == -1) { numberOfAtomsInCell[cell] = 0; } else { numberOfAtomsInCell[cell] = cellStartEnd[cell].y - cellStartEnd[cell].x + 1; } } return; } #pragma mark - Sorting void sortArrays(int *d_cellID, int *d_atomID , int numberOfAtoms ) { thrust::device_ptr<int> th_cellID = thrust::device_pointer_cast( d_cellID ); thrust::device_ptr<int> th_atomID = thrust::device_pointer_cast( d_atomID ); thrust::sort_by_key(th_cellID, th_cellID + numberOfAtoms, th_atomID ); return; } #pragma mark - Collisions __global__ void collide( double3 *vel, double *sigvrmax, int *prefixScanNumberOfAtomsInCell, int *collisionCount, double medianR, double alpha, int3 cellsPerDimension, int numberOfCells, curandState_t *rngState, int *atomID ) { for ( int cell = blockIdx.x * blockDim.x + threadIdx.x; cell < numberOfCells; cell += blockDim.x * gridDim.x) { int numberOfAtomsInCell = prefixScanNumberOfAtomsInCell[cell+1] - prefixScanNumberOfAtomsInCell[cell]; if (numberOfAtomsInCell > 1) { double3 cellLength = getCellLength( medianR, cellsPerDimension ); double crossSection = 8.*d_pi*d_a*d_a; d_dt = 1.0e-7; double tau = 128.*sqrt( d_mRb*pow( d_pi, 3 )*pow( d_kB*d_Temp,5 ) ) / ( crossSection*d_N*pow( d_gs*d_muB*2.16, 3 ) ); // d_loopsPerCollision = ceil( 0.1*tau / d_dt ); d_loopsPerCollision = 10; double cellVolume = cellLength.x * cellLength.y * cellLength.z; double Mc = 0.5 * (numberOfAtomsInCell - 1) * numberOfAtomsInCell; double lambda = ceil( Mc * alpha * d_loopsPerCollision * d_dt * sigvrmax[cell] / cellVolume ) / Mc; int Ncol = Mc*lambda; if (numberOfAtomsInCell ==1) { Ncol = 1; } double3 velcm, 
newVel, pointOnSphere; double magVrel; double ProbCol; curandState_t l_rngState = rngState[cell]; collisionCount[cell] = 0; for ( int l_collision = 0; l_collision < Ncol; l_collision++ ) { int2 collidingAtoms = {0,0}; // collidingAtoms = chooseCollidingAtoms( numberOfAtomsInCell, // prefixScanNumberOfAtomsInCell, // cellsPerDimension, // &l_rngState, // cell ); if (numberOfAtomsInCell == 1) { collidingAtoms.x = prefixScanNumberOfAtomsInCell[cell] + 0; int3 cellIndices = extractCellIndices( cell, cellsPerDimension ); int3 newCellIndices = make_int3( 0, 0, 0 ); int newCell = 0; int newCellPopulation = 0; int numberOfTries = 0; while (newCellPopulation == 0 && numberOfTries < 15) { newCellIndices.x = cellIndices.x + 2 * ( round(curand_uniform_double( &l_rngState )) - 0.5 ); newCellIndices.y = cellIndices.y + 2 * ( round(curand_uniform_double( &l_rngState )) - 0.5 ); newCellIndices.z = cellIndices.z + 2 * ( round(curand_uniform_double( &l_rngState )) - 0.5 ); if (newCellIndices.x < 0) { newCellIndices.x = 0; } if (newCellIndices.y < 0) { newCellIndices.y = 0; } if (newCellIndices.z < 0) { newCellIndices.z = 0; } if (newCellIndices.x > cellsPerDimension.x-1) { newCellIndices.x = cellsPerDimension.x-1; } if (newCellIndices.y > cellsPerDimension.y-1) { newCellIndices.y = cellsPerDimension.y-1; } if (newCellIndices.z > cellsPerDimension.z-1) { newCellIndices.z = cellsPerDimension.z-1; } newCell = getCellID( newCellIndices, cellsPerDimension ); newCellPopulation = prefixScanNumberOfAtomsInCell[newCell+1] - prefixScanNumberOfAtomsInCell[newCell]; numberOfTries++; } // Randomly choose particles in this cell to collide. collidingAtoms.y = prefixScanNumberOfAtomsInCell[newCell] + (int)floor( curand_uniform_double ( &l_rngState ) * (newCellPopulation-1) ); } else if (numberOfAtomsInCell == 2) { collidingAtoms.x = atomID[prefixScanNumberOfAtomsInCell[cell] + 0]; collidingAtoms.y = atomID[prefixScanNumberOfAtomsInCell[cell] + 1]; } else { // Randomly choose particles in this cell to collide. while (collidingAtoms.x == collidingAtoms.y) { collidingAtoms = double2Toint2_rd( make_double2( curand_uniform_double ( &l_rngState ), curand_uniform_double ( &l_rngState ) ) * (numberOfAtomsInCell-1) ); } // collidingAtoms = prefixScanNumberOfAtomsInCell[cell] + collidingAtoms; collidingAtoms.x = atomID[prefixScanNumberOfAtomsInCell[cell] + collidingAtoms.x]; collidingAtoms.y = atomID[prefixScanNumberOfAtomsInCell[cell] + collidingAtoms.y]; } magVrel = calculateRelativeVelocity( vel, collidingAtoms ); // Check if this is the more probable than current most probable. if (magVrel*crossSection > sigvrmax[cell]) { sigvrmax[cell] = magVrel * crossSection; } ProbCol = alpha * d_loopsPerCollision * d_dt / cellVolume * magVrel * crossSection / lambda; // Collide with the collision probability. if ( ProbCol > curand_uniform_double ( &l_rngState ) ) { // Find centre of mass velocities. velcm = 0.5*(vel[collidingAtoms.x] + vel[collidingAtoms.y]); // Generate a random velocity on the unit sphere. 
pointOnSphere = getRandomPointOnSphere( &l_rngState ); newVel = magVrel * pointOnSphere; vel[collidingAtoms.x] = velcm - 0.5 * newVel; vel[collidingAtoms.y] = velcm + 0.5 * newVel; collisionCount[cell]++; } } rngState[cell] = l_rngState; } } return; } __device__ int2 chooseCollidingAtoms( int numberOfAtomsInCell, int *prefixScanNumberOfAtomsInCell, int3 cellsPerDimension, curandState_t *rngState, int cell ) { int2 collidingAtoms = { 0, 0 }; if (numberOfAtomsInCell == 0) { collidingAtoms.x = prefixScanNumberOfAtomsInCell[cell] + 0; int3 cellIndices = extractCellIndices( cell, cellsPerDimension ); int3 newCellIndices = make_int3( 0, 0, 0 ); int newCell = 0; int newCellPopulation = 0; int numberOfTries = 0; while (newCellPopulation == 0 && numberOfTries < 15) { newCellIndices.x = cellIndices.x + 2 * ( round(curand_uniform_double( rngState )) - 0.5 ); newCellIndices.y = cellIndices.y + 2 * ( round(curand_uniform_double( rngState )) - 0.5 ); newCellIndices.z = cellIndices.z + 2 * ( round(curand_uniform_double( rngState )) - 0.5 ); if (newCellIndices.x < 0) { newCellIndices.x = 0; } if (newCellIndices.y < 0) { newCellIndices.y = 0; } if (newCellIndices.z < 0) { newCellIndices.z = 0; } if (newCellIndices.x > cellsPerDimension.x-1) { newCellIndices.x = cellsPerDimension.x-1; } if (newCellIndices.y > cellsPerDimension.y-1) { newCellIndices.y = cellsPerDimension.y-1; } if (newCellIndices.z > cellsPerDimension.z-1) { newCellIndices.z = cellsPerDimension.z-1; } newCell = getCellID( newCellIndices, cellsPerDimension ); newCellPopulation = prefixScanNumberOfAtomsInCell[newCell+1] - prefixScanNumberOfAtomsInCell[newCell]; numberOfTries++; } // Randomly choose particles in this cell to collide. collidingAtoms.y = prefixScanNumberOfAtomsInCell[newCell] + (int)floor( curand_uniform_double ( rngState ) * (newCellPopulation-1) ); } else if (numberOfAtomsInCell == 2) { collidingAtoms.x = prefixScanNumberOfAtomsInCell[cell] + 0; collidingAtoms.y = prefixScanNumberOfAtomsInCell[cell] + 1; } else { // Randomly choose particles in this cell to collide. while (collidingAtoms.x == collidingAtoms.y) { collidingAtoms = double2Toint2_rd( make_double2( curand_uniform_double ( rngState ), curand_uniform_double ( rngState ) ) * (numberOfAtomsInCell-1) ); } collidingAtoms = prefixScanNumberOfAtomsInCell[cell] + collidingAtoms; } return collidingAtoms; } __device__ int3 extractCellIndices( int cell, int3 cellsPerDimension ) { int3 cellIndices = make_int3( 0, 0, 0 ); cellIndices.z = cell / (cellsPerDimension.x*cellsPerDimension.y); cellIndices.y = (cell - cellIndices.z*cellsPerDimension.x*cellsPerDimension.y) / cellsPerDimension.x; cellIndices.x = cell - cellIndices.z*cellsPerDimension.x*cellsPerDimension.y - cellIndices.y*cellsPerDimension.x; return cellIndices; } __device__ double calculateRelativeVelocity( double3 *vel, int2 collidingAtoms ) { double3 vRel = vel[collidingAtoms.x] - vel[collidingAtoms.y]; double magVrel = sqrt(vRel.x*vRel.x + vRel.y*vRel.y + vRel.z*vRel.z); return magVrel; } __device__ double3 getRandomPointOnSphere( curandState_t *rngState ) { double r1 = curand_normal_double ( rngState ); double r2 = curand_normal_double ( rngState ); double r3 = curand_normal_double ( rngState ); double3 pointOnSphere = make_double3( r1, r2, r3 ) * rsqrt( r1*r1 + r2*r2 + r3*r3 ); return pointOnSphere; }
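Both collision files above rely on cudaCalloc from cudaHelpers.cuh, which is not included in this excerpt. A plausible minimal stand-in with the same call shape, (void**)&ptr, count, elementSize (an assumption about the helper, not the project's actual implementation):

#include <cuda_runtime.h>

// Hypothetical cudaCalloc: allocate count*size bytes on the device and zero them, calloc-style.
inline cudaError_t cudaCalloc(void** devPtr, size_t count, size_t size)
{
    cudaError_t err = cudaMalloc(devPtr, count * size);   // allocate the buffer
    if (err != cudaSuccess) return err;
    return cudaMemset(*devPtr, 0, count * size);          // zero-initialise it
}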
7d78a24ff95e02182de2c6d0a68a88026cc784bc.hip
// !!! This is a file automatically generated by hipify!!! /* * histo_kernel.cu */ #include <hip/hip_runtime_api.h> #include <iostream> #include <stdio.h> #include "util.h" using namespace std; // declarations // Number of Threads per Block int NumberOfThreadsPerBlock = 1024; /* * The histogram kernel computes the index of the nearest cluster of <clusters> for each feature of <features> on the * GPU. The value of <histogram> at the computed index is incremented. <bins> is the number of bins of the histogram, * <count> is the number of features and <size> the dimension of each feature / cluster. */ __global__ void histo_kernel(float *features, float *clusters, unsigned int *histo, const unsigned int bins, const long count, const unsigned int size); /* * Computes the euclidean distance between <point1> and <point2> on the GPU and returns it as a float. Both points * must have <size> many dimensions. */ __device__ float euclidean_distance_2_gpu(float *point1, float *point2, const unsigned int size); /** * Computes the index of nearest cluster of <clusters> to <point> on the GPU. Both the clusters and the point must have * <size> many dimensions. <clusters> is an array of size <k>. To obtain the nearest cluster, the euclidean distance is * measured. */ __device__ int nearest_cluster_gpu(float *point, const unsigned int size, float *clusters, const unsigned int k); // function definitions void histo_gpu(float **features, float **clusters, float *histo, const unsigned int k, const long count, const unsigned int size) { int numberOfBlocksInGrid = count / NumberOfThreadsPerBlock; dim3 dimGrid(numberOfBlocksInGrid, 1); dim3 dimBlock(NumberOfThreadsPerBlock, 1); // Use an integer histo to sum the total counts for each bin unsigned int *intHisto = (unsigned int*) new unsigned int[k]; // Arrays on the GPU unsigned int *deviceHisto = 0; float *deviceFeatures = 0; float *deviceClusters = 0; // Calculate memory needed for data on the GPU size_t featureMem = sizeof(float) * count * size; size_t clusterMem = sizeof(float) * k * size; size_t histoMem = sizeof(unsigned int) * k; size_t sharedMem = sizeof(float) * k; // Convert the two dimensional clusters and features array to a one dimensional array. float *featureArray = (float *) new float[count * size]; float *clusterArray = (float *) new float[k * size]; for (int i = 0; i < count; i++) { for (int j = 0; j < size; j++) featureArray[i * size + j] = features[i][j]; } for (int i = 0; i < k; i++) { for (int j = 0; j < size; j++) clusterArray[i * size + j] = clusters[i][j]; } // Assure at least one block in the grid if (numberOfBlocksInGrid < 1) { numberOfBlocksInGrid = 1; } // Assume we have a cuda device... hipSetDevice(0); // Allocate memory on the GPU and copy data. Important: Set deviceHisto to all 0! 
hipMalloc((void**) &deviceFeatures, featureMem); hipMalloc((void**) &deviceClusters, clusterMem); hipMalloc((void**) &deviceHisto, histoMem); hipMemcpy(deviceFeatures, featureArray, featureMem, hipMemcpyHostToDevice); hipMemcpy(deviceClusters, clusterArray, clusterMem, hipMemcpyHostToDevice); hipMemset(deviceHisto, 0, histoMem); // TODO: DEBUG //cout << "Starting histogram kernel" << endl; //cout << " Blocks: " << numberOfBlocksInGrid << endl; //cout << " Threads: " << NumberOfThreadsPerBlock << endl; // kernel invocation hipLaunchKernelGGL(( histo_kernel), dim3(numberOfBlocksInGrid), dim3(dimBlock), sharedMem, 0, deviceFeatures, deviceClusters, deviceHisto, k, count, size); checkCUDAError("createHistogram - Kernel"); hipDeviceSynchronize(); // Copy results from device to host hipMemcpy(intHisto, deviceHisto, histoMem, hipMemcpyDeviceToHost); // Compute frequencies as floats from the total counts in intHisto for (int i = 0; i < k; i++) { histo[i] = ((float) intHisto[i]) / count; } // Free everything hipFree((void*) deviceFeatures); hipFree((void*) deviceClusters); hipFree((void*) deviceHisto); free(featureArray); free(clusterArray); free(intHisto); deviceFeatures = 0; deviceClusters = 0; deviceHisto = 0; featureArray = 0; clusterArray = 0; intHisto = 0; } __global__ void histo_kernel(float *features, float *clusters, unsigned int *histo, unsigned int bins, long count, unsigned int size) { // Allocate shared memory per block for private histogram values extern __shared__ int sharedMemory[]; unsigned int *histo_private = (unsigned int *) sharedMemory; if (threadIdx.x < bins) { histo_private[threadIdx.x] = 0; } __syncthreads(); // Compute global index and offset (size of block) unsigned int i = threadIdx.x + blockIdx.x * blockDim.x; unsigned int stride = blockDim.x * gridDim.x; // Compute for the nearsest cluster and therefore the nearest bin and increment the // corresponding bin in the private histogram while (i < count) { float *feature = &features[i * size]; int bin = nearest_cluster_gpu(feature, size, clusters, bins); atomicAdd(&(histo_private[bin]), 1); i += stride; } __syncthreads(); // The first bin threads are the "master" threads and accumulate the private values // of the shared histograms into the global if (threadIdx.x < bins) { atomicAdd(&(histo[threadIdx.x]), histo_private[threadIdx.x]); } } __device__ float euclidean_distance_2_gpu(float *point1, float *point2, const unsigned int size) { float dist = 0.f; for (int i = 0; i < size; i++) { dist += (point1[i] - point2[i]) * (point1[i] - point2[i]); } return dist; } __device__ int nearest_cluster_gpu(float *point, const unsigned int size, float *clusters, const unsigned int k) { int index = 0; float minDist = euclidean_distance_2_gpu(point, clusters, size); for (int i = 1; i < k; i++) { float dist = euclidean_distance_2_gpu(point, &clusters[i * size], size); if (dist < minDist) { // square root? minDist = dist; index = i; } } return index; }
7d78a24ff95e02182de2c6d0a68a88026cc784bc.cu
/* * histo_kernel.cu */ #include <cuda_runtime_api.h> #include <iostream> #include <stdio.h> #include "util.h" using namespace std; // declarations // Number of Threads per Block int NumberOfThreadsPerBlock = 1024; /* * The histogram kernel computes the index of the nearest cluster of <clusters> for each feature of <features> on the * GPU. The value of <histogram> at the computed index is incremented. <bins> is the number of bins of the histogram, * <count> is the number of features and <size> the dimension of each feature / cluster. */ __global__ void histo_kernel(float *features, float *clusters, unsigned int *histo, const unsigned int bins, const long count, const unsigned int size); /* * Computes the euclidean distance between <point1> and <point2> on the GPU and returns it as a float. Both points * must have <size> many dimensions. */ __device__ float euclidean_distance_2_gpu(float *point1, float *point2, const unsigned int size); /** * Computes the index of nearest cluster of <clusters> to <point> on the GPU. Both the clusters and the point must have * <size> many dimensions. <clusters> is an array of size <k>. To obtain the nearest cluster, the euclidean distance is * measured. */ __device__ int nearest_cluster_gpu(float *point, const unsigned int size, float *clusters, const unsigned int k); // function definitions void histo_gpu(float **features, float **clusters, float *histo, const unsigned int k, const long count, const unsigned int size) { int numberOfBlocksInGrid = count / NumberOfThreadsPerBlock; dim3 dimGrid(numberOfBlocksInGrid, 1); dim3 dimBlock(NumberOfThreadsPerBlock, 1); // Use an integer histo to sum the total counts for each bin unsigned int *intHisto = (unsigned int*) new unsigned int[k]; // Arrays on the GPU unsigned int *deviceHisto = 0; float *deviceFeatures = 0; float *deviceClusters = 0; // Calculate memory needed for data on the GPU size_t featureMem = sizeof(float) * count * size; size_t clusterMem = sizeof(float) * k * size; size_t histoMem = sizeof(unsigned int) * k; size_t sharedMem = sizeof(float) * k; // Convert the two dimensional clusters and features array to a one dimensional array. float *featureArray = (float *) new float[count * size]; float *clusterArray = (float *) new float[k * size]; for (int i = 0; i < count; i++) { for (int j = 0; j < size; j++) featureArray[i * size + j] = features[i][j]; } for (int i = 0; i < k; i++) { for (int j = 0; j < size; j++) clusterArray[i * size + j] = clusters[i][j]; } // Assure at least one block in the grid if (numberOfBlocksInGrid < 1) { numberOfBlocksInGrid = 1; } // Assume we have a cuda device... cudaSetDevice(0); // Allocate memory on the GPU and copy data. Important: Set deviceHisto to all 0! 
cudaMalloc((void**) &deviceFeatures, featureMem); cudaMalloc((void**) &deviceClusters, clusterMem); cudaMalloc((void**) &deviceHisto, histoMem); cudaMemcpy(deviceFeatures, featureArray, featureMem, cudaMemcpyHostToDevice); cudaMemcpy(deviceClusters, clusterArray, clusterMem, cudaMemcpyHostToDevice); cudaMemset(deviceHisto, 0, histoMem); // TODO: DEBUG //cout << "Starting histogram kernel" << endl; //cout << " Blocks: " << numberOfBlocksInGrid << endl; //cout << " Threads: " << NumberOfThreadsPerBlock << endl; // kernel invocation histo_kernel<<<numberOfBlocksInGrid, dimBlock, sharedMem>>>(deviceFeatures, deviceClusters, deviceHisto, k, count, size); checkCUDAError("createHistogram - Kernel"); cudaDeviceSynchronize(); // Copy results from device to host cudaMemcpy(intHisto, deviceHisto, histoMem, cudaMemcpyDeviceToHost); // Compute frequencies as floats from the total counts in intHisto for (int i = 0; i < k; i++) { histo[i] = ((float) intHisto[i]) / count; } // Free everything cudaFree((void*) deviceFeatures); cudaFree((void*) deviceClusters); cudaFree((void*) deviceHisto); free(featureArray); free(clusterArray); free(intHisto); deviceFeatures = 0; deviceClusters = 0; deviceHisto = 0; featureArray = 0; clusterArray = 0; intHisto = 0; } __global__ void histo_kernel(float *features, float *clusters, unsigned int *histo, unsigned int bins, long count, unsigned int size) { // Allocate shared memory per block for private histogram values extern __shared__ int sharedMemory[]; unsigned int *histo_private = (unsigned int *) sharedMemory; if (threadIdx.x < bins) { histo_private[threadIdx.x] = 0; } __syncthreads(); // Compute global index and offset (size of block) unsigned int i = threadIdx.x + blockIdx.x * blockDim.x; unsigned int stride = blockDim.x * gridDim.x; // Compute for the nearsest cluster and therefore the nearest bin and increment the // corresponding bin in the private histogram while (i < count) { float *feature = &features[i * size]; int bin = nearest_cluster_gpu(feature, size, clusters, bins); atomicAdd(&(histo_private[bin]), 1); i += stride; } __syncthreads(); // The first bin threads are the "master" threads and accumulate the private values // of the shared histograms into the global if (threadIdx.x < bins) { atomicAdd(&(histo[threadIdx.x]), histo_private[threadIdx.x]); } } __device__ float euclidean_distance_2_gpu(float *point1, float *point2, const unsigned int size) { float dist = 0.f; for (int i = 0; i < size; i++) { dist += (point1[i] - point2[i]) * (point1[i] - point2[i]); } return dist; } __device__ int nearest_cluster_gpu(float *point, const unsigned int size, float *clusters, const unsigned int k) { int index = 0; float minDist = euclidean_distance_2_gpu(point, clusters, size); for (int i = 1; i < k; i++) { float dist = euclidean_distance_2_gpu(point, &clusters[i * size], size); if (dist < minDist) { // square root? minDist = dist; index = i; } } return index; }
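Both the HIP and CUDA versions of histo_kernel above rely on the same privatization pattern: each block accumulates a private histogram in shared memory via a grid-stride loop, then the first <bins> threads merge it into the global histogram with one atomicAdd per bin. The standalone sketch below keeps that pattern but replaces the nearest-cluster search with a trivial data[i] % bins binning (an assumption made only so the expected counts are easy to verify); private_histo and the buffer names are inventions of this sketch.

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void private_histo(const unsigned char *data, unsigned int *histo,
                              unsigned int bins, long count)
{
    extern __shared__ unsigned int priv[];           // one private histogram per block
    if (threadIdx.x < bins) priv[threadIdx.x] = 0;
    __syncthreads();

    unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int stride = blockDim.x * gridDim.x;
    while (i < count) {                              // grid-stride loop over the input
        atomicAdd(&priv[data[i] % bins], 1u);        // bin choice is a stand-in for nearest_cluster_gpu()
        i += stride;
    }
    __syncthreads();

    if (threadIdx.x < bins)                          // merge block-private counts into the global histogram
        atomicAdd(&histo[threadIdx.x], priv[threadIdx.x]);
}

int main(void)
{
    const unsigned int bins = 16;
    const long count = 1 << 20;

    unsigned char *d_data = 0;
    unsigned int *d_histo = 0;
    cudaMalloc((void**) &d_data, count);
    cudaMalloc((void**) &d_histo, bins * sizeof(unsigned int));
    cudaMemset(d_data, 7, count);                    // every byte falls into bin 7
    cudaMemset(d_histo, 0, bins * sizeof(unsigned int));

    private_histo<<<64, 256, bins * sizeof(unsigned int)>>>(d_data, d_histo, bins, count);

    unsigned int h_histo[bins];
    cudaMemcpy(h_histo, d_histo, bins * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("bin 7 = %u (expected %ld)\n", h_histo[7], count);

    cudaFree(d_data);
    cudaFree(d_histo);
    return 0;
}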
760e3004c86a50251b34bc1a803361878498dd88.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <hip/hip_runtime.h> #define ACC_J 19.013 #define ACC_K 25.253 #define ACC_L 6503.0 __constant__ int d_excitations_number,d_ionizations_number, d_datapoints, d_block_mult; __constant__ double d_T_r; void gauss_integration_setup32(double *weights, double *x) { x[0]=1.3680690752591596E-03; x[1]=7.1942442273659202E-03; x[2]=1.7618872206246805E-02; x[3]=3.2546962031130167E-02; x[4]=5.1839422116973843E-02; x[5]=7.5316193133715015E-02; x[6]=1.0275810201602886E-01; x[7]=1.3390894062985509E-01; x[8]=1.6847786653489233E-01; x[9]=2.0614212137961868E-01; x[10]=2.4655004553388526E-01; x[11]=2.8932436193468253E-01; x[12]=3.3406569885893617E-01; x[13]=3.8035631887393162E-01; x[14]=4.2776401920860185E-01; x[15]=4.7584616715613093E-01; x[16]=5.2415383284386907E-01; x[17]=5.7223598079139815E-01; x[18]=6.1964368112606838E-01; x[19]=6.6593430114106378E-01; x[20]=7.1067563806531764E-01; x[21]=7.5344995446611462E-01; x[22]=7.9385787862038115E-01; x[23]=8.3152213346510750E-01; x[24]=8.6609105937014474E-01; x[25]=8.9724189798397114E-01; x[26]=9.2468380686628515E-01; x[27]=9.4816057788302599E-01; x[28]=9.6745303796886994E-01; x[29]=9.8238112779375319E-01; x[30]=9.9280575577263397E-01; x[31]=9.9863193092474067E-01; weights[0]=3.5093050047349198E-03; weights[1]=8.1371973654528751E-03; weights[2]=1.2696032654631021E-02; weights[3]=1.7136931456510726E-02; weights[4]=2.1417949011113720E-02; weights[5]=2.5499029631187890E-02; weights[6]=2.9342046739268091E-02; weights[7]=3.2911111388180682E-02; weights[8]=3.6172897054423871E-02; weights[9]=3.9096947893535162E-02; weights[10]=4.1655962113473763E-02; weights[11]=4.3826046502202044E-02; weights[12]=4.5586939347882056E-02; weights[13]=4.6922199540401971E-02; weights[14]=4.7819360039637472E-02; weights[15]=4.8270044257364274E-02; weights[16]=4.8270044257363830E-02; weights[17]=4.7819360039637784E-02; weights[18]=4.6922199540401846E-02; weights[19]=4.5586939347881918E-02; weights[20]=4.3826046502201850E-02; weights[21]=4.1655962113473798E-02; weights[22]=3.9096947893534850E-02; weights[23]=3.6172897054424745E-02; weights[24]=3.2911111388180932E-02; weights[25]=2.9342046739267064E-02; weights[26]=2.5499029631188164E-02; weights[27]=2.1417949011113362E-02; weights[28]=1.7136931456510799E-02; weights[29]=1.2696032654631212E-02; weights[30]=8.1371973654529653E-03; weights[31]=3.5093050047351631E-03; } __device__ double d_j_int(double E0,double E_j,double *B_vector) { double Eq=E_j/E0; double integrand=-log(Eq)*B_vector[0]+B_vector[1]+Eq*(B_vector[2]+Eq*B_vector[3]); return integrand; } __device__ double d_k_int(double E0,double E1,double E_i,double *C_vector) { double E_i_pow=E_i*E_i, E0_pow=E0*E0, E1_pow=E1*E1, E1_prime=E0-E_i-E1, E1_prime_pow=E1_prime*E1_prime; double a=0.5*(sqrt(E0_pow+4.0*E_i_pow)-E0); double b=a+E_i; double integrand=(1.0/((E1+a)*(E1+b))+1.0/((E1_prime+a)*(E1_prime+b)))*C_vector[0]; integrand+=2.0*C_vector[1]/E0; integrand+=2.0*C_vector[2]*(E0-E_i)/E0_pow; E0_pow*=E0; integrand+=3.0*C_vector[3]*(E1_pow+E1_prime_pow)/E0_pow; E0_pow*=E0; E1_pow*=E1; E1_prime_pow*=E1_prime; integrand+=4.0*C_vector[4]*(E1_pow+E1_prime_pow)/E0_pow; integrand*=0.5/E_i; return integrand; } __device__ double d_l_int(double EGamma,double T_r,double *D_vector) { double exp_EG=exp(EGamma/T_r)-1.0; double integrand=(D_vector[0]+D_vector[1]/EGamma)/exp_EG; return integrand; } //Carry out an integral for the collisional excitation coefficient 
__global__ void d_j_calc(double *d_params, double *E_j, double *B_vector, double *d_j_up, double *w, double *x) { //Calculate the integrand on each thread int node_num, integral_num; integral_num=threadIdx.x/d_datapoints; node_num=threadIdx.x % d_datapoints; integral_num+=blockIdx.x*d_block_mult; extern __shared__ double temp[]; //Integrand is stored in shared memory double *d_j_up_temp=&temp[0]; double lim=(E_j[integral_num]+2.0*fabs(d_params[1])+ACC_J*d_params[0]); double E0=x[node_num]*lim+E_j[integral_num]; double integrand=d_j_int(E0,E_j[integral_num],B_vector+integral_num*4)*w[node_num]; double fermi=1.0/(1.0+exp((E0-d_params[1])/d_params[0])); double fermi_m=1.0/(1.0+exp((E0-E_j[integral_num]-d_params[1])/d_params[0])); d_j_up_temp[threadIdx.x]=integrand*fermi*(1.0-fermi_m); //Perform a standard reduction to integrate for (int d = d_datapoints>>1; d > 0; d >>= 1) { __syncthreads(); if (node_num<d) { d_j_up_temp[threadIdx.x] += d_j_up_temp[threadIdx.x+d]; } } __syncthreads(); if (node_num==0) { d_j_up[integral_num]=d_j_up_temp[threadIdx.x]*lim; } } //Carry out a double integral for the collisional ionization coefficient //Involves now two reductions and the intermediate results stored in main GPU memory __global__ void d_k_calc(double *d_params, double *E_i, double *C_vector,double *d_k_up, double *w, double *x) { //Calculate the integrand on each thread int node_num, integral_num; integral_num=threadIdx.x/d_datapoints; node_num=threadIdx.x % d_datapoints; integral_num+=blockIdx.x*d_block_mult; extern __shared__ double temp[]; double *d_k_up_temp=&temp[0]; double lim=(fabs(d_params[1])+50.0+ACC_K*d_params[0]); double E0prime=x[node_num]*lim; double E0=E0prime+E_i[integral_num]; double fermiE0=1.0/(1.0+exp((E0-d_params[1])/d_params[0])); double E1, integrand, fermiE1, fermiE0prime, int_w_w=0; for (int idx=0;idx<d_datapoints;idx++) { E1=x[idx]*E0prime; integrand=d_k_int(E0,E1,E_i[integral_num],C_vector+integral_num*5); fermiE1=1.0/(1.0+exp((E1-d_params[1])/d_params[0])); fermiE0prime=1.0/(1.0+exp((E0prime-E1-d_params[1])/d_params[0])); int_w_w+=integrand*w[node_num]*w[idx]*E0prime*fermiE0*(1.0-fermiE1)*(1.0-fermiE0prime); } d_k_up_temp[threadIdx.x]=int_w_w; //Perform a standard reduction to integrate for (int d = d_datapoints>>1; d > 0; d >>= 1) { __syncthreads(); if (node_num<d) { d_k_up_temp[threadIdx.x] += d_k_up_temp[threadIdx.x+d]; } } __syncthreads(); if (node_num==0) { d_k_up[integral_num]=d_k_up_temp[threadIdx.x]*lim; } } //Carry out an integral for the photoionization coefficient and energy change due to photoionization //Single integral, similar to d_j_calc() __global__ void d_l_calc(double *d_params, double *E_i, double *D_vector, double *d_l, double *d_le, double *w, double *x) { int node_num, integral_num; integral_num=threadIdx.x/d_datapoints; node_num=threadIdx.x % d_datapoints; integral_num+=blockIdx.x*d_block_mult; extern __shared__ double temp[]; double *d_l_temp=&temp[0]; double *d_le_temp=&temp[blockDim.x]; double lim=ACC_L; if(d_params[1]>0.0){lim+=d_params[1];} double EGammaPrime=x[node_num]*lim; double EGamma=EGammaPrime+E_i[integral_num]; double fermi_m=1.0-1.0/(1.0+exp((EGammaPrime-d_params[1])/d_params[0])); double integrand=d_l_int(EGamma,d_T_r,D_vector+integral_num*2)*w[node_num]*fermi_m; d_l_temp[threadIdx.x]=integrand; d_le_temp[threadIdx.x]=integrand*EGammaPrime; for (int d = d_datapoints>>1; d > 0; d >>= 1) { __syncthreads(); if (node_num<d) { d_l_temp[threadIdx.x] += d_l_temp[threadIdx.x+d]; d_le_temp[threadIdx.x] += d_le_temp[threadIdx.x+d]; } } 
__syncthreads(); if (node_num==0) { d_l[integral_num]=d_l_temp[threadIdx.x]*lim; d_le[integral_num]=d_le_temp[threadIdx.x]*lim; } } void d_setup(double **d_params, double **d_B_vector, double **d_C_vector, double **d_D_vector, double **d_E_j, double **d_E_i, double **d_j, double **d_k, double **d_l, double **d_x, double **d_w, double *B_vector, double *C_vector, double *D_vector, double *E_j, double *E_i, double T_r, double *h_x, double *h_w, int ionizations_number, int excitations_number, int h_datapoints, hipStream_t *streams, int h_block_mult) { hipMalloc((void **)d_params,sizeof(double)*2); hipMalloc((void **)d_B_vector,sizeof(double)*excitations_number*4); hipMalloc((void **)d_C_vector,sizeof(double)*ionizations_number*5); hipMalloc((void **)d_D_vector,sizeof(double)*ionizations_number*2); hipMalloc((void **)d_E_j,sizeof(double)*excitations_number); hipMalloc((void **)d_E_i,sizeof(double)*ionizations_number); hipMalloc((void **)d_j,sizeof(double)*excitations_number); hipMalloc((void **)d_k,sizeof(double)*ionizations_number); hipMalloc((void **)d_l,2*sizeof(double)*ionizations_number); hipMalloc((void **)d_x,sizeof(double)*h_datapoints); hipMalloc((void **)d_w,sizeof(double)*h_datapoints); hipMemcpyToSymbol(d_ionizations_number,&ionizations_number,sizeof(ionizations_number)); hipMemcpyToSymbol(d_excitations_number,&excitations_number,sizeof(excitations_number)); hipMemcpyToSymbol(d_datapoints,&h_datapoints,sizeof(h_datapoints)); hipMemcpyToSymbol(d_block_mult,&h_block_mult,sizeof(h_block_mult)); hipMemcpyToSymbol(d_T_r,&T_r,sizeof(T_r)); hipMemcpy(*d_B_vector,B_vector,sizeof(double)*excitations_number*4,hipMemcpyHostToDevice); hipMemcpy(*d_C_vector,C_vector,sizeof(double)*ionizations_number*5,hipMemcpyHostToDevice); hipMemcpy(*d_D_vector,D_vector,sizeof(double)*ionizations_number*2,hipMemcpyHostToDevice); hipMemcpy(*d_E_j,E_j,sizeof(double)*excitations_number,hipMemcpyHostToDevice); hipMemcpy(*d_E_i,E_i,sizeof(double)*ionizations_number,hipMemcpyHostToDevice); hipMemcpy(*d_x,h_x,sizeof(double)*h_datapoints,hipMemcpyHostToDevice); hipMemcpy(*d_w,h_w,sizeof(double)*h_datapoints,hipMemcpyHostToDevice); hipStreamCreate(&streams[0]); hipStreamCreate(&streams[1]); } void d_cleanup(double *d_params, double *d_B_vector, double *d_C_vector, double *d_E_j, double *d_E_i, double *d_j, double *d_k, double *d_l, double *d_x, double *d_w) { hipFree(d_params); hipFree(d_B_vector); hipFree(d_C_vector); hipFree(d_E_j); hipFree(d_E_i); hipFree(d_j); hipFree(d_k); hipFree(d_l); hipFree(d_x); hipFree(d_w); hipDeviceReset(); } void d_calculate_rates(double *d_params,double *d_B_vector, double *d_C_vector, double *d_D_vector, double *d_E_j, double *d_E_i, double *d_j, double *d_k, double *d_l, double *d_x, double *d_w,double *h_params, double *h_j,double *h_k,double *h_l,double *h_w, double *h_x, double T_r, int ionizations_number,int excitations_number,int h_datapoints,hipStream_t *streams, int h_block_mult) { dim3 block_dim(h_datapoints*h_block_mult,h_datapoints); hipMemcpy(d_params,h_params,sizeof(double)*2,hipMemcpyHostToDevice); hipLaunchKernelGGL(( d_j_calc), dim3(excitations_number/h_block_mult),dim3(h_datapoints*h_block_mult),h_datapoints*h_block_mult*sizeof(double),streams[0], d_params,d_E_j,d_B_vector,d_j,d_w,d_x); hipLaunchKernelGGL(( d_l_calc), dim3(ionizations_number/h_block_mult),dim3(h_datapoints*h_block_mult),2*h_datapoints*h_block_mult*sizeof(double),streams[0], d_params,d_E_i,d_D_vector,d_l,d_l+ionizations_number,d_w,d_x); hipLaunchKernelGGL(( d_k_calc), 
dim3(ionizations_number/h_block_mult),dim3(h_datapoints*h_block_mult),h_datapoints*h_block_mult*sizeof(double),streams[1], d_params,d_E_i,d_C_vector,d_k,d_w,d_x); hipMemcpyAsync(h_j,d_j,sizeof(double)*excitations_number,hipMemcpyDeviceToHost,streams[0]); hipMemcpyAsync(h_l,d_l,2*sizeof(double)*ionizations_number,hipMemcpyDeviceToHost,streams[0]); hipMemcpyAsync(h_k,d_k,sizeof(double)*ionizations_number,hipMemcpyDeviceToHost,streams[1]); hipDeviceSynchronize(); } //CPU memory allocation void h_allocate_arrays(int ionizations_number, int excitations_number, int h_datapoints, double **h_params, double **E_i,double **E_j,double **B_vector, double **C_vector, double **D_vector, double **h_j, double **h_k, double **h_l, double **h_w, double **h_x) { *h_params=(double*)malloc(2*sizeof(double)); *E_i=(double*)malloc(ionizations_number*sizeof(double)); *E_j=(double*)malloc(excitations_number*sizeof(double)); *B_vector=(double*)malloc(excitations_number*4*sizeof(double)); *C_vector=(double*)malloc(ionizations_number*5*sizeof(double)); *D_vector=(double*)malloc(ionizations_number*2*sizeof(double)); *h_j=(double*)malloc(excitations_number*sizeof(double)); *h_k=(double*)malloc(ionizations_number*sizeof(double)); *h_l=(double*)malloc(2*ionizations_number*sizeof(double)); *h_x=(double*)malloc(h_datapoints*sizeof(double)); *h_w=(double*)malloc(h_datapoints*sizeof(double)); } double h_j_int(double E0,double E_j,double *B_vector) { double Eq=E_j/E0; double integrand=-log(Eq)*B_vector[0]+B_vector[1]+Eq*(B_vector[2]+Eq*B_vector[3]); return integrand; } //Evaluate the differential cross section for collisional ionization //A Mott-type cross section, compatable with the BELI formula, is used double h_k_int(double E0,double E1,double E_i,double *C_vector) { double E_i_pow=E_i*E_i, E0_pow=E0*E0, E1_pow=E1*E1, E1_prime=E0-E_i-E1, E1_prime_pow=E1_prime*E1_prime; double a=0.5*(sqrt(E0_pow+4.0*E_i_pow)-E0); double b=a+E_i; double integrand=(1.0/((E1+a)*(E1+b))+1.0/((E1_prime+a)*(E1_prime+b)))*C_vector[0]; integrand+=2.0*C_vector[1]/E0; integrand+=2.0*C_vector[2]*(E0-E_i)/E0_pow; E0_pow*=E0; integrand+=3.0*C_vector[3]*(E1_pow+E1_prime_pow)/E0_pow; E0_pow*=E0; E1_pow*=E1; E1_prime_pow*=E1_prime; integrand+=4.0*C_vector[4]*(E1_pow+E1_prime_pow)/E0_pow; integrand*=0.5/E_i; return integrand; } //Evaluate photoionization cross section double h_l_int(double EGamma,double E_i, double T_r,double *D_vector) { double exp_EG=exp(EGamma/T_r)-1.0; double integrand=(D_vector[0]+D_vector[1]/EGamma)/exp_EG; return integrand; } //Full collisional excitation calculation void h_j_gauss_integration(double T_e,double mu,double E_j,double *B_vector, int datapoints, double *h_j_up, double *weights, double *x) { double integrand=0.0, E0, fermi, fermi_m, integ_temp; double region_difference=(E_j+2.0*fabs(mu)+ACC_J*T_e); int idx; for(idx=0;idx<datapoints;idx++) { E0=x[idx]*region_difference+E_j; integ_temp=h_j_int(E0,E_j,B_vector); fermi=1.0/(1.0+exp((E0-mu)/T_e)); fermi_m=1.0/(1.0+exp((E0-E_j-mu)/T_e)); integrand+=weights[idx]*integ_temp*fermi*(1.0-fermi_m); } *h_j_up=integrand*region_difference; } void h_k_gauss_integration(double T_e,double mu,double E_i,double *C_vector, int datapoints, double *k_up, double *weights, double *x) { double integrand0=0.0, integrand1, E0, E1, E0prime, fermiE0, fermiE1, fermiE0prime, integ_temp; double region_difference=(fabs(mu)+50.0+ACC_K*T_e); int idx0,idx1; for(idx0=0;idx0<datapoints;idx0++) { E0prime=x[idx0]*region_difference; E0=E0prime+E_i; integrand1=0.0; for(idx1=0;idx1<datapoints;idx1++) { 
E1=x[idx1]*E0prime; integ_temp=h_k_int(E0, E1, E_i,C_vector)*weights[idx1]; fermiE0=1.0/(1.0+exp((E0-mu)/T_e)); fermiE1=1.0/(1.0+exp((E1-mu)/T_e)); fermiE0prime=1.0/(1.0+exp((E0prime-E1-mu)/T_e)); integrand1+=integ_temp*fermiE0*(1.0-fermiE1)*(1.0-fermiE0prime); } integrand0+=weights[idx0]*E0prime*integrand1; } *k_up=integrand0*region_difference; } void h_l_gauss_integration(double T_e,double mu,double E_i,double T_r,double *D_vector, int datapoints, double *h_l, double *h_le, double *weights, double *x) { double integrand0=0.0, integrand1=0.0, EGamma, EGammaPrime, fermi_m, integ_temp; double region_difference=ACC_L; if (mu>0.0){region_difference+=mu;} int idx; for(idx=0;idx<datapoints;idx++) { EGammaPrime=x[idx]*region_difference; EGamma=EGammaPrime+E_i; fermi_m=1.0-1.0/(1.0+exp((EGammaPrime-mu)/T_e)); integ_temp=h_l_int(EGamma,E_i,T_r,D_vector)*weights[idx]*fermi_m; integrand0+=integ_temp; integrand1+=integ_temp*EGammaPrime; } *h_l=integrand0*region_difference; *h_le=integrand1*region_difference; } //The following functions carry out sequential integration for all relevant states void h_j_gauss_integration_full(int excitations_number,double T_e,double mu,double *E_j,double *B_vector, int datapoints, double *h_j, double *weights, double *x) { int idx_j; for (idx_j=0;idx_j<excitations_number;idx_j++) { h_j_gauss_integration(T_e,mu,E_j[idx_j],B_vector+idx_j*4,datapoints,h_j+idx_j,weights,x); } } void h_k_gauss_integration_full(int ionizations_number,double T_e,double mu,double *E_i,double *C_vector, int datapoints, double *h_k, double *weights, double *x) { int idx_k; for (idx_k=0;idx_k<ionizations_number;idx_k++) { h_k_gauss_integration(T_e,mu,E_i[idx_k],C_vector+idx_k*5,datapoints,h_k+idx_k,weights,x); } } void h_l_gauss_integration_full(int ionizations_number,double T_e,double mu,double T_r,double *E_i,double *D_vector, int datapoints, double *h_l, double *weights, double *x) { int idx_l; for (idx_l=0;idx_l<ionizations_number;idx_l++) { h_l_gauss_integration(T_e,mu,E_i[idx_l],T_r,D_vector+idx_l*2,datapoints,h_l+idx_l,h_l+ionizations_number+idx_l,weights,x); } } int main(int argc, char *argv[]) { int h_datapoints=32, ionizations_number=10000, excitations_number=10000, idx, h_block_mult=1; if (argc>1){ionizations_number=atoi(argv[1]);} if (argc>2){excitations_number=atoi(argv[2]);} if (argc>3){h_block_mult=atoi(argv[3]);} double *h_params, *E_i, *E_j, *B_vector, *C_vector, *D_vector, T_r; double *h_j, *h_k, *h_l, *h_x, *h_w, h_j2,h_k2,h_l2,h_le2; FILE *INPUTFILE1, *INPUTFILE2; clock_t h_start_t, h_end_t; double h_total_t; h_allocate_arrays(ionizations_number,excitations_number,h_datapoints,&h_params,&E_i,&E_j,&B_vector,&C_vector, &D_vector,&h_j, &h_k,&h_l,&h_w,&h_x); gauss_integration_setup32(h_w,h_x); h_params[0]=10.0; h_params[1]=3.0; T_r=300.0; if ((INPUTFILE1=fopen("Test_Ionization_Coeffs.txt", "r"))==NULL) { printf("Cannot open file! Error!\n"); exit(2); } for(idx=0;idx<ionizations_number;idx++) { fscanf(INPUTFILE1,"%lf %lf %lf %lf %lf %lf %lf %lf", &E_i[idx], &C_vector[idx*5], &C_vector[idx*5+1], &C_vector[idx*5+2], &C_vector[idx*5+3], &C_vector[idx*5+4], &D_vector[idx*2], &D_vector[idx*2+1]); } fclose(INPUTFILE1); if ((INPUTFILE2=fopen("Test_Excitation_Coeffs.txt", "r"))==NULL) { printf("Cannot open file! 
Error!\n"); exit(2); } for(idx=0;idx<excitations_number;idx++) { fscanf(INPUTFILE2,"%lf %lf %lf %lf %lf", &E_j[idx], &B_vector[idx*4], &B_vector[idx*4+1], &B_vector[idx*4+2], &B_vector[idx*4+3]); } fclose(INPUTFILE2); int device_count=0; hipGetDeviceCount(&device_count); printf("Device count: %i\n",device_count); hipSetDevice(0); //Run on device 0 by default - can be changed if multiple GPUs etc are present hipStream_t streams[2]; float gpu_time, gpu_time1, gpu_time2; hipEvent_t start, stop, start1, stop1, start2, stop2; hipEventCreate(&start); hipEventCreate(&stop); hipEventCreate(&start1); hipEventCreate(&stop1); hipEventCreate(&start2); hipEventCreate(&stop2); double *d_params, *d_B_vector, *d_C_vector, *d_D_vector, *d_E_j, *d_E_i, *d_j, *d_k, *d_l, *d_x, *d_w; d_setup(&d_params,&d_B_vector,&d_C_vector,&d_D_vector,&d_E_j,&d_E_i, &d_j, &d_k, &d_l, &d_x, &d_w,B_vector, C_vector, D_vector, E_j, E_i, T_r, h_x,h_w, ionizations_number,excitations_number, h_datapoints,streams,h_block_mult); h_start_t=clock(); hipEventRecord(start); hipEventRecord(start1,streams[0]); hipEventRecord(start2,streams[1]); d_calculate_rates(d_params,d_B_vector, d_C_vector,d_D_vector, d_E_j, d_E_i, d_j, d_k, d_l, d_x, d_w,h_params,h_j,h_k,h_l,h_w,h_x,T_r, ionizations_number,excitations_number,h_datapoints,streams,h_block_mult); hipEventRecord(stop); hipEventRecord(stop1,streams[0]); hipEventRecord(stop2,streams[1]); hipEventSynchronize(stop); hipEventSynchronize(stop1); hipEventSynchronize(stop2); hipEventElapsedTime(&gpu_time, start, stop); hipEventElapsedTime(&gpu_time1, start1, stop1); hipEventElapsedTime(&gpu_time2, start2, stop2); h_end_t=clock(); h_total_t=(double)(h_end_t-h_start_t)/CLOCKS_PER_SEC; printf("Time: %E CUDA times: %f %f %f\n", h_total_t, gpu_time, gpu_time1, gpu_time2); for(idx=0;idx<10;idx++) { h_j_gauss_integration(h_params[0],h_params[1],E_j[idx],B_vector+4*idx,h_datapoints,&h_j2, h_w, h_x); h_k_gauss_integration(h_params[0],h_params[1],E_i[idx],C_vector+5*idx,h_datapoints,&h_k2, h_w, h_x); h_l_gauss_integration(h_params[0],h_params[1],E_i[idx],T_r,D_vector+2*idx,h_datapoints,&h_l2, &h_le2, h_w, h_x); printf("%E %E %E %E %E %E %E %E\n",h_j[idx],h_j2,h_k[idx],h_k2,h_l[idx],h_l2,h_l[idx+ionizations_number],h_le2); } h_j_gauss_integration(h_params[0],h_params[1],E_j[excitations_number-1],B_vector+4*(excitations_number-1),h_datapoints,&h_j2, h_w, h_x); h_k_gauss_integration(h_params[0],h_params[1],E_i[ionizations_number-1],C_vector+5*(ionizations_number-1),h_datapoints,&h_k2, h_w, h_x); h_l_gauss_integration(h_params[0],h_params[1],E_i[ionizations_number-1],T_r,D_vector+2*(ionizations_number-1),h_datapoints,&h_l2, &h_le2, h_w, h_x); printf("%E %E %E %E %E %E %E %E\n",h_j[excitations_number-1],h_j2,h_k[ionizations_number-1],h_k2,h_l[ionizations_number-1],h_l2,h_l[2*ionizations_number-1],h_le2); d_cleanup(d_params, d_B_vector, d_C_vector, d_E_j, d_E_i, d_j, d_k, d_l, d_x, d_w); exit(0); }
760e3004c86a50251b34bc1a803361878498dd88.cu
#include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <cuda.h> #define ACC_J 19.013 #define ACC_K 25.253 #define ACC_L 6503.0 __constant__ int d_excitations_number,d_ionizations_number, d_datapoints, d_block_mult; __constant__ double d_T_r; void gauss_integration_setup32(double *weights, double *x) { x[0]=1.3680690752591596E-03; x[1]=7.1942442273659202E-03; x[2]=1.7618872206246805E-02; x[3]=3.2546962031130167E-02; x[4]=5.1839422116973843E-02; x[5]=7.5316193133715015E-02; x[6]=1.0275810201602886E-01; x[7]=1.3390894062985509E-01; x[8]=1.6847786653489233E-01; x[9]=2.0614212137961868E-01; x[10]=2.4655004553388526E-01; x[11]=2.8932436193468253E-01; x[12]=3.3406569885893617E-01; x[13]=3.8035631887393162E-01; x[14]=4.2776401920860185E-01; x[15]=4.7584616715613093E-01; x[16]=5.2415383284386907E-01; x[17]=5.7223598079139815E-01; x[18]=6.1964368112606838E-01; x[19]=6.6593430114106378E-01; x[20]=7.1067563806531764E-01; x[21]=7.5344995446611462E-01; x[22]=7.9385787862038115E-01; x[23]=8.3152213346510750E-01; x[24]=8.6609105937014474E-01; x[25]=8.9724189798397114E-01; x[26]=9.2468380686628515E-01; x[27]=9.4816057788302599E-01; x[28]=9.6745303796886994E-01; x[29]=9.8238112779375319E-01; x[30]=9.9280575577263397E-01; x[31]=9.9863193092474067E-01; weights[0]=3.5093050047349198E-03; weights[1]=8.1371973654528751E-03; weights[2]=1.2696032654631021E-02; weights[3]=1.7136931456510726E-02; weights[4]=2.1417949011113720E-02; weights[5]=2.5499029631187890E-02; weights[6]=2.9342046739268091E-02; weights[7]=3.2911111388180682E-02; weights[8]=3.6172897054423871E-02; weights[9]=3.9096947893535162E-02; weights[10]=4.1655962113473763E-02; weights[11]=4.3826046502202044E-02; weights[12]=4.5586939347882056E-02; weights[13]=4.6922199540401971E-02; weights[14]=4.7819360039637472E-02; weights[15]=4.8270044257364274E-02; weights[16]=4.8270044257363830E-02; weights[17]=4.7819360039637784E-02; weights[18]=4.6922199540401846E-02; weights[19]=4.5586939347881918E-02; weights[20]=4.3826046502201850E-02; weights[21]=4.1655962113473798E-02; weights[22]=3.9096947893534850E-02; weights[23]=3.6172897054424745E-02; weights[24]=3.2911111388180932E-02; weights[25]=2.9342046739267064E-02; weights[26]=2.5499029631188164E-02; weights[27]=2.1417949011113362E-02; weights[28]=1.7136931456510799E-02; weights[29]=1.2696032654631212E-02; weights[30]=8.1371973654529653E-03; weights[31]=3.5093050047351631E-03; } __device__ double d_j_int(double E0,double E_j,double *B_vector) { double Eq=E_j/E0; double integrand=-log(Eq)*B_vector[0]+B_vector[1]+Eq*(B_vector[2]+Eq*B_vector[3]); return integrand; } __device__ double d_k_int(double E0,double E1,double E_i,double *C_vector) { double E_i_pow=E_i*E_i, E0_pow=E0*E0, E1_pow=E1*E1, E1_prime=E0-E_i-E1, E1_prime_pow=E1_prime*E1_prime; double a=0.5*(sqrt(E0_pow+4.0*E_i_pow)-E0); double b=a+E_i; double integrand=(1.0/((E1+a)*(E1+b))+1.0/((E1_prime+a)*(E1_prime+b)))*C_vector[0]; integrand+=2.0*C_vector[1]/E0; integrand+=2.0*C_vector[2]*(E0-E_i)/E0_pow; E0_pow*=E0; integrand+=3.0*C_vector[3]*(E1_pow+E1_prime_pow)/E0_pow; E0_pow*=E0; E1_pow*=E1; E1_prime_pow*=E1_prime; integrand+=4.0*C_vector[4]*(E1_pow+E1_prime_pow)/E0_pow; integrand*=0.5/E_i; return integrand; } __device__ double d_l_int(double EGamma,double T_r,double *D_vector) { double exp_EG=exp(EGamma/T_r)-1.0; double integrand=(D_vector[0]+D_vector[1]/EGamma)/exp_EG; return integrand; } //Carry out an integral for the collisional excitation coefficient __global__ void d_j_calc(double *d_params, double *E_j, double *B_vector, 
double *d_j_up, double *w, double *x) { //Calculate the integrand on each thread int node_num, integral_num; integral_num=threadIdx.x/d_datapoints; node_num=threadIdx.x % d_datapoints; integral_num+=blockIdx.x*d_block_mult; extern __shared__ double temp[]; //Integrand is stored in shared memory double *d_j_up_temp=&temp[0]; double lim=(E_j[integral_num]+2.0*fabs(d_params[1])+ACC_J*d_params[0]); double E0=x[node_num]*lim+E_j[integral_num]; double integrand=d_j_int(E0,E_j[integral_num],B_vector+integral_num*4)*w[node_num]; double fermi=1.0/(1.0+exp((E0-d_params[1])/d_params[0])); double fermi_m=1.0/(1.0+exp((E0-E_j[integral_num]-d_params[1])/d_params[0])); d_j_up_temp[threadIdx.x]=integrand*fermi*(1.0-fermi_m); //Perform a standard reduction to integrate for (int d = d_datapoints>>1; d > 0; d >>= 1) { __syncthreads(); if (node_num<d) { d_j_up_temp[threadIdx.x] += d_j_up_temp[threadIdx.x+d]; } } __syncthreads(); if (node_num==0) { d_j_up[integral_num]=d_j_up_temp[threadIdx.x]*lim; } } //Carry out a double integral for the collisional ionization coefficient //Involves now two reductions and the intermediate results stored in main GPU memory __global__ void d_k_calc(double *d_params, double *E_i, double *C_vector,double *d_k_up, double *w, double *x) { //Calculate the integrand on each thread int node_num, integral_num; integral_num=threadIdx.x/d_datapoints; node_num=threadIdx.x % d_datapoints; integral_num+=blockIdx.x*d_block_mult; extern __shared__ double temp[]; double *d_k_up_temp=&temp[0]; double lim=(fabs(d_params[1])+50.0+ACC_K*d_params[0]); double E0prime=x[node_num]*lim; double E0=E0prime+E_i[integral_num]; double fermiE0=1.0/(1.0+exp((E0-d_params[1])/d_params[0])); double E1, integrand, fermiE1, fermiE0prime, int_w_w=0; for (int idx=0;idx<d_datapoints;idx++) { E1=x[idx]*E0prime; integrand=d_k_int(E0,E1,E_i[integral_num],C_vector+integral_num*5); fermiE1=1.0/(1.0+exp((E1-d_params[1])/d_params[0])); fermiE0prime=1.0/(1.0+exp((E0prime-E1-d_params[1])/d_params[0])); int_w_w+=integrand*w[node_num]*w[idx]*E0prime*fermiE0*(1.0-fermiE1)*(1.0-fermiE0prime); } d_k_up_temp[threadIdx.x]=int_w_w; //Perform a standard reduction to integrate for (int d = d_datapoints>>1; d > 0; d >>= 1) { __syncthreads(); if (node_num<d) { d_k_up_temp[threadIdx.x] += d_k_up_temp[threadIdx.x+d]; } } __syncthreads(); if (node_num==0) { d_k_up[integral_num]=d_k_up_temp[threadIdx.x]*lim; } } //Carry out an integral for the photoionization coefficient and energy change due to photoionization //Single integral, similar to d_j_calc() __global__ void d_l_calc(double *d_params, double *E_i, double *D_vector, double *d_l, double *d_le, double *w, double *x) { int node_num, integral_num; integral_num=threadIdx.x/d_datapoints; node_num=threadIdx.x % d_datapoints; integral_num+=blockIdx.x*d_block_mult; extern __shared__ double temp[]; double *d_l_temp=&temp[0]; double *d_le_temp=&temp[blockDim.x]; double lim=ACC_L; if(d_params[1]>0.0){lim+=d_params[1];} double EGammaPrime=x[node_num]*lim; double EGamma=EGammaPrime+E_i[integral_num]; double fermi_m=1.0-1.0/(1.0+exp((EGammaPrime-d_params[1])/d_params[0])); double integrand=d_l_int(EGamma,d_T_r,D_vector+integral_num*2)*w[node_num]*fermi_m; d_l_temp[threadIdx.x]=integrand; d_le_temp[threadIdx.x]=integrand*EGammaPrime; for (int d = d_datapoints>>1; d > 0; d >>= 1) { __syncthreads(); if (node_num<d) { d_l_temp[threadIdx.x] += d_l_temp[threadIdx.x+d]; d_le_temp[threadIdx.x] += d_le_temp[threadIdx.x+d]; } } __syncthreads(); if (node_num==0) { d_l[integral_num]=d_l_temp[threadIdx.x]*lim; 
d_le[integral_num]=d_le_temp[threadIdx.x]*lim; } } void d_setup(double **d_params, double **d_B_vector, double **d_C_vector, double **d_D_vector, double **d_E_j, double **d_E_i, double **d_j, double **d_k, double **d_l, double **d_x, double **d_w, double *B_vector, double *C_vector, double *D_vector, double *E_j, double *E_i, double T_r, double *h_x, double *h_w, int ionizations_number, int excitations_number, int h_datapoints, cudaStream_t *streams, int h_block_mult) { cudaMalloc((void **)d_params,sizeof(double)*2); cudaMalloc((void **)d_B_vector,sizeof(double)*excitations_number*4); cudaMalloc((void **)d_C_vector,sizeof(double)*ionizations_number*5); cudaMalloc((void **)d_D_vector,sizeof(double)*ionizations_number*2); cudaMalloc((void **)d_E_j,sizeof(double)*excitations_number); cudaMalloc((void **)d_E_i,sizeof(double)*ionizations_number); cudaMalloc((void **)d_j,sizeof(double)*excitations_number); cudaMalloc((void **)d_k,sizeof(double)*ionizations_number); cudaMalloc((void **)d_l,2*sizeof(double)*ionizations_number); cudaMalloc((void **)d_x,sizeof(double)*h_datapoints); cudaMalloc((void **)d_w,sizeof(double)*h_datapoints); cudaMemcpyToSymbol(d_ionizations_number,&ionizations_number,sizeof(ionizations_number)); cudaMemcpyToSymbol(d_excitations_number,&excitations_number,sizeof(excitations_number)); cudaMemcpyToSymbol(d_datapoints,&h_datapoints,sizeof(h_datapoints)); cudaMemcpyToSymbol(d_block_mult,&h_block_mult,sizeof(h_block_mult)); cudaMemcpyToSymbol(d_T_r,&T_r,sizeof(T_r)); cudaMemcpy(*d_B_vector,B_vector,sizeof(double)*excitations_number*4,cudaMemcpyHostToDevice); cudaMemcpy(*d_C_vector,C_vector,sizeof(double)*ionizations_number*5,cudaMemcpyHostToDevice); cudaMemcpy(*d_D_vector,D_vector,sizeof(double)*ionizations_number*2,cudaMemcpyHostToDevice); cudaMemcpy(*d_E_j,E_j,sizeof(double)*excitations_number,cudaMemcpyHostToDevice); cudaMemcpy(*d_E_i,E_i,sizeof(double)*ionizations_number,cudaMemcpyHostToDevice); cudaMemcpy(*d_x,h_x,sizeof(double)*h_datapoints,cudaMemcpyHostToDevice); cudaMemcpy(*d_w,h_w,sizeof(double)*h_datapoints,cudaMemcpyHostToDevice); cudaStreamCreate(&streams[0]); cudaStreamCreate(&streams[1]); } void d_cleanup(double *d_params, double *d_B_vector, double *d_C_vector, double *d_E_j, double *d_E_i, double *d_j, double *d_k, double *d_l, double *d_x, double *d_w) { cudaFree(d_params); cudaFree(d_B_vector); cudaFree(d_C_vector); cudaFree(d_E_j); cudaFree(d_E_i); cudaFree(d_j); cudaFree(d_k); cudaFree(d_l); cudaFree(d_x); cudaFree(d_w); cudaDeviceReset(); } void d_calculate_rates(double *d_params,double *d_B_vector, double *d_C_vector, double *d_D_vector, double *d_E_j, double *d_E_i, double *d_j, double *d_k, double *d_l, double *d_x, double *d_w,double *h_params, double *h_j,double *h_k,double *h_l,double *h_w, double *h_x, double T_r, int ionizations_number,int excitations_number,int h_datapoints,cudaStream_t *streams, int h_block_mult) { dim3 block_dim(h_datapoints*h_block_mult,h_datapoints); cudaMemcpy(d_params,h_params,sizeof(double)*2,cudaMemcpyHostToDevice); d_j_calc<<<excitations_number/h_block_mult,h_datapoints*h_block_mult,h_datapoints*h_block_mult*sizeof(double),streams[0]>>>(d_params,d_E_j,d_B_vector,d_j,d_w,d_x); d_l_calc<<<ionizations_number/h_block_mult,h_datapoints*h_block_mult,2*h_datapoints*h_block_mult*sizeof(double),streams[0]>>>(d_params,d_E_i,d_D_vector,d_l,d_l+ionizations_number,d_w,d_x); d_k_calc<<<ionizations_number/h_block_mult,h_datapoints*h_block_mult,h_datapoints*h_block_mult*sizeof(double),streams[1]>>>(d_params,d_E_i,d_C_vector,d_k,d_w,d_x); 
cudaMemcpyAsync(h_j,d_j,sizeof(double)*excitations_number,cudaMemcpyDeviceToHost,streams[0]); cudaMemcpyAsync(h_l,d_l,2*sizeof(double)*ionizations_number,cudaMemcpyDeviceToHost,streams[0]); cudaMemcpyAsync(h_k,d_k,sizeof(double)*ionizations_number,cudaMemcpyDeviceToHost,streams[1]); cudaDeviceSynchronize(); } //CPU memory allocation void h_allocate_arrays(int ionizations_number, int excitations_number, int h_datapoints, double **h_params, double **E_i,double **E_j,double **B_vector, double **C_vector, double **D_vector, double **h_j, double **h_k, double **h_l, double **h_w, double **h_x) { *h_params=(double*)malloc(2*sizeof(double)); *E_i=(double*)malloc(ionizations_number*sizeof(double)); *E_j=(double*)malloc(excitations_number*sizeof(double)); *B_vector=(double*)malloc(excitations_number*4*sizeof(double)); *C_vector=(double*)malloc(ionizations_number*5*sizeof(double)); *D_vector=(double*)malloc(ionizations_number*2*sizeof(double)); *h_j=(double*)malloc(excitations_number*sizeof(double)); *h_k=(double*)malloc(ionizations_number*sizeof(double)); *h_l=(double*)malloc(2*ionizations_number*sizeof(double)); *h_x=(double*)malloc(h_datapoints*sizeof(double)); *h_w=(double*)malloc(h_datapoints*sizeof(double)); } double h_j_int(double E0,double E_j,double *B_vector) { double Eq=E_j/E0; double integrand=-log(Eq)*B_vector[0]+B_vector[1]+Eq*(B_vector[2]+Eq*B_vector[3]); return integrand; } //Evaluate the differential cross section for collisional ionization //A Mott-type cross section, compatable with the BELI formula, is used double h_k_int(double E0,double E1,double E_i,double *C_vector) { double E_i_pow=E_i*E_i, E0_pow=E0*E0, E1_pow=E1*E1, E1_prime=E0-E_i-E1, E1_prime_pow=E1_prime*E1_prime; double a=0.5*(sqrt(E0_pow+4.0*E_i_pow)-E0); double b=a+E_i; double integrand=(1.0/((E1+a)*(E1+b))+1.0/((E1_prime+a)*(E1_prime+b)))*C_vector[0]; integrand+=2.0*C_vector[1]/E0; integrand+=2.0*C_vector[2]*(E0-E_i)/E0_pow; E0_pow*=E0; integrand+=3.0*C_vector[3]*(E1_pow+E1_prime_pow)/E0_pow; E0_pow*=E0; E1_pow*=E1; E1_prime_pow*=E1_prime; integrand+=4.0*C_vector[4]*(E1_pow+E1_prime_pow)/E0_pow; integrand*=0.5/E_i; return integrand; } //Evaluate photoionization cross section double h_l_int(double EGamma,double E_i, double T_r,double *D_vector) { double exp_EG=exp(EGamma/T_r)-1.0; double integrand=(D_vector[0]+D_vector[1]/EGamma)/exp_EG; return integrand; } //Full collisional excitation calculation void h_j_gauss_integration(double T_e,double mu,double E_j,double *B_vector, int datapoints, double *h_j_up, double *weights, double *x) { double integrand=0.0, E0, fermi, fermi_m, integ_temp; double region_difference=(E_j+2.0*fabs(mu)+ACC_J*T_e); int idx; for(idx=0;idx<datapoints;idx++) { E0=x[idx]*region_difference+E_j; integ_temp=h_j_int(E0,E_j,B_vector); fermi=1.0/(1.0+exp((E0-mu)/T_e)); fermi_m=1.0/(1.0+exp((E0-E_j-mu)/T_e)); integrand+=weights[idx]*integ_temp*fermi*(1.0-fermi_m); } *h_j_up=integrand*region_difference; } void h_k_gauss_integration(double T_e,double mu,double E_i,double *C_vector, int datapoints, double *k_up, double *weights, double *x) { double integrand0=0.0, integrand1, E0, E1, E0prime, fermiE0, fermiE1, fermiE0prime, integ_temp; double region_difference=(fabs(mu)+50.0+ACC_K*T_e); int idx0,idx1; for(idx0=0;idx0<datapoints;idx0++) { E0prime=x[idx0]*region_difference; E0=E0prime+E_i; integrand1=0.0; for(idx1=0;idx1<datapoints;idx1++) { E1=x[idx1]*E0prime; integ_temp=h_k_int(E0, E1, E_i,C_vector)*weights[idx1]; fermiE0=1.0/(1.0+exp((E0-mu)/T_e)); fermiE1=1.0/(1.0+exp((E1-mu)/T_e)); 
fermiE0prime=1.0/(1.0+exp((E0prime-E1-mu)/T_e)); integrand1+=integ_temp*fermiE0*(1.0-fermiE1)*(1.0-fermiE0prime); } integrand0+=weights[idx0]*E0prime*integrand1; } *k_up=integrand0*region_difference; } void h_l_gauss_integration(double T_e,double mu,double E_i,double T_r,double *D_vector, int datapoints, double *h_l, double *h_le, double *weights, double *x) { double integrand0=0.0, integrand1=0.0, EGamma, EGammaPrime, fermi_m, integ_temp; double region_difference=ACC_L; if (mu>0.0){region_difference+=mu;} int idx; for(idx=0;idx<datapoints;idx++) { EGammaPrime=x[idx]*region_difference; EGamma=EGammaPrime+E_i; fermi_m=1.0-1.0/(1.0+exp((EGammaPrime-mu)/T_e)); integ_temp=h_l_int(EGamma,E_i,T_r,D_vector)*weights[idx]*fermi_m; integrand0+=integ_temp; integrand1+=integ_temp*EGammaPrime; } *h_l=integrand0*region_difference; *h_le=integrand1*region_difference; } //The following functions carry out sequential integration for all relevant states void h_j_gauss_integration_full(int excitations_number,double T_e,double mu,double *E_j,double *B_vector, int datapoints, double *h_j, double *weights, double *x) { int idx_j; for (idx_j=0;idx_j<excitations_number;idx_j++) { h_j_gauss_integration(T_e,mu,E_j[idx_j],B_vector+idx_j*4,datapoints,h_j+idx_j,weights,x); } } void h_k_gauss_integration_full(int ionizations_number,double T_e,double mu,double *E_i,double *C_vector, int datapoints, double *h_k, double *weights, double *x) { int idx_k; for (idx_k=0;idx_k<ionizations_number;idx_k++) { h_k_gauss_integration(T_e,mu,E_i[idx_k],C_vector+idx_k*5,datapoints,h_k+idx_k,weights,x); } } void h_l_gauss_integration_full(int ionizations_number,double T_e,double mu,double T_r,double *E_i,double *D_vector, int datapoints, double *h_l, double *weights, double *x) { int idx_l; for (idx_l=0;idx_l<ionizations_number;idx_l++) { h_l_gauss_integration(T_e,mu,E_i[idx_l],T_r,D_vector+idx_l*2,datapoints,h_l+idx_l,h_l+ionizations_number+idx_l,weights,x); } } int main(int argc, char *argv[]) { int h_datapoints=32, ionizations_number=10000, excitations_number=10000, idx, h_block_mult=1; if (argc>1){ionizations_number=atoi(argv[1]);} if (argc>2){excitations_number=atoi(argv[2]);} if (argc>3){h_block_mult=atoi(argv[3]);} double *h_params, *E_i, *E_j, *B_vector, *C_vector, *D_vector, T_r; double *h_j, *h_k, *h_l, *h_x, *h_w, h_j2,h_k2,h_l2,h_le2; FILE *INPUTFILE1, *INPUTFILE2; clock_t h_start_t, h_end_t; double h_total_t; h_allocate_arrays(ionizations_number,excitations_number,h_datapoints,&h_params,&E_i,&E_j,&B_vector,&C_vector, &D_vector,&h_j, &h_k,&h_l,&h_w,&h_x); gauss_integration_setup32(h_w,h_x); h_params[0]=10.0; h_params[1]=3.0; T_r=300.0; if ((INPUTFILE1=fopen("Test_Ionization_Coeffs.txt", "r"))==NULL) { printf("Cannot open file! Error!\n"); exit(2); } for(idx=0;idx<ionizations_number;idx++) { fscanf(INPUTFILE1,"%lf %lf %lf %lf %lf %lf %lf %lf", &E_i[idx], &C_vector[idx*5], &C_vector[idx*5+1], &C_vector[idx*5+2], &C_vector[idx*5+3], &C_vector[idx*5+4], &D_vector[idx*2], &D_vector[idx*2+1]); } fclose(INPUTFILE1); if ((INPUTFILE2=fopen("Test_Excitation_Coeffs.txt", "r"))==NULL) { printf("Cannot open file! 
Error!\n"); exit(2); } for(idx=0;idx<excitations_number;idx++) { fscanf(INPUTFILE2,"%lf %lf %lf %lf %lf", &E_j[idx], &B_vector[idx*4], &B_vector[idx*4+1], &B_vector[idx*4+2], &B_vector[idx*4+3]); } fclose(INPUTFILE2); int device_count=0; cudaGetDeviceCount(&device_count); printf("Device count: %i\n",device_count); cudaSetDevice(0); //Run on device 0 by default - can be changed if multiple GPUs etc are present cudaStream_t streams[2]; float gpu_time, gpu_time1, gpu_time2; cudaEvent_t start, stop, start1, stop1, start2, stop2; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventCreate(&start1); cudaEventCreate(&stop1); cudaEventCreate(&start2); cudaEventCreate(&stop2); double *d_params, *d_B_vector, *d_C_vector, *d_D_vector, *d_E_j, *d_E_i, *d_j, *d_k, *d_l, *d_x, *d_w; d_setup(&d_params,&d_B_vector,&d_C_vector,&d_D_vector,&d_E_j,&d_E_i, &d_j, &d_k, &d_l, &d_x, &d_w,B_vector, C_vector, D_vector, E_j, E_i, T_r, h_x,h_w, ionizations_number,excitations_number, h_datapoints,streams,h_block_mult); h_start_t=clock(); cudaEventRecord(start); cudaEventRecord(start1,streams[0]); cudaEventRecord(start2,streams[1]); d_calculate_rates(d_params,d_B_vector, d_C_vector,d_D_vector, d_E_j, d_E_i, d_j, d_k, d_l, d_x, d_w,h_params,h_j,h_k,h_l,h_w,h_x,T_r, ionizations_number,excitations_number,h_datapoints,streams,h_block_mult); cudaEventRecord(stop); cudaEventRecord(stop1,streams[0]); cudaEventRecord(stop2,streams[1]); cudaEventSynchronize(stop); cudaEventSynchronize(stop1); cudaEventSynchronize(stop2); cudaEventElapsedTime(&gpu_time, start, stop); cudaEventElapsedTime(&gpu_time1, start1, stop1); cudaEventElapsedTime(&gpu_time2, start2, stop2); h_end_t=clock(); h_total_t=(double)(h_end_t-h_start_t)/CLOCKS_PER_SEC; printf("Time: %E CUDA times: %f %f %f\n", h_total_t, gpu_time, gpu_time1, gpu_time2); for(idx=0;idx<10;idx++) { h_j_gauss_integration(h_params[0],h_params[1],E_j[idx],B_vector+4*idx,h_datapoints,&h_j2, h_w, h_x); h_k_gauss_integration(h_params[0],h_params[1],E_i[idx],C_vector+5*idx,h_datapoints,&h_k2, h_w, h_x); h_l_gauss_integration(h_params[0],h_params[1],E_i[idx],T_r,D_vector+2*idx,h_datapoints,&h_l2, &h_le2, h_w, h_x); printf("%E %E %E %E %E %E %E %E\n",h_j[idx],h_j2,h_k[idx],h_k2,h_l[idx],h_l2,h_l[idx+ionizations_number],h_le2); } h_j_gauss_integration(h_params[0],h_params[1],E_j[excitations_number-1],B_vector+4*(excitations_number-1),h_datapoints,&h_j2, h_w, h_x); h_k_gauss_integration(h_params[0],h_params[1],E_i[ionizations_number-1],C_vector+5*(ionizations_number-1),h_datapoints,&h_k2, h_w, h_x); h_l_gauss_integration(h_params[0],h_params[1],E_i[ionizations_number-1],T_r,D_vector+2*(ionizations_number-1),h_datapoints,&h_l2, &h_le2, h_w, h_x); printf("%E %E %E %E %E %E %E %E\n",h_j[excitations_number-1],h_j2,h_k[ionizations_number-1],h_k2,h_l[ionizations_number-1],h_l2,h_l[2*ionizations_number-1],h_le2); d_cleanup(d_params, d_B_vector, d_C_vector, d_E_j, d_E_i, d_j, d_k, d_l, d_x, d_w); exit(0); }
a45e566cdcde1b2298b7d42aa4104f0d1587121b.hip
// !!! This is a file automatically generated by hipify!!! #include <heart/cuda_common.h> #include <stdio.h> int _ConvertSMVer2Cores(int major, int minor); int findCudaDevice(int argc, const char **argv) { hipDeviceProp_t deviceProp; // Pick the device with highest Gflops/s int devID = gpuGetMaxGflopsDeviceId(); checkCudaErrors(hipSetDevice(devID)); checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID)); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); return devID; } int gpuGetMaxGflopsDeviceId() { int current_device = 0, sm_per_multiproc = 0; int max_compute_perf = 0, max_perf_device = 0; int device_count = 0, best_SM_arch = 0; hipDeviceProp_t deviceProp; hipGetDeviceCount(&device_count); // Find the best major SM Architecture GPU device while (current_device < device_count) { hipGetDeviceProperties(&deviceProp, current_device); // If this GPU is not running on Compute Mode prohibited, then we can add it to the list if (deviceProp.computeMode != hipComputeModeProhibited) { if (deviceProp.major > 0 && deviceProp.major < 9999) { best_SM_arch = (best_SM_arch > deviceProp.major) ? best_SM_arch : deviceProp.major; } } current_device++; } // Find the best CUDA capable GPU device current_device = 0; while (current_device < device_count) { hipGetDeviceProperties(&deviceProp, current_device); // If this GPU is not running on Compute Mode prohibited, then we can add it to the list if (deviceProp.computeMode != hipComputeModeProhibited) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { sm_per_multiproc = 1; } else { sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor); } int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate; if (compute_perf > max_compute_perf) { // If we find GPU with SM major > 2, search only these if (best_SM_arch > 2) { // If our device==dest_SM_arch, choose this, or else pass if (deviceProp.major == best_SM_arch) { max_compute_perf = compute_perf; max_perf_device = current_device; } } else { max_compute_perf = compute_perf; max_perf_device = current_device; } } } ++current_device; } return max_perf_device; } // Beginning of GPU Architecture definitions int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM typedef struct { int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { { 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class { 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class { 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class { 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class { 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class { 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class { 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class { 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class { -1, -1 } }; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If we don't find the values, we default use the previous one to run properly printf("MapSMtoCores for SM %d.%d is undefined. 
Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[7].Cores); return nGpuArchCoresPerSM[7].Cores; } const char *_cudaGetErrorEnum(hipError_t error) { switch (error) { case hipSuccess: return "hipSuccess"; case hipErrorMissingConfiguration: return "hipErrorMissingConfiguration"; case hipErrorMemoryAllocation: return "hipErrorMemoryAllocation"; case hipErrorInitializationError: return "hipErrorInitializationError"; case hipErrorLaunchFailure: return "hipErrorLaunchFailure"; case hipErrorPriorLaunchFailure: return "hipErrorPriorLaunchFailure"; case hipErrorLaunchTimeOut: return "hipErrorLaunchTimeOut"; case hipErrorLaunchOutOfResources: return "hipErrorLaunchOutOfResources"; case hipErrorInvalidDeviceFunction: return "hipErrorInvalidDeviceFunction"; case hipErrorInvalidConfiguration: return "hipErrorInvalidConfiguration"; case hipErrorInvalidDevice: return "hipErrorInvalidDevice"; case hipErrorInvalidValue: return "hipErrorInvalidValue"; case hipErrorInvalidPitchValue: return "hipErrorInvalidPitchValue"; case hipErrorInvalidSymbol: return "hipErrorInvalidSymbol"; case hipErrorMapFailed: return "hipErrorMapFailed"; case hipErrorUnmapFailed: return "hipErrorUnmapFailed"; case hipErrorInvalidHostPointer: return "hipErrorInvalidHostPointer"; case hipErrorInvalidDevicePointer: return "hipErrorInvalidDevicePointer"; case hipErrorInvalidTexture: return "hipErrorInvalidTexture"; case hipErrorInvalidTextureBinding: return "hipErrorInvalidTextureBinding"; case hipErrorInvalidChannelDescriptor: return "hipErrorInvalidChannelDescriptor"; case hipErrorInvalidMemcpyDirection: return "hipErrorInvalidMemcpyDirection"; case hipErrorAddressOfConstant: return "hipErrorAddressOfConstant"; case hipErrorTextureFetchFailed: return "hipErrorTextureFetchFailed"; case hipErrorTextureNotBound: return "hipErrorTextureNotBound"; case hipErrorSynchronizationError: return "hipErrorSynchronizationError"; case hipErrorInvalidFilterSetting: return "hipErrorInvalidFilterSetting"; case hipErrorInvalidNormSetting: return "hipErrorInvalidNormSetting"; case hipErrorMixedDeviceExecution: return "hipErrorMixedDeviceExecution"; case hipErrorDeinitialized: return "hipErrorDeinitialized"; case hipErrorUnknown: return "hipErrorUnknown"; case hipErrorNotYetImplemented: return "hipErrorNotYetImplemented"; case hipErrorMemoryValueTooLarge: return "hipErrorMemoryValueTooLarge"; case hipErrorInvalidResourceHandle: return "hipErrorInvalidResourceHandle"; case hipErrorNotReady: return "hipErrorNotReady"; case hipErrorInsufficientDriver: return "hipErrorInsufficientDriver"; case hipErrorSetOnActiveProcess: return "hipErrorSetOnActiveProcess"; case hipErrorInvalidSurface: return "hipErrorInvalidSurface"; case hipErrorNoDevice: return "hipErrorNoDevice"; case hipErrorECCNotCorrectable: return "hipErrorECCNotCorrectable"; case hipErrorSharedObjectSymbolNotFound: return "hipErrorSharedObjectSymbolNotFound"; case hipErrorSharedObjectInitFailed: return "hipErrorSharedObjectInitFailed"; case hipErrorUnsupportedLimit: return "hipErrorUnsupportedLimit"; case hipErrorDuplicateVariableName: return "hipErrorDuplicateVariableName"; case hipErrorDuplicateTextureName: return "hipErrorDuplicateTextureName"; case hipErrorDuplicateSurfaceName: return "hipErrorDuplicateSurfaceName"; case hipErrorDevicesUnavailable: return "hipErrorDevicesUnavailable"; case hipErrorInvalidImage: return "hipErrorInvalidImage"; case hipErrorNoBinaryForGpu: return "hipErrorNoBinaryForGpu"; case hipErrorIncompatibleDriverContext: return 
"hipErrorIncompatibleDriverContext"; case hipErrorPeerAccessAlreadyEnabled: return "hipErrorPeerAccessAlreadyEnabled"; case hipErrorPeerAccessNotEnabled: return "hipErrorPeerAccessNotEnabled"; case hipErrorDeviceAlreadyInUse: return "hipErrorDeviceAlreadyInUse"; case hipErrorProfilerDisabled: return "hipErrorProfilerDisabled"; case hipErrorProfilerNotInitialized: return "hipErrorProfilerNotInitialized"; case hipErrorProfilerAlreadyStarted: return "hipErrorProfilerAlreadyStarted"; case hipErrorProfilerAlreadyStopped: return "hipErrorProfilerAlreadyStopped"; #if __CUDA_API_VERSION >= 0x4000 case hipErrorAssert: return "hipErrorAssert"; case hipErrorTooManyPeers: return "hipErrorTooManyPeers"; case hipErrorHostMemoryAlreadyRegistered: return "hipErrorHostMemoryAlreadyRegistered"; case hipErrorHostMemoryNotRegistered: return "hipErrorHostMemoryNotRegistered"; #endif case hipErrorStartupFailure: return "hipErrorStartupFailure"; case hipErrorApiFailureBase: return "hipErrorApiFailureBase"; } return "<unknown>"; }
a45e566cdcde1b2298b7d42aa4104f0d1587121b.cu
#include <heart/cuda_common.h> #include <stdio.h> int _ConvertSMVer2Cores(int major, int minor); int findCudaDevice(int argc, const char **argv) { cudaDeviceProp deviceProp; // Pick the device with highest Gflops/s int devID = gpuGetMaxGflopsDeviceId(); checkCudaErrors(cudaSetDevice(devID)); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID)); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); return devID; } int gpuGetMaxGflopsDeviceId() { int current_device = 0, sm_per_multiproc = 0; int max_compute_perf = 0, max_perf_device = 0; int device_count = 0, best_SM_arch = 0; cudaDeviceProp deviceProp; cudaGetDeviceCount(&device_count); // Find the best major SM Architecture GPU device while (current_device < device_count) { cudaGetDeviceProperties(&deviceProp, current_device); // If this GPU is not running on Compute Mode prohibited, then we can add it to the list if (deviceProp.computeMode != cudaComputeModeProhibited) { if (deviceProp.major > 0 && deviceProp.major < 9999) { best_SM_arch = (best_SM_arch > deviceProp.major) ? best_SM_arch : deviceProp.major; } } current_device++; } // Find the best CUDA capable GPU device current_device = 0; while (current_device < device_count) { cudaGetDeviceProperties(&deviceProp, current_device); // If this GPU is not running on Compute Mode prohibited, then we can add it to the list if (deviceProp.computeMode != cudaComputeModeProhibited) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { sm_per_multiproc = 1; } else { sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor); } int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate; if (compute_perf > max_compute_perf) { // If we find GPU with SM major > 2, search only these if (best_SM_arch > 2) { // If our device==dest_SM_arch, choose this, or else pass if (deviceProp.major == best_SM_arch) { max_compute_perf = compute_perf; max_perf_device = current_device; } } else { max_compute_perf = compute_perf; max_perf_device = current_device; } } } ++current_device; } return max_perf_device; } // Beginning of GPU Architecture definitions int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM typedef struct { int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { { 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class { 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class { 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class { 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class { 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class { 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class { 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class { 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class { -1, -1 } }; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If we don't find the values, we default use the previous one to run properly printf("MapSMtoCores for SM %d.%d is undefined. 
Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[7].Cores); return nGpuArchCoresPerSM[7].Cores; } const char *_cudaGetErrorEnum(cudaError_t error) { switch (error) { case cudaSuccess: return "cudaSuccess"; case cudaErrorMissingConfiguration: return "cudaErrorMissingConfiguration"; case cudaErrorMemoryAllocation: return "cudaErrorMemoryAllocation"; case cudaErrorInitializationError: return "cudaErrorInitializationError"; case cudaErrorLaunchFailure: return "cudaErrorLaunchFailure"; case cudaErrorPriorLaunchFailure: return "cudaErrorPriorLaunchFailure"; case cudaErrorLaunchTimeout: return "cudaErrorLaunchTimeout"; case cudaErrorLaunchOutOfResources: return "cudaErrorLaunchOutOfResources"; case cudaErrorInvalidDeviceFunction: return "cudaErrorInvalidDeviceFunction"; case cudaErrorInvalidConfiguration: return "cudaErrorInvalidConfiguration"; case cudaErrorInvalidDevice: return "cudaErrorInvalidDevice"; case cudaErrorInvalidValue: return "cudaErrorInvalidValue"; case cudaErrorInvalidPitchValue: return "cudaErrorInvalidPitchValue"; case cudaErrorInvalidSymbol: return "cudaErrorInvalidSymbol"; case cudaErrorMapBufferObjectFailed: return "cudaErrorMapBufferObjectFailed"; case cudaErrorUnmapBufferObjectFailed: return "cudaErrorUnmapBufferObjectFailed"; case cudaErrorInvalidHostPointer: return "cudaErrorInvalidHostPointer"; case cudaErrorInvalidDevicePointer: return "cudaErrorInvalidDevicePointer"; case cudaErrorInvalidTexture: return "cudaErrorInvalidTexture"; case cudaErrorInvalidTextureBinding: return "cudaErrorInvalidTextureBinding"; case cudaErrorInvalidChannelDescriptor: return "cudaErrorInvalidChannelDescriptor"; case cudaErrorInvalidMemcpyDirection: return "cudaErrorInvalidMemcpyDirection"; case cudaErrorAddressOfConstant: return "cudaErrorAddressOfConstant"; case cudaErrorTextureFetchFailed: return "cudaErrorTextureFetchFailed"; case cudaErrorTextureNotBound: return "cudaErrorTextureNotBound"; case cudaErrorSynchronizationError: return "cudaErrorSynchronizationError"; case cudaErrorInvalidFilterSetting: return "cudaErrorInvalidFilterSetting"; case cudaErrorInvalidNormSetting: return "cudaErrorInvalidNormSetting"; case cudaErrorMixedDeviceExecution: return "cudaErrorMixedDeviceExecution"; case cudaErrorCudartUnloading: return "cudaErrorCudartUnloading"; case cudaErrorUnknown: return "cudaErrorUnknown"; case cudaErrorNotYetImplemented: return "cudaErrorNotYetImplemented"; case cudaErrorMemoryValueTooLarge: return "cudaErrorMemoryValueTooLarge"; case cudaErrorInvalidResourceHandle: return "cudaErrorInvalidResourceHandle"; case cudaErrorNotReady: return "cudaErrorNotReady"; case cudaErrorInsufficientDriver: return "cudaErrorInsufficientDriver"; case cudaErrorSetOnActiveProcess: return "cudaErrorSetOnActiveProcess"; case cudaErrorInvalidSurface: return "cudaErrorInvalidSurface"; case cudaErrorNoDevice: return "cudaErrorNoDevice"; case cudaErrorECCUncorrectable: return "cudaErrorECCUncorrectable"; case cudaErrorSharedObjectSymbolNotFound: return "cudaErrorSharedObjectSymbolNotFound"; case cudaErrorSharedObjectInitFailed: return "cudaErrorSharedObjectInitFailed"; case cudaErrorUnsupportedLimit: return "cudaErrorUnsupportedLimit"; case cudaErrorDuplicateVariableName: return "cudaErrorDuplicateVariableName"; case cudaErrorDuplicateTextureName: return "cudaErrorDuplicateTextureName"; case cudaErrorDuplicateSurfaceName: return "cudaErrorDuplicateSurfaceName"; case cudaErrorDevicesUnavailable: return "cudaErrorDevicesUnavailable"; case cudaErrorInvalidKernelImage: return 
"cudaErrorInvalidKernelImage"; case cudaErrorNoKernelImageForDevice: return "cudaErrorNoKernelImageForDevice"; case cudaErrorIncompatibleDriverContext: return "cudaErrorIncompatibleDriverContext"; case cudaErrorPeerAccessAlreadyEnabled: return "cudaErrorPeerAccessAlreadyEnabled"; case cudaErrorPeerAccessNotEnabled: return "cudaErrorPeerAccessNotEnabled"; case cudaErrorDeviceAlreadyInUse: return "cudaErrorDeviceAlreadyInUse"; case cudaErrorProfilerDisabled: return "cudaErrorProfilerDisabled"; case cudaErrorProfilerNotInitialized: return "cudaErrorProfilerNotInitialized"; case cudaErrorProfilerAlreadyStarted: return "cudaErrorProfilerAlreadyStarted"; case cudaErrorProfilerAlreadyStopped: return "cudaErrorProfilerAlreadyStopped"; #if __CUDA_API_VERSION >= 0x4000 case cudaErrorAssert: return "cudaErrorAssert"; case cudaErrorTooManyPeers: return "cudaErrorTooManyPeers"; case cudaErrorHostMemoryAlreadyRegistered: return "cudaErrorHostMemoryAlreadyRegistered"; case cudaErrorHostMemoryNotRegistered: return "cudaErrorHostMemoryNotRegistered"; #endif case cudaErrorStartupFailure: return "cudaErrorStartupFailure"; case cudaErrorApiFailureBase: return "cudaErrorApiFailureBase"; } return "<unknown>"; }
5fff7cd614a9a0bff472b3a20c7e22e1cf0ef4f3.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "stencil_1D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int *in = NULL;
            hipMalloc(&in, XSIZE*YSIZE);
            int *out = NULL;
            hipMalloc(&out, XSIZE*YSIZE);
            int dim = 2;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( stencil_1D), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,dim);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( stencil_1D), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,dim);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( stencil_1D), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,dim);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
5fff7cd614a9a0bff472b3a20c7e22e1cf0ef4f3.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "stencil_1D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int *in = NULL;
            cudaMalloc(&in, XSIZE*YSIZE);
            int *out = NULL;
            cudaMalloc(&out, XSIZE*YSIZE);
            int dim = 2;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            stencil_1D<<<gridBlock,threadBlock>>>(in,out,dim);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                stencil_1D<<<gridBlock,threadBlock>>>(in,out,dim);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                stencil_1D<<<gridBlock,threadBlock>>>(in,out,dim);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
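Because CUDA kernel launches are asynchronous, the timed loop in the benchmark above may mostly measure launch overhead rather than kernel execution time. The sketch below is one possible synchronized variant, assuming the stencil_1D signature and chrono using-directives from the file above; timeStencilLaunches is an invented name, not part of the benchmark.

// Hypothetical timing helper (not in the original benchmark): waits for the
// queued kernels to finish before stopping the clock, so the measurement
// includes kernel execution time rather than only asynchronous launch cost.
static float timeStencilLaunches(int *in, int *out, int dim,
                                 dim3 grid, dim3 block, int iters) {
    auto start = steady_clock::now();
    for (int i = 0; i < iters; ++i) {
        stencil_1D<<<grid, block>>>(in, out, dim);
    }
    cudaDeviceSynchronize();  // drain the stream before reading the clock
    auto end = steady_clock::now();
    return duration_cast<duration<float, microseconds::period> >(end - start).count();
}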
8008692d55d239d96756db73c57b5ded42c9b40b.hip
// !!! This is a file automatically generated by hipify!!!
//
// auto-generated by op2.m on 29-Oct-2012 09:32:03
//

// header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"

// global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif

__constant__ double gam;
__constant__ double gm1;
__constant__ double cfl;
__constant__ double eps;
__constant__ double mach;
__constant__ double alpha;
__constant__ double qinf[4];

void op_decl_const_char(int dim, char const *type, int size, char *dat, char const *name) {
  if (!strcmp(name,"gam")) {
    cutilSafeCall(hipMemcpyToSymbol(gam, dat, dim*size));
  } else if (!strcmp(name,"gm1")) {
    cutilSafeCall(hipMemcpyToSymbol(gm1, dat, dim*size));
  } else if (!strcmp(name,"cfl")) {
    cutilSafeCall(hipMemcpyToSymbol(cfl, dat, dim*size));
  } else if (!strcmp(name,"eps")) {
    cutilSafeCall(hipMemcpyToSymbol(eps, dat, dim*size));
  } else if (!strcmp(name,"mach")) {
    cutilSafeCall(hipMemcpyToSymbol(mach, dat, dim*size));
  } else if (!strcmp(name,"alpha")) {
    cutilSafeCall(hipMemcpyToSymbol(alpha, dat, dim*size));
  } else if (!strcmp(name,"qinf")) {
    cutilSafeCall(hipMemcpyToSymbol(qinf, dat, dim*size));
  } else {
    printf("error: unknown const name\n");
    exit(1);
  }
}

// user kernel files
#include "save_soln_kernel.cu"
#include "adt_calc_kernel.hip"
#include "res_calc_kernel.cu"
#include "bres_calc_kernel.cu"
#include "update_kernel.hip"
8008692d55d239d96756db73c57b5ded42c9b40b.cu
//
// auto-generated by op2.m on 29-Oct-2012 09:32:03
//

// header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"

// global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif

__constant__ double gam;
__constant__ double gm1;
__constant__ double cfl;
__constant__ double eps;
__constant__ double mach;
__constant__ double alpha;
__constant__ double qinf[4];

void op_decl_const_char(int dim, char const *type, int size, char *dat, char const *name) {
  if (!strcmp(name,"gam")) {
    cutilSafeCall(cudaMemcpyToSymbol(gam, dat, dim*size));
  } else if (!strcmp(name,"gm1")) {
    cutilSafeCall(cudaMemcpyToSymbol(gm1, dat, dim*size));
  } else if (!strcmp(name,"cfl")) {
    cutilSafeCall(cudaMemcpyToSymbol(cfl, dat, dim*size));
  } else if (!strcmp(name,"eps")) {
    cutilSafeCall(cudaMemcpyToSymbol(eps, dat, dim*size));
  } else if (!strcmp(name,"mach")) {
    cutilSafeCall(cudaMemcpyToSymbol(mach, dat, dim*size));
  } else if (!strcmp(name,"alpha")) {
    cutilSafeCall(cudaMemcpyToSymbol(alpha, dat, dim*size));
  } else if (!strcmp(name,"qinf")) {
    cutilSafeCall(cudaMemcpyToSymbol(qinf, dat, dim*size));
  } else {
    printf("error: unknown const name\n");
    exit(1);
  }
}

// user kernel files
#include "save_soln_kernel.cu"
#include "adt_calc_kernel.cu"
#include "res_calc_kernel.cu"
#include "bres_calc_kernel.cu"
#include "update_kernel.cu"
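For context, op_decl_const_char above is driven from the application's host code. The call below is a minimal sketch of that pattern; the wrapper name and the host-side values (gam_h, qinf_h) are assumptions for illustration and do not come from the original sources.

// Hypothetical host-side initialisation of two of the constants declared above.
static void set_flow_constants(double gam_h, const double qinf_h[4]) {
    op_decl_const_char(1, "double", sizeof(double), (char *)&gam_h, "gam");
    op_decl_const_char(4, "double", sizeof(double), (char *)qinf_h, "qinf");
}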
5312cebea18ce0099b1ff59f20db26f1d2295e68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include <assert.h> #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #define TILE_SIZE 1024 // You can use any other block size you wish. #define BLOCK_SIZE 256 // Kernel Functions //Exclusive Scan __global__ void Ex_Scan_Kernel(unsigned int *blockSum, unsigned int *output_Kernel, unsigned int *input_Kernel, int numElements) { __shared__ unsigned int scan_array[2*BLOCK_SIZE]; unsigned int i = 2 * blockIdx.x * BLOCK_SIZE; unsigned int t = threadIdx.x; unsigned int ls_element; if (i + t < numElements) { scan_array[t] = input_Kernel[i+t]; } else scan_array[t] = 0; if (i + BLOCK_SIZE + t < numElements) scan_array[BLOCK_SIZE + t] = input_Kernel[i + BLOCK_SIZE + t]; else scan_array[BLOCK_SIZE + t] = 0; //Save the last element of each block for further use __syncthreads(); if(t==0) ls_element = scan_array[2*blockDim.x-1]; __syncthreads(); //Start reduction int stride = 1; while(stride <= BLOCK_SIZE) { int index = (threadIdx.x+1)*stride*2 - 1; if(index < 2*BLOCK_SIZE) scan_array[index] += scan_array[index-stride]; stride = stride*2; __syncthreads(); } //Start postscan if (t == 0){ scan_array[2*blockDim.x-1] = 0; if(blockIdx.x ==0) blockSum[0] = 0; } stride = BLOCK_SIZE; while(stride > 0) { int index = (threadIdx.x+1)*stride*2 - 1; if(index < 2* BLOCK_SIZE) { float temp = scan_array[index]; scan_array[index] += scan_array[index-stride]; scan_array[index-stride] = temp; } stride = stride / 2; __syncthreads(); } //Write the scan result back to the array if(i + t < numElements){ output_Kernel[i + t] = scan_array[t]; } else{ output_Kernel[i + t] = 0; } if(i + BLOCK_SIZE + t < numElements){ output_Kernel[i + BLOCK_SIZE + t] = scan_array[t + BLOCK_SIZE]; } else{ output_Kernel[i + BLOCK_SIZE + t] = 0; } if (t == 0) { blockSum[blockIdx.x] = scan_array[2*blockDim.x-1] + ls_element; } } //Vector Addition for the final result __global__ void vector_addition(unsigned int *outArray, unsigned int *inArray, int numElements) { __shared__ unsigned int addition; int index = 2* blockIdx.x * BLOCK_SIZE + threadIdx.x; if(threadIdx.x==0) { addition = inArray[blockIdx.x]; } __syncthreads(); if(index < numElements) { outArray[index] += addition; outArray[index + BLOCK_SIZE] += addition; } } //Make a recursive function of exclusive scan void Recursive_Ex_Scan(unsigned int *outArray, int numElements) { unsigned int *blockSum; int GRID_SIZE_Re = ceil(numElements/(2.0*BLOCK_SIZE)); hipMalloc( (void**) &blockSum, sizeof(unsigned int) * (GRID_SIZE_Re+1)); //Exclusive Scan the array hipLaunchKernelGGL(( Ex_Scan_Kernel), dim3(GRID_SIZE_Re), dim3(BLOCK_SIZE), 0, 0, blockSum, outArray, outArray, numElements); //Do recursive exclusive scan and return the final result if(GRID_SIZE_Re > 1) { Recursive_Ex_Scan(blockSum, GRID_SIZE_Re); hipLaunchKernelGGL(( vector_addition), dim3(GRID_SIZE_Re) , dim3(BLOCK_SIZE), 0, 0, outArray,blockSum,numElements); } hipFree(blockSum); } // **===-------- Modify the body of this function -----------===** // You may need to make multiple kernel calls. Make your own kernel // functions in this file, and then call them from here. 
// Note that the code has been modified to ensure numElements is a multiple // of TILE_SIZE void prescanArray(unsigned int *outArray, unsigned int *inArray, int numElements) { int GRID_SIZE = ceil(numElements/(2.0*BLOCK_SIZE)); //int num = numElements; unsigned int *blockSum; hipMalloc( (void**) &blockSum, sizeof(unsigned int) * GRID_SIZE); hipLaunchKernelGGL(( Ex_Scan_Kernel), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, blockSum, outArray, inArray, numElements); if(GRID_SIZE > 1) { Recursive_Ex_Scan(blockSum, GRID_SIZE); hipLaunchKernelGGL(( vector_addition), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, outArray, blockSum, numElements); } hipFree(blockSum); } // **===-----------------------------------------------------------===** #endif // _PRESCAN_CU_
5312cebea18ce0099b1ff59f20db26f1d2295e68.cu
#ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include <assert.h> #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #define TILE_SIZE 1024 // You can use any other block size you wish. #define BLOCK_SIZE 256 // Kernel Functions //Exclusive Scan __global__ void Ex_Scan_Kernel(unsigned int *blockSum, unsigned int *output_Kernel, unsigned int *input_Kernel, int numElements) { __shared__ unsigned int scan_array[2*BLOCK_SIZE]; unsigned int i = 2 * blockIdx.x * BLOCK_SIZE; unsigned int t = threadIdx.x; unsigned int ls_element; if (i + t < numElements) { scan_array[t] = input_Kernel[i+t]; } else scan_array[t] = 0; if (i + BLOCK_SIZE + t < numElements) scan_array[BLOCK_SIZE + t] = input_Kernel[i + BLOCK_SIZE + t]; else scan_array[BLOCK_SIZE + t] = 0; //Save the last element of each block for further use __syncthreads(); if(t==0) ls_element = scan_array[2*blockDim.x-1]; __syncthreads(); //Start reduction int stride = 1; while(stride <= BLOCK_SIZE) { int index = (threadIdx.x+1)*stride*2 - 1; if(index < 2*BLOCK_SIZE) scan_array[index] += scan_array[index-stride]; stride = stride*2; __syncthreads(); } //Start postscan if (t == 0){ scan_array[2*blockDim.x-1] = 0; if(blockIdx.x ==0) blockSum[0] = 0; } stride = BLOCK_SIZE; while(stride > 0) { int index = (threadIdx.x+1)*stride*2 - 1; if(index < 2* BLOCK_SIZE) { float temp = scan_array[index]; scan_array[index] += scan_array[index-stride]; scan_array[index-stride] = temp; } stride = stride / 2; __syncthreads(); } //Write the scan result back to the array if(i + t < numElements){ output_Kernel[i + t] = scan_array[t]; } else{ output_Kernel[i + t] = 0; } if(i + BLOCK_SIZE + t < numElements){ output_Kernel[i + BLOCK_SIZE + t] = scan_array[t + BLOCK_SIZE]; } else{ output_Kernel[i + BLOCK_SIZE + t] = 0; } if (t == 0) { blockSum[blockIdx.x] = scan_array[2*blockDim.x-1] + ls_element; } } //Vector Addition for the final result __global__ void vector_addition(unsigned int *outArray, unsigned int *inArray, int numElements) { __shared__ unsigned int addition; int index = 2* blockIdx.x * BLOCK_SIZE + threadIdx.x; if(threadIdx.x==0) { addition = inArray[blockIdx.x]; } __syncthreads(); if(index < numElements) { outArray[index] += addition; outArray[index + BLOCK_SIZE] += addition; } } //Make a recursive function of exclusive scan void Recursive_Ex_Scan(unsigned int *outArray, int numElements) { unsigned int *blockSum; int GRID_SIZE_Re = ceil(numElements/(2.0*BLOCK_SIZE)); cudaMalloc( (void**) &blockSum, sizeof(unsigned int) * (GRID_SIZE_Re+1)); //Exclusive Scan the array Ex_Scan_Kernel<<<GRID_SIZE_Re, BLOCK_SIZE>>>(blockSum, outArray, outArray, numElements); //Do recursive exclusive scan and return the final result if(GRID_SIZE_Re > 1) { Recursive_Ex_Scan(blockSum, GRID_SIZE_Re); vector_addition<<<GRID_SIZE_Re , BLOCK_SIZE>>>(outArray,blockSum,numElements); } cudaFree(blockSum); } // **===-------- Modify the body of this function -----------===** // You may need to make multiple kernel calls. Make your own kernel // functions in this file, and then call them from here. 
// Note that the code has been modified to ensure numElements is a multiple // of TILE_SIZE void prescanArray(unsigned int *outArray, unsigned int *inArray, int numElements) { int GRID_SIZE = ceil(numElements/(2.0*BLOCK_SIZE)); //int num = numElements; unsigned int *blockSum; cudaMalloc( (void**) &blockSum, sizeof(unsigned int) * GRID_SIZE); Ex_Scan_Kernel<<<GRID_SIZE, BLOCK_SIZE>>>(blockSum, outArray, inArray, numElements); if(GRID_SIZE > 1) { Recursive_Ex_Scan(blockSum, GRID_SIZE); vector_addition<<<GRID_SIZE, BLOCK_SIZE>>>(outArray, blockSum, numElements); } cudaFree(blockSum); } // **===-----------------------------------------------------------===** #endif // _PRESCAN_CU_
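prescanArray above expects device pointers. The driver below is a minimal end-to-end sketch of calling it; run_prescan is hypothetical and, per the comment in the file, it assumes numElements is a multiple of TILE_SIZE.

// Illustrative host driver for the exclusive scan above; not part of the
// original file. Allocates device buffers, runs prescanArray and copies the
// result back (result[0] is 0 for an exclusive scan).
#include <vector>

static std::vector<unsigned int> run_prescan(const std::vector<unsigned int> &h_in) {
    const int n = static_cast<int>(h_in.size());
    unsigned int *d_in = NULL;
    unsigned int *d_out = NULL;
    cudaMalloc((void **)&d_in, n * sizeof(unsigned int));
    cudaMalloc((void **)&d_out, n * sizeof(unsigned int));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(unsigned int), cudaMemcpyHostToDevice);
    prescanArray(d_out, d_in, n);
    std::vector<unsigned int> h_out(n);
    cudaMemcpy(h_out.data(), d_out, n * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
    return h_out;
}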
09ac1ab47543f10b7f3ded37adbfe670118b00b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <optix.h> #include "optixPathTracer.h" #include "random.h" #include <sutil/vec_math.h> #include <stdio.h> extern "C" { __constant__ Params params; } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ struct RadiancePRD { // TODO: move some state directly into payload registers? float3 emitted; float3 radiance; float3 attenuation; float3 origin; float3 direction; uint32_t seed; int32_t countEmitted; int32_t done; int32_t pad; }; struct Onb { __forceinline__ __device__ Onb(const float3& normal) { m_normal = normal; if( fabs(m_normal.x) > fabs(m_normal.z) ) { m_binormal.x = -m_normal.y; m_binormal.y = m_normal.x; m_binormal.z = 0; } else { m_binormal.x = 0; m_binormal.y = -m_normal.z; m_binormal.z = m_normal.y; } m_binormal = normalize(m_binormal); m_tangent = cross( m_binormal, m_normal ); } __forceinline__ __device__ void inverse_transform(float3& p) const { p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal; } float3 m_tangent; float3 m_binormal; float3 m_normal; }; //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ #define print_x 512 #define print_y 384 #define print_pixel(...) 
\ { \ const uint3 idx__ = optixGetLaunchIndex(); \ if( idx__.x == print_y && idx__.y == print_x ) \ printf( __VA_ARGS__ ); \ } static __forceinline__ __device__ void* unpackPointer( uint32_t i0, uint32_t i1 ) { const uint64_t uptr = static_cast<uint64_t>( i0 ) << 32 | i1; void* ptr = reinterpret_cast<void*>( uptr ); return ptr; } static __forceinline__ __device__ void packPointer( void* ptr, uint32_t& i0, uint32_t& i1 ) { const uint64_t uptr = reinterpret_cast<uint64_t>( ptr ); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } static __forceinline__ __device__ RadiancePRD* getPRD() { const uint32_t u0 = optixGetPayload_0(); const uint32_t u1 = optixGetPayload_1(); return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) ); } static __forceinline__ __device__ void setPayloadOcclusion( bool occluded ) { optixSetPayload_0( static_cast<uint32_t>( occluded ) ); } static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p) { // Uniformly sample disk. const float r = sqrtf( u1 ); const float phi = 2.0f*M_PIf * u2; p.x = r * cosf( phi ); p.y = r * sinf( phi ); // Project up to hemisphere. p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) ); } static __forceinline__ __device__ void traceRadiance( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, RadiancePRD* prd ) { // TODO: deduce stride from num ray-types passed in params uint32_t u0, u1; packPointer( prd, u0, u1 ); optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } static __forceinline__ __device__ bool traceOcclusion( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax ) { uint32_t occluded = 0u; optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex occluded ); return occluded; } __forceinline__ __device__ uchar4 make_color( const float3& c ) { const float gamma = 2.2f; return make_uchar4( static_cast<uint8_t>( powf( clamp( c.x, 0.0f, 1.0f ), 1.0/gamma )*255.0f ), static_cast<uint8_t>( powf( clamp( c.y, 0.0f, 1.0f ), 1.0/gamma )*255.0f ), static_cast<uint8_t>( powf( clamp( c.z, 0.0f, 1.0f ), 1.0/gamma )*255.0f ), 255u ); } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__rg() { const int w = params.width; const int h = params.height; const float3 eye = params.eye; const float3 U = params.U; const float3 V = params.V; const float3 W = params.W; const uint3 idx = optixGetLaunchIndex(); const int subframe_index = params.subframe_index; uint32_t seed = tea<4>( idx.y*w + idx.x, subframe_index ); float3 result = make_float3( 0.0f ); int i = params.samples_per_launch; do { const float2 subpixel_jitter = make_float2( rnd( seed )-0.5f, rnd( seed )-0.5f ); const float2 d = 2.0f * make_float2( ( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ), ( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h ) ) - 1.0f; float3 ray_direction = normalize(d.x*U + d.y*V + W); float3 ray_origin = eye; RadiancePRD prd; prd.emitted = make_float3(0.f); prd.radiance 
= make_float3(0.f); prd.attenuation = make_float3(1.f); prd.countEmitted = true; prd.done = false; prd.seed = seed; int depth = 0; for( ;; ) { traceRadiance( params.handle, ray_origin, ray_direction, 0.01f, // tmin // TODO: smarter offset 1e16f, // tmax &prd ); result += prd.emitted; result += prd.radiance * prd.attenuation; if( prd.done || depth >= 3 ) // TODO RR, variable for depth break; ray_origin = prd.origin; ray_direction = prd.direction; ++depth; } } while( --i ); const uint3 launch_index = optixGetLaunchIndex(); const uint32_t image_index = launch_index.y * params.width + launch_index.x; float3 accum_color = result / static_cast<float>( params.samples_per_launch ); if( subframe_index > 0 ) { const float a = 1.0f / static_cast<float>( subframe_index+1 ); const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]); accum_color = lerp( accum_color_prev, accum_color, a ); } params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f); params.frame_buffer[ image_index ] = make_color ( accum_color ); } extern "C" __global__ void __miss__radiance() { MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() ); RadiancePRD* prd = getPRD(); prd->radiance = make_float3( rt_data->bg_color ); prd->done = true; } extern "C" __global__ void __closesthit__occlusion() { setPayloadOcclusion( true ); } extern "C" __global__ void __closesthit__radiance() { HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer(); const int prim_idx = optixGetPrimitiveIndex(); const float3 ray_dir = optixGetWorldRayDirection(); const int vert_idx_offset = prim_idx*3; const float3 v0 = make_float3( rt_data->vertices[ vert_idx_offset+0 ] ); const float3 v1 = make_float3( rt_data->vertices[ vert_idx_offset+1 ] ); const float3 v2 = make_float3( rt_data->vertices[ vert_idx_offset+2 ] ); const float3 N_0 = normalize( cross( v1-v0, v2-v0 ) ); const float3 N = faceforward( N_0, -ray_dir, N_0 ); const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir; RadiancePRD* prd = getPRD(); if( prd->countEmitted ) prd->emitted = rt_data->emission_color; else prd->emitted = make_float3( 0.0f ); uint32_t seed = prd->seed; { const float z1 = rnd(seed); const float z2 = rnd(seed); float3 w_in; cosine_sample_hemisphere( z1, z2, w_in ); Onb onb( N ); onb.inverse_transform( w_in ); prd->direction = w_in; prd->origin = P; prd->attenuation *= rt_data->diffuse_color; prd->countEmitted = false; } const float z1 = rnd(seed); const float z2 = rnd(seed); prd->seed = seed; ParallelogramLight light = params.light; const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2; // Calculate properties of light sample (for area based pdf) const float Ldist = length(light_pos - P ); const float3 L = normalize(light_pos - P ); const float nDl = dot( N, L ); const float LnDl = -dot( light.normal, L ); float weight = 0.0f; if( nDl > 0.0f && LnDl > 0.0f ) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin Ldist - 0.01f // tmax ); if( !occluded ) { const float A = length(cross(light.v1, light.v2)); weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist); } } prd->radiance += light.emission * weight; }
09ac1ab47543f10b7f3ded37adbfe670118b00b5.cu
// // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <optix.h> #include "optixPathTracer.h" #include "random.h" #include <sutil/vec_math.h> #include <stdio.h> extern "C" { __constant__ Params params; } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ struct RadiancePRD { // TODO: move some state directly into payload registers? float3 emitted; float3 radiance; float3 attenuation; float3 origin; float3 direction; uint32_t seed; int32_t countEmitted; int32_t done; int32_t pad; }; struct Onb { __forceinline__ __device__ Onb(const float3& normal) { m_normal = normal; if( fabs(m_normal.x) > fabs(m_normal.z) ) { m_binormal.x = -m_normal.y; m_binormal.y = m_normal.x; m_binormal.z = 0; } else { m_binormal.x = 0; m_binormal.y = -m_normal.z; m_binormal.z = m_normal.y; } m_binormal = normalize(m_binormal); m_tangent = cross( m_binormal, m_normal ); } __forceinline__ __device__ void inverse_transform(float3& p) const { p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal; } float3 m_tangent; float3 m_binormal; float3 m_normal; }; //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ #define print_x 512 #define print_y 384 #define print_pixel(...) 
\ { \ const uint3 idx__ = optixGetLaunchIndex(); \ if( idx__.x == print_y && idx__.y == print_x ) \ printf( __VA_ARGS__ ); \ } static __forceinline__ __device__ void* unpackPointer( uint32_t i0, uint32_t i1 ) { const uint64_t uptr = static_cast<uint64_t>( i0 ) << 32 | i1; void* ptr = reinterpret_cast<void*>( uptr ); return ptr; } static __forceinline__ __device__ void packPointer( void* ptr, uint32_t& i0, uint32_t& i1 ) { const uint64_t uptr = reinterpret_cast<uint64_t>( ptr ); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } static __forceinline__ __device__ RadiancePRD* getPRD() { const uint32_t u0 = optixGetPayload_0(); const uint32_t u1 = optixGetPayload_1(); return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) ); } static __forceinline__ __device__ void setPayloadOcclusion( bool occluded ) { optixSetPayload_0( static_cast<uint32_t>( occluded ) ); } static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p) { // Uniformly sample disk. const float r = sqrtf( u1 ); const float phi = 2.0f*M_PIf * u2; p.x = r * cosf( phi ); p.y = r * sinf( phi ); // Project up to hemisphere. p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) ); } static __forceinline__ __device__ void traceRadiance( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, RadiancePRD* prd ) { // TODO: deduce stride from num ray-types passed in params uint32_t u0, u1; packPointer( prd, u0, u1 ); optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } static __forceinline__ __device__ bool traceOcclusion( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax ) { uint32_t occluded = 0u; optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex occluded ); return occluded; } __forceinline__ __device__ uchar4 make_color( const float3& c ) { const float gamma = 2.2f; return make_uchar4( static_cast<uint8_t>( powf( clamp( c.x, 0.0f, 1.0f ), 1.0/gamma )*255.0f ), static_cast<uint8_t>( powf( clamp( c.y, 0.0f, 1.0f ), 1.0/gamma )*255.0f ), static_cast<uint8_t>( powf( clamp( c.z, 0.0f, 1.0f ), 1.0/gamma )*255.0f ), 255u ); } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__rg() { const int w = params.width; const int h = params.height; const float3 eye = params.eye; const float3 U = params.U; const float3 V = params.V; const float3 W = params.W; const uint3 idx = optixGetLaunchIndex(); const int subframe_index = params.subframe_index; uint32_t seed = tea<4>( idx.y*w + idx.x, subframe_index ); float3 result = make_float3( 0.0f ); int i = params.samples_per_launch; do { const float2 subpixel_jitter = make_float2( rnd( seed )-0.5f, rnd( seed )-0.5f ); const float2 d = 2.0f * make_float2( ( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ), ( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h ) ) - 1.0f; float3 ray_direction = normalize(d.x*U + d.y*V + W); float3 ray_origin = eye; RadiancePRD prd; prd.emitted = make_float3(0.f); prd.radiance 
= make_float3(0.f); prd.attenuation = make_float3(1.f); prd.countEmitted = true; prd.done = false; prd.seed = seed; int depth = 0; for( ;; ) { traceRadiance( params.handle, ray_origin, ray_direction, 0.01f, // tmin // TODO: smarter offset 1e16f, // tmax &prd ); result += prd.emitted; result += prd.radiance * prd.attenuation; if( prd.done || depth >= 3 ) // TODO RR, variable for depth break; ray_origin = prd.origin; ray_direction = prd.direction; ++depth; } } while( --i ); const uint3 launch_index = optixGetLaunchIndex(); const uint32_t image_index = launch_index.y * params.width + launch_index.x; float3 accum_color = result / static_cast<float>( params.samples_per_launch ); if( subframe_index > 0 ) { const float a = 1.0f / static_cast<float>( subframe_index+1 ); const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]); accum_color = lerp( accum_color_prev, accum_color, a ); } params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f); params.frame_buffer[ image_index ] = make_color ( accum_color ); } extern "C" __global__ void __miss__radiance() { MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() ); RadiancePRD* prd = getPRD(); prd->radiance = make_float3( rt_data->bg_color ); prd->done = true; } extern "C" __global__ void __closesthit__occlusion() { setPayloadOcclusion( true ); } extern "C" __global__ void __closesthit__radiance() { HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer(); const int prim_idx = optixGetPrimitiveIndex(); const float3 ray_dir = optixGetWorldRayDirection(); const int vert_idx_offset = prim_idx*3; const float3 v0 = make_float3( rt_data->vertices[ vert_idx_offset+0 ] ); const float3 v1 = make_float3( rt_data->vertices[ vert_idx_offset+1 ] ); const float3 v2 = make_float3( rt_data->vertices[ vert_idx_offset+2 ] ); const float3 N_0 = normalize( cross( v1-v0, v2-v0 ) ); const float3 N = faceforward( N_0, -ray_dir, N_0 ); const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir; RadiancePRD* prd = getPRD(); if( prd->countEmitted ) prd->emitted = rt_data->emission_color; else prd->emitted = make_float3( 0.0f ); uint32_t seed = prd->seed; { const float z1 = rnd(seed); const float z2 = rnd(seed); float3 w_in; cosine_sample_hemisphere( z1, z2, w_in ); Onb onb( N ); onb.inverse_transform( w_in ); prd->direction = w_in; prd->origin = P; prd->attenuation *= rt_data->diffuse_color; prd->countEmitted = false; } const float z1 = rnd(seed); const float z2 = rnd(seed); prd->seed = seed; ParallelogramLight light = params.light; const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2; // Calculate properties of light sample (for area based pdf) const float Ldist = length(light_pos - P ); const float3 L = normalize(light_pos - P ); const float nDl = dot( N, L ); const float LnDl = -dot( light.normal, L ); float weight = 0.0f; if( nDl > 0.0f && LnDl > 0.0f ) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin Ldist - 0.01f // tmax ); if( !occluded ) { const float A = length(cross(light.v1, light.v2)); weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist); } } prd->radiance += light.emission * weight; }
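One detail worth noting in the path tracer above is how packPointer and unpackPointer squeeze a 64-bit per-ray data pointer into two 32-bit OptiX payload registers. The stand-alone host snippet below only demonstrates that round trip; it is not part of the sample and needs no OptiX headers.

// Host-side illustration of the payload pointer packing used above:
// split a 64-bit pointer into two 32-bit halves and reassemble it.
#include <cstdint>
#include <cassert>

int main() {
    int value = 42;
    void *ptr = &value;

    const uint64_t uptr = reinterpret_cast<uint64_t>(ptr);
    const uint32_t i0 = static_cast<uint32_t>(uptr >> 32);          // high half
    const uint32_t i1 = static_cast<uint32_t>(uptr & 0xffffffffu);  // low half

    void *back = reinterpret_cast<void *>((static_cast<uint64_t>(i0) << 32) | i1);
    assert(back == ptr && *static_cast<int *>(back) == 42);
    return 0;
}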
a0e72b2f0b42669eb22847320acc7bbe79fde460.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } 
__device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex intega(int a) { return hipComplex(a,0.0); } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex bi(3.14159265484898,0.0); const float scale = 1.0; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(0.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex nom = expc(tir*bi*ai*q); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (2.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */ 
cue=nom; cue = unity/cue; for(v=0;v<30;v++) { cue = cue - (cue*(intega(3) + intega(2) *cue- aon*intega(12)* cue*cue - intega(13)* cue*uon*cue + intega(21) *cue*cue*cue*cue + intega(6) *cue*cue*cue*cue*cue - intega(12) *cue*cue*cue*cue*cue*cue + intega(3)*cue*cue*cue*cue*cue*cue*cue))/((intega(-1) + cue) *(intega(-3) - intega(7)* cue+ intega(29)*cue*cue + intega(81)* cue*cue*cue - intega(24)* cue*cue*cue*cue - intega(60)* cue*cue*cue*cue*cue + intega(24)* cue*cue*cue*cue*cue*cue)); } double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); }
a0e72b2f0b42669eb22847320acc7bbe79fde460.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, 
cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex intega(int a) { return cuComplex(a,0.0); } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex bi(3.14159265484898,0.0); const float scale = 1.0; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(0.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex nom = expc(tir*bi*ai*q); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (2.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */ cue=nom; cue = unity/cue; for(v=0;v<30;v++) { cue = 
cue - (cue*(intega(3) + intega(2)*cue - aon*intega(12)*cue*cue
                 - intega(13)*cue*uon*cue + intega(21)*cue*cue*cue*cue
                 + intega(6)*cue*cue*cue*cue*cue - intega(12)*cue*cue*cue*cue*cue*cue
                 + intega(3)*cue*cue*cue*cue*cue*cue*cue))
               / ((intega(-1) + cue)*(intega(-3) - intega(7)*cue + intega(29)*cue*cue
                 + intega(81)*cue*cue*cue - intega(24)*cue*cue*cue*cue
                 - intega(60)*cue*cue*cue*cue*cue + intega(24)*cue*cue*cue*cue*cue*cue));
 }

 double tha;
 tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));

 d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
 d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
 d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
 d_out[i].w = 255;
}

void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
 const dim3 blockSize(TX, TY);
 const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
 distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
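// A minimal standalone host harness for the kernelLauncher above -- a sketch, not part of the
// original file. It assumes kernelLauncher is declared in kernel.h, that DIM is 2100 as defined
// above, and that a plain cudaMalloc'd buffer stands in for the usual GL pixel-buffer mapping.
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos);  // declared in kernel.h (assumed)

int main() {
  const int kDim = 2100;                                 // assumed to match DIM above
  const size_t bytes = size_t(kDim) * kDim * sizeof(uchar4);

  uchar4 *d_out = nullptr;
  if (cudaMalloc((void **)&d_out, bytes) != cudaSuccess) return 1;

  const int2 pos = make_int2(kDim / 2, kDim / 2);        // arbitrary "mouse" position
  kernelLauncher(d_out, kDim, kDim, pos);

  std::vector<uchar4> host(size_t(kDim) * kDim);
  cudaMemcpy(host.data(), d_out, bytes, cudaMemcpyDeviceToHost);
  printf("first pixel: %d %d %d %d\n", host[0].x, host[0].y, host[0].z, host[0].w);

  cudaFree(d_out);
  return 0;
}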
e42cc20d29cc005227f7f3aa0b73cc200450c4a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp" #include "HugeCTR/include/utils.cuh" #include "cub/hipcub/hipcub.hpp" #include "cub/hipcub/hipcub.hpp" namespace HugeCTR { namespace { __global__ void value_count_kernel_2(int nnz, const uint32_t *new_hash_value_flag, const uint32_t *hash_value_flag_sumed, uint32_t *hash_value_index_index, uint32_t *counter) { for (int gid = blockIdx.x * blockDim.x + threadIdx.x; gid < nnz; gid += blockDim.x * gridDim.x) { uint32_t flag = new_hash_value_flag[gid]; if (flag == 1) { hash_value_index_index[hash_value_flag_sumed[gid] - 1] = gid; } } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { *counter = hash_value_flag_sumed[nnz - 1]; hash_value_index_index[*counter] = nnz; } } // expand sample id by row_offset template <typename TypeKey> __global__ void sample_id_expand_kernel(int batch_size, int slot_num, const TypeKey *row_offset, TypeKey *sample_id) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < (batch_size * slot_num)) { TypeKey offset = row_offset[gid]; int value_num = row_offset[gid + 1] - offset; for (int i = 0; i < value_num; i++) { sample_id[offset + i] = gid; } } } __global__ void value_count_kernel_1(int nnz, const size_t *hash_value_index_sort, uint32_t *new_hash_value_flag) { for (int gid = blockIdx.x * blockDim.x + threadIdx.x; gid < nnz; gid += blockDim.x * gridDim.x) { size_t cur_value = hash_value_index_sort[gid]; if (gid > 0) { size_t former_value = hash_value_index_sort[gid - 1]; // decide if this is the start of a group(the elements in this group have the same // hash_value_index_sort) if (cur_value != former_value) { new_hash_value_flag[gid] = 1; } else { new_hash_value_flag[gid] = 0; } } else { // gid == 0 new_hash_value_flag[gid] = 1; } } } // calculate weights update value(deltaw) by adam opitimizer template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_adam_kernel_global(uint32_t hash_value_index_count_num, int embedding_vec_size, const AdamOptHyperParams<TypeEmbeddingComp> adam, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = 
hash_value_index_sort[offset]; size_t feature_index = row_index * embedding_vec_size + tid; float mi = TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.m_ptr[feature_index]) + (1.0f - adam.beta1) * gi / adam.beta1; float vi = TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.v_ptr[feature_index]) + (1.0f - adam.beta2) * gi * gi / adam.beta2; adam.m_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mi); adam.v_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(vi); } } template <typename TypeEmbeddingComp> __global__ void adam_update_kernel_global(int embedding_vec_size, size_t table_size, // vocabulary size / factor const AdamOptHyperParams<TypeEmbeddingComp> adam, float alpha_t, float *hash_table_value) { const int TILE_SIZE = blockDim.x * gridDim.x; for (size_t feature_index = blockIdx.x * blockDim.x + threadIdx.x; feature_index < table_size * embedding_vec_size; feature_index += TILE_SIZE) { float mi = adam.beta1 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.m_ptr[feature_index]); float vi = adam.beta2 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.v_ptr[feature_index]); adam.m_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mi); adam.v_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(vi); float weight_diff = -alpha_t * mi / (sqrtf(vi) + adam.epsilon); hash_table_value[feature_index] += weight_diff; } } // calculate weights update value(deltaw) by momentum_sgd opitimizer template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_momentum_sgd_kernel_global( uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const MomentumSGDOptHyperParams<TypeEmbeddingComp> momentum, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; size_t feature_index = row_index * embedding_vec_size + tid; float mo = TypeConvertFunc<float, TypeEmbeddingComp>::convert(momentum.momentum_ptr[feature_index]) - lr * gi / momentum.factor; momentum.momentum_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mo); } } template <typename TypeEmbeddingComp> __global__ void momentum_sgd_update_kernel_global( int embedding_vec_size, size_t table_size, // vocabulary size / factor const MomentumSGDOptHyperParams<TypeEmbeddingComp> momentum, float *hash_table_value) { const int TILE_SIZE = blockDim.x * gridDim.x; for (size_t feature_index = blockIdx.x * blockDim.x + threadIdx.x; feature_index < table_size * embedding_vec_size; feature_index += TILE_SIZE) { float mo = TypeConvertFunc<float, TypeEmbeddingComp>::convert(momentum.momentum_ptr[feature_index]); mo *= momentum.factor; hash_table_value[feature_index] += mo; momentum.momentum_ptr[feature_index] 
= TypeConvertFunc<TypeEmbeddingComp, float>::convert(mo); } } template <typename TypeEmbeddingComp> __global__ void nesterov_global_update_kernel_global( int embedding_vec_size, size_t table_size, // vocabulary size / factor const NesterovOptHyperParams<TypeEmbeddingComp> nesterov, float *hash_table_value) { const int TILE_SIZE = blockDim.x * gridDim.x; for (size_t feature_index = blockIdx.x * blockDim.x + threadIdx.x; feature_index < table_size * embedding_vec_size; feature_index += TILE_SIZE) { float accm = TypeConvertFunc<float, TypeEmbeddingComp>::convert(nesterov.accm_ptr[feature_index]); accm *= nesterov.mu; nesterov.accm_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(accm); hash_table_value[feature_index] += accm * nesterov.mu; } } // calculate weights update value(deltaw) by nesterov opitimizer template <typename TypeKey, typename TypeEmbeddingComp> __global__ void nesterov_local_update_kernel_global( uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const NesterovOptHyperParams<TypeEmbeddingComp> nesterov, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, float *hash_table_value, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; size_t feature_index = row_index * embedding_vec_size + tid; float accm = TypeConvertFunc<float, TypeEmbeddingComp>::convert(nesterov.accm_ptr[feature_index]); accm -= lr * gi; nesterov.accm_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(accm); hash_table_value[feature_index] -= (1 + nesterov.mu) * (lr * gi); } } template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_sgd_kernel_global(uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, float *hash_table_value, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // update size_t value_index = hash_value_index_sort[offset]; size_t feature_index = value_index * embedding_vec_size + tid; hash_table_value[feature_index] -= lr * gi; } } // calculate weights update value(deltaw) by adam opitimizer template <typename TypeKey, typename TypeEmbeddingComp> 
__global__ void opt_adam_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size, const AdamOptHyperParams<TypeEmbeddingComp> adam, float alpha_t, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, size_t *deltaw_hash_value_index, float *deltaw, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; size_t feature_index = row_index * embedding_vec_size + tid; float mi = adam.beta1 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.m_ptr[feature_index]) + (1.0f - adam.beta1) * gi; float vi = adam.beta2 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.v_ptr[feature_index]) + (1.0f - adam.beta2) * gi * gi; adam.m_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mi); adam.v_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(vi); float weight_diff = -alpha_t * mi / (sqrtf(vi) + adam.epsilon); // save weights diff deltaw[bid * embedding_vec_size + tid] = weight_diff; // save hash value_indexs(corresponding to deltaw) if (tid == 0) { deltaw_hash_value_index[bid] = row_index; } } } // calculate weights update value(deltaw) by momentum_sgd opitimizer template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_momentum_sgd_kernel( uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const MomentumSGDOptHyperParams<TypeEmbeddingComp> momentum, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, size_t *deltaw_hash_value_index, float *deltaw, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; size_t feature_index = row_index * embedding_vec_size + tid; float mo = momentum.factor * TypeConvertFunc<float, TypeEmbeddingComp>::convert( momentum.momentum_ptr[feature_index]) - lr * gi; momentum.momentum_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mo); // save weights diff deltaw[bid * embedding_vec_size + tid] = mo; // save hash value_indexs(corresponding to deltaw) if (tid == 0) { deltaw_hash_value_index[bid] = row_index; } } } // calculate weights update value(deltaw) by nesterov 
opitimizer template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_nesterov_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const NesterovOptHyperParams<TypeEmbeddingComp> nesterov, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, size_t *deltaw_hash_value_index, float *deltaw, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; size_t feature_index = row_index * embedding_vec_size + tid; float accm_old = TypeConvertFunc<float, TypeEmbeddingComp>::convert(nesterov.accm_ptr[feature_index]); float accm_new = nesterov.mu * accm_old - lr * gi; nesterov.accm_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(accm_new); float weight_diff = -nesterov.mu * accm_old + (1.0f + nesterov.mu) * accm_new; // save weights diff deltaw[bid * embedding_vec_size + tid] = weight_diff; // save hash value_indexs(corresponding to deltaw) if (tid == 0) { deltaw_hash_value_index[bid] = row_index; } } } template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_sgd_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, size_t *deltaw_hash_value_index, float *deltaw, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; float weight_diff = -lr * gi; // save weights diff deltaw[bid * embedding_vec_size + tid] = weight_diff; // save hash value_indexs(corresponding to deltaw) if (tid == 0) { deltaw_hash_value_index[bid] = row_index; } } } // update embedding table(weights) by deltaw __global__ void update_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size, const size_t *deltaw_hash_value_index, const float *deltaw, float *hash_table_value) { int tid = threadIdx.x; int bid = blockIdx.x; if ((bid < hash_value_index_count_num) && (tid < embedding_vec_size)) { size_t value_index = deltaw_hash_value_index[bid]; size_t feature_index = value_index * embedding_vec_size + tid; hash_table_value[feature_index] += deltaw[bid * embedding_vec_size + tid]; } } 
template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_sgd_atomic_kernel(int nnz, int embedding_vec_size, float lr_scale, const size_t *hash_value_index, const TypeKey *sample_ids, const TypeEmbeddingComp *wgrad, float *hash_table_value) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < nnz) { for (int key_id = bid; key_id < nnz; key_id += gridDim.x) { int sample_id = sample_ids[key_id]; float deltaw = -lr_scale * TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_id * embedding_vec_size + tid]); // atomic update size_t value_index = hash_value_index[key_id]; size_t feature_index = value_index * embedding_vec_size + tid; atomicAdd(&hash_table_value[feature_index], deltaw); } } } // only support LocalizedSlotSparseEmbeddingOneHot template <typename TypeEmbeddingComp> __global__ void opt_sgd_atomic_kernel(int nnz, int embedding_vec_size, float lr_scale, const size_t *hash_value_index, const TypeEmbeddingComp *wgrad, float *hash_table_value) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < nnz) { for (int key_id = bid; key_id < nnz; key_id += gridDim.x) { // for one-hot, the max_feature_per_slot is 1, so sample_id is equal to key_id float deltaw = -lr_scale * TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[key_id * embedding_vec_size + tid]); // atomic update size_t value_index = hash_value_index[key_id]; size_t feature_index = value_index * embedding_vec_size + tid; atomicAdd(&hash_table_value[feature_index], deltaw); } } } } // namespace template <typename TypeHashKey, typename TypeEmbeddingComp> void SparseEmbeddingFunctors::update_params( size_t batch_size, size_t slot_num, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, OptParams<TypeEmbeddingComp> &opt_params, size_t nnz, const Tensor2<TypeHashKey> &row_offset, Tensor2<size_t> &hash_value_index, Tensor2<TypeHashKey> &sample_id, Tensor2<TypeHashKey> &sample_id_sort, Tensor2<size_t> &hash_value_index_sort, Tensor2<uint32_t> &hash_value_index_count_offset, Tensor2<uint32_t> &new_hash_value_flag, Tensor2<uint32_t> &hash_value_flag_sumed, Tensor2<uint32_t> &hash_value_index_count_counter, Tensor2<void> &temp_storage_sort, Tensor2<void> &temp_storage_scan, const Tensor2<TypeEmbeddingComp> &wgrad, Tensor2<size_t> &deltaw_hash_value_index, Tensor2<float> &deltaw, Tensor2<float> &hash_table_value, size_t sm_count, hipStream_t stream) { if (slot_num == 0) { return; } size_t block_size, grid_size; try { // step1: expand sample IDs block_size = 64; grid_size = (batch_size * slot_num - 1) / block_size + 1; hipLaunchKernelGGL(( sample_id_expand_kernel), dim3(grid_size), dim3(block_size), 0, stream, batch_size, slot_num, row_offset.get_ptr(), sample_id.get_ptr()); if (opt_params.optimizer == Optimizer_t::SGD && opt_params.hyperparams.sgd.atomic_update) { // for SGD, do atomic update const size_t block_size = embedding_vec_size; const size_t grid_size = min(max(1ul, nnz), sm_count * 32); float lr_scale = opt_params.lr / opt_params.scaler; hipLaunchKernelGGL(( opt_sgd_atomic_kernel), dim3(grid_size), dim3(block_size), 0, stream, nnz, embedding_vec_size, lr_scale, hash_value_index.get_ptr(), sample_id.get_ptr(), wgrad.get_ptr(), hash_table_value.get_ptr()); } else { // step3: sort by hash_value_index int end_bit = static_cast<int>(log2(static_cast<float>(max_vocabulary_size_per_gpu))) + 1; size_t temp_storage_sort_size = temp_storage_sort.get_size_in_bytes(); CK_CUDA_THROW_(hipcub::DeviceRadixSort::SortPairs( 
temp_storage_sort.get_ptr(), temp_storage_sort_size, hash_value_index.get_ptr(), hash_value_index_sort.get_ptr(), sample_id.get_ptr(), sample_id_sort.get_ptr(), nnz, 0, end_bit, stream, false)); // step4: count the number for each unduplicated hash_value_index CK_CUDA_THROW_( hipMemsetAsync(hash_value_index_count_counter.get_ptr(), 0, sizeof(uint32_t), stream)); constexpr size_t max_grid_size = 384; block_size = 256; grid_size = min(max_grid_size, (nnz - 1) / block_size + 1); hipLaunchKernelGGL(( value_count_kernel_1), dim3(grid_size), dim3(block_size), 0, stream, nnz, hash_value_index_sort.get_ptr(), new_hash_value_flag.get_ptr()); // prefix_sum size_t temp_storage_scan_size = temp_storage_scan.get_size_in_bytes(); CK_CUDA_THROW_(hipcub::DeviceScan::InclusiveSum( temp_storage_scan.get_ptr(), temp_storage_scan_size, new_hash_value_flag.get_ptr(), hash_value_flag_sumed.get_ptr(), nnz, stream)); hipLaunchKernelGGL(( value_count_kernel_2), dim3(grid_size), dim3(block_size), 0, stream, nnz, new_hash_value_flag.get_ptr(), hash_value_flag_sumed.get_ptr(), hash_value_index_count_offset.get_ptr(), hash_value_index_count_counter.get_ptr()); uint32_t hash_hash_value_index_count_num = 0; // this async memcpy will not perform as a async operation because the host memory is not // a pinned memroy CK_CUDA_THROW_(hipMemcpyAsync(&hash_hash_value_index_count_num, hash_value_index_count_counter.get_ptr(), sizeof(uint32_t), hipMemcpyDeviceToHost, stream)); // step5: use optimizer method to compute deltaw, and record corresponding // deltaw_hash_value_index block_size = embedding_vec_size; grid_size = max(1, hash_hash_value_index_count_num); if (opt_params.global_update) { switch (opt_params.optimizer) { case Optimizer_t::Adam: // adam { float alpha_t = opt_params.lr * sqrt(1 - pow(opt_params.hyperparams.adam.beta2, opt_params.hyperparams.adam.times)) / (1 - pow(opt_params.hyperparams.adam.beta1, opt_params.hyperparams.adam.times)); // update target mi and vi hipLaunchKernelGGL(( opt_adam_kernel_global), dim3(grid_size), dim3(block_size), 0, stream, hash_hash_value_index_count_num, embedding_vec_size, opt_params.hyperparams.adam, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), opt_params.scaler); // all update according to the mi vi hipLaunchKernelGGL(( adam_update_kernel_global), dim3(1024), dim3(256), 0, stream, embedding_vec_size, max_vocabulary_size_per_gpu, opt_params.hyperparams.adam, alpha_t, hash_table_value.get_ptr()); break; } case Optimizer_t::MomentumSGD: // momentum sgd hipLaunchKernelGGL(( opt_momentum_sgd_kernel_global), dim3(grid_size), dim3(block_size), 0, stream, hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, opt_params.hyperparams.momentum, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), opt_params.scaler); hipLaunchKernelGGL(( momentum_sgd_update_kernel_global), dim3(1024), dim3(256), 0, stream, embedding_vec_size, max_vocabulary_size_per_gpu, opt_params.hyperparams.momentum, hash_table_value.get_ptr()); break; case Optimizer_t::Nesterov: // nesterov hipLaunchKernelGGL(( nesterov_global_update_kernel_global), dim3(1024), dim3(256), 0, stream, embedding_vec_size, max_vocabulary_size_per_gpu, opt_params.hyperparams.nesterov, hash_table_value.get_ptr()); hipLaunchKernelGGL(( nesterov_local_update_kernel_global), dim3(grid_size), dim3(block_size), 0, stream, hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, 
opt_params.hyperparams.nesterov, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), hash_table_value.get_ptr(), opt_params.scaler); break; case Optimizer_t::SGD: hipLaunchKernelGGL(( opt_sgd_kernel_global), dim3(grid_size), dim3(block_size), 0, stream, hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), hash_table_value.get_ptr(), opt_params.scaler); break; default: CK_THROW_(Error_t::WrongInput, "Error: Invalid opitimizer type"); } } else { switch (opt_params.optimizer) { case Optimizer_t::Adam: // adam { float alpha_t = opt_params.lr * sqrt(1 - pow(opt_params.hyperparams.adam.beta2, opt_params.hyperparams.adam.times)) / (1 - pow(opt_params.hyperparams.adam.beta1, opt_params.hyperparams.adam.times)); hipLaunchKernelGGL(( opt_adam_kernel), dim3(grid_size), dim3(block_size), 0, stream, hash_hash_value_index_count_num, embedding_vec_size, opt_params.hyperparams.adam, alpha_t, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), deltaw_hash_value_index.get_ptr(), deltaw.get_ptr(), opt_params.scaler); break; } case Optimizer_t::MomentumSGD: // momentum sgd hipLaunchKernelGGL(( opt_momentum_sgd_kernel), dim3(grid_size), dim3(block_size), 0, stream, hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, opt_params.hyperparams.momentum, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), deltaw_hash_value_index.get_ptr(), deltaw.get_ptr(), opt_params.scaler); break; case Optimizer_t::Nesterov: // nesterov hipLaunchKernelGGL(( opt_nesterov_kernel), dim3(grid_size), dim3(block_size), 0, stream, hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, opt_params.hyperparams.nesterov, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), deltaw_hash_value_index.get_ptr(), deltaw.get_ptr(), opt_params.scaler); break; case Optimizer_t::SGD: hipLaunchKernelGGL(( opt_sgd_kernel), dim3(grid_size), dim3(block_size), 0, stream, hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), deltaw_hash_value_index.get_ptr(), deltaw.get_ptr(), opt_params.scaler); break; default: CK_THROW_(Error_t::WrongInput, "Error: Invalid opitimizer type"); } // step6: update hash_table_value by deltaw block_size = embedding_vec_size; grid_size = max(1, hash_hash_value_index_count_num); hipLaunchKernelGGL(( update_kernel), dim3(grid_size), dim3(block_size), 0, stream, hash_hash_value_index_count_num, embedding_vec_size, deltaw_hash_value_index.get_ptr(), deltaw.get_ptr(), hash_table_value.get_ptr()); } // else } // else #ifndef NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } catch (const std::runtime_error &rt_err) { std::cerr << rt_err.what() << std::endl; throw; } return; } template <typename TypeEmbeddingComp> void SparseEmbeddingFunctors::update_params(size_t embedding_vec_size, const OptParams<TypeEmbeddingComp> &opt_params, size_t nnz, const Tensor2<size_t> &hash_value_index, const Tensor2<TypeEmbeddingComp> &wgrad, Tensor2<float> &hash_table_value, size_t sm_count, hipStream_t stream) { try { if (opt_params.optimizer == Optimizer_t::SGD && 
opt_params.hyperparams.sgd.atomic_update) { const size_t grid_size = min(max(1ul, nnz), sm_count * 32); const size_t block_size = embedding_vec_size; float lr_scale = opt_params.lr / opt_params.scaler; // for one-hot, the sample_id is dedicated. hipLaunchKernelGGL(( opt_sgd_atomic_kernel), dim3(grid_size), dim3(block_size), 0, stream, nnz, embedding_vec_size, lr_scale, hash_value_index.get_ptr(), wgrad.get_ptr(), hash_table_value.get_ptr()); } else { CK_THROW_(Error_t::WrongInput, "Error: Invalid opitimizer type"); } } catch (const std::runtime_error &rt_err) { std::cerr << rt_err.what() << std::endl; throw; } return; } template void SparseEmbeddingFunctors::update_params<unsigned int, float>( size_t batch_size, size_t slot_num, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, OptParams<float> &opt_params, size_t nnz, const Tensor2<unsigned int> &row_offset, Tensor2<size_t> &hash_value_index, Tensor2<unsigned int> &sample_id, Tensor2<unsigned int> &sample_id_sort, Tensor2<size_t> &hash_value_index_sort, Tensor2<uint32_t> &hash_value_index_count_offset, Tensor2<uint32_t> &new_hash_value_flag, Tensor2<uint32_t> &hash_value_flag_sumed, Tensor2<uint32_t> &hash_value_index_count_counter, Tensor2<void> &temp_storage_sort, Tensor2<void> &temp_storage_scan, const Tensor2<float> &wgrad, Tensor2<size_t> &deltaw_hash_value_index, Tensor2<float> &deltaw, Tensor2<float> &hash_table_value, size_t sm_count, hipStream_t stream); template void SparseEmbeddingFunctors::update_params<long long, float>( size_t batch_size, size_t slot_num, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, OptParams<float> &opt_params, size_t nnz, const Tensor2<long long> &row_offset, Tensor2<size_t> &hash_value_index, Tensor2<long long> &sample_id, Tensor2<long long> &sample_id_sort, Tensor2<size_t> &hash_value_index_sort, Tensor2<uint32_t> &hash_value_index_count_offset, Tensor2<uint32_t> &new_hash_value_flag, Tensor2<uint32_t> &hash_value_flag_sumed, Tensor2<uint32_t> &hash_value_index_count_counter, Tensor2<void> &temp_storage_sort, Tensor2<void> &temp_storage_scan, const Tensor2<float> &wgrad, Tensor2<size_t> &deltaw_hash_value_index, Tensor2<float> &deltaw, Tensor2<float> &hash_table_value, size_t sm_count, hipStream_t stream); template void SparseEmbeddingFunctors::update_params<unsigned int, __half>( size_t batch_size, size_t slot_num, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, OptParams<__half> &opt_params, size_t nnz, const Tensor2<unsigned int> &row_offset, Tensor2<size_t> &hash_value_index, Tensor2<unsigned int> &sample_id, Tensor2<unsigned int> &sample_id_sort, Tensor2<size_t> &hash_value_index_sort, Tensor2<uint32_t> &hash_value_index_count_offset, Tensor2<uint32_t> &new_hash_value_flag, Tensor2<uint32_t> &hash_value_flag_sumed, Tensor2<uint32_t> &hash_value_index_count_counter, Tensor2<void> &temp_storage_sort, Tensor2<void> &temp_storage_scan, const Tensor2<__half> &wgrad, Tensor2<size_t> &deltaw_hash_value_index, Tensor2<float> &deltaw, Tensor2<float> &hash_table_value, size_t sm_count, hipStream_t stream); template void SparseEmbeddingFunctors::update_params<long long, __half>( size_t batch_size, size_t slot_num, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, OptParams<__half> &opt_params, size_t nnz, const Tensor2<long long> &row_offset, Tensor2<size_t> &hash_value_index, Tensor2<long long> &sample_id, Tensor2<long long> &sample_id_sort, Tensor2<size_t> &hash_value_index_sort, Tensor2<uint32_t> &hash_value_index_count_offset, Tensor2<uint32_t> 
&new_hash_value_flag, Tensor2<uint32_t> &hash_value_flag_sumed, Tensor2<uint32_t> &hash_value_index_count_counter, Tensor2<void> &temp_storage_sort, Tensor2<void> &temp_storage_scan, const Tensor2<__half> &wgrad, Tensor2<size_t> &deltaw_hash_value_index, Tensor2<float> &deltaw, Tensor2<float> &hash_table_value, size_t sm_count, hipStream_t stream); template void SparseEmbeddingFunctors::update_params<float>( size_t embedding_vec_size, const OptParams<float> &opt_params, size_t nnz, const Tensor2<size_t> &hash_value_index, const Tensor2<float> &wgrad, Tensor2<float> &hash_table_value, size_t sm_count, hipStream_t stream); template void SparseEmbeddingFunctors::update_params<__half>( size_t embedding_vec_size, const OptParams<__half> &opt_params, size_t nnz, const Tensor2<size_t> &hash_value_index, const Tensor2<__half> &wgrad, Tensor2<float> &hash_table_value, size_t sm_count, hipStream_t stream); } // namespace HugeCTR
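// A CPU reference sketch of the atomic-update SGD branch above (opt_sgd_atomic_kernel): for each
// nonzero key, the embedding row selected by hash_value_index is decremented by
// (lr / scaler) * wgrad of the corresponding sample. This is an illustrative test oracle, not a
// HugeCTR API; the function name and the flat std::vector layout are assumptions.
#include <cstddef>
#include <vector>

void sgd_atomic_reference(int nnz, int embedding_vec_size, float lr, float scaler,
                          const std::vector<size_t> &hash_value_index,  // one row index per key
                          const std::vector<int> &sample_ids,           // one sample id per key
                          const std::vector<float> &wgrad,              // [sample][embedding_vec_size]
                          std::vector<float> &hash_table_value) {       // [row][embedding_vec_size]
  const float lr_scale = lr / scaler;
  for (int key = 0; key < nnz; ++key) {
    const size_t row = hash_value_index[key];
    const int sample = sample_ids[key];
    for (int d = 0; d < embedding_vec_size; ++d) {
      hash_table_value[row * embedding_vec_size + d] -=
          lr_scale * wgrad[size_t(sample) * embedding_vec_size + d];
    }
  }
}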
e42cc20d29cc005227f7f3aa0b73cc200450c4a3.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp" #include "HugeCTR/include/utils.cuh" #include "cub/cub/device/device_radix_sort.cuh" #include "cub/cub/device/device_scan.cuh" namespace HugeCTR { namespace { __global__ void value_count_kernel_2(int nnz, const uint32_t *new_hash_value_flag, const uint32_t *hash_value_flag_sumed, uint32_t *hash_value_index_index, uint32_t *counter) { for (int gid = blockIdx.x * blockDim.x + threadIdx.x; gid < nnz; gid += blockDim.x * gridDim.x) { uint32_t flag = new_hash_value_flag[gid]; if (flag == 1) { hash_value_index_index[hash_value_flag_sumed[gid] - 1] = gid; } } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { *counter = hash_value_flag_sumed[nnz - 1]; hash_value_index_index[*counter] = nnz; } } // expand sample id by row_offset template <typename TypeKey> __global__ void sample_id_expand_kernel(int batch_size, int slot_num, const TypeKey *row_offset, TypeKey *sample_id) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < (batch_size * slot_num)) { TypeKey offset = row_offset[gid]; int value_num = row_offset[gid + 1] - offset; for (int i = 0; i < value_num; i++) { sample_id[offset + i] = gid; } } } __global__ void value_count_kernel_1(int nnz, const size_t *hash_value_index_sort, uint32_t *new_hash_value_flag) { for (int gid = blockIdx.x * blockDim.x + threadIdx.x; gid < nnz; gid += blockDim.x * gridDim.x) { size_t cur_value = hash_value_index_sort[gid]; if (gid > 0) { size_t former_value = hash_value_index_sort[gid - 1]; // decide if this is the start of a group(the elements in this group have the same // hash_value_index_sort) if (cur_value != former_value) { new_hash_value_flag[gid] = 1; } else { new_hash_value_flag[gid] = 0; } } else { // gid == 0 new_hash_value_flag[gid] = 1; } } } // calculate weights update value(deltaw) by adam opitimizer template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_adam_kernel_global(uint32_t hash_value_index_count_num, int embedding_vec_size, const AdamOptHyperParams<TypeEmbeddingComp> adam, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; size_t feature_index = row_index * 
embedding_vec_size + tid; float mi = TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.m_ptr[feature_index]) + (1.0f - adam.beta1) * gi / adam.beta1; float vi = TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.v_ptr[feature_index]) + (1.0f - adam.beta2) * gi * gi / adam.beta2; adam.m_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mi); adam.v_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(vi); } } template <typename TypeEmbeddingComp> __global__ void adam_update_kernel_global(int embedding_vec_size, size_t table_size, // vocabulary size / factor const AdamOptHyperParams<TypeEmbeddingComp> adam, float alpha_t, float *hash_table_value) { const int TILE_SIZE = blockDim.x * gridDim.x; for (size_t feature_index = blockIdx.x * blockDim.x + threadIdx.x; feature_index < table_size * embedding_vec_size; feature_index += TILE_SIZE) { float mi = adam.beta1 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.m_ptr[feature_index]); float vi = adam.beta2 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.v_ptr[feature_index]); adam.m_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mi); adam.v_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(vi); float weight_diff = -alpha_t * mi / (sqrtf(vi) + adam.epsilon); hash_table_value[feature_index] += weight_diff; } } // calculate weights update value(deltaw) by momentum_sgd opitimizer template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_momentum_sgd_kernel_global( uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const MomentumSGDOptHyperParams<TypeEmbeddingComp> momentum, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; size_t feature_index = row_index * embedding_vec_size + tid; float mo = TypeConvertFunc<float, TypeEmbeddingComp>::convert(momentum.momentum_ptr[feature_index]) - lr * gi / momentum.factor; momentum.momentum_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mo); } } template <typename TypeEmbeddingComp> __global__ void momentum_sgd_update_kernel_global( int embedding_vec_size, size_t table_size, // vocabulary size / factor const MomentumSGDOptHyperParams<TypeEmbeddingComp> momentum, float *hash_table_value) { const int TILE_SIZE = blockDim.x * gridDim.x; for (size_t feature_index = blockIdx.x * blockDim.x + threadIdx.x; feature_index < table_size * embedding_vec_size; feature_index += TILE_SIZE) { float mo = TypeConvertFunc<float, TypeEmbeddingComp>::convert(momentum.momentum_ptr[feature_index]); mo *= momentum.factor; hash_table_value[feature_index] += mo; momentum.momentum_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mo); } } 
template <typename TypeEmbeddingComp> __global__ void nesterov_global_update_kernel_global( int embedding_vec_size, size_t table_size, // vocabulary size / factor const NesterovOptHyperParams<TypeEmbeddingComp> nesterov, float *hash_table_value) { const int TILE_SIZE = blockDim.x * gridDim.x; for (size_t feature_index = blockIdx.x * blockDim.x + threadIdx.x; feature_index < table_size * embedding_vec_size; feature_index += TILE_SIZE) { float accm = TypeConvertFunc<float, TypeEmbeddingComp>::convert(nesterov.accm_ptr[feature_index]); accm *= nesterov.mu; nesterov.accm_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(accm); hash_table_value[feature_index] += accm * nesterov.mu; } } // calculate weights update value(deltaw) by nesterov opitimizer template <typename TypeKey, typename TypeEmbeddingComp> __global__ void nesterov_local_update_kernel_global( uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const NesterovOptHyperParams<TypeEmbeddingComp> nesterov, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, float *hash_table_value, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; size_t feature_index = row_index * embedding_vec_size + tid; float accm = TypeConvertFunc<float, TypeEmbeddingComp>::convert(nesterov.accm_ptr[feature_index]); accm -= lr * gi; nesterov.accm_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(accm); hash_table_value[feature_index] -= (1 + nesterov.mu) * (lr * gi); } } template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_sgd_kernel_global(uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, float *hash_table_value, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // update size_t value_index = hash_value_index_sort[offset]; size_t feature_index = value_index * embedding_vec_size + tid; hash_table_value[feature_index] -= lr * gi; } } // calculate weights update value(deltaw) by adam opitimizer template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_adam_kernel(uint32_t 
hash_value_index_count_num, int embedding_vec_size, const AdamOptHyperParams<TypeEmbeddingComp> adam, float alpha_t, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, size_t *deltaw_hash_value_index, float *deltaw, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; size_t feature_index = row_index * embedding_vec_size + tid; float mi = adam.beta1 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.m_ptr[feature_index]) + (1.0f - adam.beta1) * gi; float vi = adam.beta2 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(adam.v_ptr[feature_index]) + (1.0f - adam.beta2) * gi * gi; adam.m_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mi); adam.v_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(vi); float weight_diff = -alpha_t * mi / (sqrtf(vi) + adam.epsilon); // save weights diff deltaw[bid * embedding_vec_size + tid] = weight_diff; // save hash value_indexs(corresponding to deltaw) if (tid == 0) { deltaw_hash_value_index[bid] = row_index; } } } // calculate weights update value(deltaw) by momentum_sgd opitimizer template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_momentum_sgd_kernel( uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const MomentumSGDOptHyperParams<TypeEmbeddingComp> momentum, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, size_t *deltaw_hash_value_index, float *deltaw, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; size_t feature_index = row_index * embedding_vec_size + tid; float mo = momentum.factor * TypeConvertFunc<float, TypeEmbeddingComp>::convert( momentum.momentum_ptr[feature_index]) - lr * gi; momentum.momentum_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mo); // save weights diff deltaw[bid * embedding_vec_size + tid] = mo; // save hash value_indexs(corresponding to deltaw) if (tid == 0) { deltaw_hash_value_index[bid] = row_index; } } } // calculate weights update value(deltaw) by nesterov opitimizer template <typename TypeKey, 
typename TypeEmbeddingComp> __global__ void opt_nesterov_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const NesterovOptHyperParams<TypeEmbeddingComp> nesterov, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, size_t *deltaw_hash_value_index, float *deltaw, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; size_t feature_index = row_index * embedding_vec_size + tid; float accm_old = TypeConvertFunc<float, TypeEmbeddingComp>::convert(nesterov.accm_ptr[feature_index]); float accm_new = nesterov.mu * accm_old - lr * gi; nesterov.accm_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(accm_new); float weight_diff = -nesterov.mu * accm_old + (1.0f + nesterov.mu) * accm_new; // save weights diff deltaw[bid * embedding_vec_size + tid] = weight_diff; // save hash value_indexs(corresponding to deltaw) if (tid == 0) { deltaw_hash_value_index[bid] = row_index; } } } template <typename TypeKey, typename TypeEmbeddingComp> __global__ void opt_sgd_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size, float lr, const TypeKey *sample_id, const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, size_t *deltaw_hash_value_index, float *deltaw, float scaler) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < hash_value_index_count_num) { // uint32_t sample_num = hash_value_index_count[bid]; uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid]; // accumulate the wgrads for the corresponding embedding vector float gi = 0.0f; uint32_t offset = hash_value_index_count_offset[bid]; for (int i = 0; i < sample_num; i++) { int sample_index = sample_id[offset + i]; gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_index * embedding_vec_size + tid]); } gi = gi / scaler; // compute the grad of the weights and update it size_t row_index = hash_value_index_sort[offset]; float weight_diff = -lr * gi; // save weights diff deltaw[bid * embedding_vec_size + tid] = weight_diff; // save hash value_indexs(corresponding to deltaw) if (tid == 0) { deltaw_hash_value_index[bid] = row_index; } } } // update embedding table(weights) by deltaw __global__ void update_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size, const size_t *deltaw_hash_value_index, const float *deltaw, float *hash_table_value) { int tid = threadIdx.x; int bid = blockIdx.x; if ((bid < hash_value_index_count_num) && (tid < embedding_vec_size)) { size_t value_index = deltaw_hash_value_index[bid]; size_t feature_index = value_index * embedding_vec_size + tid; hash_table_value[feature_index] += deltaw[bid * embedding_vec_size + tid]; } } template <typename TypeKey, typename 
TypeEmbeddingComp> __global__ void opt_sgd_atomic_kernel(int nnz, int embedding_vec_size, float lr_scale, const size_t *hash_value_index, const TypeKey *sample_ids, const TypeEmbeddingComp *wgrad, float *hash_table_value) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < nnz) { for (int key_id = bid; key_id < nnz; key_id += gridDim.x) { int sample_id = sample_ids[key_id]; float deltaw = -lr_scale * TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[sample_id * embedding_vec_size + tid]); // atomic update size_t value_index = hash_value_index[key_id]; size_t feature_index = value_index * embedding_vec_size + tid; atomicAdd(&hash_table_value[feature_index], deltaw); } } } // only support LocalizedSlotSparseEmbeddingOneHot template <typename TypeEmbeddingComp> __global__ void opt_sgd_atomic_kernel(int nnz, int embedding_vec_size, float lr_scale, const size_t *hash_value_index, const TypeEmbeddingComp *wgrad, float *hash_table_value) { int bid = blockIdx.x; int tid = threadIdx.x; if (tid < embedding_vec_size && bid < nnz) { for (int key_id = bid; key_id < nnz; key_id += gridDim.x) { // for one-hot, the max_feature_per_slot is 1, so sample_id is equal to key_id float deltaw = -lr_scale * TypeConvertFunc<float, TypeEmbeddingComp>::convert( wgrad[key_id * embedding_vec_size + tid]); // atomic update size_t value_index = hash_value_index[key_id]; size_t feature_index = value_index * embedding_vec_size + tid; atomicAdd(&hash_table_value[feature_index], deltaw); } } } } // namespace template <typename TypeHashKey, typename TypeEmbeddingComp> void SparseEmbeddingFunctors::update_params( size_t batch_size, size_t slot_num, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, OptParams<TypeEmbeddingComp> &opt_params, size_t nnz, const Tensor2<TypeHashKey> &row_offset, Tensor2<size_t> &hash_value_index, Tensor2<TypeHashKey> &sample_id, Tensor2<TypeHashKey> &sample_id_sort, Tensor2<size_t> &hash_value_index_sort, Tensor2<uint32_t> &hash_value_index_count_offset, Tensor2<uint32_t> &new_hash_value_flag, Tensor2<uint32_t> &hash_value_flag_sumed, Tensor2<uint32_t> &hash_value_index_count_counter, Tensor2<void> &temp_storage_sort, Tensor2<void> &temp_storage_scan, const Tensor2<TypeEmbeddingComp> &wgrad, Tensor2<size_t> &deltaw_hash_value_index, Tensor2<float> &deltaw, Tensor2<float> &hash_table_value, size_t sm_count, cudaStream_t stream) { if (slot_num == 0) { return; } size_t block_size, grid_size; try { // step1: expand sample IDs block_size = 64; grid_size = (batch_size * slot_num - 1) / block_size + 1; sample_id_expand_kernel<<<grid_size, block_size, 0, stream>>>( batch_size, slot_num, row_offset.get_ptr(), sample_id.get_ptr()); if (opt_params.optimizer == Optimizer_t::SGD && opt_params.hyperparams.sgd.atomic_update) { // for SGD, do atomic update const size_t block_size = embedding_vec_size; const size_t grid_size = min(max(1ul, nnz), sm_count * 32); float lr_scale = opt_params.lr / opt_params.scaler; opt_sgd_atomic_kernel<<<grid_size, block_size, 0, stream>>>( nnz, embedding_vec_size, lr_scale, hash_value_index.get_ptr(), sample_id.get_ptr(), wgrad.get_ptr(), hash_table_value.get_ptr()); } else { // step3: sort by hash_value_index int end_bit = static_cast<int>(log2(static_cast<float>(max_vocabulary_size_per_gpu))) + 1; size_t temp_storage_sort_size = temp_storage_sort.get_size_in_bytes(); CK_CUDA_THROW_(cub::DeviceRadixSort::SortPairs( temp_storage_sort.get_ptr(), temp_storage_sort_size, hash_value_index.get_ptr(), hash_value_index_sort.get_ptr(), 
sample_id.get_ptr(), sample_id_sort.get_ptr(), nnz, 0, end_bit, stream, false)); // step4: count the number for each unduplicated hash_value_index CK_CUDA_THROW_( cudaMemsetAsync(hash_value_index_count_counter.get_ptr(), 0, sizeof(uint32_t), stream)); constexpr size_t max_grid_size = 384; block_size = 256; grid_size = min(max_grid_size, (nnz - 1) / block_size + 1); value_count_kernel_1<<<grid_size, block_size, 0, stream>>>( nnz, hash_value_index_sort.get_ptr(), new_hash_value_flag.get_ptr()); // prefix_sum size_t temp_storage_scan_size = temp_storage_scan.get_size_in_bytes(); CK_CUDA_THROW_(cub::DeviceScan::InclusiveSum( temp_storage_scan.get_ptr(), temp_storage_scan_size, new_hash_value_flag.get_ptr(), hash_value_flag_sumed.get_ptr(), nnz, stream)); value_count_kernel_2<<<grid_size, block_size, 0, stream>>>( nnz, new_hash_value_flag.get_ptr(), hash_value_flag_sumed.get_ptr(), hash_value_index_count_offset.get_ptr(), hash_value_index_count_counter.get_ptr()); uint32_t hash_hash_value_index_count_num = 0; // this async memcpy will not perform as a async operation because the host memory is not // a pinned memroy CK_CUDA_THROW_(cudaMemcpyAsync(&hash_hash_value_index_count_num, hash_value_index_count_counter.get_ptr(), sizeof(uint32_t), cudaMemcpyDeviceToHost, stream)); // step5: use optimizer method to compute deltaw, and record corresponding // deltaw_hash_value_index block_size = embedding_vec_size; grid_size = max(1, hash_hash_value_index_count_num); if (opt_params.global_update) { switch (opt_params.optimizer) { case Optimizer_t::Adam: // adam { float alpha_t = opt_params.lr * sqrt(1 - pow(opt_params.hyperparams.adam.beta2, opt_params.hyperparams.adam.times)) / (1 - pow(opt_params.hyperparams.adam.beta1, opt_params.hyperparams.adam.times)); // update target mi and vi opt_adam_kernel_global<<<grid_size, block_size, 0, stream>>>( hash_hash_value_index_count_num, embedding_vec_size, opt_params.hyperparams.adam, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), opt_params.scaler); // all update according to the mi vi adam_update_kernel_global<<<1024, 256, 0, stream>>>( embedding_vec_size, max_vocabulary_size_per_gpu, opt_params.hyperparams.adam, alpha_t, hash_table_value.get_ptr()); break; } case Optimizer_t::MomentumSGD: // momentum sgd opt_momentum_sgd_kernel_global<<<grid_size, block_size, 0, stream>>>( hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, opt_params.hyperparams.momentum, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), opt_params.scaler); momentum_sgd_update_kernel_global<<<1024, 256, 0, stream>>>( embedding_vec_size, max_vocabulary_size_per_gpu, opt_params.hyperparams.momentum, hash_table_value.get_ptr()); break; case Optimizer_t::Nesterov: // nesterov nesterov_global_update_kernel_global<<<1024, 256, 0, stream>>>( embedding_vec_size, max_vocabulary_size_per_gpu, opt_params.hyperparams.nesterov, hash_table_value.get_ptr()); nesterov_local_update_kernel_global<<<grid_size, block_size, 0, stream>>>( hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, opt_params.hyperparams.nesterov, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), hash_table_value.get_ptr(), opt_params.scaler); break; case Optimizer_t::SGD: opt_sgd_kernel_global<<<grid_size, block_size, 0, stream>>>( hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, 
sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), hash_table_value.get_ptr(), opt_params.scaler); break; default: CK_THROW_(Error_t::WrongInput, "Error: Invalid opitimizer type"); } } else { switch (opt_params.optimizer) { case Optimizer_t::Adam: // adam { float alpha_t = opt_params.lr * sqrt(1 - pow(opt_params.hyperparams.adam.beta2, opt_params.hyperparams.adam.times)) / (1 - pow(opt_params.hyperparams.adam.beta1, opt_params.hyperparams.adam.times)); opt_adam_kernel<<<grid_size, block_size, 0, stream>>>( hash_hash_value_index_count_num, embedding_vec_size, opt_params.hyperparams.adam, alpha_t, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), deltaw_hash_value_index.get_ptr(), deltaw.get_ptr(), opt_params.scaler); break; } case Optimizer_t::MomentumSGD: // momentum sgd opt_momentum_sgd_kernel<<<grid_size, block_size, 0, stream>>>( hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, opt_params.hyperparams.momentum, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), deltaw_hash_value_index.get_ptr(), deltaw.get_ptr(), opt_params.scaler); break; case Optimizer_t::Nesterov: // nesterov opt_nesterov_kernel<<<grid_size, block_size, 0, stream>>>( hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, opt_params.hyperparams.nesterov, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), deltaw_hash_value_index.get_ptr(), deltaw.get_ptr(), opt_params.scaler); break; case Optimizer_t::SGD: opt_sgd_kernel<<<grid_size, block_size, 0, stream>>>( hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr, sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), deltaw_hash_value_index.get_ptr(), deltaw.get_ptr(), opt_params.scaler); break; default: CK_THROW_(Error_t::WrongInput, "Error: Invalid opitimizer type"); } // step6: update hash_table_value by deltaw block_size = embedding_vec_size; grid_size = max(1, hash_hash_value_index_count_num); update_kernel<<<grid_size, block_size, 0, stream>>>( hash_hash_value_index_count_num, embedding_vec_size, deltaw_hash_value_index.get_ptr(), deltaw.get_ptr(), hash_table_value.get_ptr()); } // else } // else #ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } catch (const std::runtime_error &rt_err) { std::cerr << rt_err.what() << std::endl; throw; } return; } template <typename TypeEmbeddingComp> void SparseEmbeddingFunctors::update_params(size_t embedding_vec_size, const OptParams<TypeEmbeddingComp> &opt_params, size_t nnz, const Tensor2<size_t> &hash_value_index, const Tensor2<TypeEmbeddingComp> &wgrad, Tensor2<float> &hash_table_value, size_t sm_count, cudaStream_t stream) { try { if (opt_params.optimizer == Optimizer_t::SGD && opt_params.hyperparams.sgd.atomic_update) { const size_t grid_size = min(max(1ul, nnz), sm_count * 32); const size_t block_size = embedding_vec_size; float lr_scale = opt_params.lr / opt_params.scaler; // for one-hot, the sample_id is dedicated. 
opt_sgd_atomic_kernel<<<grid_size, block_size, 0, stream>>>( nnz, embedding_vec_size, lr_scale, hash_value_index.get_ptr(), wgrad.get_ptr(), hash_table_value.get_ptr()); } else { CK_THROW_(Error_t::WrongInput, "Error: Invalid opitimizer type"); } } catch (const std::runtime_error &rt_err) { std::cerr << rt_err.what() << std::endl; throw; } return; } template void SparseEmbeddingFunctors::update_params<unsigned int, float>( size_t batch_size, size_t slot_num, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, OptParams<float> &opt_params, size_t nnz, const Tensor2<unsigned int> &row_offset, Tensor2<size_t> &hash_value_index, Tensor2<unsigned int> &sample_id, Tensor2<unsigned int> &sample_id_sort, Tensor2<size_t> &hash_value_index_sort, Tensor2<uint32_t> &hash_value_index_count_offset, Tensor2<uint32_t> &new_hash_value_flag, Tensor2<uint32_t> &hash_value_flag_sumed, Tensor2<uint32_t> &hash_value_index_count_counter, Tensor2<void> &temp_storage_sort, Tensor2<void> &temp_storage_scan, const Tensor2<float> &wgrad, Tensor2<size_t> &deltaw_hash_value_index, Tensor2<float> &deltaw, Tensor2<float> &hash_table_value, size_t sm_count, cudaStream_t stream); template void SparseEmbeddingFunctors::update_params<long long, float>( size_t batch_size, size_t slot_num, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, OptParams<float> &opt_params, size_t nnz, const Tensor2<long long> &row_offset, Tensor2<size_t> &hash_value_index, Tensor2<long long> &sample_id, Tensor2<long long> &sample_id_sort, Tensor2<size_t> &hash_value_index_sort, Tensor2<uint32_t> &hash_value_index_count_offset, Tensor2<uint32_t> &new_hash_value_flag, Tensor2<uint32_t> &hash_value_flag_sumed, Tensor2<uint32_t> &hash_value_index_count_counter, Tensor2<void> &temp_storage_sort, Tensor2<void> &temp_storage_scan, const Tensor2<float> &wgrad, Tensor2<size_t> &deltaw_hash_value_index, Tensor2<float> &deltaw, Tensor2<float> &hash_table_value, size_t sm_count, cudaStream_t stream); template void SparseEmbeddingFunctors::update_params<unsigned int, __half>( size_t batch_size, size_t slot_num, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, OptParams<__half> &opt_params, size_t nnz, const Tensor2<unsigned int> &row_offset, Tensor2<size_t> &hash_value_index, Tensor2<unsigned int> &sample_id, Tensor2<unsigned int> &sample_id_sort, Tensor2<size_t> &hash_value_index_sort, Tensor2<uint32_t> &hash_value_index_count_offset, Tensor2<uint32_t> &new_hash_value_flag, Tensor2<uint32_t> &hash_value_flag_sumed, Tensor2<uint32_t> &hash_value_index_count_counter, Tensor2<void> &temp_storage_sort, Tensor2<void> &temp_storage_scan, const Tensor2<__half> &wgrad, Tensor2<size_t> &deltaw_hash_value_index, Tensor2<float> &deltaw, Tensor2<float> &hash_table_value, size_t sm_count, cudaStream_t stream); template void SparseEmbeddingFunctors::update_params<long long, __half>( size_t batch_size, size_t slot_num, size_t embedding_vec_size, size_t max_vocabulary_size_per_gpu, OptParams<__half> &opt_params, size_t nnz, const Tensor2<long long> &row_offset, Tensor2<size_t> &hash_value_index, Tensor2<long long> &sample_id, Tensor2<long long> &sample_id_sort, Tensor2<size_t> &hash_value_index_sort, Tensor2<uint32_t> &hash_value_index_count_offset, Tensor2<uint32_t> &new_hash_value_flag, Tensor2<uint32_t> &hash_value_flag_sumed, Tensor2<uint32_t> &hash_value_index_count_counter, Tensor2<void> &temp_storage_sort, Tensor2<void> &temp_storage_scan, const Tensor2<__half> &wgrad, Tensor2<size_t> &deltaw_hash_value_index, Tensor2<float> 
&deltaw, Tensor2<float> &hash_table_value, size_t sm_count, cudaStream_t stream); template void SparseEmbeddingFunctors::update_params<float>( size_t embedding_vec_size, const OptParams<float> &opt_params, size_t nnz, const Tensor2<size_t> &hash_value_index, const Tensor2<float> &wgrad, Tensor2<float> &hash_table_value, size_t sm_count, cudaStream_t stream); template void SparseEmbeddingFunctors::update_params<__half>( size_t embedding_vec_size, const OptParams<__half> &opt_params, size_t nnz, const Tensor2<size_t> &hash_value_index, const Tensor2<__half> &wgrad, Tensor2<float> &hash_table_value, size_t sm_count, cudaStream_t stream); } // namespace HugeCTR
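For reference, a minimal, self-contained sketch of the atomic-SGD update path used above, reduced to plain float wgrad and fixed sizes. It mirrors the value_index -> feature_index addressing and the atomicAdd accumulation of opt_sgd_atomic_kernel; the sizes, data and launch shape are illustrative assumptions, not HugeCTR's actual configuration.

// Illustrative sketch only: a simplified float-only analogue of the atomic SGD
// embedding update above. Sizes, data and launch shape are assumptions.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void sgd_atomic_update_sketch(int nnz, int embedding_vec_size, float lr_scale,
                                         const size_t *hash_value_index,
                                         const float *wgrad, float *hash_table_value) {
  int tid = threadIdx.x;  // one thread per element of the embedding vector
  if (tid >= embedding_vec_size) return;
  for (int key_id = blockIdx.x; key_id < nnz; key_id += gridDim.x) {
    size_t value_index = hash_value_index[key_id];
    size_t feature_index = value_index * embedding_vec_size + tid;
    // several keys may hit the same embedding row, hence the atomic update
    atomicAdd(&hash_table_value[feature_index],
              -lr_scale * wgrad[key_id * embedding_vec_size + tid]);
  }
}

int main() {
  const int nnz = 4, vec = 8, rows = 16;
  size_t h_idx[nnz] = {0, 3, 3, 7};  // two keys share row 3 on purpose
  float *d_table, *d_wgrad;
  size_t *d_idx;
  cudaMalloc(&d_table, rows * vec * sizeof(float));
  cudaMalloc(&d_wgrad, nnz * vec * sizeof(float));
  cudaMalloc(&d_idx, nnz * sizeof(size_t));
  cudaMemset(d_table, 0, rows * vec * sizeof(float));
  cudaMemset(d_wgrad, 0, nnz * vec * sizeof(float));
  cudaMemcpy(d_idx, h_idx, sizeof(h_idx), cudaMemcpyHostToDevice);
  sgd_atomic_update_sketch<<<nnz, vec>>>(nnz, vec, 0.01f, d_idx, d_wgrad, d_table);
  cudaDeviceSynchronize();
  printf("sketch finished: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(d_table); cudaFree(d_wgrad); cudaFree(d_idx);
  return 0;
}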
0216cf636c6bb811b1d0e0e730600c84b3672bf6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include "cudamat.cuh" #include "SoftMax_kernels.cuh" extern "C" { inline bool checkCUDAError() { hipError_t err = hipGetLastError(); if (hipSuccess != err) printf("%s\n", hipGetErrorString( err)); return hipSuccess != err; } extern int CumulativeSum(cudamat *probabilities, cudamat *cumSum, int softMaxWidth) { if (!probabilities->on_device || !cumSum->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = probabilities->size[0] ; int numFeatures = probabilities->size[1] ; if (cumSum->size[0] != signalLength || cumSum->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 64 ; int numTotalSoftMaxes = (signalLength/softMaxWidth) + (signalLength % softMaxWidth == 0 ? 0:1) ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(2*softMaxWidth) ; hipLaunchKernelGGL(( kCumulativeSum) , dim3(gridD), dim3(numThreadsPerBlock), sharedMem , 0, probabilities->data_device, cumSum->data_device, softMaxWidth, signalLength) ; hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } extern int MultinomialSamples(cudamat *unifRandNums, cudamat *probabilities, cudamat *samples, int softMaxWidth, int startShift) { if (!probabilities->on_device || !samples->on_device || !unifRandNums->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = probabilities->size[0] ; int numFeatures = probabilities->size[1] ; if (samples->size[0] != signalLength || samples->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 128 ; int numTotalSoftMaxes = ((signalLength-startShift)/softMaxWidth) + ((signalLength-startShift) % softMaxWidth == 0 ? 0:1) ; if (unifRandNums->size[0] * unifRandNums->size[1] < numTotalSoftMaxes*numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(2*softMaxWidth) ; hipLaunchKernelGGL(( kMultinomialSample) , dim3(gridD), dim3(numThreadsPerBlock), sharedMem , 0, unifRandNums->data_device, probabilities->data_device, samples->data_device, softMaxWidth, startShift, signalLength ) ; hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } extern int SoftMaxStackApproxWithPositionBiases( cudamat *activations, cudamat *probabilities, cudamat *stdevs, cudamat *featureBiases, cudamat *positionBiases, int softMaxWidth ) { if (!activations->on_device || !probabilities->on_device || !stdevs->on_device || !featureBiases->on_device || !positionBiases->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = activations->size[0] ; int numFeatures = activations->size[1] ; if (probabilities->size[0] != signalLength || probabilities->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (stdevs->size[0] != signalLength || stdevs->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // feature biases much be a row or column vector if (featureBiases->size[0] != 1 && featureBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (featureBiases->size[0]*featureBiases->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // position biases much be a row or column vector if (positionBiases->size[0] != 1 && positionBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (positionBiases->size[0]*positionBiases->size[1] != softMaxWidth) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 128 ; int numTotalSoftMaxes = (signalLength/softMaxWidth) + 
(signalLength % softMaxWidth == 0 ? 0:1) ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(softMaxWidth + numThreadsPerBlock) ; hipLaunchKernelGGL(( kSoftMaxStackApproxWithPositionBiases) , dim3(gridD), dim3(numThreadsPerBlock), sharedMem , 0, activations->data_device, probabilities->data_device, stdevs->data_device, featureBiases->data_device, positionBiases->data_device, softMaxWidth, signalLength ) ; hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } extern int SoftMaxReluWithPositionBiases( cudamat *activations, cudamat *probabilities, cudamat *meanValues, cudamat *featureStdevs, cudamat *featureBiases, cudamat *positionBiases, int softMaxWidth, int shift ) { if (!activations->on_device || !probabilities->on_device || !featureBiases->on_device || !positionBiases->on_device || !meanValues->on_device || !featureStdevs->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = activations->size[0] ; int numFeatures = activations->size[1] ; if (probabilities->size[0] != signalLength || probabilities->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (meanValues->size[0] != signalLength || meanValues->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (featureStdevs->size[0] != signalLength || featureStdevs->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // feature biases much be a row or column vector if (featureBiases->size[0] != 1 && featureBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (featureBiases->size[0]*featureBiases->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // position biases much be a row or column vector if (positionBiases->size[0] != 1 && positionBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (positionBiases->size[0]*positionBiases->size[1] != softMaxWidth) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 128 ; int numTotalSoftMaxes = ((signalLength-shift)/softMaxWidth) + ((signalLength-shift) % softMaxWidth == 0 ? 
0:1) ; float minExpForSum = -20.0 ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(2*softMaxWidth + numThreadsPerBlock) ; hipLaunchKernelGGL(( kSoftMaxReluWithPositionBiases) , dim3(gridD), dim3(numThreadsPerBlock), sharedMem , 0, activations->data_device, probabilities->data_device, meanValues->data_device, featureStdevs->data_device, featureBiases->data_device, positionBiases->data_device, softMaxWidth, shift, signalLength, minExpForSum ) ; hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } extern int SoftMaxWithOffAndPositionBiases(cudamat *activations, cudamat *probabilities, cudamat *featureBiases, cudamat *positionBiases, int softMaxWidth) { if (!activations->on_device || !probabilities->on_device || !featureBiases->on_device || !positionBiases->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = activations->size[0] ; int numFeatures = activations->size[1] ; if (probabilities->size[0] != signalLength || probabilities->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // feature biases much be a row or column vector if (featureBiases->size[0] != 1 && featureBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (featureBiases->size[0]*featureBiases->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // position biases much be a row or column vector if (positionBiases->size[0] != 1 && positionBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (positionBiases->size[0]*positionBiases->size[1] != softMaxWidth) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 128 ; int numTotalSoftMaxes = (signalLength/softMaxWidth) + (signalLength % softMaxWidth == 0 ? 0:1) ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(softMaxWidth + numThreadsPerBlock) ; hipLaunchKernelGGL(( kSoftMaxWithOffAndPositionBiases) , dim3(gridD), dim3(numThreadsPerBlock), sharedMem , 0, activations->data_device, probabilities->data_device, featureBiases->data_device, positionBiases->data_device, softMaxWidth, signalLength ) ; hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } extern int SoftMaxWithOff(cudamat *activations, cudamat *probabilities, cudamat *featureBiases, int softMaxWidth) { if (!activations->on_device || !probabilities->on_device || !featureBiases->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = activations->size[0] ; int numFeatures = activations->size[1] ; if (probabilities->size[0] != signalLength || probabilities->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (featureBiases->size[0]*featureBiases->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 64 ; int numTotalSoftMaxes = (signalLength/softMaxWidth) + (signalLength % softMaxWidth == 0 ? 0:1) ; int numPtsPerThread = softMaxWidth/numThreadsPerBlock ; if (numPtsPerThread*numThreadsPerBlock < softMaxWidth) numPtsPerThread++ ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(softMaxWidth + numThreadsPerBlock) ; hipLaunchKernelGGL(( kSoftMaxWithOff) , dim3(gridD), dim3(numThreadsPerBlock), sharedMem , 0, activations->data_device, probabilities->data_device, featureBiases->data_device, softMaxWidth, signalLength, numPtsPerThread) ; hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } }
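A note on this HIP/CUDA pair: the deprecated cudaThreadSynchronize() calls in the original .cu (next entry) appear as hipDeviceSynchronize() in the HIP port above. Below is a minimal sketch of the equivalent modern CUDA error-check idiom these wrappers rely on; the noop kernel and helper name are assumptions, not part of either file.

// Minimal sketch, not part of either file in this pair: a modern CUDA
// replacement for the checkCUDAError()/cudaThreadSynchronize() pattern.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void noop_kernel() {}

static bool check_cuda_error(const char *where) {
  cudaError_t err = cudaGetLastError();                   // launch-time errors
  if (err == cudaSuccess) err = cudaDeviceSynchronize();  // asynchronous errors
  if (err != cudaSuccess) {
    printf("%s: %s\n", where, cudaGetErrorString(err));
    return true;
  }
  return false;
}

int main() {
  noop_kernel<<<1, 1>>>();
  return check_cuda_error("noop_kernel") ? 1 : 0;
}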
0216cf636c6bb811b1d0e0e730600c84b3672bf6.cu
#include <stdio.h> #include <cuda.h> #include "cudamat.cuh" #include "SoftMax_kernels.cuh" extern "C" { inline bool checkCUDAError() { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) printf("%s\n", cudaGetErrorString( err)); return cudaSuccess != err; } extern int CumulativeSum(cudamat *probabilities, cudamat *cumSum, int softMaxWidth) { if (!probabilities->on_device || !cumSum->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = probabilities->size[0] ; int numFeatures = probabilities->size[1] ; if (cumSum->size[0] != signalLength || cumSum->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 64 ; int numTotalSoftMaxes = (signalLength/softMaxWidth) + (signalLength % softMaxWidth == 0 ? 0:1) ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(2*softMaxWidth) ; kCumulativeSum <<< gridD, numThreadsPerBlock, sharedMem >>> (probabilities->data_device, cumSum->data_device, softMaxWidth, signalLength) ; cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } extern int MultinomialSamples(cudamat *unifRandNums, cudamat *probabilities, cudamat *samples, int softMaxWidth, int startShift) { if (!probabilities->on_device || !samples->on_device || !unifRandNums->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = probabilities->size[0] ; int numFeatures = probabilities->size[1] ; if (samples->size[0] != signalLength || samples->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 128 ; int numTotalSoftMaxes = ((signalLength-startShift)/softMaxWidth) + ((signalLength-startShift) % softMaxWidth == 0 ? 0:1) ; if (unifRandNums->size[0] * unifRandNums->size[1] < numTotalSoftMaxes*numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(2*softMaxWidth) ; kMultinomialSample <<< gridD, numThreadsPerBlock, sharedMem >>>( unifRandNums->data_device, probabilities->data_device, samples->data_device, softMaxWidth, startShift, signalLength ) ; cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } extern int SoftMaxStackApproxWithPositionBiases( cudamat *activations, cudamat *probabilities, cudamat *stdevs, cudamat *featureBiases, cudamat *positionBiases, int softMaxWidth ) { if (!activations->on_device || !probabilities->on_device || !stdevs->on_device || !featureBiases->on_device || !positionBiases->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = activations->size[0] ; int numFeatures = activations->size[1] ; if (probabilities->size[0] != signalLength || probabilities->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (stdevs->size[0] != signalLength || stdevs->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // feature biases much be a row or column vector if (featureBiases->size[0] != 1 && featureBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (featureBiases->size[0]*featureBiases->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // position biases much be a row or column vector if (positionBiases->size[0] != 1 && positionBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (positionBiases->size[0]*positionBiases->size[1] != softMaxWidth) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 128 ; int numTotalSoftMaxes = (signalLength/softMaxWidth) + (signalLength % softMaxWidth == 0 ? 
0:1) ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(softMaxWidth + numThreadsPerBlock) ; kSoftMaxStackApproxWithPositionBiases <<< gridD, numThreadsPerBlock, sharedMem >>> ( activations->data_device, probabilities->data_device, stdevs->data_device, featureBiases->data_device, positionBiases->data_device, softMaxWidth, signalLength ) ; cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } extern int SoftMaxReluWithPositionBiases( cudamat *activations, cudamat *probabilities, cudamat *meanValues, cudamat *featureStdevs, cudamat *featureBiases, cudamat *positionBiases, int softMaxWidth, int shift ) { if (!activations->on_device || !probabilities->on_device || !featureBiases->on_device || !positionBiases->on_device || !meanValues->on_device || !featureStdevs->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = activations->size[0] ; int numFeatures = activations->size[1] ; if (probabilities->size[0] != signalLength || probabilities->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (meanValues->size[0] != signalLength || meanValues->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (featureStdevs->size[0] != signalLength || featureStdevs->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // feature biases much be a row or column vector if (featureBiases->size[0] != 1 && featureBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (featureBiases->size[0]*featureBiases->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // position biases much be a row or column vector if (positionBiases->size[0] != 1 && positionBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (positionBiases->size[0]*positionBiases->size[1] != softMaxWidth) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 128 ; int numTotalSoftMaxes = ((signalLength-shift)/softMaxWidth) + ((signalLength-shift) % softMaxWidth == 0 ? 
0:1) ; float minExpForSum = -20.0 ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(2*softMaxWidth + numThreadsPerBlock) ; kSoftMaxReluWithPositionBiases <<< gridD, numThreadsPerBlock, sharedMem >>> ( activations->data_device, probabilities->data_device, meanValues->data_device, featureStdevs->data_device, featureBiases->data_device, positionBiases->data_device, softMaxWidth, shift, signalLength, minExpForSum ) ; cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } extern int SoftMaxWithOffAndPositionBiases(cudamat *activations, cudamat *probabilities, cudamat *featureBiases, cudamat *positionBiases, int softMaxWidth) { if (!activations->on_device || !probabilities->on_device || !featureBiases->on_device || !positionBiases->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = activations->size[0] ; int numFeatures = activations->size[1] ; if (probabilities->size[0] != signalLength || probabilities->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // feature biases much be a row or column vector if (featureBiases->size[0] != 1 && featureBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (featureBiases->size[0]*featureBiases->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; // position biases much be a row or column vector if (positionBiases->size[0] != 1 && positionBiases->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (positionBiases->size[0]*positionBiases->size[1] != softMaxWidth) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 128 ; int numTotalSoftMaxes = (signalLength/softMaxWidth) + (signalLength % softMaxWidth == 0 ? 0:1) ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(softMaxWidth + numThreadsPerBlock) ; kSoftMaxWithOffAndPositionBiases <<< gridD, numThreadsPerBlock, sharedMem >>> ( activations->data_device, probabilities->data_device, featureBiases->data_device, positionBiases->data_device, softMaxWidth, signalLength ) ; cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } extern int SoftMaxWithOff(cudamat *activations, cudamat *probabilities, cudamat *featureBiases, int softMaxWidth) { if (!activations->on_device || !probabilities->on_device || !featureBiases->on_device) return ERROR_NOT_ON_DEVICE; int signalLength = activations->size[0] ; int numFeatures = activations->size[1] ; if (probabilities->size[0] != signalLength || probabilities->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; if (featureBiases->size[0]*featureBiases->size[1] != numFeatures) return ERROR_INCOMPATIBLE_DIMENSIONS ; int numThreadsPerBlock = 64 ; int numTotalSoftMaxes = (signalLength/softMaxWidth) + (signalLength % softMaxWidth == 0 ? 0:1) ; int numPtsPerThread = softMaxWidth/numThreadsPerBlock ; if (numPtsPerThread*numThreadsPerBlock < softMaxWidth) numPtsPerThread++ ; dim3 gridD(numFeatures, numTotalSoftMaxes,1) ; int sharedMem = sizeof(float)*(softMaxWidth + numThreadsPerBlock) ; kSoftMaxWithOff <<< gridD, numThreadsPerBlock, sharedMem >>> (activations->data_device, probabilities->data_device, featureBiases->data_device, softMaxWidth, signalLength, numPtsPerThread) ; cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0 ; } }
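The wrappers above all follow the same launch recipe: a ceil-divide to get one block per soft-max window and a dynamically sized shared-memory buffer passed as the third launch parameter. Here is a self-contained sketch of that recipe with a placeholder kernel; the window-sum body, sizes and names are assumptions, not the real kSoftMax* kernels.

// Sketch of the ceil-divide grid plus dynamic shared memory launch pattern.
// The kernel body is a placeholder window sum, not a soft-max.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void window_sum_sketch(const float *in, float *out, int width, int len) {
  extern __shared__ float smem[];          // sized by the third launch parameter
  int start = blockIdx.x * width;          // one block per window
  float v = 0.f;
  for (int i = threadIdx.x; i < width && start + i < len; i += blockDim.x)
    v += in[start + i];
  smem[threadIdx.x] = v;
  __syncthreads();
  if (threadIdx.x == 0) {
    float s = 0.f;
    for (int t = 0; t < blockDim.x; ++t) s += smem[t];
    out[blockIdx.x] = s;
  }
}

int main() {
  const int len = 1000, width = 128, threads = 64;
  int numWindows = len / width + (len % width == 0 ? 0 : 1);  // same ceil-divide as above
  float *d_in, *d_out;
  cudaMalloc(&d_in, len * sizeof(float));
  cudaMalloc(&d_out, numWindows * sizeof(float));
  cudaMemset(d_in, 0, len * sizeof(float));
  size_t sharedMem = sizeof(float) * threads;               // bytes of dynamic shared memory
  window_sum_sketch<<<numWindows, threads, sharedMem>>>(d_in, d_out, width, len);
  printf("%s\n", cudaGetErrorString(cudaDeviceSynchronize()));
  cudaFree(d_in); cudaFree(d_out);
  return 0;
}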
79c65082a306101bc8ba512eb76a7b1bf686b7ca.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void device_compute_cross_section_voigtQ_stepone(double* g_energies, const int* g_gns, const double* g_nu, const double* g_aif, double* g_gamma, double* g_n, const int N_ener){
    //The stored shared data

    //Get the global and local thread number
    int g_idx = blockIdx.x * blockDim.x + threadIdx.x;

    double ei, gns, nu_if, aif, abscoef;
    double gammaL; //cross_constants.ln2pi/cross_constants.halfwidth;

    //if(g_idx == 0) printf("partition = %12.6f\n",cross_constants.partition);
    if(g_idx < N_ener){
        //Store values in local memory
        ei = g_energies[g_idx];
        gns = g_gns[g_idx];
        nu_if = g_nu[g_idx];
        aif = g_aif[g_idx];

        if(nu_if==0) nu_if = 1e-6;

        abscoef = cross_constants.cmcoef*aif*gns
                  *exp(-cross_constants.beta*ei)*(1.0-exp(-cross_constants.beta*nu_if))/
                  (nu_if*nu_if*cross_constants.partition);

        if(gns==-1) abscoef = aif;

        g_energies[g_idx] = abscoef;

        gammaL = g_gamma[g_idx]*pow(296.0/cross_constants.temperature, g_n[g_idx])*cross_constants.pressure;
        g_gamma[g_idx] = gammaL;

        //if(threadIdx.x == 0) printf("%14.2E %14.2E\n",abscoef,gammaL) ;
    }
}
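The kernel above reads a cross_constants object defined elsewhere in that project. Below is a hedged sketch of how such a constant-memory parameter block is commonly declared and uploaded in CUDA; the struct name and fields are assumptions covering only the members the kernel visibly uses, and a HIP port would use the corresponding hipMemcpyToSymbol call.

// Sketch only: an assumed layout for a constant-memory parameter block like
// cross_constants. Field names are guesses based on the kernel's usage.
#include <cuda_runtime.h>

struct CrossConstants {
  double cmcoef, beta, partition, temperature, pressure;
};

__constant__ CrossConstants cross_constants_sketch;

void upload_constants(const CrossConstants &host_values) {
  // copy the host-side struct into the __constant__ symbol before any launch
  cudaMemcpyToSymbol(cross_constants_sketch, &host_values, sizeof(CrossConstants));
}

int main() {
  CrossConstants c{1.0, 1.0, 1.0, 296.0, 1.0};  // example values only
  upload_constants(c);
  return 0;
}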
79c65082a306101bc8ba512eb76a7b1bf686b7ca.cu
__global__ void device_compute_cross_section_voigtQ_stepone(double* g_energies, const int* g_gns, const double* g_nu, const double* g_aif, double* g_gamma, double* g_n, const int N_ener){
    //The stored shared data

    //Get the global and local thread number
    int g_idx = blockIdx.x * blockDim.x + threadIdx.x;

    double ei, gns, nu_if, aif, abscoef;
    double gammaL; //cross_constants.ln2pi/cross_constants.halfwidth;

    //if(g_idx == 0) printf("partition = %12.6f\n",cross_constants.partition);
    if(g_idx < N_ener){
        //Store values in local memory
        ei = g_energies[g_idx];
        gns = g_gns[g_idx];
        nu_if = g_nu[g_idx];
        aif = g_aif[g_idx];

        if(nu_if==0) nu_if = 1e-6;

        abscoef = cross_constants.cmcoef*aif*gns
                  *exp(-cross_constants.beta*ei)*(1.0-exp(-cross_constants.beta*nu_if))/
                  (nu_if*nu_if*cross_constants.partition);

        if(gns==-1) abscoef = aif;

        g_energies[g_idx] = abscoef;

        gammaL = g_gamma[g_idx]*pow(296.0/cross_constants.temperature, g_n[g_idx])*cross_constants.pressure;
        g_gamma[g_idx] = gammaL;

        //if(threadIdx.x == 0) printf("%14.2E %14.2E\n",abscoef,gammaL) ;
    }
}
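For reference, the gammaL line in the kernel above appears to apply the usual (T_ref/T)^n pressure-broadening scaling with T_ref = 296 K. A tiny host-side restatement follows; the function name and sample numbers are assumptions for illustration only.

// Host-side restatement of the Lorentzian half-width scaling used above:
// gamma(T, P) = gamma_ref * (296 / T)^n * P.
#include <cmath>
#include <cstdio>

double lorentz_hwhm(double gamma_ref, double n_exp, double temperature, double pressure) {
  return gamma_ref * std::pow(296.0 / temperature, n_exp) * pressure;
}

int main() {
  // example values only
  printf("%g\n", lorentz_hwhm(0.07, 0.5, 1000.0, 1.0));
  return 0;
}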
4d5b73c17e556555a0c7911e7927fe897fe38529.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/emulation.hpp" #include "opencv2/gpu/device/transform.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/utility.hpp" using namespace cv::gpu; using namespace cv::gpu::device; namespace canny { struct L1 : binary_function<int, int, float> { __device__ __forceinline__ float operator ()(int x, int y) const { return ::abs(x) + ::abs(y); } __host__ __device__ __forceinline__ L1() {} __host__ __device__ __forceinline__ L1(const L1&) {} }; struct L2 : binary_function<int, int, float> { __device__ __forceinline__ float operator ()(int x, int y) const { return ::sqrtf(x * x + y * y); } __host__ __device__ __forceinline__ L2() {} __host__ __device__ __forceinline__ L2(const L2&) {} }; } namespace cv { namespace gpu { namespace device { template <> struct TransformFunctorTraits<canny::L1> : DefaultTransformFunctorTraits<canny::L1> { enum { smart_shift = 4 }; }; template <> struct TransformFunctorTraits<canny::L2> : DefaultTransformFunctorTraits<canny::L2> { enum { smart_shift = 4 }; }; }}} namespace canny { texture<uchar, hipTextureType2D, hipReadModeElementType> tex_src(false, hipFilterModePoint, hipAddressModeClamp); struct SrcTex { const int xoff; const int yoff; __host__ SrcTex(int _xoff, int _yoff) : xoff(_xoff), yoff(_yoff) {} __device__ __forceinline__ int operator ()(int y, int x) const { return tex2D(tex_src, x + xoff, y + yoff); } }; template <class Norm> __global__ void calcMagnitudeKernel(const SrcTex src, PtrStepi dx, PtrStepi dy, PtrStepSzf mag, const Norm norm) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y >= mag.rows || x >= mag.cols) return; int dxVal = (src(y - 1, x + 1) + 2 * src(y, x + 1) + src(y + 1, x + 1)) - (src(y - 1, x - 1) + 2 * src(y, x - 1) + src(y + 1, x - 1)); int dyVal = (src(y + 1, x - 1) + 2 * src(y + 1, x) + src(y + 1, x + 1)) - (src(y - 1, x - 1) + 2 * src(y - 1, x) + src(y - 1, x + 1)); dx(y, x) = dxVal; dy(y, x) = dyVal; mag(y, x) = norm(dxVal, dyVal); } void calcMagnitude(PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad) { const dim3 block(16, 16); const dim3 grid(divUp(mag.cols, block.x), divUp(mag.rows, block.y)); bindTexture(&tex_src, srcWhole); SrcTex src(xoff, yoff); if (L2Grad) { L2 norm; hipLaunchKernelGGL(( calcMagnitudeKernel), dim3(grid), dim3(block), 0, 0, src, dx, dy, mag, norm); } else { L1 norm; hipLaunchKernelGGL(( calcMagnitudeKernel), dim3(grid), dim3(block), 0, 0, src, dx, dy, mag, norm); } cudaSafeCall( hipGetLastError() ); cudaSafeCall(hipDeviceSynchronize()); } void calcMagnitude(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad) { if (L2Grad) { L2 norm; transform(dx, dy, mag, norm, WithOutMask(), 0); } else { L1 norm; transform(dx, dy, mag, norm, WithOutMask(), 0); } } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { texture<float, hipTextureType2D, hipReadModeElementType> tex_mag(false, hipFilterModePoint, hipAddressModeClamp); __global__ void calcMapKernel(const PtrStepSzi dx, const PtrStepi dy, PtrStepi map, const float low_thresh, const float high_thresh) { const int CANNY_SHIFT = 15; const int TG22 = (int)(0.4142135623730950488016887242097*(1<<CANNY_SHIFT) + 0.5); const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x == 0 || x >= dx.cols - 1 || y == 0 || y >= 
dx.rows - 1) return; int dxVal = dx(y, x); int dyVal = dy(y, x); const int s = (dxVal ^ dyVal) < 0 ? -1 : 1; const float m = tex2D(tex_mag, x, y); dxVal = ::abs(dxVal); dyVal = ::abs(dyVal); // 0 - the pixel can not belong to an edge // 1 - the pixel might belong to an edge // 2 - the pixel does belong to an edge int edge_type = 0; if (m > low_thresh) { const int tg22x = dxVal * TG22; const int tg67x = tg22x + ((dxVal + dxVal) << CANNY_SHIFT); dyVal <<= CANNY_SHIFT; if (dyVal < tg22x) { if (m > tex2D(tex_mag, x - 1, y) && m >= tex2D(tex_mag, x + 1, y)) edge_type = 1 + (int)(m > high_thresh); } else if(dyVal > tg67x) { if (m > tex2D(tex_mag, x, y - 1) && m >= tex2D(tex_mag, x, y + 1)) edge_type = 1 + (int)(m > high_thresh); } else { if (m > tex2D(tex_mag, x - s, y - 1) && m >= tex2D(tex_mag, x + s, y + 1)) edge_type = 1 + (int)(m > high_thresh); } } map(y, x) = edge_type; } void calcMap(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, PtrStepSzi map, float low_thresh, float high_thresh) { const dim3 block(16, 16); const dim3 grid(divUp(dx.cols, block.x), divUp(dx.rows, block.y)); bindTexture(&tex_mag, mag); hipLaunchKernelGGL(( calcMapKernel), dim3(grid), dim3(block), 0, 0, dx, dy, map, low_thresh, high_thresh); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { __device__ int counter = 0; __device__ __forceinline__ bool checkIdx(int y, int x, int rows, int cols) { return (y >= 0) && (y < rows) && (x >= 0) && (x < cols); } __global__ void edgesHysteresisLocalKernel(PtrStepSzi map, short2* st) { __shared__ volatile int smem[18][18]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; smem[threadIdx.y + 1][threadIdx.x + 1] = checkIdx(y, x, map.rows, map.cols) ? map(y, x) : 0; if (threadIdx.y == 0) smem[0][threadIdx.x + 1] = checkIdx(y - 1, x, map.rows, map.cols) ? map(y - 1, x) : 0; if (threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][threadIdx.x + 1] = checkIdx(y + 1, x, map.rows, map.cols) ? map(y + 1, x) : 0; if (threadIdx.x == 0) smem[threadIdx.y + 1][0] = checkIdx(y, x - 1, map.rows, map.cols) ? map(y, x - 1) : 0; if (threadIdx.x == blockDim.x - 1) smem[threadIdx.y + 1][blockDim.x + 1] = checkIdx(y, x + 1, map.rows, map.cols) ? map(y, x + 1) : 0; if (threadIdx.x == 0 && threadIdx.y == 0) smem[0][0] = checkIdx(y - 1, x - 1, map.rows, map.cols) ? map(y - 1, x - 1) : 0; if (threadIdx.x == blockDim.x - 1 && threadIdx.y == 0) smem[0][blockDim.x + 1] = checkIdx(y - 1, x + 1, map.rows, map.cols) ? map(y - 1, x + 1) : 0; if (threadIdx.x == 0 && threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][0] = checkIdx(y + 1, x - 1, map.rows, map.cols) ? map(y + 1, x - 1) : 0; if (threadIdx.x == blockDim.x - 1 && threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][blockDim.x + 1] = checkIdx(y + 1, x + 1, map.rows, map.cols) ? 
map(y + 1, x + 1) : 0; __syncthreads(); if (x >= map.cols || y >= map.rows) return; int n; #pragma unroll for (int k = 0; k < 16; ++k) { n = 0; if (smem[threadIdx.y + 1][threadIdx.x + 1] == 1) { n += smem[threadIdx.y ][threadIdx.x ] == 2; n += smem[threadIdx.y ][threadIdx.x + 1] == 2; n += smem[threadIdx.y ][threadIdx.x + 2] == 2; n += smem[threadIdx.y + 1][threadIdx.x ] == 2; n += smem[threadIdx.y + 1][threadIdx.x + 2] == 2; n += smem[threadIdx.y + 2][threadIdx.x ] == 2; n += smem[threadIdx.y + 2][threadIdx.x + 1] == 2; n += smem[threadIdx.y + 2][threadIdx.x + 2] == 2; } __syncthreads(); if (n > 0) smem[threadIdx.y + 1][threadIdx.x + 1] = 2; __syncthreads(); } const int e = smem[threadIdx.y + 1][threadIdx.x + 1]; map(y, x) = e; n = 0; if (e == 2) { n += smem[threadIdx.y ][threadIdx.x ] == 1; n += smem[threadIdx.y ][threadIdx.x + 1] == 1; n += smem[threadIdx.y ][threadIdx.x + 2] == 1; n += smem[threadIdx.y + 1][threadIdx.x ] == 1; n += smem[threadIdx.y + 1][threadIdx.x + 2] == 1; n += smem[threadIdx.y + 2][threadIdx.x ] == 1; n += smem[threadIdx.y + 2][threadIdx.x + 1] == 1; n += smem[threadIdx.y + 2][threadIdx.x + 2] == 1; } if (n > 0) { const int ind = ::atomicAdd(&counter, 1); st[ind] = make_short2(x, y); } } void edgesHysteresisLocal(PtrStepSzi map, short2* st1) { void* counter_ptr; cudaSafeCall( hipGetSymbolAddress(&counter_ptr, counter) ); cudaSafeCall( hipMemset(counter_ptr, 0, sizeof(int)) ); const dim3 block(16, 16); const dim3 grid(divUp(map.cols, block.x), divUp(map.rows, block.y)); hipLaunchKernelGGL(( edgesHysteresisLocalKernel), dim3(grid), dim3(block), 0, 0, map, st1); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { __constant__ int c_dx[8] = {-1, 0, 1, -1, 1, -1, 0, 1}; __constant__ int c_dy[8] = {-1, -1, -1, 0, 0, 1, 1, 1}; __global__ void edgesHysteresisGlobalKernel(PtrStepSzi map, short2* st1, short2* st2, const int count) { const int stack_size = 512; __shared__ int s_counter; __shared__ int s_ind; __shared__ short2 s_st[stack_size]; if (threadIdx.x == 0) s_counter = 0; __syncthreads(); int ind = blockIdx.y * gridDim.x + blockIdx.x; if (ind >= count) return; short2 pos = st1[ind]; if (threadIdx.x < 8) { pos.x += c_dx[threadIdx.x]; pos.y += c_dy[threadIdx.x]; if (pos.x > 0 && pos.x < map.cols - 1 && pos.y > 0 && pos.y < map.rows - 1 && map(pos.y, pos.x) == 1) { map(pos.y, pos.x) = 2; ind = Emulation::smem::atomicAdd(&s_counter, 1); s_st[ind] = pos; } } __syncthreads(); while (s_counter > 0 && s_counter <= stack_size - blockDim.x) { const int subTaskIdx = threadIdx.x >> 3; const int portion = ::min(s_counter, blockDim.x >> 3); if (subTaskIdx < portion) pos = s_st[s_counter - 1 - subTaskIdx]; __syncthreads(); if (threadIdx.x == 0) s_counter -= portion; __syncthreads(); if (subTaskIdx < portion) { pos.x += c_dx[threadIdx.x & 7]; pos.y += c_dy[threadIdx.x & 7]; if (pos.x > 0 && pos.x < map.cols - 1 && pos.y > 0 && pos.y < map.rows - 1 && map(pos.y, pos.x) == 1) { map(pos.y, pos.x) = 2; ind = Emulation::smem::atomicAdd(&s_counter, 1); s_st[ind] = pos; } } __syncthreads(); } if (s_counter > 0) { if (threadIdx.x == 0) { s_ind = ::atomicAdd(&counter, s_counter); if (s_ind + s_counter > map.cols * map.rows) s_counter = 0; } __syncthreads(); ind = s_ind; for (int i = threadIdx.x; i < s_counter; i += blockDim.x) st2[ind + i] = s_st[i]; } } void edgesHysteresisGlobal(PtrStepSzi map, short2* st1, short2* st2) { void* counter_ptr; cudaSafeCall( 
hipGetSymbolAddress(&counter_ptr, canny::counter) ); int count; cudaSafeCall( hipMemcpy(&count, counter_ptr, sizeof(int), hipMemcpyDeviceToHost) ); while (count > 0) { cudaSafeCall( hipMemset(counter_ptr, 0, sizeof(int)) ); const dim3 block(128); const dim3 grid(::min(count, 65535u), divUp(count, 65535), 1); hipLaunchKernelGGL(( edgesHysteresisGlobalKernel), dim3(grid), dim3(block), 0, 0, map, st1, st2, count); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); cudaSafeCall( hipMemcpy(&count, counter_ptr, sizeof(int), hipMemcpyDeviceToHost) ); count = min(count, map.cols * map.rows); //std::swap(st1, st2); short2* tmp = st1; st1 = st2; st2 = tmp; } } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { struct GetEdges : unary_function<int, uchar> { __device__ __forceinline__ uchar operator ()(int e) const { return (uchar)(-(e >> 1)); } __host__ __device__ __forceinline__ GetEdges() {} __host__ __device__ __forceinline__ GetEdges(const GetEdges&) {} }; } namespace cv { namespace gpu { namespace device { template <> struct TransformFunctorTraits<canny::GetEdges> : DefaultTransformFunctorTraits<canny::GetEdges> { enum { smart_shift = 4 }; }; }}} namespace canny { void getEdges(PtrStepSzi map, PtrStepSzb dst) { transform(map, dst, GetEdges(), WithOutMask(), 0); } } #endif /* CUDA_DISABLER */
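The hysteresis kernels above coordinate through a __device__ global counter that the host zeroes before each pass, the kernels bump with atomicAdd, and the host reads back afterwards. Below is a standalone sketch of that pattern using the simpler cudaMemcpyToSymbol/FromSymbol variant rather than cudaGetSymbolAddress; the data and names are assumptions, not the OpenCV code.

// Standalone sketch of the device-global-counter idiom used by the canny code.
#include <cuda_runtime.h>
#include <cstdio>

__device__ int d_counter = 0;

__global__ void count_positive(const int *data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n && data[i] > 0)
    atomicAdd(&d_counter, 1);  // same idiom as the hysteresis kernels
}

int main() {
  const int n = 256;
  int h[n];
  for (int i = 0; i < n; ++i) h[i] = (i % 2) ? 1 : -1;
  int *d;
  cudaMalloc(&d, n * sizeof(int));
  cudaMemcpy(d, h, n * sizeof(int), cudaMemcpyHostToDevice);

  int zero = 0;
  cudaMemcpyToSymbol(d_counter, &zero, sizeof(int));  // reset before the launch
  count_positive<<<(n + 127) / 128, 128>>>(d, n);
  cudaDeviceSynchronize();

  int result = 0;
  cudaMemcpyFromSymbol(&result, d_counter, sizeof(int));  // read the count back
  printf("positives: %d (expected %d)\n", result, n / 2);
  cudaFree(d);
  return 0;
}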
4d5b73c17e556555a0c7911e7927fe897fe38529.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/emulation.hpp" #include "opencv2/gpu/device/transform.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/utility.hpp" using namespace cv::gpu; using namespace cv::gpu::device; namespace canny { struct L1 : binary_function<int, int, float> { __device__ __forceinline__ float operator ()(int x, int y) const { return ::abs(x) + ::abs(y); } __host__ __device__ __forceinline__ L1() {} __host__ __device__ __forceinline__ L1(const L1&) {} }; struct L2 : binary_function<int, int, float> { __device__ __forceinline__ float operator ()(int x, int y) const { return ::sqrtf(x * x + y * y); } __host__ __device__ __forceinline__ L2() {} __host__ __device__ __forceinline__ L2(const L2&) {} }; } namespace cv { namespace gpu { namespace device { template <> struct TransformFunctorTraits<canny::L1> : DefaultTransformFunctorTraits<canny::L1> { enum { smart_shift = 4 }; }; template <> struct TransformFunctorTraits<canny::L2> : DefaultTransformFunctorTraits<canny::L2> { enum { smart_shift = 4 }; }; }}} namespace canny { texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_src(false, cudaFilterModePoint, cudaAddressModeClamp); struct SrcTex { const int xoff; const int yoff; __host__ SrcTex(int _xoff, int _yoff) : xoff(_xoff), yoff(_yoff) {} __device__ __forceinline__ int operator ()(int y, int x) const { return tex2D(tex_src, x + xoff, y + yoff); } }; template <class Norm> __global__ void calcMagnitudeKernel(const SrcTex src, PtrStepi dx, PtrStepi dy, PtrStepSzf mag, const Norm norm) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y >= mag.rows || x >= mag.cols) return; int dxVal = (src(y - 1, x + 1) + 2 * src(y, x + 1) + src(y + 1, x + 1)) - (src(y - 1, x - 1) + 2 * src(y, x - 1) + src(y + 1, x - 1)); int dyVal = (src(y + 1, x - 1) + 2 * src(y + 1, x) + src(y + 1, x + 1)) - (src(y - 1, x - 1) + 2 * src(y - 1, x) + src(y - 1, x + 1)); dx(y, x) = dxVal; dy(y, x) = dyVal; mag(y, x) = norm(dxVal, dyVal); } void calcMagnitude(PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad) { const dim3 block(16, 16); const dim3 grid(divUp(mag.cols, block.x), divUp(mag.rows, block.y)); bindTexture(&tex_src, srcWhole); SrcTex src(xoff, yoff); if (L2Grad) { L2 norm; calcMagnitudeKernel<<<grid, block>>>(src, dx, dy, mag, norm); } else { L1 norm; calcMagnitudeKernel<<<grid, block>>>(src, dx, dy, mag, norm); } cudaSafeCall( cudaGetLastError() ); cudaSafeCall(cudaThreadSynchronize()); } void calcMagnitude(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad) { if (L2Grad) { L2 norm; transform(dx, dy, mag, norm, WithOutMask(), 0); } else { L1 norm; transform(dx, dy, mag, norm, WithOutMask(), 0); } } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { texture<float, cudaTextureType2D, cudaReadModeElementType> tex_mag(false, cudaFilterModePoint, cudaAddressModeClamp); __global__ void calcMapKernel(const PtrStepSzi dx, const PtrStepi dy, PtrStepi map, const float low_thresh, const float high_thresh) { const int CANNY_SHIFT = 15; const int TG22 = (int)(0.4142135623730950488016887242097*(1<<CANNY_SHIFT) + 0.5); const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x == 0 || x >= dx.cols - 1 || y == 0 || y >= dx.rows - 1) return; int dxVal = dx(y, x); int dyVal = dy(y, x); 
const int s = (dxVal ^ dyVal) < 0 ? -1 : 1; const float m = tex2D(tex_mag, x, y); dxVal = ::abs(dxVal); dyVal = ::abs(dyVal); // 0 - the pixel can not belong to an edge // 1 - the pixel might belong to an edge // 2 - the pixel does belong to an edge int edge_type = 0; if (m > low_thresh) { const int tg22x = dxVal * TG22; const int tg67x = tg22x + ((dxVal + dxVal) << CANNY_SHIFT); dyVal <<= CANNY_SHIFT; if (dyVal < tg22x) { if (m > tex2D(tex_mag, x - 1, y) && m >= tex2D(tex_mag, x + 1, y)) edge_type = 1 + (int)(m > high_thresh); } else if(dyVal > tg67x) { if (m > tex2D(tex_mag, x, y - 1) && m >= tex2D(tex_mag, x, y + 1)) edge_type = 1 + (int)(m > high_thresh); } else { if (m > tex2D(tex_mag, x - s, y - 1) && m >= tex2D(tex_mag, x + s, y + 1)) edge_type = 1 + (int)(m > high_thresh); } } map(y, x) = edge_type; } void calcMap(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, PtrStepSzi map, float low_thresh, float high_thresh) { const dim3 block(16, 16); const dim3 grid(divUp(dx.cols, block.x), divUp(dx.rows, block.y)); bindTexture(&tex_mag, mag); calcMapKernel<<<grid, block>>>(dx, dy, map, low_thresh, high_thresh); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { __device__ int counter = 0; __device__ __forceinline__ bool checkIdx(int y, int x, int rows, int cols) { return (y >= 0) && (y < rows) && (x >= 0) && (x < cols); } __global__ void edgesHysteresisLocalKernel(PtrStepSzi map, short2* st) { __shared__ volatile int smem[18][18]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; smem[threadIdx.y + 1][threadIdx.x + 1] = checkIdx(y, x, map.rows, map.cols) ? map(y, x) : 0; if (threadIdx.y == 0) smem[0][threadIdx.x + 1] = checkIdx(y - 1, x, map.rows, map.cols) ? map(y - 1, x) : 0; if (threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][threadIdx.x + 1] = checkIdx(y + 1, x, map.rows, map.cols) ? map(y + 1, x) : 0; if (threadIdx.x == 0) smem[threadIdx.y + 1][0] = checkIdx(y, x - 1, map.rows, map.cols) ? map(y, x - 1) : 0; if (threadIdx.x == blockDim.x - 1) smem[threadIdx.y + 1][blockDim.x + 1] = checkIdx(y, x + 1, map.rows, map.cols) ? map(y, x + 1) : 0; if (threadIdx.x == 0 && threadIdx.y == 0) smem[0][0] = checkIdx(y - 1, x - 1, map.rows, map.cols) ? map(y - 1, x - 1) : 0; if (threadIdx.x == blockDim.x - 1 && threadIdx.y == 0) smem[0][blockDim.x + 1] = checkIdx(y - 1, x + 1, map.rows, map.cols) ? map(y - 1, x + 1) : 0; if (threadIdx.x == 0 && threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][0] = checkIdx(y + 1, x - 1, map.rows, map.cols) ? map(y + 1, x - 1) : 0; if (threadIdx.x == blockDim.x - 1 && threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][blockDim.x + 1] = checkIdx(y + 1, x + 1, map.rows, map.cols) ? 
map(y + 1, x + 1) : 0; __syncthreads(); if (x >= map.cols || y >= map.rows) return; int n; #pragma unroll for (int k = 0; k < 16; ++k) { n = 0; if (smem[threadIdx.y + 1][threadIdx.x + 1] == 1) { n += smem[threadIdx.y ][threadIdx.x ] == 2; n += smem[threadIdx.y ][threadIdx.x + 1] == 2; n += smem[threadIdx.y ][threadIdx.x + 2] == 2; n += smem[threadIdx.y + 1][threadIdx.x ] == 2; n += smem[threadIdx.y + 1][threadIdx.x + 2] == 2; n += smem[threadIdx.y + 2][threadIdx.x ] == 2; n += smem[threadIdx.y + 2][threadIdx.x + 1] == 2; n += smem[threadIdx.y + 2][threadIdx.x + 2] == 2; } __syncthreads(); if (n > 0) smem[threadIdx.y + 1][threadIdx.x + 1] = 2; __syncthreads(); } const int e = smem[threadIdx.y + 1][threadIdx.x + 1]; map(y, x) = e; n = 0; if (e == 2) { n += smem[threadIdx.y ][threadIdx.x ] == 1; n += smem[threadIdx.y ][threadIdx.x + 1] == 1; n += smem[threadIdx.y ][threadIdx.x + 2] == 1; n += smem[threadIdx.y + 1][threadIdx.x ] == 1; n += smem[threadIdx.y + 1][threadIdx.x + 2] == 1; n += smem[threadIdx.y + 2][threadIdx.x ] == 1; n += smem[threadIdx.y + 2][threadIdx.x + 1] == 1; n += smem[threadIdx.y + 2][threadIdx.x + 2] == 1; } if (n > 0) { const int ind = ::atomicAdd(&counter, 1); st[ind] = make_short2(x, y); } } void edgesHysteresisLocal(PtrStepSzi map, short2* st1) { void* counter_ptr; cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, counter) ); cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(int)) ); const dim3 block(16, 16); const dim3 grid(divUp(map.cols, block.x), divUp(map.rows, block.y)); edgesHysteresisLocalKernel<<<grid, block>>>(map, st1); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { __constant__ int c_dx[8] = {-1, 0, 1, -1, 1, -1, 0, 1}; __constant__ int c_dy[8] = {-1, -1, -1, 0, 0, 1, 1, 1}; __global__ void edgesHysteresisGlobalKernel(PtrStepSzi map, short2* st1, short2* st2, const int count) { const int stack_size = 512; __shared__ int s_counter; __shared__ int s_ind; __shared__ short2 s_st[stack_size]; if (threadIdx.x == 0) s_counter = 0; __syncthreads(); int ind = blockIdx.y * gridDim.x + blockIdx.x; if (ind >= count) return; short2 pos = st1[ind]; if (threadIdx.x < 8) { pos.x += c_dx[threadIdx.x]; pos.y += c_dy[threadIdx.x]; if (pos.x > 0 && pos.x < map.cols - 1 && pos.y > 0 && pos.y < map.rows - 1 && map(pos.y, pos.x) == 1) { map(pos.y, pos.x) = 2; ind = Emulation::smem::atomicAdd(&s_counter, 1); s_st[ind] = pos; } } __syncthreads(); while (s_counter > 0 && s_counter <= stack_size - blockDim.x) { const int subTaskIdx = threadIdx.x >> 3; const int portion = ::min(s_counter, blockDim.x >> 3); if (subTaskIdx < portion) pos = s_st[s_counter - 1 - subTaskIdx]; __syncthreads(); if (threadIdx.x == 0) s_counter -= portion; __syncthreads(); if (subTaskIdx < portion) { pos.x += c_dx[threadIdx.x & 7]; pos.y += c_dy[threadIdx.x & 7]; if (pos.x > 0 && pos.x < map.cols - 1 && pos.y > 0 && pos.y < map.rows - 1 && map(pos.y, pos.x) == 1) { map(pos.y, pos.x) = 2; ind = Emulation::smem::atomicAdd(&s_counter, 1); s_st[ind] = pos; } } __syncthreads(); } if (s_counter > 0) { if (threadIdx.x == 0) { s_ind = ::atomicAdd(&counter, s_counter); if (s_ind + s_counter > map.cols * map.rows) s_counter = 0; } __syncthreads(); ind = s_ind; for (int i = threadIdx.x; i < s_counter; i += blockDim.x) st2[ind + i] = s_st[i]; } } void edgesHysteresisGlobal(PtrStepSzi map, short2* st1, short2* st2) { void* counter_ptr; cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, 
canny::counter) ); int count; cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(int), cudaMemcpyDeviceToHost) ); while (count > 0) { cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(int)) ); const dim3 block(128); const dim3 grid(::min(count, 65535u), divUp(count, 65535), 1); edgesHysteresisGlobalKernel<<<grid, block>>>(map, st1, st2, count); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(int), cudaMemcpyDeviceToHost) ); count = min(count, map.cols * map.rows); //std::swap(st1, st2); short2* tmp = st1; st1 = st2; st2 = tmp; } } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { struct GetEdges : unary_function<int, uchar> { __device__ __forceinline__ uchar operator ()(int e) const { return (uchar)(-(e >> 1)); } __host__ __device__ __forceinline__ GetEdges() {} __host__ __device__ __forceinline__ GetEdges(const GetEdges&) {} }; } namespace cv { namespace gpu { namespace device { template <> struct TransformFunctorTraits<canny::GetEdges> : DefaultTransformFunctorTraits<canny::GetEdges> { enum { smart_shift = 4 }; }; }}} namespace canny { void getEdges(PtrStepSzi map, PtrStepSzb dst) { transform(map, dst, GetEdges(), WithOutMask(), 0); } } #endif /* CUDA_DISABLER */
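The map produced above holds 0 (non-edge), 1 (weak candidate) or 2 (confirmed edge) per pixel, and getEdges() converts it to a binary image purely with the expression (uchar)(-(e >> 1)). The tiny host-only snippet below is illustrative and not part of the original sources; it just spells out that mapping.

#include <cstdio>

// Standalone check of the GetEdges expression used above:
// e = 0 or 1 (non-edge / unconfirmed) -> 0, e = 2 (confirmed edge) -> 255.
int main() {
    for (int e = 0; e <= 2; ++e) {
        unsigned char out = (unsigned char)(-(e >> 1));
        std::printf("map=%d -> dst=%u\n", e, out);
    }
    return 0;
}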
e4e3aa56b6e6887c0f3bfa5b91f8c341ee2b85d9.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "MatrixMulSh.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *Md = NULL;
            hipMalloc(&Md, XSIZE*YSIZE);
            float *Nd = NULL;
            hipMalloc(&Nd, XSIZE*YSIZE);
            float *Pd = NULL;
            hipMalloc(&Pd, XSIZE*YSIZE);
            const int WIDTH = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( MatrixMulSh), dim3(gridBlock),dim3(threadBlock), 0, 0, Md,Nd,Pd,WIDTH);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( MatrixMulSh), dim3(gridBlock),dim3(threadBlock), 0, 0, Md,Nd,Pd,WIDTH);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( MatrixMulSh), dim3(gridBlock),dim3(threadBlock), 0, 0, Md,Nd,Pd,WIDTH);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
e4e3aa56b6e6887c0f3bfa5b91f8c341ee2b85d9.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "MatrixMulSh.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *Md = NULL;
            cudaMalloc(&Md, XSIZE*YSIZE);
            float *Nd = NULL;
            cudaMalloc(&Nd, XSIZE*YSIZE);
            float *Pd = NULL;
            cudaMalloc(&Pd, XSIZE*YSIZE);
            const int WIDTH = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            MatrixMulSh<<<gridBlock,threadBlock>>>(Md,Nd,Pd,WIDTH);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                MatrixMulSh<<<gridBlock,threadBlock>>>(Md,Nd,Pd,WIDTH);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                MatrixMulSh<<<gridBlock,threadBlock>>>(Md,Nd,Pd,WIDTH);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
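MatrixMulSh itself comes from the included "MatrixMulSh.cu" and is not shown in either file above. As a rough reference only, a conventional shared-memory tiled kernel with this signature might look like the sketch below; the kernel name and TILE value are assumptions for illustration, not the benchmarked source, and the sketch further assumes TILE x TILE thread blocks (the harness above also sweeps other block shapes). Note as well that the timing loop above never synchronizes before reading steady_clock a second time, so it mostly measures launch submission cost; a cudaDeviceSynchronize() (hipDeviceSynchronize() in the HIP variant) before taking `end` would measure execution time instead.

#define TILE 16

// Illustrative shared-memory tiled multiply for square WIDTH x WIDTH matrices,
// assuming the kernel is launched with TILE x TILE thread blocks.
__global__ void MatrixMulShSketch(float *Md, float *Nd, float *Pd, const int WIDTH) {
    __shared__ float Ms[TILE][TILE];
    __shared__ float Ns[TILE][TILE];

    int row = blockIdx.y * TILE + threadIdx.y;
    int col = blockIdx.x * TILE + threadIdx.x;
    float acc = 0.0f;

    for (int t = 0; t < (WIDTH + TILE - 1) / TILE; ++t) {
        int mCol = t * TILE + threadIdx.x;   // column of Md loaded by this thread
        int nRow = t * TILE + threadIdx.y;   // row of Nd loaded by this thread
        Ms[threadIdx.y][threadIdx.x] = (row < WIDTH && mCol < WIDTH) ? Md[row * WIDTH + mCol] : 0.0f;
        Ns[threadIdx.y][threadIdx.x] = (nRow < WIDTH && col < WIDTH) ? Nd[nRow * WIDTH + col] : 0.0f;
        __syncthreads();
        for (int k = 0; k < TILE; ++k)
            acc += Ms[threadIdx.y][k] * Ns[k][threadIdx.x];
        __syncthreads();
    }
    if (row < WIDTH && col < WIDTH)
        Pd[row * WIDTH + col] = acc;
}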
a913724c8038a96c90de1a35b35c21bb42878ebe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "neural_net.h" #include <time.h> template <typename T> __global__ void softmaxLossBackProp(int *y, T *SO, T *dSO, int batch_size, int output_size, float eps) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; int cur_class = static_cast<int>(y[i]); dSO[i * output_size + cur_class] = -1 / (SO[i * output_size + cur_class] * batch_size + eps); } template <typename T> __global__ void computeSoftmaxLoss(T *O, int *y, float *loss, int batch_size, int num_classes, float eps) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; loss[i] = -logf(O[i * num_classes + y[i]] + eps); } template <typename T> __global__ void inferClass(T *O, int *pred_y, int batch_size, int num_classes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; T max = O[i * num_classes]; int index = 0; for (int j = 1; j < num_classes; j++) { if (O[i * num_classes + j] > max) { max = O[i * num_classes + j]; index = j; } } pred_y[i] = index; } float NeuralNet::computeLoss() { if (layer_type[num_layers - 1] == SOFTMAX) { if (data_type == CUDNN_DATA_FLOAT) hipLaunchKernelGGL(( computeSoftmaxLoss<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (float *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps); else if (data_type == CUDNN_DATA_DOUBLE) hipLaunchKernelGGL(( computeSoftmaxLoss<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (double *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps); } checkCudaErrors(hipMemcpy(h_loss, loss, batch_size * sizeof(float), hipMemcpyDeviceToHost)); float total_loss = 0.0; for (int i = 0; i < batch_size; i++) total_loss += h_loss[i]; return total_loss / batch_size; } void NeuralNet::compareOutputCorrect(int *correct_count, int *y) { *correct_count = 0; if (data_type == CUDNN_DATA_FLOAT) { float *typecast_O = (float *)layer_input[num_layers - 1]; hipLaunchKernelGGL(( inferClass<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, typecast_O, pred_y, batch_size, num_classes); checkCudaErrors(hipMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) { if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1; } } else if (data_type == CUDNN_DATA_DOUBLE) { double *typecast_O = (double *)layer_input[num_layers - 1]; hipLaunchKernelGGL(( inferClass<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, typecast_O, pred_y, batch_size, num_classes); checkCudaErrors(hipMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) { if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1; } } } NeuralNet::NeuralNet(std::vector<LayerSpecifier> &layers, DataType data_type, int batch_size, TensorFormat tensor_format, long long dropout_seed, float softmax_eps, float init_std_dev, vDNNType vdnn_type, vDNNConvAlgo vdnn_conv_algo, UpdateRule update_rule) { // ---------------------- vDNN start ---------------------- checkCudaErrors(hipStreamCreate(&stream_compute)); checkCudaErrors(hipStreamCreate(&stream_memory)); this->vdnn_type = vdnn_type; this->vdnn_conv_algo = vdnn_conv_algo; // ---------------------- vDNN end ------------------------ // create handle checkCUDNN(cudnnCreate(&cudnn_handle)); checkCUDNN(cudnnSetStream(cudnn_handle, stream_compute)); checkCUBLAS(hipblasCreate(&cublas_handle)); checkCUBLAS(hipblasSetStream(cublas_handle, 
stream_compute)); checkCURAND(hiprandCreateGenerator(&curand_gen, HIPRAND_RNG_PSEUDO_DEFAULT)); checkCURAND(hiprandSetStream(curand_gen, stream_compute)); checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes)); size_t init_free_bytes = free_bytes; std::cout << "Free bytes at start: " << free_bytes << std::endl; pre_alloc_conv_derivative = false; pre_alloc_fc_derivative = false; pre_alloc_batch_norm_derivative = true; if (vdnn_type == vDNN_NONE) { pre_alloc_conv_derivative = true; pre_alloc_fc_derivative = true; pre_alloc_batch_norm_derivative = true; } if (data_type == DATA_FLOAT) { this->data_type = CUDNN_DATA_FLOAT; data_type_size = sizeof(float); } else if (data_type == DATA_DOUBLE) { this->data_type = CUDNN_DATA_DOUBLE; data_type_size = sizeof(double); } if (tensor_format == TENSOR_NCHW) this->tensor_format = CUDNN_TENSOR_NCHW; else if (tensor_format == TENSOR_NHWC) this->tensor_format = CUDNN_TENSOR_NHWC; this->batch_size = batch_size; this->softmax_eps = softmax_eps; this->init_std_dev = init_std_dev; num_layers = layers.size(); // allocation of space for input to each layer layer_input = (void **)malloc((num_layers + 1) * sizeof(void *)); layer_input_size = (int *)malloc((num_layers + 1) * sizeof(int)); dlayer_input = (void **)malloc((num_layers + 1) * sizeof(void *)); params = (void **)malloc(num_layers * sizeof(void *)); LayerDimension prev_output_size; LayerDimension current_output_size; for (int i = 0; i < num_layers; i++) { layer_type.push_back(layers[i].type); if (layers[i].type == CONV) { ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params; params[i] = malloc(sizeof(ConvLayerParams)); ((ConvLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format, data_type_size, current_output_size, update_rule); } else if (layers[i].type == FULLY_CONNECTED) { FCDescriptor *user_params = (FCDescriptor *)layers[i].params; params[i] = malloc(sizeof(FCLayerParams)); ((FCLayerParams *)params[i])->initializeValues(user_params, batch_size, this->tensor_format, this->data_type, current_output_size, update_rule); } else if (layers[i].type == DROPOUT) { DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params; params[i] = malloc(sizeof(DropoutLayerParams)); ((DropoutLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format, current_output_size); } else if (layers[i].type == BATCHNORM) { BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params; params[i] = malloc(sizeof(BatchNormLayerParams)); ((BatchNormLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size, update_rule); } else if (layers[i].type == POOLING) { PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params; params[i] = malloc(sizeof(BatchNormLayerParams)); ((PoolingLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size); } else if (layers[i].type == ACTV) { ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params; params[i] = malloc(sizeof(ActivationLayerParams)); ((ActivationLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size); } else if (layers[i].type == SOFTMAX) { SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params; params[i] = malloc(sizeof(SoftmaxLayerParams)); ((SoftmaxLayerParams 
*)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size); // std::cout << current_output_size.N << ' ' << current_output_size.C << current_output_size.H << current_output_size.W << std::endl; } if (i == 0) { prev_output_size = current_output_size; } // incomplete - have to check flatten and check exact dimension // else if (current_output_size.getTotalSize() != prev_output_size.getTotalSize()) { // std::cout << "Layer " << i << " output and next layer's input size mismatch\n"; // exit(0); // } } // ---------------------- vDNN start ---------------------- // allocate space in host memory for layers to be transferred h_layer_input = (void **)malloc(num_layers * sizeof(void *)); to_offload = (bool *)malloc(num_layers * sizeof(bool)); prefetched = (bool *)malloc(num_layers * sizeof(bool)); // ---------------------- vDNN end ------------------------ checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes)); std::cout << "Free bytes just before allocate space: " << free_bytes << std::endl; // allocate space for parameters // Exception BatchNorm - looks like it will take lots of space if only FC layers - space taken = size of one input for (int i = 0; i < num_layers; i++) { size_t input_size; if (layers[i].type == CONV) { ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params; ((ConvLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev, free_bytes, pre_alloc_conv_derivative); input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w; if (i == 0) { input_channels = user_params->input_channels; input_h = user_params->input_h; input_w = user_params->input_w; } } else if (layers[i].type == FULLY_CONNECTED) { FCDescriptor *user_params = (FCDescriptor *)layers[i].params; ((FCLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev, free_bytes, pre_alloc_fc_derivative); input_size = batch_size * user_params->input_channels; if (i == 0) { input_channels = user_params->input_channels; input_h = 1; input_w = 1; } } else if (layers[i].type == DROPOUT) { DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params; ((DropoutLayerParams *)params[i])->allocateSpace(free_bytes, cudnn_handle, user_params, dropout_seed); input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } } else if (layers[i].type == BATCHNORM) { BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params; ((BatchNormLayerParams *)params[i])->allocateSpace(this->data_type, data_type_size, free_bytes, pre_alloc_batch_norm_derivative); input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } } else if (layers[i].type == POOLING) { PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params; ((PoolingLayerParams *)params[i])->allocateSpace(free_bytes); input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w; if (i == 0) { input_channels = user_params->input_channels; input_h = user_params->input_h; input_w = user_params->input_w; } } else if (layers[i].type == ACTV) { ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params; ((ActivationLayerParams *)params[i])->allocateSpace(free_bytes); 
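// input_size computed in each branch of this loop is recorded in layer_input_size[i] below;
// it later sizes the cnmem device buffers for layer_input/dlayer_input and the pinned
// h_layer_input staging buffers used when a layer's input is offloaded to host memory.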
input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } } else if (layers[i].type == SOFTMAX) { SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params; ((SoftmaxLayerParams *)params[i])->allocateSpace(free_bytes); input_size = batch_size * user_params->channels * user_params->h * user_params->w; // assuming this is last layer, allocate for next layer as well // checkCudaErrors(hipMalloc(&layer_input[i + 1], input_size * data_type_size)); // checkCudaErrors(hipMalloc(&dlayer_input[i + 1], input_size * data_type_size)); layer_input_size[i + 1] = input_size; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } if (i == num_layers - 1) { num_classes = user_params->channels; } } // do not allocate memory initially // checkCudaErrors(hipMalloc(&layer_input[i], input_size * data_type_size)); // checkCudaErrors(hipMalloc(&dlayer_input[i], input_size * data_type_size)); // ---------------------- vDNN start ---------------------- layer_input_size[i] = input_size; // ---------------------- vDNN end ------------------------ } checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes)); std::cout << "Free bytes just after allocate space: " << free_bytes << std::endl; // very small - could be allocated initially itself checkCudaErrors(hipMalloc((void **)&y, batch_size * sizeof(int))); checkCudaErrors(hipMalloc((void **)&pred_y, batch_size * sizeof(int))); checkCudaErrors(hipMalloc((void **)&loss, batch_size * sizeof(float))); checkCudaErrors(hipMalloc(&one_vec, batch_size * data_type_size)); if (this->data_type == CUDNN_DATA_FLOAT) hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (float *)one_vec, batch_size, 1); else hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (double *)one_vec, batch_size, 1); checkCudaErrors(hipHostMalloc((void **)&h_loss, batch_size * sizeof(float))); checkCudaErrors(hipHostMalloc((void **)&h_pred_y, batch_size * sizeof(int))); // do not allocate workspace initially // allocate space for workspace and also keep track of algo // size_t cur_workspace_size; // workspace_size = 0; // for (int i = 0; i < num_layers; i++) { // if (layers[i].type == CONV) { // ((ConvLayerParams *)params[i])->getWorkspaceSize(cur_workspace_size, free_bytes); // if (cur_workspace_size > workspace_size) // workspace_size = cur_workspace_size; // } // } // checkCudaErrors(hipMalloc(&workspace, workspace_size)); // free_bytes = free_bytes - workspace_size; checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes)); // leave 600 MB and use the rest std::cout << "Free bytes: " << free_bytes << std::endl; free_bytes -= 1024 * 1024 * 600; // ---------------------- vDNN start ---------------------- size_t exp_max_consume, max_consume; vDNNOptimize(exp_max_consume, max_consume); std::cout << "actual_max_consume: " << max_consume << std::endl; std::cout << "exp_max_consume: " << exp_max_consume << std::endl; std::cout << "diff_max_consume(MB): " << (max_consume - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl; std::cout << "exp_free_bytes(MB): " << (free_bytes + 1024 * 1024 * 600 - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl; std::cout << "exp_total_consume(MB): " << (init_free_bytes - (free_bytes + 600 * 1024 * 1024 - exp_max_consume)) 
/ (1.0 * 1024 * 1024) << std::endl; std::cout << "actual_total_consume(MB): " << (init_free_bytes - (free_bytes + 600 * 1024 * 1024 - max_consume)) / (1.0 * 1024 * 1024) << std::endl; // ---------------------- vDNN end ------------------------ // ---------------------- vDNN start ---------------------- free_bytes = max_consume; cnmemDevice_t cnmem_device; size_t cnmem_stream_memory_size = free_bytes; cnmem_device.device = 0; cnmem_device.size = cnmem_stream_memory_size; cnmem_device.numStreams = 0; cnmem_device.streams = NULL; cnmem_device.streamSizes = NULL; // do not allow call to hipMalloc checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW)); // ---------------------- vDNN end ------------------------ // ---------------------- vDNN start ---------------------- for (int i = 0; i < num_layers; i++) { std::cerr << "to_offload[i] " << to_offload[i] << std::endl; } for (int i = 0; i < num_layers; i++) { // allocate pinned memory in host if (to_offload[i]) checkCudaErrors(hipHostMalloc(&h_layer_input[i], layer_input_size[i] * data_type_size)); } // ---------------------- vDNN end ------------------------ checkCudaErrors(hipDeviceSynchronize()); size_t temp_free_bytes; checkCudaErrors(hipMemGetInfo(&temp_free_bytes, &total_bytes)); std::cout << "Free bytes just before end of NeuralNet: " << temp_free_bytes << std::endl; // { // int n; // std::cout << "waiting..\n"; // std::cin >> n; // } // ---------------------- vDNN ext start ------------------------ event_offload_done = (hipEvent_t *)malloc(num_layers * sizeof(hipEvent_t)); thread_free_layer_input = (pthread_t *)malloc(num_layers * sizeof(pthread_t)); sem_sync_offload = (sem_t *)malloc(num_layers * sizeof(sem_t)); event_prefetch_done = (hipEvent_t *)malloc(num_layers * sizeof(hipEvent_t)); thread_flag_prefetch_done = (pthread_t *)malloc(num_layers * sizeof(pthread_t)); sem_prefetch_done = (sem_t *)malloc(num_layers * sizeof(sem_t)); for (int i = 0; i < num_layers; i++) { if (to_offload[i]) { checkCudaErrors(hipEventCreate(&event_offload_done[i])); checkSemaphoreErrors(sem_init(&sem_sync_offload[i], 0, 0)); checkCudaErrors(hipEventCreate(&event_prefetch_done[i])); checkSemaphoreErrors(sem_init(&sem_prefetch_done[i], 0, 0)); } } checkPMutexErrors(pthread_mutex_init(&lock_cnmem_memory, NULL)); checkPCondErrors(pthread_cond_init(&cond_cnmem_available, NULL)); layer_num = (PtrIndex *)malloc(num_layers * sizeof(PtrIndex)); for (int i = 0; i < num_layers; i++) { layer_num[i].ptr = this; layer_num[i].index = i; } // ---------------------- vDNN ext end ------------------------ } bool NeuralNet::simulateNeuralNetworkMemory(vDNNConvAlgoPref algo_pref, bool hard, size_t &exp_max_consume, size_t &max_consume) { CnmemSpace space_tracker(free_bytes); max_consume = 0; // forward pass // allocate space for 1st input std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating input(MB): " << space_tracker.getConsumed() << std::endl; std::cerr << "Forward pass" << std::endl; for (int i = 0; i < num_layers; i++) { if (layer_type[i] == SOFTMAX) break; std::cerr << "Processing layer " << i << std::endl; std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size); std::cerr << "Used space after output allocation(MB): " << space_tracker.getConsumed() << 
std::endl; space_tracker.updateMaxConsume(max_consume); if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; long cur_workspace_size = cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::FWD, algo_pref, hard); space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); space_tracker.updateMaxConsume(max_consume); if (cur_workspace_size == -1 or !space_tracker.isAvailable()) return false; std::cerr << "Used space after workspace allocation(MB): " << space_tracker.getConsumed() << std::endl; // current layer computation over, deallocate workspace space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); std::cerr << "Used space after workspace deallocation(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; // deallocate layer input if (to_offload[i]) { std::cerr << "deallocating input to " << i << std::endl; space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl; } } std::cerr << "Backward pass" << std::endl; if (batch_size * num_classes * data_type_size != layer_input_size[num_layers] * data_type_size) { std::cout << "Panic!! Using wrong size\n"; exit(0); } // backward pass space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size); std::cerr << "Used space after allocating final derivative(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); // std::cerr << "max_consume: " << max_consume << std::endl; for (int i = num_layers - 1; i >= 0; i--) { // allocate space for previous layer derivative std::cerr << "Processing layer " << i << std::endl; std::cerr << "Used space initial(MB): " << space_tracker.getConsumed() << std::endl; if (i > 0) { if (layer_type[i] == SOFTMAX) continue; else { space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size); std::cerr << "Used space after allocating prev. derivative(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); } // std::cerr << "max_consume: " << max_consume << std::endl; } int layer_to_prefetch = findPrefetchLayer(i); // if layer to be prefetched, allocate space for that layer if (layer_to_prefetch != -1) { std::cerr << "Prefetch layer " << layer_to_prefetch << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size); std::cerr << "Used space after allocating prefetch(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); } if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; long cur_filter_workspace_size = cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_FILTER, algo_pref, hard); long cur_data_workspace_size = 0; if (i > 0) cur_data_workspace_size = cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_DATA, algo_pref, hard); long cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? 
cur_filter_workspace_size :cur_data_workspace_size; space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); std::cerr << "Used space after allocating workspace(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); if (!pre_alloc_conv_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } // std::cerr << "max_consume: " << max_consume << std::endl; if (cur_filter_workspace_size == -1 or cur_data_workspace_size == -1 or !space_tracker.isAvailable()) return false; // current layer computation over, deallocate workspace space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); std::cerr << "Used space after deallocating workspace(MB): " << space_tracker.getConsumed() << std::endl; if (!pre_alloc_conv_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (!pre_alloc_fc_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; if (!pre_alloc_fc_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; if (!pre_alloc_batch_norm_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } if (!space_tracker.isAvailable()) return false; // deallocate layer output and derivative space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); std::cerr << "Used space after deallocating output, derivative(MB): " << space_tracker.getConsumed() << std::endl; // if 1st layer, deallocate input layer also if (i == 0) { space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); std::cerr 
<< "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl; } } if (space_tracker.getConsumed() > 0) std::cerr << "Panic!! more free bytes\n"; if (space_tracker.getConsumed() != 0) std::cerr << "Panic!! bytes not freed properly\n"; // return true; exp_max_consume = max_consume; cnmemDevice_t cnmem_device; cnmem_device.device = 0; cnmem_device.size = max_consume; cnmem_device.numStreams = 0; cnmem_device.streams = NULL; cnmem_device.streamSizes = NULL; checkCNMEM(cnmemInit(1, &cnmem_device, 0)); // check with cnmem once bool ret_val = simulateCNMEMMemory(max_consume); checkCNMEM(cnmemFinalize()); return ret_val; } bool NeuralNet::simulateCNMEMMemory(size_t &max_consume) { resetPrefetched(); checkCNMEMRet(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL)); // forward propagate for (int i = 0; i < num_layers; i++) { size_t cur_workspace_size; void *cur_workspace; checkCNMEMRet(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL)); if (layer_type[i] == CONV) { // std::cout << "conv\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; checkCNMEMRet(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); } if (layer_type[i] == CONV) { checkCNMEMRet(cnmemFree(cur_workspace, NULL)); } if (to_offload[i]) { checkCNMEMRet(cnmemFree(layer_input[i], NULL)); } if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) { i = i + 1; } } checkCNMEMRet(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL)); for (int i = num_layers - 1; i >= 0; i--) { // ---------------------- vDNN start ---------------------- int cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size; void *cur_workspace; if (i > 0) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) { dlayer_input[i] = dlayer_input[i + 1]; } else { int layer_to_prefetch = findPrefetchLayer(i); if (layer_to_prefetch != -1) { checkCNMEMRet(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL)); } checkCNMEMRet(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL)); } } if (layer_type[i] == CONV) { // std::cout << "here\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; // allocate space for derivative if (!pre_alloc_conv_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? 
cur_filter_workspace_size : cur_data_workspace_size; checkCNMEMRet(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (!pre_alloc_fc_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } } else if (layer_type[i] == SOFTMAX) { // std::cout << "compute here\n"; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; continue; } if (layer_type[i] == CONV) { checkCNMEMRet(cnmemFree(cur_workspace, NULL)); if (!pre_alloc_conv_derivative) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } else if (layer_type[i] == FULLY_CONNECTED) { if (!pre_alloc_fc_derivative) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } else if (layer_type[i] == BATCHNORM) { if (!pre_alloc_batch_norm_derivative) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } checkCNMEMRet(cnmemFree(layer_input[i + 1], NULL)); checkCNMEMRet(cnmemFree(dlayer_input[i + 1], NULL)); if (i == 0) { checkCNMEMRet(cnmemFree(layer_input[i], NULL)); } } size_t temp; checkCNMEM(cnmemMemGetInfo(&temp, &max_consume, NULL)); return true; } void NeuralNet::vDNNOptimize(size_t &exp_max_consume, size_t &max_consume) { bool hard = true, soft = false; // if type is vDNN_ALL or vDNN_CONV, check if sufficient space is available if (vdnn_type == vDNN_ALL) { setOffload(OFFLOAD_ALL); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } else if (vdnn_type == vDNN_CONV) { setOffload(OFFLOAD_CONV); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } else if (vdnn_type == vDNN_NONE) { setOffload(OFFLOAD_NONE); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } if (vdnn_type == vDNN_DYN) { // check for trainability std::cerr << "vDNN_DYN\n"; setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if(!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); // check if work with fastest algo and no offload, if so, select it and return setOffload(NeuralNet::OFFLOAD_NONE); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, NO OFFLOAD\n"; return; } // check if conv offload and fastest algo works, then check if all offload and 
fastest algo works setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, ALL OFFLOAD\n"; return; } // optimize using greedy algo memory usage while improving performance setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) { std::cerr << "Choosing GREEDY, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) { std::cerr << "Choosing GREEDY, ALL OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing MEM_OPT, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if(simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing MEM_OPT, ALL OFFLOAD\n"; return; } } exit(0); } void NeuralNet::setOffload(NeuralNet::OffloadType offload_type) { if (offload_type == OFFLOAD_NONE) { for (int i = 0; i < num_layers; i++) to_offload[i] = false; } else if (offload_type == OFFLOAD_CONV) { for (int i = 0; i < num_layers; i++) { if (layer_type[i] == CONV) to_offload[i] = true; else to_offload[i] = false; } // set last non SOFTMAX/ACTV layer to no_offload for (int i = num_layers - 1; i >= 0; i--) { if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV) ; else { to_offload[i] = false; break; } } } else if (offload_type == OFFLOAD_ALL) { for (int i = 0; i < num_layers; i++) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) to_offload[i] = false; else to_offload[i] = true; } // set last non SOFTMAX/ACTV layer to no_offload for (int i = num_layers - 1; i >= 0; i--) { if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV) ; else { to_offload[i] = false; break; } } } } void NeuralNet::resetPrefetched() { for (int i = 0; i < num_layers; i++) prefetched[i] = false; } void NeuralNet::getLoss(void *X, int *y, double learning_rate, bool train, int *correct_count, float *loss) { std::vector<float> t1, t2; this->getLoss(X, y, learning_rate, t1, t2, train, correct_count, loss); } void NeuralNet::lockedcnmemMalloc(void **p, size_t size, hipStream_t stream) { checkPMutexErrors(pthread_mutex_lock(&lock_cnmem_memory)); while (true) { cnmemStatus_t status = cnmemMalloc(p, size, stream); if (status == CNMEM_STATUS_SUCCESS) { break; } else if (status == CNMEM_STATUS_OUT_OF_MEMORY) { // std::cout << "locked cnmem malloc, waiting for memory\n"; checkPCondErrors(pthread_cond_wait(&cond_cnmem_available, &lock_cnmem_memory)); } } checkPMutexErrors(pthread_mutex_unlock(&lock_cnmem_memory)); } void NeuralNet::lockedcnmemFree(void *p, hipStream_t stream) { checkPMutexErrors(pthread_mutex_lock(&lock_cnmem_memory)); checkCNMEM(cnmemFree(p, stream)); checkPCondErrors(pthread_cond_broadcast(&cond_cnmem_available)); checkPMutexErrors(pthread_mutex_unlock(&lock_cnmem_memory)); } void NeuralNet::threadFreeLayerInputFunction(int layer_num) { checkCudaErrors(hipEventSynchronize(event_offload_done[layer_num])); lockedcnmemFree(layer_input[layer_num], NULL); 
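// At this point the hipEventSynchronize above has guaranteed that the asynchronous
// device-to-host offload recorded on stream_memory finished before the device buffer was
// returned to cnmem; the sem_post below signals getLoss(), which waits on sem_sync_offload
// for every offloaded layer before computing the loss and starting the backward pass.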
checkSemaphoreErrors(sem_post(&sem_sync_offload[layer_num])); // space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[layer_num] * data_type_size); } void *NeuralNet::threadFreeLayerInputHelper(void *arg) { PtrIndex *ptr_index = static_cast<PtrIndex *>(arg); NeuralNet *net = static_cast<NeuralNet *>(ptr_index->ptr); int index = ptr_index->index; net->threadFreeLayerInputFunction(index); return NULL; } void NeuralNet::threadFlagPrefetchDone(int layer_num) { checkCudaErrors(hipEventSynchronize(event_prefetch_done[layer_num])); checkSemaphoreErrors(sem_post(&sem_prefetch_done[layer_num])); } void *NeuralNet::threadFlagPrefetchDoneHelper(void *arg) { PtrIndex *ptr_index = static_cast<PtrIndex *>(arg); NeuralNet *net = static_cast<NeuralNet *>(ptr_index->ptr); int index = ptr_index->index; net->threadFlagPrefetchDone(index); return NULL; } void NeuralNet::getLoss(void *X, int *y, double learning_rate, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag, bool train, int *correct_count, float *scalar_loss) { CnmemSpace space_tracker(free_bytes); // std::cout << "here\n"; // std::cout << "Free bytes: " << free_bytes << std::endl; for (int i = 0; i < num_layers; i++) prefetched[i] = false; checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size); checkCudaErrors(hipMemcpy(layer_input[0], X, batch_size * input_channels * input_h * input_w * data_type_size, hipMemcpyHostToDevice)); if (train == true) { checkCudaErrors(hipMemcpy(this->y, y, batch_size * data_type_size, hipMemcpyHostToDevice)); } float alpha = 1.0, beta = 0.0; float Salpha = 1.0, Sbeta = 0.0; double Dalpha = 1.0, Dbeta = 0.0; // forward propagate for (int i = 0; i < num_layers; i++) { if (train == false && i == num_layers - 1) break; // ---------------------- vDNN start ---------------------- size_t cur_workspace_size; void *cur_workspace; // offload if required if (i > 0 && to_offload[i] && train == true) { checkCudaErrors(hipMemcpyAsync(h_layer_input[i], layer_input[i], layer_input_size[i] * data_type_size, hipMemcpyDeviceToHost, stream_memory)); checkCudaErrors(hipEventRecord(event_offload_done[i], stream_memory)); } lockedcnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // ---------------------- vDNN end ------------------------ // std::cout << "here" << i << std::endl; if (layer_type[i] == CONV) { // std::cout << "conv\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; lockedcnmemMalloc(&cur_workspace, cur_workspace_size, NULL); // computation checkCUDNN(cudnnConvolutionForward(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->filter_desc, cur_params->W, cur_params->conv_desc, cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta, cur_params->output_tensor, layer_input[i + 1])); checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha, cur_params->bias_desc, cur_params->b, &alpha, cur_params->output_tensor, layer_input[i + 1])); // if activation required if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, layer_input[i + 1])); } space_tracker.updateSpace(CnmemSpace::SUB, 
cur_workspace_size); // std::cout << "Free bytes: " << free_bytes << std::endl; } else if (layer_type[i] == FULLY_CONNECTED) { // std::cout << "FC\n"; FCLayerParams *cur_params = (FCLayerParams *)params[i]; // std::cout << "FChere" << i << std::endl; if (data_type == CUDNN_DATA_FLOAT) { checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, 1, &Salpha, (float *)cur_params->b, cur_params->C_out, (float *)one_vec, 1, &Salpha, (float *)layer_input[i + 1], cur_params->C_out)); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, 1, &Dalpha, (double *)cur_params->b, cur_params->C_out, (double *)one_vec, 1, &Dalpha, (double *)layer_input[i + 1], cur_params->C_out)); } if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, layer_input[i + 1])); } // std::cout << "FChere" << i << std::endl; } else if (layer_type[i] == DROPOUT) { // std::cout << "Dropout\n"; DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutForward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { // std::cout << "Batchnorm\n"; BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (train == true) { checkCUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle, cur_params->mode, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale, cur_params->bias, cur_params->factor, cur_params->running_mean, cur_params->running_variance, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); } else { checkCUDNN(cudnnBatchNormalizationForwardInference(cudnn_handle, cur_params->mode, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale, cur_params->bias, cur_params->running_mean, cur_params->running_variance, cur_params->epsilon)); } } else if (layer_type[i] == POOLING) { // std::cout << "Pooling\n"; PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingForward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->output_tensor, layer_input[i + 1])); } else if (layer_type[i] == ACTV) { // std::cout << "Actv\n"; std::cout << "Panic!! 
ACTV wrong place\n"; exit(0); ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } else if (layer_type[i] == SOFTMAX) { // std::cout << "Softmax\n"; std::cout << "Panic!! SOFTMAX wrong place\n"; exit(0); if (train == true) { SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } } // ---------------------- vDNN start ---------------------- // synchronization // checkCudaErrors(hipDeviceSynchronize()); // if next layer is ACTV or SOFTMAX, complete that and come to synchronization // the case in above if for ACTV and SOFTMAX never occurs if (layer_type[i + 1] == SOFTMAX) { i++; if (train == true) { layer_input[i + 1] = layer_input[i]; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } i--; } // struct timespec start_time, end_time; checkCudaErrors(hipStreamSynchronize(stream_compute)); if (train && to_offload[i]){ checkPThreadErrors(pthread_create(&thread_free_layer_input[i], NULL, NeuralNet::threadFreeLayerInputHelper, (void *)(&(layer_num[i])))); checkPThreadErrors(pthread_detach(thread_free_layer_input[i])); } // if (train) // clock_gettime(CLOCK_MONOTONIC, &start_time); // checkCudaErrors(hipStreamSynchronize(stream_memory)); // if (train) { // clock_gettime(CLOCK_MONOTONIC, &end_time); // float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6; // fwd_vdnn_lag.push_back(lag); // } if (layer_type[i] == CONV) { lockedcnmemFree(cur_workspace, NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); } if (to_offload[i] && train == true) { // lockedcnmemFree(layer_input[i], NULL); // space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); } if (train == false) { lockedcnmemFree(layer_input[i], NULL); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); } if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) { i = i + 1; } // ---------------------- vDNN end ------------------------ } // std::cout << "here" << std::endl; if (train == false) { compareOutputCorrect(correct_count, y); checkCNMEM(cnmemFree(layer_input[num_layers - 1], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[num_layers - 1] * data_type_size); return; } struct timespec start_time, end_time; clock_gettime(CLOCK_MONOTONIC, &start_time); for (int i = 0; i < num_layers; i++) { if (to_offload[i]) checkSemaphoreErrors(sem_wait(&sem_sync_offload[i])); } clock_gettime(CLOCK_MONOTONIC, &end_time); float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6; fwd_vdnn_lag.push_back(lag); *scalar_loss = computeLoss(); // ---------------------- vDNN start ---------------------- checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // ---------------------- vDNN end 
------------------------ if (layer_type[num_layers - 1] == SOFTMAX) { // SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers - 1]; if (data_type == CUDNN_DATA_FLOAT) { checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(float))); hipLaunchKernelGGL(( softmaxLossBackProp<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, this->y, (float *)layer_input[num_layers], (float *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(double))); hipLaunchKernelGGL(( softmaxLossBackProp<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, this->y, (double *)layer_input[num_layers], (double *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); } } for (int i = num_layers - 1; i >= 0; i--) { // ---------------------- vDNN start ---------------------- int cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size; void *cur_workspace; struct timespec start_time, end_time; clock_gettime(CLOCK_MONOTONIC, &start_time); if (to_offload[i]) checkSemaphoreErrors(sem_wait(&sem_prefetch_done[i])); clock_gettime(CLOCK_MONOTONIC, &end_time); float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6; bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag); // { // int n; // std::cout << "waiting..\n"; // std::cin >> n; // } if (i > 0) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) { dlayer_input[i] = dlayer_input[i + 1]; } else { int layer_to_prefetch = findPrefetchLayer(i); if (layer_to_prefetch != -1) { checkCNMEM(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; if (layer_to_prefetch != 0) { checkCudaErrors(hipMemcpyAsync(layer_input[layer_to_prefetch], h_layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, hipMemcpyHostToDevice, stream_memory)); } else { // std::cout << "transfer here\n"; checkCudaErrors(hipMemcpyAsync(layer_input[layer_to_prefetch], X, layer_input_size[layer_to_prefetch] * data_type_size, hipMemcpyHostToDevice, stream_memory)); // std::cout << "transfer here\n"; } checkCudaErrors(hipEventRecord(event_prefetch_done[layer_to_prefetch], stream_memory)); checkPThreadErrors(pthread_create(&thread_flag_prefetch_done[layer_to_prefetch], NULL, NeuralNet::threadFlagPrefetchDoneHelper, (void *)(&(layer_num[layer_to_prefetch])))); checkPThreadErrors(pthread_detach(thread_flag_prefetch_done[layer_to_prefetch])); } checkCNMEM(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size); } // std::cout << "Free bytes: " << free_bytes << std::endl; } // ---------------------- vDNN end ------------------------ if (layer_type[i] == CONV) { // std::cout << "here\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } // allocate space for derivative if 
(!pre_alloc_conv_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; // std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size; checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handle, &alpha, cur_params->output_tensor, dlayer_input[i + 1], &beta, cur_params->bias_desc, cur_params->db)); // std::cout << "neural_net: backward conv i:" << i << std::endl; checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_filter_algo, cur_workspace, cur_workspace_size, &beta, cur_params->filter_desc, cur_params->dW)); if (i > 0) checkCUDNN(cudnnConvolutionBackwardData(cudnn_handle, &alpha, cur_params->filter_desc, cur_params->W, cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_data_algo, cur_workspace, cur_workspace_size, &beta, cur_params->input_tensor, dlayer_input[i])); space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // std::cout << "here\n"; cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } if (!pre_alloc_fc_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); } if (data_type == CUDNN_DATA_FLOAT) { // bias backward checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, 1, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)one_vec, batch_size, &Sbeta, (float *)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, cur_params->C_in, batch_size, cur_params->C_out, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)dlayer_input[i + 1], cur_params->C_out, &Sbeta, (float *)dlayer_input[i], cur_params->C_in)); } else if (data_type == CUDNN_DATA_DOUBLE) { // bias backward checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, 1, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double 
*)one_vec, batch_size, &Dbeta, (double *)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, cur_params->C_in, batch_size, cur_params->C_out, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)dlayer_input[i + 1], cur_params->C_out, &Dbeta, (double *)dlayer_input[i], cur_params->C_in)); } cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == DROPOUT) { DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutBackward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); } checkCUDNN(cudnnBatchNormalizationBackward(cudnn_handle, cur_params->mode, &alpha, &beta, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], cur_params->sbmv_desc, cur_params->scale, cur_params->dscale, cur_params->dbias, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == POOLING) { PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); } else if (layer_type[i] == ACTV) { ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); continue; } else if (layer_type[i] == SOFTMAX) { // std::cout << "compute here\n"; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxBackward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], &beta, cur_params->input_tensor, dlayer_input[i])); // std::cout << "compute here\n"; continue; } // ---------------------- vDNN start ---------------------- // checkCudaErrors(hipDeviceSynchronize()); // struct timespec start_time, end_time; checkCudaErrors(hipStreamSynchronize(stream_compute)); // if (train) // clock_gettime(CLOCK_MONOTONIC, &start_time); // checkCudaErrors(hipStreamSynchronize(stream_memory)); // if (train) { // clock_gettime(CLOCK_MONOTONIC, &end_time); // float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - 
start_time.tv_nsec) * 1e-6; // bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag); // } if (layer_type[i] == CONV) { checkCNMEM(cnmemFree(cur_workspace, NULL)); space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); if (!pre_alloc_conv_derivative) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); } } else if (layer_type[i] == FULLY_CONNECTED) { if (!pre_alloc_fc_derivative) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); } } else if (layer_type[i] == BATCHNORM) { if (train == true and !pre_alloc_batch_norm_derivative) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); } } checkCNMEM(cnmemFree(layer_input[i + 1], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); if (i == 0) { checkCNMEM(cnmemFree(layer_input[i], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); } // ---------------------- vDNN end ------------------------ } if (space_tracker.getConsumed() != 0) { // std::cout << "Panic!! Space not updated properly\n"; } // exit(0); } int NeuralNet::findPrefetchLayer(int cur_layer) { for (int i = cur_layer - 1; i >= 0; i--) { if (to_offload[i] && !prefetched[i]) { prefetched[i] = true; return i; } else if (layer_type[i] == CONV) { return -1; } } return -1; }
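// findPrefetchLayer walks backwards from the current layer and returns the
// nearest earlier layer that is marked for offload but has not been prefetched
// yet; the search stops (returns -1) as soon as it reaches a convolution that
// does not qualify. For example, with layers {0: CONV, 1: POOLING, 2: CONV,
// 3: FULLY_CONNECTED} under OFFLOAD_CONV, the backward step at layer 3
// prefetches layer 2's input and the backward step at layer 2 prefetches
// layer 0's input.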
a913724c8038a96c90de1a35b35c21bb42878ebe.cu
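// Gradient note (a short sketch, assuming the mean negative log-likelihood loss
// that computeLoss() evaluates): with L = -(1/N) * sum_i log(O[i][y_i]), the only
// nonzero entry of dL/dO for sample i is
//     dL/dO[i][y_i] = -1 / (N * O[i][y_i]),
// which is the value softmaxLossBackProp writes below; softmax_eps is added to
// the denominator so the division stays finite when the predicted probability
// underflows to zero.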
#include "neural_net.h" #include <time.h> template <typename T> __global__ void softmaxLossBackProp(int *y, T *SO, T *dSO, int batch_size, int output_size, float eps) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; int cur_class = static_cast<int>(y[i]); dSO[i * output_size + cur_class] = -1 / (SO[i * output_size + cur_class] * batch_size + eps); } template <typename T> __global__ void computeSoftmaxLoss(T *O, int *y, float *loss, int batch_size, int num_classes, float eps) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; loss[i] = -logf(O[i * num_classes + y[i]] + eps); } template <typename T> __global__ void inferClass(T *O, int *pred_y, int batch_size, int num_classes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; T max = O[i * num_classes]; int index = 0; for (int j = 1; j < num_classes; j++) { if (O[i * num_classes + j] > max) { max = O[i * num_classes + j]; index = j; } } pred_y[i] = index; } float NeuralNet::computeLoss() { if (layer_type[num_layers - 1] == SOFTMAX) { if (data_type == CUDNN_DATA_FLOAT) computeSoftmaxLoss<float><<<ceil(1.0 * batch_size / BW), BW>>>((float *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps); else if (data_type == CUDNN_DATA_DOUBLE) computeSoftmaxLoss<double><<<ceil(1.0 * batch_size / BW), BW>>>((double *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps); } checkCudaErrors(cudaMemcpy(h_loss, loss, batch_size * sizeof(float), cudaMemcpyDeviceToHost)); float total_loss = 0.0; for (int i = 0; i < batch_size; i++) total_loss += h_loss[i]; return total_loss / batch_size; } void NeuralNet::compareOutputCorrect(int *correct_count, int *y) { *correct_count = 0; if (data_type == CUDNN_DATA_FLOAT) { float *typecast_O = (float *)layer_input[num_layers - 1]; inferClass<float><<<ceil(1.0 * batch_size / BW), BW>>>(typecast_O, pred_y, batch_size, num_classes); checkCudaErrors(cudaMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) { if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1; } } else if (data_type == CUDNN_DATA_DOUBLE) { double *typecast_O = (double *)layer_input[num_layers - 1]; inferClass<double><<<ceil(1.0 * batch_size / BW), BW>>>(typecast_O, pred_y, batch_size, num_classes); checkCudaErrors(cudaMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) { if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1; } } } NeuralNet::NeuralNet(std::vector<LayerSpecifier> &layers, DataType data_type, int batch_size, TensorFormat tensor_format, long long dropout_seed, float softmax_eps, float init_std_dev, vDNNType vdnn_type, vDNNConvAlgo vdnn_conv_algo, UpdateRule update_rule) { // ---------------------- vDNN start ---------------------- checkCudaErrors(cudaStreamCreate(&stream_compute)); checkCudaErrors(cudaStreamCreate(&stream_memory)); this->vdnn_type = vdnn_type; this->vdnn_conv_algo = vdnn_conv_algo; // ---------------------- vDNN end ------------------------ // create handle checkCUDNN(cudnnCreate(&cudnn_handle)); checkCUDNN(cudnnSetStream(cudnn_handle, stream_compute)); checkCUBLAS(cublasCreate(&cublas_handle)); checkCUBLAS(cublasSetStream(cublas_handle, stream_compute)); checkCURAND(curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT)); checkCURAND(curandSetStream(curand_gen, stream_compute)); checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes)); size_t 
init_free_bytes = free_bytes;
  std::cout << "Free bytes at start: " << free_bytes << std::endl;

  pre_alloc_conv_derivative = false;
  pre_alloc_fc_derivative = false;
  pre_alloc_batch_norm_derivative = true;

  if (vdnn_type == vDNN_NONE) {
    pre_alloc_conv_derivative = true;
    pre_alloc_fc_derivative = true;
    pre_alloc_batch_norm_derivative = true;
  }

  if (data_type == DATA_FLOAT) {
    this->data_type = CUDNN_DATA_FLOAT;
    data_type_size = sizeof(float);
  } else if (data_type == DATA_DOUBLE) {
    this->data_type = CUDNN_DATA_DOUBLE;
    data_type_size = sizeof(double);
  }

  if (tensor_format == TENSOR_NCHW)
    this->tensor_format = CUDNN_TENSOR_NCHW;
  else if (tensor_format == TENSOR_NHWC)
    this->tensor_format = CUDNN_TENSOR_NHWC;

  this->batch_size = batch_size;
  this->softmax_eps = softmax_eps;
  this->init_std_dev = init_std_dev;

  num_layers = layers.size();
  // allocation of space for input to each layer
  layer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
  layer_input_size = (int *)malloc((num_layers + 1) * sizeof(int));
  dlayer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
  params = (void **)malloc(num_layers * sizeof(void *));

  LayerDimension prev_output_size;
  LayerDimension current_output_size;
  for (int i = 0; i < num_layers; i++) {
    layer_type.push_back(layers[i].type);
    if (layers[i].type == CONV) {
      ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
      params[i] = malloc(sizeof(ConvLayerParams));
      ((ConvLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format, data_type_size, current_output_size, update_rule);
    } else if (layers[i].type == FULLY_CONNECTED) {
      FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
      params[i] = malloc(sizeof(FCLayerParams));
      ((FCLayerParams *)params[i])->initializeValues(user_params, batch_size, this->tensor_format, this->data_type, current_output_size, update_rule);
    } else if (layers[i].type == DROPOUT) {
      DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
      params[i] = malloc(sizeof(DropoutLayerParams));
      ((DropoutLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format, current_output_size);
    } else if (layers[i].type == BATCHNORM) {
      BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params;
      params[i] = malloc(sizeof(BatchNormLayerParams));
      ((BatchNormLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size, update_rule);
    } else if (layers[i].type == POOLING) {
      PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
      params[i] = malloc(sizeof(PoolingLayerParams));
      ((PoolingLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size);
    } else if (layers[i].type == ACTV) {
      ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params;
      params[i] = malloc(sizeof(ActivationLayerParams));
      ((ActivationLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size);
    } else if (layers[i].type == SOFTMAX) {
      SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
      params[i] = malloc(sizeof(SoftmaxLayerParams));
      ((SoftmaxLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size);
      // std::cout << current_output_size.N << ' ' << current_output_size.C << current_output_size.H << current_output_size.W <<
std::endl; } if (i == 0) { prev_output_size = current_output_size; } // incomplete - have to check flatten and check exact dimension // else if (current_output_size.getTotalSize() != prev_output_size.getTotalSize()) { // std::cout << "Layer " << i << " output and next layer's input size mismatch\n"; // exit(0); // } } // ---------------------- vDNN start ---------------------- // allocate space in host memory for layers to be transferred h_layer_input = (void **)malloc(num_layers * sizeof(void *)); to_offload = (bool *)malloc(num_layers * sizeof(bool)); prefetched = (bool *)malloc(num_layers * sizeof(bool)); // ---------------------- vDNN end ------------------------ checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes)); std::cout << "Free bytes just before allocate space: " << free_bytes << std::endl; // allocate space for parameters // Exception BatchNorm - looks like it will take lots of space if only FC layers - space taken = size of one input for (int i = 0; i < num_layers; i++) { size_t input_size; if (layers[i].type == CONV) { ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params; ((ConvLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev, free_bytes, pre_alloc_conv_derivative); input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w; if (i == 0) { input_channels = user_params->input_channels; input_h = user_params->input_h; input_w = user_params->input_w; } } else if (layers[i].type == FULLY_CONNECTED) { FCDescriptor *user_params = (FCDescriptor *)layers[i].params; ((FCLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev, free_bytes, pre_alloc_fc_derivative); input_size = batch_size * user_params->input_channels; if (i == 0) { input_channels = user_params->input_channels; input_h = 1; input_w = 1; } } else if (layers[i].type == DROPOUT) { DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params; ((DropoutLayerParams *)params[i])->allocateSpace(free_bytes, cudnn_handle, user_params, dropout_seed); input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } } else if (layers[i].type == BATCHNORM) { BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params; ((BatchNormLayerParams *)params[i])->allocateSpace(this->data_type, data_type_size, free_bytes, pre_alloc_batch_norm_derivative); input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } } else if (layers[i].type == POOLING) { PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params; ((PoolingLayerParams *)params[i])->allocateSpace(free_bytes); input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w; if (i == 0) { input_channels = user_params->input_channels; input_h = user_params->input_h; input_w = user_params->input_w; } } else if (layers[i].type == ACTV) { ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params; ((ActivationLayerParams *)params[i])->allocateSpace(free_bytes); input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } } else if (layers[i].type == SOFTMAX) { 
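// SOFTMAX is treated as the final layer: its channel count supplies num_classes,
// and layer_input_size[i + 1] is recorded here as well so the tensor holding the
// softmax output can later be sized from the same table.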
SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params; ((SoftmaxLayerParams *)params[i])->allocateSpace(free_bytes); input_size = batch_size * user_params->channels * user_params->h * user_params->w; // assuming this is last layer, allocate for next layer as well // checkCudaErrors(cudaMalloc(&layer_input[i + 1], input_size * data_type_size)); // checkCudaErrors(cudaMalloc(&dlayer_input[i + 1], input_size * data_type_size)); layer_input_size[i + 1] = input_size; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } if (i == num_layers - 1) { num_classes = user_params->channels; } } // do not allocate memory initially // checkCudaErrors(cudaMalloc(&layer_input[i], input_size * data_type_size)); // checkCudaErrors(cudaMalloc(&dlayer_input[i], input_size * data_type_size)); // ---------------------- vDNN start ---------------------- layer_input_size[i] = input_size; // ---------------------- vDNN end ------------------------ } checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes)); std::cout << "Free bytes just after allocate space: " << free_bytes << std::endl; // very small - could be allocated initially itself checkCudaErrors(cudaMalloc((void **)&y, batch_size * sizeof(int))); checkCudaErrors(cudaMalloc((void **)&pred_y, batch_size * sizeof(int))); checkCudaErrors(cudaMalloc((void **)&loss, batch_size * sizeof(float))); checkCudaErrors(cudaMalloc(&one_vec, batch_size * data_type_size)); if (this->data_type == CUDNN_DATA_FLOAT) fillValue<float><<<ceil(1.0 * batch_size / BW), BW>>>((float *)one_vec, batch_size, 1); else fillValue<double><<<ceil(1.0 * batch_size / BW), BW>>>((double *)one_vec, batch_size, 1); checkCudaErrors(cudaMallocHost((void **)&h_loss, batch_size * sizeof(float))); checkCudaErrors(cudaMallocHost((void **)&h_pred_y, batch_size * sizeof(int))); // do not allocate workspace initially // allocate space for workspace and also keep track of algo // size_t cur_workspace_size; // workspace_size = 0; // for (int i = 0; i < num_layers; i++) { // if (layers[i].type == CONV) { // ((ConvLayerParams *)params[i])->getWorkspaceSize(cur_workspace_size, free_bytes); // if (cur_workspace_size > workspace_size) // workspace_size = cur_workspace_size; // } // } // checkCudaErrors(cudaMalloc(&workspace, workspace_size)); // free_bytes = free_bytes - workspace_size; checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes)); // leave 600 MB and use the rest std::cout << "Free bytes: " << free_bytes << std::endl; free_bytes -= 1024 * 1024 * 600; // ---------------------- vDNN start ---------------------- size_t exp_max_consume, max_consume; vDNNOptimize(exp_max_consume, max_consume); std::cout << "actual_max_consume: " << max_consume << std::endl; std::cout << "exp_max_consume: " << exp_max_consume << std::endl; std::cout << "diff_max_consume(MB): " << (max_consume - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl; std::cout << "exp_free_bytes(MB): " << (free_bytes + 1024 * 1024 * 600 - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl; std::cout << "exp_total_consume(MB): " << (init_free_bytes - (free_bytes + 600 * 1024 * 1024 - exp_max_consume)) / (1.0 * 1024 * 1024) << std::endl; std::cout << "actual_total_consume(MB): " << (init_free_bytes - (free_bytes + 600 * 1024 * 1024 - max_consume)) / (1.0 * 1024 * 1024) << std::endl; // ---------------------- vDNN end ------------------------ // ---------------------- vDNN start 
---------------------- free_bytes = max_consume; cnmemDevice_t cnmem_device; size_t cnmem_stream_memory_size = free_bytes; cnmem_device.device = 0; cnmem_device.size = cnmem_stream_memory_size; cnmem_device.numStreams = 0; cnmem_device.streams = NULL; cnmem_device.streamSizes = NULL; // do not allow call to cudaMalloc checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW)); // ---------------------- vDNN end ------------------------ // ---------------------- vDNN start ---------------------- for (int i = 0; i < num_layers; i++) { std::cerr << "to_offload[i] " << to_offload[i] << std::endl; } for (int i = 0; i < num_layers; i++) { // allocate pinned memory in host if (to_offload[i]) checkCudaErrors(cudaMallocHost(&h_layer_input[i], layer_input_size[i] * data_type_size)); } // ---------------------- vDNN end ------------------------ checkCudaErrors(cudaDeviceSynchronize()); size_t temp_free_bytes; checkCudaErrors(cudaMemGetInfo(&temp_free_bytes, &total_bytes)); std::cout << "Free bytes just before end of NeuralNet: " << temp_free_bytes << std::endl; // { // int n; // std::cout << "waiting..\n"; // std::cin >> n; // } // ---------------------- vDNN ext start ------------------------ event_offload_done = (cudaEvent_t *)malloc(num_layers * sizeof(cudaEvent_t)); thread_free_layer_input = (pthread_t *)malloc(num_layers * sizeof(pthread_t)); sem_sync_offload = (sem_t *)malloc(num_layers * sizeof(sem_t)); event_prefetch_done = (cudaEvent_t *)malloc(num_layers * sizeof(cudaEvent_t)); thread_flag_prefetch_done = (pthread_t *)malloc(num_layers * sizeof(pthread_t)); sem_prefetch_done = (sem_t *)malloc(num_layers * sizeof(sem_t)); for (int i = 0; i < num_layers; i++) { if (to_offload[i]) { checkCudaErrors(cudaEventCreate(&event_offload_done[i])); checkSemaphoreErrors(sem_init(&sem_sync_offload[i], 0, 0)); checkCudaErrors(cudaEventCreate(&event_prefetch_done[i])); checkSemaphoreErrors(sem_init(&sem_prefetch_done[i], 0, 0)); } } checkPMutexErrors(pthread_mutex_init(&lock_cnmem_memory, NULL)); checkPCondErrors(pthread_cond_init(&cond_cnmem_available, NULL)); layer_num = (PtrIndex *)malloc(num_layers * sizeof(PtrIndex)); for (int i = 0; i < num_layers; i++) { layer_num[i].ptr = this; layer_num[i].index = i; } // ---------------------- vDNN ext end ------------------------ } bool NeuralNet::simulateNeuralNetworkMemory(vDNNConvAlgoPref algo_pref, bool hard, size_t &exp_max_consume, size_t &max_consume) { CnmemSpace space_tracker(free_bytes); max_consume = 0; // forward pass // allocate space for 1st input std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating input(MB): " << space_tracker.getConsumed() << std::endl; std::cerr << "Forward pass" << std::endl; for (int i = 0; i < num_layers; i++) { if (layer_type[i] == SOFTMAX) break; std::cerr << "Processing layer " << i << std::endl; std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size); std::cerr << "Used space after output allocation(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; long cur_workspace_size = cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::FWD, algo_pref, hard); 
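// The dry run mirrors the real pass's memory traffic: every allocation the
// training loop will make is modelled as CnmemSpace::SUB, every free as
// CnmemSpace::ADD, and updateMaxConsume() keeps the peak footprint. A workspace
// query returning -1 or an exhausted tracker rejects this offload/algorithm
// combination and the caller falls back to the next candidate.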
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); space_tracker.updateMaxConsume(max_consume); if (cur_workspace_size == -1 or !space_tracker.isAvailable()) return false; std::cerr << "Used space after workspace allocation(MB): " << space_tracker.getConsumed() << std::endl; // current layer computation over, deallocate workspace space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); std::cerr << "Used space after workspace deallocation(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; // deallocate layer input if (to_offload[i]) { std::cerr << "deallocating input to " << i << std::endl; space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl; } } std::cerr << "Backward pass" << std::endl; if (batch_size * num_classes * data_type_size != layer_input_size[num_layers] * data_type_size) { std::cout << "Panic!! Using wrong size\n"; exit(0); } // backward pass space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size); std::cerr << "Used space after allocating final derivative(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); // std::cerr << "max_consume: " << max_consume << std::endl; for (int i = num_layers - 1; i >= 0; i--) { // allocate space for previous layer derivative std::cerr << "Processing layer " << i << std::endl; std::cerr << "Used space initial(MB): " << space_tracker.getConsumed() << std::endl; if (i > 0) { if (layer_type[i] == SOFTMAX) continue; else { space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size); std::cerr << "Used space after allocating prev. derivative(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); } // std::cerr << "max_consume: " << max_consume << std::endl; } int layer_to_prefetch = findPrefetchLayer(i); // if layer to be prefetched, allocate space for that layer if (layer_to_prefetch != -1) { std::cerr << "Prefetch layer " << layer_to_prefetch << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size); std::cerr << "Used space after allocating prefetch(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); } if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; long cur_filter_workspace_size = cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_FILTER, algo_pref, hard); long cur_data_workspace_size = 0; if (i > 0) cur_data_workspace_size = cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_DATA, algo_pref, hard); long cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? 
cur_filter_workspace_size :cur_data_workspace_size; space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); std::cerr << "Used space after allocating workspace(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); if (!pre_alloc_conv_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } // std::cerr << "max_consume: " << max_consume << std::endl; if (cur_filter_workspace_size == -1 or cur_data_workspace_size == -1 or !space_tracker.isAvailable()) return false; // current layer computation over, deallocate workspace space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); std::cerr << "Used space after deallocating workspace(MB): " << space_tracker.getConsumed() << std::endl; if (!pre_alloc_conv_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (!pre_alloc_fc_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; if (!pre_alloc_fc_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; if (!pre_alloc_batch_norm_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } if (!space_tracker.isAvailable()) return false; // deallocate layer output and derivative space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); std::cerr << "Used space after deallocating output, derivative(MB): " << space_tracker.getConsumed() << std::endl; // if 1st layer, deallocate input layer also if (i == 0) { space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); std::cerr 
<< "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl; } } if (space_tracker.getConsumed() > 0) std::cerr << "Panic!! more free bytes\n"; if (space_tracker.getConsumed() != 0) std::cerr << "Panic!! bytes not freed properly\n"; // return true; exp_max_consume = max_consume; cnmemDevice_t cnmem_device; cnmem_device.device = 0; cnmem_device.size = max_consume; cnmem_device.numStreams = 0; cnmem_device.streams = NULL; cnmem_device.streamSizes = NULL; checkCNMEM(cnmemInit(1, &cnmem_device, 0)); // check with cnmem once bool ret_val = simulateCNMEMMemory(max_consume); checkCNMEM(cnmemFinalize()); return ret_val; } bool NeuralNet::simulateCNMEMMemory(size_t &max_consume) { resetPrefetched(); checkCNMEMRet(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL)); // forward propagate for (int i = 0; i < num_layers; i++) { size_t cur_workspace_size; void *cur_workspace; checkCNMEMRet(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL)); if (layer_type[i] == CONV) { // std::cout << "conv\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; checkCNMEMRet(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); } if (layer_type[i] == CONV) { checkCNMEMRet(cnmemFree(cur_workspace, NULL)); } if (to_offload[i]) { checkCNMEMRet(cnmemFree(layer_input[i], NULL)); } if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) { i = i + 1; } } checkCNMEMRet(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL)); for (int i = num_layers - 1; i >= 0; i--) { // ---------------------- vDNN start ---------------------- int cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size; void *cur_workspace; if (i > 0) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) { dlayer_input[i] = dlayer_input[i + 1]; } else { int layer_to_prefetch = findPrefetchLayer(i); if (layer_to_prefetch != -1) { checkCNMEMRet(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL)); } checkCNMEMRet(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL)); } } if (layer_type[i] == CONV) { // std::cout << "here\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; // allocate space for derivative if (!pre_alloc_conv_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? 
cur_filter_workspace_size : cur_data_workspace_size; checkCNMEMRet(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (!pre_alloc_fc_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } } else if (layer_type[i] == SOFTMAX) { // std::cout << "compute here\n"; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; continue; } if (layer_type[i] == CONV) { checkCNMEMRet(cnmemFree(cur_workspace, NULL)); if (!pre_alloc_conv_derivative) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } else if (layer_type[i] == FULLY_CONNECTED) { if (!pre_alloc_fc_derivative) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } else if (layer_type[i] == BATCHNORM) { if (!pre_alloc_batch_norm_derivative) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } checkCNMEMRet(cnmemFree(layer_input[i + 1], NULL)); checkCNMEMRet(cnmemFree(dlayer_input[i + 1], NULL)); if (i == 0) { checkCNMEMRet(cnmemFree(layer_input[i], NULL)); } } size_t temp; checkCNMEM(cnmemMemGetInfo(&temp, &max_consume, NULL)); return true; } void NeuralNet::vDNNOptimize(size_t &exp_max_consume, size_t &max_consume) { bool hard = true, soft = false; // if type is vDNN_ALL or vDNN_CONV, check if sufficient space is available if (vdnn_type == vDNN_ALL) { setOffload(OFFLOAD_ALL); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } else if (vdnn_type == vDNN_CONV) { setOffload(OFFLOAD_CONV); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } else if (vdnn_type == vDNN_NONE) { setOffload(OFFLOAD_NONE); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } if (vdnn_type == vDNN_DYN) { // check for trainability std::cerr << "vDNN_DYN\n"; setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if(!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); // check if work with fastest algo and no offload, if so, select it and return setOffload(NeuralNet::OFFLOAD_NONE); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, NO OFFLOAD\n"; return; } // check if conv offload and fastest algo works, then check if all offload and 
fastest algo works setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, ALL OFFLOAD\n"; return; } // optimize using greedy algo memory usage while improving performance setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) { std::cerr << "Choosing GREEDY, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) { std::cerr << "Choosing GREEDY, ALL OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing MEM_OPT, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if(simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing MEM_OPT, ALL OFFLOAD\n"; return; } } exit(0); } void NeuralNet::setOffload(NeuralNet::OffloadType offload_type) { if (offload_type == OFFLOAD_NONE) { for (int i = 0; i < num_layers; i++) to_offload[i] = false; } else if (offload_type == OFFLOAD_CONV) { for (int i = 0; i < num_layers; i++) { if (layer_type[i] == CONV) to_offload[i] = true; else to_offload[i] = false; } // set last non SOFTMAX/ACTV layer to no_offload for (int i = num_layers - 1; i >= 0; i--) { if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV) ; else { to_offload[i] = false; break; } } } else if (offload_type == OFFLOAD_ALL) { for (int i = 0; i < num_layers; i++) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) to_offload[i] = false; else to_offload[i] = true; } // set last non SOFTMAX/ACTV layer to no_offload for (int i = num_layers - 1; i >= 0; i--) { if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV) ; else { to_offload[i] = false; break; } } } } void NeuralNet::resetPrefetched() { for (int i = 0; i < num_layers; i++) prefetched[i] = false; } void NeuralNet::getLoss(void *X, int *y, double learning_rate, bool train, int *correct_count, float *loss) { std::vector<float> t1, t2; this->getLoss(X, y, learning_rate, t1, t2, train, correct_count, loss); } void NeuralNet::lockedcnmemMalloc(void **p, size_t size, cudaStream_t stream) { checkPMutexErrors(pthread_mutex_lock(&lock_cnmem_memory)); while (true) { cnmemStatus_t status = cnmemMalloc(p, size, stream); if (status == CNMEM_STATUS_SUCCESS) { break; } else if (status == CNMEM_STATUS_OUT_OF_MEMORY) { // std::cout << "locked cnmem malloc, waiting for memory\n"; checkPCondErrors(pthread_cond_wait(&cond_cnmem_available, &lock_cnmem_memory)); } } checkPMutexErrors(pthread_mutex_unlock(&lock_cnmem_memory)); } void NeuralNet::lockedcnmemFree(void *p, cudaStream_t stream) { checkPMutexErrors(pthread_mutex_lock(&lock_cnmem_memory)); checkCNMEM(cnmemFree(p, stream)); checkPCondErrors(pthread_cond_broadcast(&cond_cnmem_available)); checkPMutexErrors(pthread_mutex_unlock(&lock_cnmem_memory)); } void NeuralNet::threadFreeLayerInputFunction(int layer_num) { checkCudaErrors(cudaEventSynchronize(event_offload_done[layer_num])); lockedcnmemFree(layer_input[layer_num], NULL); 
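// Once the offload copy has completed (event_offload_done) and this layer's
// device input buffer has been returned to the pool, the semaphore below tells
// getLoss() that the buffer is gone; getLoss() waits on sem_sync_offload for
// every offloaded layer before it starts the backward pass.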
checkSemaphoreErrors(sem_post(&sem_sync_offload[layer_num])); // space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[layer_num] * data_type_size); } void *NeuralNet::threadFreeLayerInputHelper(void *arg) { PtrIndex *ptr_index = static_cast<PtrIndex *>(arg); NeuralNet *net = static_cast<NeuralNet *>(ptr_index->ptr); int index = ptr_index->index; net->threadFreeLayerInputFunction(index); return NULL; } void NeuralNet::threadFlagPrefetchDone(int layer_num) { checkCudaErrors(cudaEventSynchronize(event_prefetch_done[layer_num])); checkSemaphoreErrors(sem_post(&sem_prefetch_done[layer_num])); } void *NeuralNet::threadFlagPrefetchDoneHelper(void *arg) { PtrIndex *ptr_index = static_cast<PtrIndex *>(arg); NeuralNet *net = static_cast<NeuralNet *>(ptr_index->ptr); int index = ptr_index->index; net->threadFlagPrefetchDone(index); return NULL; } void NeuralNet::getLoss(void *X, int *y, double learning_rate, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag, bool train, int *correct_count, float *scalar_loss) { CnmemSpace space_tracker(free_bytes); // std::cout << "here\n"; // std::cout << "Free bytes: " << free_bytes << std::endl; for (int i = 0; i < num_layers; i++) prefetched[i] = false; checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size); checkCudaErrors(cudaMemcpy(layer_input[0], X, batch_size * input_channels * input_h * input_w * data_type_size, cudaMemcpyHostToDevice)); if (train == true) { checkCudaErrors(cudaMemcpy(this->y, y, batch_size * data_type_size, cudaMemcpyHostToDevice)); } float alpha = 1.0, beta = 0.0; float Salpha = 1.0, Sbeta = 0.0; double Dalpha = 1.0, Dbeta = 0.0; // forward propagate for (int i = 0; i < num_layers; i++) { if (train == false && i == num_layers - 1) break; // ---------------------- vDNN start ---------------------- size_t cur_workspace_size; void *cur_workspace; // offload if required if (i > 0 && to_offload[i] && train == true) { checkCudaErrors(cudaMemcpyAsync(h_layer_input[i], layer_input[i], layer_input_size[i] * data_type_size, cudaMemcpyDeviceToHost, stream_memory)); checkCudaErrors(cudaEventRecord(event_offload_done[i], stream_memory)); } lockedcnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // ---------------------- vDNN end ------------------------ // std::cout << "here" << i << std::endl; if (layer_type[i] == CONV) { // std::cout << "conv\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; lockedcnmemMalloc(&cur_workspace, cur_workspace_size, NULL); // computation checkCUDNN(cudnnConvolutionForward(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->filter_desc, cur_params->W, cur_params->conv_desc, cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta, cur_params->output_tensor, layer_input[i + 1])); checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha, cur_params->bias_desc, cur_params->b, &alpha, cur_params->output_tensor, layer_input[i + 1])); // if activation required if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, layer_input[i + 1])); } space_tracker.updateSpace(CnmemSpace::SUB, 
cur_workspace_size); // std::cout << "Free bytes: " << free_bytes << std::endl; } else if (layer_type[i] == FULLY_CONNECTED) { // std::cout << "FC\n"; FCLayerParams *cur_params = (FCLayerParams *)params[i]; // std::cout << "FChere" << i << std::endl; if (data_type == CUDNN_DATA_FLOAT) { checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, 1, &Salpha, (float *)cur_params->b, cur_params->C_out, (float *)one_vec, 1, &Salpha, (float *)layer_input[i + 1], cur_params->C_out)); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, 1, &Dalpha, (double *)cur_params->b, cur_params->C_out, (double *)one_vec, 1, &Dalpha, (double *)layer_input[i + 1], cur_params->C_out)); } if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, layer_input[i + 1])); } // std::cout << "FChere" << i << std::endl; } else if (layer_type[i] == DROPOUT) { // std::cout << "Dropout\n"; DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutForward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { // std::cout << "Batchnorm\n"; BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (train == true) { checkCUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle, cur_params->mode, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale, cur_params->bias, cur_params->factor, cur_params->running_mean, cur_params->running_variance, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); } else { checkCUDNN(cudnnBatchNormalizationForwardInference(cudnn_handle, cur_params->mode, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale, cur_params->bias, cur_params->running_mean, cur_params->running_variance, cur_params->epsilon)); } } else if (layer_type[i] == POOLING) { // std::cout << "Pooling\n"; PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingForward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->output_tensor, layer_input[i + 1])); } else if (layer_type[i] == ACTV) { // std::cout << "Actv\n"; std::cout << "Panic!! 
ACTV wrong place\n"; exit(0); ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } else if (layer_type[i] == SOFTMAX) { // std::cout << "Softmax\n"; std::cout << "Panic!! SOFTMAX wrong place\n"; exit(0); if (train == true) { SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } } // ---------------------- vDNN start ---------------------- // synchronization // checkCudaErrors(cudaDeviceSynchronize()); // if next layer is ACTV or SOFTMAX, complete that and come to synchronization // the case in above if for ACTV and SOFTMAX never occurs if (layer_type[i + 1] == SOFTMAX) { i++; if (train == true) { layer_input[i + 1] = layer_input[i]; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } i--; } // struct timespec start_time, end_time; checkCudaErrors(cudaStreamSynchronize(stream_compute)); if (train && to_offload[i]){ checkPThreadErrors(pthread_create(&thread_free_layer_input[i], NULL, NeuralNet::threadFreeLayerInputHelper, (void *)(&(layer_num[i])))); checkPThreadErrors(pthread_detach(thread_free_layer_input[i])); } // if (train) // clock_gettime(CLOCK_MONOTONIC, &start_time); // checkCudaErrors(cudaStreamSynchronize(stream_memory)); // if (train) { // clock_gettime(CLOCK_MONOTONIC, &end_time); // float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6; // fwd_vdnn_lag.push_back(lag); // } if (layer_type[i] == CONV) { lockedcnmemFree(cur_workspace, NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); } if (to_offload[i] && train == true) { // lockedcnmemFree(layer_input[i], NULL); // space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); } if (train == false) { lockedcnmemFree(layer_input[i], NULL); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); } if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) { i = i + 1; } // ---------------------- vDNN end ------------------------ } // std::cout << "here" << std::endl; if (train == false) { compareOutputCorrect(correct_count, y); checkCNMEM(cnmemFree(layer_input[num_layers - 1], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[num_layers - 1] * data_type_size); return; } struct timespec start_time, end_time; clock_gettime(CLOCK_MONOTONIC, &start_time); for (int i = 0; i < num_layers; i++) { if (to_offload[i]) checkSemaphoreErrors(sem_wait(&sem_sync_offload[i])); } clock_gettime(CLOCK_MONOTONIC, &end_time); float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6; fwd_vdnn_lag.push_back(lag); *scalar_loss = computeLoss(); // ---------------------- vDNN start ---------------------- checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // ---------------------- vDNN end 
------------------------ if (layer_type[num_layers - 1] == SOFTMAX) { // SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers - 1]; if (data_type == CUDNN_DATA_FLOAT) { checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(float))); softmaxLossBackProp<float><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (float *)layer_input[num_layers], (float *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(double))); softmaxLossBackProp<double><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (double *)layer_input[num_layers], (double *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); } } for (int i = num_layers - 1; i >= 0; i--) { // ---------------------- vDNN start ---------------------- int cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size; void *cur_workspace; struct timespec start_time, end_time; clock_gettime(CLOCK_MONOTONIC, &start_time); if (to_offload[i]) checkSemaphoreErrors(sem_wait(&sem_prefetch_done[i])); clock_gettime(CLOCK_MONOTONIC, &end_time); float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6; bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag); // { // int n; // std::cout << "waiting..\n"; // std::cin >> n; // } if (i > 0) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) { dlayer_input[i] = dlayer_input[i + 1]; } else { int layer_to_prefetch = findPrefetchLayer(i); if (layer_to_prefetch != -1) { checkCNMEM(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; if (layer_to_prefetch != 0) { checkCudaErrors(cudaMemcpyAsync(layer_input[layer_to_prefetch], h_layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, cudaMemcpyHostToDevice, stream_memory)); } else { // std::cout << "transfer here\n"; checkCudaErrors(cudaMemcpyAsync(layer_input[layer_to_prefetch], X, layer_input_size[layer_to_prefetch] * data_type_size, cudaMemcpyHostToDevice, stream_memory)); // std::cout << "transfer here\n"; } checkCudaErrors(cudaEventRecord(event_prefetch_done[layer_to_prefetch], stream_memory)); checkPThreadErrors(pthread_create(&thread_flag_prefetch_done[layer_to_prefetch], NULL, NeuralNet::threadFlagPrefetchDoneHelper, (void *)(&(layer_num[layer_to_prefetch])))); checkPThreadErrors(pthread_detach(thread_flag_prefetch_done[layer_to_prefetch])); } checkCNMEM(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size); } // std::cout << "Free bytes: " << free_bytes << std::endl; } // ---------------------- vDNN end ------------------------ if (layer_type[i] == CONV) { // std::cout << "here\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } // allocate space for derivative if (!pre_alloc_conv_derivative) { 
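// When derivatives are not pre-allocated, the weight and bias gradient buffers
// live in the cnmem pool only for the duration of this layer's backward step;
// they are freed again after stepParams() has applied the update.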
cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; // std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size; checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handle, &alpha, cur_params->output_tensor, dlayer_input[i + 1], &beta, cur_params->bias_desc, cur_params->db)); // std::cout << "neural_net: backward conv i:" << i << std::endl; checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_filter_algo, cur_workspace, cur_workspace_size, &beta, cur_params->filter_desc, cur_params->dW)); if (i > 0) checkCUDNN(cudnnConvolutionBackwardData(cudnn_handle, &alpha, cur_params->filter_desc, cur_params->W, cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_data_algo, cur_workspace, cur_workspace_size, &beta, cur_params->input_tensor, dlayer_input[i])); space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // std::cout << "here\n"; cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } if (!pre_alloc_fc_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); } if (data_type == CUDNN_DATA_FLOAT) { // bias backward checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, 1, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)one_vec, batch_size, &Sbeta, (float *)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, cur_params->C_in, batch_size, cur_params->C_out, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)dlayer_input[i + 1], cur_params->C_out, &Sbeta, (float *)dlayer_input[i], cur_params->C_in)); } else if (data_type == CUDNN_DATA_DOUBLE) { // bias backward checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, 1, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double *)one_vec, batch_size, &Dbeta, (double 
*)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, cur_params->C_in, batch_size, cur_params->C_out, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)dlayer_input[i + 1], cur_params->C_out, &Dbeta, (double *)dlayer_input[i], cur_params->C_in)); } cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == DROPOUT) { DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutBackward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); } checkCUDNN(cudnnBatchNormalizationBackward(cudnn_handle, cur_params->mode, &alpha, &beta, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], cur_params->sbmv_desc, cur_params->scale, cur_params->dscale, cur_params->dbias, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == POOLING) { PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); } else if (layer_type[i] == ACTV) { ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); continue; } else if (layer_type[i] == SOFTMAX) { // std::cout << "compute here\n"; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxBackward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], &beta, cur_params->input_tensor, dlayer_input[i])); // std::cout << "compute here\n"; continue; } // ---------------------- vDNN start ---------------------- // checkCudaErrors(cudaDeviceSynchronize()); // struct timespec start_time, end_time; checkCudaErrors(cudaStreamSynchronize(stream_compute)); // if (train) // clock_gettime(CLOCK_MONOTONIC, &start_time); // checkCudaErrors(cudaStreamSynchronize(stream_memory)); // if (train) { // clock_gettime(CLOCK_MONOTONIC, &end_time); // float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6; // 
bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag); // } if (layer_type[i] == CONV) { checkCNMEM(cnmemFree(cur_workspace, NULL)); space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); if (!pre_alloc_conv_derivative) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); } } else if (layer_type[i] == FULLY_CONNECTED) { if (!pre_alloc_fc_derivative) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); } } else if (layer_type[i] == BATCHNORM) { if (train == true and !pre_alloc_batch_norm_derivative) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); } } checkCNMEM(cnmemFree(layer_input[i + 1], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); if (i == 0) { checkCNMEM(cnmemFree(layer_input[i], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); } // ---------------------- vDNN end ------------------------ } if (space_tracker.getConsumed() != 0) { // std::cout << "Panic!! Space not updated properly\n"; } // exit(0); } int NeuralNet::findPrefetchLayer(int cur_layer) { for (int i = cur_layer - 1; i >= 0; i--) { if (to_offload[i] && !prefetched[i]) { prefetched[i] = true; return i; } else if (layer_type[i] == CONV) { return -1; } } return -1; }
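The training pass above overlaps cuDNN/cuBLAS work on one CUDA stream with activation offload and prefetch on a second stream, coordinated through events and host-side synchronization. Below is a minimal, self-contained sketch of that two-stream offload/prefetch pattern; it is not the code above, and the buffer names (d_act, h_act) and the 1 MB size are invented for illustration.

#include <cuda_runtime.h>
#include <cstdio>

#define CHECK(call)                                                   \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      printf("CUDA error: %s\n", cudaGetErrorString(err_));           \
      return 1;                                                       \
    }                                                                 \
  } while (0)

int main() {
  const size_t bytes = 1 << 20;           // illustrative activation size
  float *d_act = NULL;                    // device activations of one layer
  float *h_act = NULL;                    // pinned host buffer for offload
  CHECK(cudaMalloc(&d_act, bytes));
  CHECK(cudaMallocHost(&h_act, bytes));

  cudaStream_t stream_compute, stream_memory;
  CHECK(cudaStreamCreate(&stream_compute));
  CHECK(cudaStreamCreate(&stream_memory));
  cudaEvent_t offload_done;
  CHECK(cudaEventCreateWithFlags(&offload_done, cudaEventDisableTiming));

  // Forward pass: copy activations device -> pinned host on the memory stream
  // while later layers keep running on the compute stream.
  CHECK(cudaMemcpyAsync(h_act, d_act, bytes, cudaMemcpyDeviceToHost, stream_memory));
  CHECK(cudaEventRecord(offload_done, stream_memory));

  // The device buffer may only be reused or freed once the offload finished;
  // make the compute stream wait on the event instead of blocking the host.
  CHECK(cudaStreamWaitEvent(stream_compute, offload_done, 0));

  // Backward pass: prefetch activations pinned host -> device before the
  // layer's gradients are needed, then wait for the transfer.
  CHECK(cudaMemcpyAsync(d_act, h_act, bytes, cudaMemcpyHostToDevice, stream_memory));
  CHECK(cudaStreamSynchronize(stream_memory));

  CHECK(cudaEventDestroy(offload_done));
  CHECK(cudaStreamDestroy(stream_compute));
  CHECK(cudaStreamDestroy(stream_memory));
  CHECK(cudaFreeHost(h_act));
  CHECK(cudaFree(d_act));
  printf("offload/prefetch round trip completed\n");
  return 0;
}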
a52238518b0c0c0a410bf4e671f94c4420eb693e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorMathBlas.hip" #else #include "ATen/hip/HIPContext.h" #include <ATen/NamedTensorUtils.h> #define ERROR_ONLY_FP_TYPES(func) \ THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func); accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src) { at::NoNamesGuard guard; if ( (THTensor_nDimension(self) != 1) || (THTensor_nDimension(src) != 1) ) { THError("1D tensors expected, got %dD, %dD tensors", THTensor_nDimension(self), THTensor_nDimension(src)); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); THArgCheck(THCTensor_(nElement)(state, self) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); self = THCTensor_(newContiguous)(state, self); src = THCTensor_(newContiguous)(state, src); #ifdef THC_REAL_IS_FLOAT accreal result = THCudaBlas_Sdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_DOUBLE) accreal result = THCudaBlas_Ddot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_HALF) accreal result = THCudaBlas_Hdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #endif THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; #else ERROR_ONLY_FP_TYPES("dot"); return ScalarConvert<int, accreal>::to(0); #endif } static void THCTensor_(addmvImpl)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *mat, THCTensor *vec, scalar_t beta, scalar_t alpha) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec)); if( (mat->dim() != 2) || (THTensor_nDimension(vec) != 1) ) THError("2D tensor and 1D tensor expected, got %dD, %dD tensors", mat->dim(), THTensor_nDimension(vec)); auto vec_size = THTensor_sizeLegacyNoScalars(vec, 0); auto vec_stride = THTensor_strideLegacyNoScalars(vec, 0); if( mat->size(1) != THTensor_sizeLegacyNoScalars(vec, 0) ) THError("size mismatch"); if(t->dim() != 1) THError("size mismatch"); if(THTensor_sizeLegacyNoScalars(t, 0) != mat->size(0)) THError("size mismatch"); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if(r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } auto r_stride = THTensor_strideLegacyNoScalars(r_, 0); if(mat->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif } else if(mat->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 
't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif } else { THCTensor *cmat = THCTensor_(newContiguous)(state, mat); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif THCTensor_(free)(state, cmat); } // In hipblasSgemv, hipblasDgemv (x,0).mv(0) does not // handle beta, whereas hipblasSgemm, hipblasDgemm do for case where (x,0).mm(0,y). if (THTensor_sizeLegacyNoScalars(vec, 0) == 0 && mat->size(0) != 0) { if(THCNumerics<scalar_t>::eq(beta, ScalarConvert<int, scalar_t>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<scalar_t>::ne(beta, ScalarConvert<int, scalar_t>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } } #elif defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16) // Currently no Hgemv/SgemvEx in Cublas THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec); THCTensor_(resize2d)(state, vecAsMatrix, vec_size, 1); THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t); THCTensor_(resize2d)(state, tAsMatrix, THTensor_sizeLegacyNoScalars(tAsMatrix, 0), 1); THCTensor_(addmm)(state, r_, tAsMatrix, mat, vecAsMatrix, beta, alpha); // r_ will have answer as matrix, need to return a vector THCTensor_(resize1d)(state, r_, THTensor_sizeLegacyNoScalars(r_, 0)); THCTensor_(free)(state, vecAsMatrix); THCTensor_(free)(state, tAsMatrix); #endif #else ERROR_ONLY_FP_TYPES("addmv"); #endif } void THCTensor_(addmv)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *mat, THCTensor *vec, scalar_t beta, scalar_t alpha) { { at::NoNamesGuard guard; THCTensor_(addmvImpl)(state, r_, t, mat, vec, beta, alpha); } at::namedinference::propagate_names_for_addmv(r_, mat, vec, t); } void THCTensor_(addr)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *vec1, THCTensor *vec2, scalar_t beta, scalar_t alpha) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2)); if ( (THTensor_nDimension(vec1) != 1) || (THTensor_nDimension(vec2) != 1) ) { THError("1D tensors expected, got %dD, %dD tensors", THTensor_nDimension(vec1), THTensor_nDimension(vec2)); } auto vec1_size = THTensor_sizeLegacyNoScalars(vec1, 0); auto vec2_size = THTensor_sizeLegacyNoScalars(vec2, 0); auto vec1_stride = THTensor_strideLegacyNoScalars(vec1, 0); auto vec2_stride = THTensor_strideLegacyNoScalars(vec2, 0); if (t->dim() != 2) { THError("size mismatch"); } if ( (t->size(0) != vec1_size) || (t->size(1) != vec2_size) ) { THError("size mismatch"); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if (r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(THCNumerics<scalar_t>::eq(beta, ScalarConvert<int, scalar_t>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<scalar_t>::ne(beta, ScalarConvert<int, scalar_t>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } if(r_->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec1_size, vec2_size, alpha, THCTensor_(data)(state, 
vec1), vec1_stride, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, r_), r_->stride(1)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec1_size, vec2_size, alpha, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, r_), r_->stride(1)); #endif } else if(r_->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, r_), r_->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, r_), r_->stride(0)); #endif } else { THCTensor *cr = THCTensor_(newClone)(state, r_); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, cr), cr->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, cr), cr->stride(0)); #endif THCTensor_(freeCopyTo)(state, cr, r_); } #elif defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16) // currently no Hger/SgerEx in Cublas. THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2); THCTensor_(resize2d)(state, vec2T, vec2_size, 1); THCTensor_(transpose)(state, vec2T, NULL, 0, 1); THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1); THCTensor_(resize2d)(state, vec1M, vec1_size, 1); THCTensor_(addmm)(state, r_, t, vec1M, vec2T, beta, alpha); THCTensor_(free)(state, vec2T); THCTensor_(free)(state, vec1M); #endif #else ERROR_ONLY_FP_TYPES("addr"); #endif } static void THCTensor_(addmmImpl)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *m1, THCTensor *m2, scalar_t beta, scalar_t alpha) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2)); char transpose_r, transpose_m1, transpose_m2; THCTensor *r__, *m1_, *m2_; if( (m1->dim() != 2) || (m2->dim() != 2) ) THError("2D tensors expected, got %dD, %dD tensors", m1->dim(), m2->dim()); if(t->dim() != 2) THError("2D tensor expected, got %dD tensor for t", t->dim()); if(m1->size(1) != m2->size(0)) { THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); } if( (t->size(0) != m1->size(0)) || (t->size(1) != m2->size(1)) ) { THCDescBuff bt = THCTensor_(sizeDesc)(state, t); THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); } if(t != r_) { THCTensor_(resizeAs)(state, r_, t); if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) { THCTensor_(copy)(state, r_, t); } } if((r_->size(0) == 0) || (r_->size(1) == 0)) { return; } /* r_ */ if(r_->stride(0) == 1 && r_->stride(1) != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride(1) == 1 && r_->stride(0) != 0) { THCTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, 
transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* m1 */ if(m1->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m1->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m1->stride((transpose_r == 'n' ? 0 : 1)) != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THCTensor_(newContiguous)(state, m1); } /* m2 */ if(m2->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m2->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m2->stride((transpose_r == 'n' ? 0 : 1)) != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); m2_ = THCTensor_(newContiguous)(state, m2); } #ifdef THC_REAL_IS_HALF THCudaBlas_Hgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_BFLOAT16) #if defined(__HIP_PLATFORM_HCC__) THCudaBlas_Bgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #else TORCH_CHECK(false, "Bgemm not supported on at::BFloat16 type"); #endif // __HIP_PLATFORM_HCC__ #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 
1 : 0))); #endif /* free intermediate variables */ if(m1_ != m1) { THCTensor_(free)(state, m1_); } if(m2_ != m2) { THCTensor_(free)(state, m2_); } if(r__ != r_) { THCTensor_(freeCopyTo)(state, r__, r_); } #else ERROR_ONLY_FP_TYPES("addmm"); #endif } void THCTensor_(addmm)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *m1, THCTensor *m2, scalar_t beta, scalar_t alpha) { { at::NoNamesGuard guard; THCTensor_(addmmImpl)(state, r_, t, m1, m2, beta, alpha); } at::namedinference::propagate_names_for_addmm(r_, m1, m2, t); } void THCTensor_(addbmm)(THCState *state, THCTensor *result, THCTensor *t, THCTensor *batch1, THCTensor *batch2, scalar_t beta, scalar_t alpha) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 2, 4, "expected 2D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor"); int64_t batchnum = THCTensor_(size)(state, batch1, 0); int64_t m1d1 = THCTensor_(size)(state, batch1, 1); int64_t innerdim = THCTensor_(size)(state, batch1, 2); int64_t m2d2 = THCTensor_(size)(state, batch2, 2); THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); // M is t, as listed in the docs under addbmm THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6, "first dimension must match first dimension of M"); THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7, "second dimension must match second dimension of M"); THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6, "second dimension must match first dimension of batch2"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } THCTensor *slice1 = THCTensor_(new)(state); THCTensor *slice2 = THCTensor_(new)(state); for (int64_t i=0; i<batchnum; i++) { THCTensor_(select)(state, slice1, batch1, 0, i); THCTensor_(select)(state, slice2, batch2, 0, i); THCTensor_(addmm)(state, result, result, slice1, slice2, beta, alpha); beta = ScalarConvert<int, scalar_t>::to(1); } THCTensor_(free)(state, slice1); THCTensor_(free)(state, slice2); #else ERROR_ONLY_FP_TYPES("addbmm"); #endif } __global__ void createBatchGemmBuffer(const scalar_t** buffer, scalar_t* data, int64_t stride, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer[idx] = data + idx * stride; } } __global__ void createBatchGemmBuffer3(const scalar_t** buffer1, const scalar_t ** buffer2, const scalar_t ** buffer3, scalar_t* data1, scalar_t * data2, scalar_t * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer1[idx] = data1 + idx * stride1; buffer2[idx] = data2 + idx * stride2; buffer3[idx] = data3 + idx * stride3; } } void THCTensor_(baddbmm)(THCState *state, THCTensor *result, THCTensor *t, THCTensor *batch1, THCTensor *batch2, scalar_t beta, scalar_t alpha) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 
3, 4, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); auto maybe_outnames = at::namedinference::compute_baddbmm_outnames(result, batch1, batch2, t); { at::NoNamesGuard guard; THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6, "wrong matrix size"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } bool transpose_result; char transpose_batch1, transpose_batch2; int64_t lda, ldb, ldc; THCTensor *result_, *batch1_, *batch2_; if (result->stride(1) == 1) { transpose_result = false; result_ = result; ldc = result_->stride(2); } else if (result->stride(2) == 1) { transpose_result = true; THCTensor *swap = batch2; batch2 = batch1; batch1 = swap; result_ = result; ldc = result_->stride(1); } else { transpose_result = false; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2); result_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, result_, NULL, 1, 2); ldc = result_->stride(2); } if (batch1->stride(transpose_result ? 2 : 1) == 1 && batch1->stride(transpose_result ? 1 : 2) != 0) { transpose_batch1 = 'n'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 1 : 2); } else if (batch1->stride(transpose_result ? 1 : 2) == 1 && batch1->stride(transpose_result ? 2 : 1) != 0) { transpose_batch1 = 't'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 2 : 1); } else { transpose_batch1 = transpose_result ? 'n' : 't'; // batch1_ is later freed if batch1_ != batch1 if (THCTensor_(isContiguous)(state, batch1)) { batch1_ = batch1; } else { batch1_ = THCTensor_(newContiguous)(state, batch1); } lda = batch1_->stride(1); } if (batch2->stride(transpose_result ? 2 : 1) == 1 && batch2->stride(transpose_result ? 1 : 2) != 0) { transpose_batch2 = 'n'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 1 : 2); } else if (batch2->stride(transpose_result ? 1 : 2) == 1 && batch2->stride(transpose_result ? 2 : 1) != 0) { transpose_batch2 = 't'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 2 : 1); } else { transpose_batch2 = transpose_result ? 'n' : 't'; // batch2_ is later freed if batch2_ != batch2 if (THCTensor_(isContiguous)(state, batch2)) { batch2_ = batch2; } else { batch2_ = THCTensor_(newContiguous)(state, batch2); } ldb = batch2_->stride(1); } int64_t num_batches = result_->size(0); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) // Compute pointers to matrices in each batch. #if TORCH_HIP_VERSION < 8000 && !defined __HIP_PLATFORM_HCC__ size_t matrices_size = num_batches * sizeof(scalar_t*); // Copy pointers to device. 
auto d_matrices1 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size)); auto d_matrices2 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size)); auto d_result_matrices = static_cast<scalar_t**>(THCudaMalloc(state, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer3), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), d_matrices1, d_matrices2, (const scalar_t**)d_result_matrices, THCTensor_(data)(state, batch1_), THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_), batch1_->stride(0), batch2_->stride(0), result_->stride(0), num_batches); #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #endif //THC_REAL THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCudaFree(state, d_result_matrices); #else #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #endif //THC_REAL #endif //TORCH_HIP_VERSION #elif defined(THC_REAL_IS_HALF) #if TORCH_HIP_VERSION < 9010 // Currently no HgemmBatched in Cublas for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } #else #ifndef __HIP_PLATFORM_HCC__ hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties(); if (prop->major >= 5){ #endif THCudaBlas_HgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 
1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #ifndef __HIP_PLATFORM_HCC__ } else { for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } } #endif #endif //TORCH_HIP_VERSION #elif defined(THC_REAL_IS_BFLOAT16) #if defined(__HIP_PLATFORM_HCC__) THCudaBlas_BgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #else TORCH_CHECK(false, "BgemmStridedBatched is not supported with at::BFloat16 type"); #endif // __HIP_PLATFORM_HCC__ #endif if (batch1_ != batch1) { THCTensor_(free)(state, batch1_); } if (batch2_ != batch2) { THCTensor_(free)(state, batch2_); } if (result_ != result) { THCTensor_(freeCopyTo)(state, result_, result); } } at::namedinference::propagate_names_if_nonempty(result, maybe_outnames); #else ERROR_ONLY_FP_TYPES("baddbmm"); #endif } #endif
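The hipified source above replaces CUDA's triple-chevron kernel launches with the hipLaunchKernelGGL macro; the CUDA original of the same file follows below. A minimal sketch of that correspondence, using an invented fill kernel rather than anything from this file:

#include <cuda_runtime.h>
#include <cstdio>

__global__ void fill(float *out, float value, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = value;
}

int main() {
  const int n = 1024;
  float *d_out = NULL;
  cudaMalloc(&d_out, n * sizeof(float));

  const int block = 256;
  const int grid = (n + block - 1) / block;

  // CUDA launch syntax, as in the .cu half of this pair:
  fill<<<grid, block, 0, 0>>>(d_out, 1.0f, n);
  // hipify rewrites such launches into the HIP macro form, roughly:
  //   hipLaunchKernelGGL(fill, dim3(grid), dim3(block), 0, 0, d_out, 1.0f, n);

  cudaDeviceSynchronize();
  float h_first = 0.0f;
  cudaMemcpy(&h_first, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("first element: %f\n", h_first);  // expected 1.000000
  cudaFree(d_out);
  return 0;
}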
a52238518b0c0c0a410bf4e671f94c4420eb693e.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorMathBlas.cu" #else #include "ATen/cuda/CUDAContext.h" #include <ATen/NamedTensorUtils.h> #define ERROR_ONLY_FP_TYPES(func) \ THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func); accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src) { at::NoNamesGuard guard; if ( (THTensor_nDimension(self) != 1) || (THTensor_nDimension(src) != 1) ) { THError("1D tensors expected, got %dD, %dD tensors", THTensor_nDimension(self), THTensor_nDimension(src)); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); THArgCheck(THCTensor_(nElement)(state, self) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); self = THCTensor_(newContiguous)(state, self); src = THCTensor_(newContiguous)(state, src); #ifdef THC_REAL_IS_FLOAT accreal result = THCudaBlas_Sdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_DOUBLE) accreal result = THCudaBlas_Ddot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_HALF) accreal result = THCudaBlas_Hdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #endif THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; #else ERROR_ONLY_FP_TYPES("dot"); return ScalarConvert<int, accreal>::to(0); #endif } static void THCTensor_(addmvImpl)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *mat, THCTensor *vec, scalar_t beta, scalar_t alpha) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec)); if( (mat->dim() != 2) || (THTensor_nDimension(vec) != 1) ) THError("2D tensor and 1D tensor expected, got %dD, %dD tensors", mat->dim(), THTensor_nDimension(vec)); auto vec_size = THTensor_sizeLegacyNoScalars(vec, 0); auto vec_stride = THTensor_strideLegacyNoScalars(vec, 0); if( mat->size(1) != THTensor_sizeLegacyNoScalars(vec, 0) ) THError("size mismatch"); if(t->dim() != 1) THError("size mismatch"); if(THTensor_sizeLegacyNoScalars(t, 0) != mat->size(0)) THError("size mismatch"); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if(r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } auto r_stride = THTensor_strideLegacyNoScalars(r_, 0); if(mat->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif } else if(mat->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), 
THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif } else { THCTensor *cmat = THCTensor_(newContiguous)(state, mat); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif THCTensor_(free)(state, cmat); } // In cublasSgemv, cublasDgemv (x,0).mv(0) does not // handle beta, whereas cublasSgemm, cublasDgemm do for case where (x,0).mm(0,y). if (THTensor_sizeLegacyNoScalars(vec, 0) == 0 && mat->size(0) != 0) { if(THCNumerics<scalar_t>::eq(beta, ScalarConvert<int, scalar_t>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<scalar_t>::ne(beta, ScalarConvert<int, scalar_t>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } } #elif defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16) // Currently no Hgemv/SgemvEx in Cublas THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec); THCTensor_(resize2d)(state, vecAsMatrix, vec_size, 1); THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t); THCTensor_(resize2d)(state, tAsMatrix, THTensor_sizeLegacyNoScalars(tAsMatrix, 0), 1); THCTensor_(addmm)(state, r_, tAsMatrix, mat, vecAsMatrix, beta, alpha); // r_ will have answer as matrix, need to return a vector THCTensor_(resize1d)(state, r_, THTensor_sizeLegacyNoScalars(r_, 0)); THCTensor_(free)(state, vecAsMatrix); THCTensor_(free)(state, tAsMatrix); #endif #else ERROR_ONLY_FP_TYPES("addmv"); #endif } void THCTensor_(addmv)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *mat, THCTensor *vec, scalar_t beta, scalar_t alpha) { { at::NoNamesGuard guard; THCTensor_(addmvImpl)(state, r_, t, mat, vec, beta, alpha); } at::namedinference::propagate_names_for_addmv(r_, mat, vec, t); } void THCTensor_(addr)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *vec1, THCTensor *vec2, scalar_t beta, scalar_t alpha) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2)); if ( (THTensor_nDimension(vec1) != 1) || (THTensor_nDimension(vec2) != 1) ) { THError("1D tensors expected, got %dD, %dD tensors", THTensor_nDimension(vec1), THTensor_nDimension(vec2)); } auto vec1_size = THTensor_sizeLegacyNoScalars(vec1, 0); auto vec2_size = THTensor_sizeLegacyNoScalars(vec2, 0); auto vec1_stride = THTensor_strideLegacyNoScalars(vec1, 0); auto vec2_stride = THTensor_strideLegacyNoScalars(vec2, 0); if (t->dim() != 2) { THError("size mismatch"); } if ( (t->size(0) != vec1_size) || (t->size(1) != vec2_size) ) { THError("size mismatch"); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if (r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(THCNumerics<scalar_t>::eq(beta, ScalarConvert<int, scalar_t>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<scalar_t>::ne(beta, ScalarConvert<int, scalar_t>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } if(r_->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec1_size, vec2_size, alpha, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, 
r_), r_->stride(1)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec1_size, vec2_size, alpha, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, r_), r_->stride(1)); #endif } else if(r_->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, r_), r_->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, r_), r_->stride(0)); #endif } else { THCTensor *cr = THCTensor_(newClone)(state, r_); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, cr), cr->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, cr), cr->stride(0)); #endif THCTensor_(freeCopyTo)(state, cr, r_); } #elif defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16) // currently no Hger/SgerEx in Cublas. THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2); THCTensor_(resize2d)(state, vec2T, vec2_size, 1); THCTensor_(transpose)(state, vec2T, NULL, 0, 1); THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1); THCTensor_(resize2d)(state, vec1M, vec1_size, 1); THCTensor_(addmm)(state, r_, t, vec1M, vec2T, beta, alpha); THCTensor_(free)(state, vec2T); THCTensor_(free)(state, vec1M); #endif #else ERROR_ONLY_FP_TYPES("addr"); #endif } static void THCTensor_(addmmImpl)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *m1, THCTensor *m2, scalar_t beta, scalar_t alpha) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2)); char transpose_r, transpose_m1, transpose_m2; THCTensor *r__, *m1_, *m2_; if( (m1->dim() != 2) || (m2->dim() != 2) ) THError("2D tensors expected, got %dD, %dD tensors", m1->dim(), m2->dim()); if(t->dim() != 2) THError("2D tensor expected, got %dD tensor for t", t->dim()); if(m1->size(1) != m2->size(0)) { THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); } if( (t->size(0) != m1->size(0)) || (t->size(1) != m2->size(1)) ) { THCDescBuff bt = THCTensor_(sizeDesc)(state, t); THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); } if(t != r_) { THCTensor_(resizeAs)(state, r_, t); if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) { THCTensor_(copy)(state, r_, t); } } if((r_->size(0) == 0) || (r_->size(1) == 0)) { return; } /* r_ */ if(r_->stride(0) == 1 && r_->stride(1) != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride(1) == 1 && r_->stride(0) != 0) { THCTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* m1 */ 
if(m1->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m1->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m1->stride((transpose_r == 'n' ? 0 : 1)) != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THCTensor_(newContiguous)(state, m1); } /* m2 */ if(m2->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m2->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m2->stride((transpose_r == 'n' ? 0 : 1)) != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); m2_ = THCTensor_(newContiguous)(state, m2); } #ifdef THC_REAL_IS_HALF THCudaBlas_Hgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_BFLOAT16) #if defined(__HIP_PLATFORM_HCC__) THCudaBlas_Bgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #else TORCH_CHECK(false, "Bgemm not supported on at::BFloat16 type"); #endif // __HIP_PLATFORM_HCC__ #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 
1 : 0))); #endif /* free intermediate variables */ if(m1_ != m1) { THCTensor_(free)(state, m1_); } if(m2_ != m2) { THCTensor_(free)(state, m2_); } if(r__ != r_) { THCTensor_(freeCopyTo)(state, r__, r_); } #else ERROR_ONLY_FP_TYPES("addmm"); #endif } void THCTensor_(addmm)(THCState *state, THCTensor *r_, THCTensor *t, THCTensor *m1, THCTensor *m2, scalar_t beta, scalar_t alpha) { { at::NoNamesGuard guard; THCTensor_(addmmImpl)(state, r_, t, m1, m2, beta, alpha); } at::namedinference::propagate_names_for_addmm(r_, m1, m2, t); } void THCTensor_(addbmm)(THCState *state, THCTensor *result, THCTensor *t, THCTensor *batch1, THCTensor *batch2, scalar_t beta, scalar_t alpha) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 2, 4, "expected 2D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor"); int64_t batchnum = THCTensor_(size)(state, batch1, 0); int64_t m1d1 = THCTensor_(size)(state, batch1, 1); int64_t innerdim = THCTensor_(size)(state, batch1, 2); int64_t m2d2 = THCTensor_(size)(state, batch2, 2); THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); // M is t, as listed in the docs under addbmm THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6, "first dimension must match first dimension of M"); THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7, "second dimension must match second dimension of M"); THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6, "second dimension must match first dimension of batch2"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } THCTensor *slice1 = THCTensor_(new)(state); THCTensor *slice2 = THCTensor_(new)(state); for (int64_t i=0; i<batchnum; i++) { THCTensor_(select)(state, slice1, batch1, 0, i); THCTensor_(select)(state, slice2, batch2, 0, i); THCTensor_(addmm)(state, result, result, slice1, slice2, beta, alpha); beta = ScalarConvert<int, scalar_t>::to(1); } THCTensor_(free)(state, slice1); THCTensor_(free)(state, slice2); #else ERROR_ONLY_FP_TYPES("addbmm"); #endif } __global__ void createBatchGemmBuffer(const scalar_t** buffer, scalar_t* data, int64_t stride, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer[idx] = data + idx * stride; } } __global__ void createBatchGemmBuffer3(const scalar_t** buffer1, const scalar_t ** buffer2, const scalar_t ** buffer3, scalar_t* data1, scalar_t * data2, scalar_t * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer1[idx] = data1 + idx * stride1; buffer2[idx] = data2 + idx * stride2; buffer3[idx] = data3 + idx * stride3; } } void THCTensor_(baddbmm)(THCState *state, THCTensor *result, THCTensor *t, THCTensor *batch1, THCTensor *batch2, scalar_t beta, scalar_t alpha) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_BFLOAT16) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 
3, 4, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); auto maybe_outnames = at::namedinference::compute_baddbmm_outnames(result, batch1, batch2, t); { at::NoNamesGuard guard; THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6, "wrong matrix size"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<scalar_t, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } bool transpose_result; char transpose_batch1, transpose_batch2; int64_t lda, ldb, ldc; THCTensor *result_, *batch1_, *batch2_; if (result->stride(1) == 1) { transpose_result = false; result_ = result; ldc = result_->stride(2); } else if (result->stride(2) == 1) { transpose_result = true; THCTensor *swap = batch2; batch2 = batch1; batch1 = swap; result_ = result; ldc = result_->stride(1); } else { transpose_result = false; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2); result_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, result_, NULL, 1, 2); ldc = result_->stride(2); } if (batch1->stride(transpose_result ? 2 : 1) == 1 && batch1->stride(transpose_result ? 1 : 2) != 0) { transpose_batch1 = 'n'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 1 : 2); } else if (batch1->stride(transpose_result ? 1 : 2) == 1 && batch1->stride(transpose_result ? 2 : 1) != 0) { transpose_batch1 = 't'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 2 : 1); } else { transpose_batch1 = transpose_result ? 'n' : 't'; // batch1_ is later freed if batch1_ != batch1 if (THCTensor_(isContiguous)(state, batch1)) { batch1_ = batch1; } else { batch1_ = THCTensor_(newContiguous)(state, batch1); } lda = batch1_->stride(1); } if (batch2->stride(transpose_result ? 2 : 1) == 1 && batch2->stride(transpose_result ? 1 : 2) != 0) { transpose_batch2 = 'n'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 1 : 2); } else if (batch2->stride(transpose_result ? 1 : 2) == 1 && batch2->stride(transpose_result ? 2 : 1) != 0) { transpose_batch2 = 't'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 2 : 1); } else { transpose_batch2 = transpose_result ? 'n' : 't'; // batch2_ is later freed if batch2_ != batch2 if (THCTensor_(isContiguous)(state, batch2)) { batch2_ = batch2; } else { batch2_ = THCTensor_(newContiguous)(state, batch2); } ldb = batch2_->stride(1); } int64_t num_batches = result_->size(0); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) // Compute pointers to matrices in each batch. #if CUDA_VERSION < 8000 && !defined __HIP_PLATFORM_HCC__ size_t matrices_size = num_batches * sizeof(scalar_t*); // Copy pointers to device. 
auto d_matrices1 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size)); auto d_matrices2 = static_cast<const scalar_t**>(THCudaMalloc(state, matrices_size)); auto d_result_matrices = static_cast<scalar_t**>(THCudaMalloc(state, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer3<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( d_matrices1, d_matrices2, (const scalar_t**)d_result_matrices, THCTensor_(data)(state, batch1_), THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_), batch1_->stride(0), batch2_->stride(0), result_->stride(0), num_batches); #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #endif //THC_REAL THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCudaFree(state, d_result_matrices); #else #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #endif //THC_REAL #endif //CUDA_VERSION #elif defined(THC_REAL_IS_HALF) #if CUDA_VERSION < 9010 // Currently no HgemmBatched in Cublas for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } #else #ifndef __HIP_PLATFORM_HCC__ cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties(); if (prop->major >= 5){ #endif THCudaBlas_HgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #ifndef __HIP_PLATFORM_HCC__ } else { for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 
2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } } #endif #endif //CUDA_VERSION #elif defined(THC_REAL_IS_BFLOAT16) #if defined(__HIP_PLATFORM_HCC__) THCudaBlas_BgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #else TORCH_CHECK(false, "BgemmStridedBatched is not supported with at::BFloat16 type"); #endif // __HIP_PLATFORM_HCC__ #endif if (batch1_ != batch1) { THCTensor_(free)(state, batch1_); } if (batch2_ != batch2) { THCTensor_(free)(state, batch2_); } if (result_ != result) { THCTensor_(freeCopyTo)(state, result_, result); } } at::namedinference::propagate_names_if_nonempty(result, maybe_outnames); #else ERROR_ONLY_FP_TYPES("baddbmm"); #endif } #endif
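The baddbmm path above chooses between a pointer-array batched GEMM (older CUDA) and the strided-batched cuBLAS call. As a point of reference, a minimal standalone use of cublasSgemmStridedBatched is sketched below; the square column-major shapes, the uniform test data, and the omitted error checks are illustration-only assumptions, not taken from the THC code.

// Sketch: C[b] = alpha * A[b] * B[b] + beta * C[b] for `batch` column-major matrices
// stored back to back, so the stride between consecutive matrices is rows*cols.
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <vector>

int main() {
  const int m = 4, n = 4, k = 4, batch = 8;
  const float alpha = 1.0f, beta = 0.0f;
  float *dA, *dB, *dC;
  cudaMalloc(&dA, sizeof(float) * m * k * batch);
  cudaMalloc(&dB, sizeof(float) * k * n * batch);
  cudaMalloc(&dC, sizeof(float) * m * n * batch);
  std::vector<float> hA(m * k * batch, 1.0f), hB(k * n * batch, 1.0f);
  cudaMemcpy(dA, hA.data(), hA.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB.data(), hB.size() * sizeof(float), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);
  // lda/ldb/ldc describe a single matrix; the long long strides step to the next one.
  cublasSgemmStridedBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                            m, n, k, &alpha,
                            dA, m, (long long)m * k,
                            dB, k, (long long)k * n,
                            &beta,
                            dC, m, (long long)m * n,
                            batch);
  cublasDestroy(handle);
  cudaFree(dA); cudaFree(dB); cudaFree(dC);
  return 0;
}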
4dd1dd7fd21e7f29aed2a3e9d22bd6be96097938.hip
// !!! This is a file automatically generated by hipify!!! // // auto-generated by op2.py // //global constants #ifndef MAX_CONST_SIZE #define MAX_CONST_SIZE 128 #endif __constant__ double gam_cuda; __constant__ double gm1_cuda; __constant__ double gm1i_cuda; __constant__ double m2_cuda; __constant__ double wtg1_cuda[2]; __constant__ double xi1_cuda[2]; __constant__ double Ng1_cuda[4]; __constant__ double Ng1_xi_cuda[4]; __constant__ double wtg2_cuda[4]; __constant__ double Ng2_cuda[16]; __constant__ double Ng2_xi_cuda[32]; __constant__ double minf_cuda; __constant__ double freq_cuda; __constant__ double kappa_cuda; __constant__ double nmode_cuda; __constant__ double mfan_cuda; //header #include "op_lib_cpp.h" #include "op_cuda_rt_support.h" #include "op_cuda_reduction.h" void op_decl_const_char(int dim, char const *type, int size, char *dat, char const *name){ if (!OP_hybrid_gpu) return; if (!strcmp(name,"gam")) { cutilSafeCall(hipMemcpyToSymbol(gam_cuda, dat, dim*size)); } else if (!strcmp(name,"gm1")) { cutilSafeCall(hipMemcpyToSymbol(gm1_cuda, dat, dim*size)); } else if (!strcmp(name,"gm1i")) { cutilSafeCall(hipMemcpyToSymbol(gm1i_cuda, dat, dim*size)); } else if (!strcmp(name,"m2")) { cutilSafeCall(hipMemcpyToSymbol(m2_cuda, dat, dim*size)); } else if (!strcmp(name,"wtg1")) { cutilSafeCall(hipMemcpyToSymbol(wtg1_cuda, dat, dim*size)); } else if (!strcmp(name,"xi1")) { cutilSafeCall(hipMemcpyToSymbol(xi1_cuda, dat, dim*size)); } else if (!strcmp(name,"Ng1")) { cutilSafeCall(hipMemcpyToSymbol(Ng1_cuda, dat, dim*size)); } else if (!strcmp(name,"Ng1_xi")) { cutilSafeCall(hipMemcpyToSymbol(Ng1_xi_cuda, dat, dim*size)); } else if (!strcmp(name,"wtg2")) { cutilSafeCall(hipMemcpyToSymbol(wtg2_cuda, dat, dim*size)); } else if (!strcmp(name,"Ng2")) { cutilSafeCall(hipMemcpyToSymbol(Ng2_cuda, dat, dim*size)); } else if (!strcmp(name,"Ng2_xi")) { cutilSafeCall(hipMemcpyToSymbol(Ng2_xi_cuda, dat, dim*size)); } else if (!strcmp(name,"minf")) { cutilSafeCall(hipMemcpyToSymbol(minf_cuda, dat, dim*size)); } else if (!strcmp(name,"freq")) { cutilSafeCall(hipMemcpyToSymbol(freq_cuda, dat, dim*size)); } else if (!strcmp(name,"kappa")) { cutilSafeCall(hipMemcpyToSymbol(kappa_cuda, dat, dim*size)); } else if (!strcmp(name,"nmode")) { cutilSafeCall(hipMemcpyToSymbol(nmode_cuda, dat, dim*size)); } else if (!strcmp(name,"mfan")) { cutilSafeCall(hipMemcpyToSymbol(mfan_cuda, dat, dim*size)); } else { printf("error: unknown const name\n"); exit(1); } } //user kernel files #include "res_calc_kernel.cu" #include "dirichlet_kernel.cu" #include "init_cg_kernel.cu" #include "spMV_kernel.cu" #include "dotPV_kernel.cu" #include "updateUR_kernel.cu" #include "dotR_kernel.cu" #include "updateP_kernel.cu" #include "update_kernel.hip"
4dd1dd7fd21e7f29aed2a3e9d22bd6be96097938.cu
// // auto-generated by op2.py // //global constants #ifndef MAX_CONST_SIZE #define MAX_CONST_SIZE 128 #endif __constant__ double gam_cuda; __constant__ double gm1_cuda; __constant__ double gm1i_cuda; __constant__ double m2_cuda; __constant__ double wtg1_cuda[2]; __constant__ double xi1_cuda[2]; __constant__ double Ng1_cuda[4]; __constant__ double Ng1_xi_cuda[4]; __constant__ double wtg2_cuda[4]; __constant__ double Ng2_cuda[16]; __constant__ double Ng2_xi_cuda[32]; __constant__ double minf_cuda; __constant__ double freq_cuda; __constant__ double kappa_cuda; __constant__ double nmode_cuda; __constant__ double mfan_cuda; //header #include "op_lib_cpp.h" #include "op_cuda_rt_support.h" #include "op_cuda_reduction.h" void op_decl_const_char(int dim, char const *type, int size, char *dat, char const *name){ if (!OP_hybrid_gpu) return; if (!strcmp(name,"gam")) { cutilSafeCall(cudaMemcpyToSymbol(gam_cuda, dat, dim*size)); } else if (!strcmp(name,"gm1")) { cutilSafeCall(cudaMemcpyToSymbol(gm1_cuda, dat, dim*size)); } else if (!strcmp(name,"gm1i")) { cutilSafeCall(cudaMemcpyToSymbol(gm1i_cuda, dat, dim*size)); } else if (!strcmp(name,"m2")) { cutilSafeCall(cudaMemcpyToSymbol(m2_cuda, dat, dim*size)); } else if (!strcmp(name,"wtg1")) { cutilSafeCall(cudaMemcpyToSymbol(wtg1_cuda, dat, dim*size)); } else if (!strcmp(name,"xi1")) { cutilSafeCall(cudaMemcpyToSymbol(xi1_cuda, dat, dim*size)); } else if (!strcmp(name,"Ng1")) { cutilSafeCall(cudaMemcpyToSymbol(Ng1_cuda, dat, dim*size)); } else if (!strcmp(name,"Ng1_xi")) { cutilSafeCall(cudaMemcpyToSymbol(Ng1_xi_cuda, dat, dim*size)); } else if (!strcmp(name,"wtg2")) { cutilSafeCall(cudaMemcpyToSymbol(wtg2_cuda, dat, dim*size)); } else if (!strcmp(name,"Ng2")) { cutilSafeCall(cudaMemcpyToSymbol(Ng2_cuda, dat, dim*size)); } else if (!strcmp(name,"Ng2_xi")) { cutilSafeCall(cudaMemcpyToSymbol(Ng2_xi_cuda, dat, dim*size)); } else if (!strcmp(name,"minf")) { cutilSafeCall(cudaMemcpyToSymbol(minf_cuda, dat, dim*size)); } else if (!strcmp(name,"freq")) { cutilSafeCall(cudaMemcpyToSymbol(freq_cuda, dat, dim*size)); } else if (!strcmp(name,"kappa")) { cutilSafeCall(cudaMemcpyToSymbol(kappa_cuda, dat, dim*size)); } else if (!strcmp(name,"nmode")) { cutilSafeCall(cudaMemcpyToSymbol(nmode_cuda, dat, dim*size)); } else if (!strcmp(name,"mfan")) { cutilSafeCall(cudaMemcpyToSymbol(mfan_cuda, dat, dim*size)); } else { printf("error: unknown const name\n"); exit(1); } } //user kernel files #include "res_calc_kernel.cu" #include "dirichlet_kernel.cu" #include "init_cg_kernel.cu" #include "spMV_kernel.cu" #include "dotPV_kernel.cu" #include "updateUR_kernel.cu" #include "dotR_kernel.cu" #include "updateP_kernel.cu" #include "update_kernel.cu"
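op_decl_const_char above is a name-to-symbol dispatcher around cudaMemcpyToSymbol. A minimal self-contained sketch of the same __constant__-memory pattern follows; the gam_c symbol and the toy kernel are made up for the example and are not part of the OP2-generated code.

#include <cstdio>
#include <cuda_runtime.h>

__constant__ double gam_c;            // device-side constant, example name only

__global__ void use_const(double* out) {
  out[0] = gam_c * 2.0;               // all threads read the same broadcast value
}

int main() {
  double h_gam = 1.4;
  // Host -> constant memory; the symbol itself is passed, not a string name.
  cudaMemcpyToSymbol(gam_c, &h_gam, sizeof(double));
  double *d_out, h_out;
  cudaMalloc(&d_out, sizeof(double));
  use_const<<<1, 1>>>(d_out);
  cudaMemcpy(&h_out, d_out, sizeof(double), cudaMemcpyDeviceToHost);
  printf("%f\n", h_out);              // 2.800000
  cudaFree(d_out);
  return 0;
}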
88e5a110d4fefc957054c324e0c484a32de2c6e7.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "array.hpp" #include "types.hpp" #include "grid_stride_range.hpp" #include "execution.hpp" #include "kernel_dispatcher.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/tensor.hpp" #include "../cuda4dnn/csl/span.hpp" #include "../cuda4dnn/kernels/fill_copy.hpp" #include <opencv2/core.hpp> #include <cstddef> #include <vector> using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, std::size_t Rank> __global__ void permute( array<index_type, Rank> axis_order, Span<T> output, array<size_type, Rank> outStrides, View<T> input, array<size_type, Rank> inStrides) { for (auto i : grid_stride_range(input.size())) { index_type oldPosition = 0; index_type newPosition = i; for (int j = 0; j < Rank; j++) { auto order = axis_order[j]; oldPosition += (newPosition / outStrides[j]) * inStrides[order]; newPosition %= outStrides[j]; } output[i] = input[oldPosition]; } } template <class T, int TILE_SIZE, int ROWS_PER_THREAD> __global__ void transpose(Span<T> output, View<T> input, size_type in_width, size_type out_width) { __shared__ T tile[TILE_SIZE][TILE_SIZE + 1]; /* blockDim.y = TILE_SIZE / ROWS_PER_THREAD, blockDim.x = TILE_SIZE */ const index_type in_x = blockIdx.x * TILE_SIZE + threadIdx.x; const index_type in_y_begin = blockIdx.y * TILE_SIZE + threadIdx.y; /* Every valid input location has a corresponding output location and vice versa. * Hence, if we do not load values into the shared memory for a given location, we * also won't read them for storing in the output. */ for (int j = 0; j < TILE_SIZE; j += TILE_SIZE / ROWS_PER_THREAD) { const auto in_y_current = in_y_begin + j; if (in_x < in_width && in_y_current < out_width) tile[threadIdx.y + j][threadIdx.x] = input[in_y_current * in_width + in_x]; } __syncthreads(); /* We interchange `threadIdx.x` and `threadIdx.y` so that consecutive output indices map to * consecutive threads. This would allow writes across threds in a warp to be coalesced. */ const index_type out_x = blockIdx.y * TILE_SIZE + threadIdx.x; const index_type out_y_begin = blockIdx.x * TILE_SIZE + threadIdx.y; for (int j = 0; j < TILE_SIZE; j += TILE_SIZE / ROWS_PER_THREAD) { const auto out_y_current = out_y_begin + j; if (out_x < out_width && out_y_current < in_width) output[out_y_current * out_width + out_x] = tile[threadIdx.x][threadIdx.y + j]; } } } template <class T> void transpose(const Stream& stream, Span<T> output, View<T> input, std::size_t in_width, std::size_t out_width) { /* Each block processes a TILE_SIZE x TILE_SIZE piece */ constexpr int TILE_SIZE = 32; /* Each thread processes ROWS_PER_THREAD rows. We do this to decrease the number of threads required * in a block so that the cost of the block-wide synchronization is minimized. 
*/ constexpr int ROWS_PER_THREAD = 4; dim3 grid_size((in_width + TILE_SIZE - 1) / TILE_SIZE, (out_width + TILE_SIZE - 1) / TILE_SIZE); dim3 block_size(TILE_SIZE, TILE_SIZE / ROWS_PER_THREAD); auto policy = execution_policy(grid_size, block_size, stream); auto kernel = raw::transpose<T, TILE_SIZE, ROWS_PER_THREAD>; launch_kernel(kernel, policy, output, input, in_width, out_width); } template void transpose(const Stream&, Span<__half>, View<__half>, std::size_t, std::size_t); template void transpose(const Stream&, Span<float>, View<float>, std::size_t, std::size_t); template <class T, std::size_t Rank> static void launch_permute_kernel( const Stream& stream, const std::vector<std::size_t>& order, Span<T> output, const std::vector<std::size_t>& outStride, View<T> input, const std::vector<std::size_t>& inStride) { CV_Assert(order.size() == Rank); CV_Assert(outStride.size() == Rank); CV_Assert(inStride.size() == Rank); array<index_type, Rank> order_k; order_k.assign(std::begin(order), std::end(order)); array<size_type, Rank> outStride_k, inStride_k; outStride_k.assign(std::begin(outStride), std::end(outStride)); inStride_k.assign(std::begin(inStride), std::end(inStride)); auto kernel = raw::permute<T, Rank>; auto policy = make_policy(kernel, input.size(), 0, stream); launch_kernel(kernel, policy, order_k, output, outStride_k, input, inStride_k); } GENERATE_KERNEL_DISPATCHER(permute_dispatcher, launch_permute_kernel); template <class T> void permute( const Stream& stream, TensorSpan<T> output, TensorView<T> input, std::vector<std::size_t> order) { CV_Assert(output.rank() == input.rank()); CV_Assert(input.rank() == order.size()); CV_Assert(input.size() == output.size()); auto rank = output.rank(); auto inShape = input.shape_as_vector(); auto outShape = output.shape_as_vector(); /* singleton axes do not contribute towards address calculation * * Reasoning: * ---------- * Suppose an item's indices in the input tensor is [i1, i2, ...]. The indices in the * output tensor will be some permutation of the input tensor indices. Let the output * tensor indices be [o1, o2, ...]. The permutation operation essentially copies items * from the input tensor to new locations in the output tensor as dictated by the indices. * * If the size of the nth axis (say i2) of the input is one the input and output indicies for * all the elements will be of the form be [i1, 0, ...] and [..., 0, ...] respectively. * The index does not contribute to the element's address calculation and hence would give * identical result if it weren't there. 
*/ for (int i = 0; i < rank; i++) { /* index `i` corresponds to the axis index in the output; order[i] has the corresponding axis index in the input */ while (i < rank && outShape[i] == 1) { int in_i = order[i]; CV_Assert(inShape[in_i] == 1); /* delete axis `i` */ inShape.erase(std::begin(inShape) + in_i); outShape.erase(std::begin(outShape) + i); /* deletion of an axis reduces an axis in the input tensor which would cause the indices * of the axes that come after the deleted axis to reduce by one */ order.erase(order.begin() + i); for (auto& axis : order) if (axis > in_i) axis--; rank--; /* optimizations should not break the invariants */ CV_Assert(rank == order.size()); CV_Assert(inShape.size() == order.size()); CV_Assert(outShape.size() == order.size()); CV_Assert(input.size() == output.size()); } } /* contiguous axes whose relative ordering stays same before and after permutation can be merged into one axis * example: in permute order 0 2 3 1, axes 2 and 3 can be grouped into a single axis * * Reasoning: * ---------- * Suppose an item's indices in the input tensor is [i0, i1, i2, i3, ...]. Let the permutation order be [0, 3, 1, 2, ...]. * Note that i1 and i2 are adjacent axes in the same order in input as well as output. The indices in the output tensor * will be [i0, i3, i1, i2, ...]. * * Each axis in the contiguous axes sequence will add an offset of iN * strideN. In the above example, * the two axes add a total offset of `i1 * (size2 * stride2) + i2 * stride2` which is `(i1 * size2 + i2) * stride2`, * in both input and output. Note stride2 can be different in the input and output. We can merge the two axes into one axis * with a size of `size1 * size2`. The new offset added will be `i12 * stride12` as the kernel iterates through `i12`. Note * that `i12` is actually `(i1 * size2 + i2)` and `stride12` is `stride2`. 
*/ for (int i = 0; i < rank; i++) { /* the indices used in the loops such as `i` and `j` are axis indices in the output tensor */ /* the corresponding input axis indices are `order[i]` and `order[j]`*/ /* loop invariant: `i` is the first axis in the contiguous unpermuted axis sequence */ int j = i + 1; /* `j` is the axis which we will attempt to merge */ while (j < rank && (order[i] + 1) == order[j]) { /* axis `i` and axis `j` do not change relative order */ auto in_i = order[i], in_j = order[j]; auto new_size = inShape[in_i] * inShape[in_j]; inShape[in_i] = new_size; outShape[i] = new_size; /* delete axis `j` */ inShape.erase(std::begin(inShape) + in_j); outShape.erase(std::begin(outShape) + j); /* deletion of an axis reduces an axis in the input tensor which would cause the indices * of the axes that come after the deleted axis to reduce by one */ order.erase(order.begin() + j); for (auto& axis : order) if (axis > order[i]) axis--; rank--; /* optimizations should not break the invariants */ CV_Assert(rank == order.size()); CV_Assert(inShape.size() == order.size()); CV_Assert(outShape.size() == order.size()); CV_Assert(input.size() == output.size()); } } std::vector<std::size_t> inStride(rank), outStride(rank); inStride.back() = 1; outStride.back() = 1; /* garbage, ..., garbage, 1 */ std::copy(std::begin(inShape) + 1, std::end(inShape), std::begin(inStride)); std::copy(std::begin(outShape) + 1, std::end(outShape), std::begin(outStride)); /* dim[0], dim[1], ..., dim[-1], 1 */ std::partial_sum(inStride.rbegin(), inStride.rend(), inStride.rbegin(), std::multiplies<std::size_t>()); std::partial_sum(outStride.rbegin(), outStride.rend(), outStride.rbegin(), std::multiplies<std::size_t>()); /* stride[0], stride[1], ..., stride[-2], 1 */ const bool is_in_order = [&order] { for (int i = 0; i < order.size(); i++) if (order[i] != i) return false; return true; }(); if (is_in_order) { kernels::copy<T>(stream, output, input); } else if(rank == 2) { /* use the more efficient transpose kernel */ transpose<T>(stream, output, input, inShape[1], outShape[1]); } else { CV_Assert(3 <= rank && rank <= CSL_MAX_TENSOR_RANK); permute_dispatcher<T, 3, CSL_MAX_TENSOR_RANK>(rank, stream, order, output, outStride, input, inStride); } } #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) template void permute(const Stream&, TensorSpan<__half>, TensorView<__half>, std::vector<std::size_t>); #endif template void permute(const Stream&, TensorSpan<float>, TensorView<float>, std::vector<std::size_t>); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
88e5a110d4fefc957054c324e0c484a32de2c6e7.cu
// This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <cuda_runtime.h> #include <cuda_fp16.h> #include "array.hpp" #include "types.hpp" #include "grid_stride_range.hpp" #include "execution.hpp" #include "kernel_dispatcher.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/tensor.hpp" #include "../cuda4dnn/csl/span.hpp" #include "../cuda4dnn/kernels/fill_copy.hpp" #include <opencv2/core.hpp> #include <cstddef> #include <vector> using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, std::size_t Rank> __global__ void permute( array<index_type, Rank> axis_order, Span<T> output, array<size_type, Rank> outStrides, View<T> input, array<size_type, Rank> inStrides) { for (auto i : grid_stride_range(input.size())) { index_type oldPosition = 0; index_type newPosition = i; for (int j = 0; j < Rank; j++) { auto order = axis_order[j]; oldPosition += (newPosition / outStrides[j]) * inStrides[order]; newPosition %= outStrides[j]; } output[i] = input[oldPosition]; } } template <class T, int TILE_SIZE, int ROWS_PER_THREAD> __global__ void transpose(Span<T> output, View<T> input, size_type in_width, size_type out_width) { __shared__ T tile[TILE_SIZE][TILE_SIZE + 1]; /* blockDim.y = TILE_SIZE / ROWS_PER_THREAD, blockDim.x = TILE_SIZE */ const index_type in_x = blockIdx.x * TILE_SIZE + threadIdx.x; const index_type in_y_begin = blockIdx.y * TILE_SIZE + threadIdx.y; /* Every valid input location has a corresponding output location and vice versa. * Hence, if we do not load values into the shared memory for a given location, we * also won't read them for storing in the output. */ for (int j = 0; j < TILE_SIZE; j += TILE_SIZE / ROWS_PER_THREAD) { const auto in_y_current = in_y_begin + j; if (in_x < in_width && in_y_current < out_width) tile[threadIdx.y + j][threadIdx.x] = input[in_y_current * in_width + in_x]; } __syncthreads(); /* We interchange `threadIdx.x` and `threadIdx.y` so that consecutive output indices map to * consecutive threads. This would allow writes across threds in a warp to be coalesced. */ const index_type out_x = blockIdx.y * TILE_SIZE + threadIdx.x; const index_type out_y_begin = blockIdx.x * TILE_SIZE + threadIdx.y; for (int j = 0; j < TILE_SIZE; j += TILE_SIZE / ROWS_PER_THREAD) { const auto out_y_current = out_y_begin + j; if (out_x < out_width && out_y_current < in_width) output[out_y_current * out_width + out_x] = tile[threadIdx.x][threadIdx.y + j]; } } } template <class T> void transpose(const Stream& stream, Span<T> output, View<T> input, std::size_t in_width, std::size_t out_width) { /* Each block processes a TILE_SIZE x TILE_SIZE piece */ constexpr int TILE_SIZE = 32; /* Each thread processes ROWS_PER_THREAD rows. We do this to decrease the number of threads required * in a block so that the cost of the block-wide synchronization is minimized. 
*/ constexpr int ROWS_PER_THREAD = 4; dim3 grid_size((in_width + TILE_SIZE - 1) / TILE_SIZE, (out_width + TILE_SIZE - 1) / TILE_SIZE); dim3 block_size(TILE_SIZE, TILE_SIZE / ROWS_PER_THREAD); auto policy = execution_policy(grid_size, block_size, stream); auto kernel = raw::transpose<T, TILE_SIZE, ROWS_PER_THREAD>; launch_kernel(kernel, policy, output, input, in_width, out_width); } template void transpose(const Stream&, Span<__half>, View<__half>, std::size_t, std::size_t); template void transpose(const Stream&, Span<float>, View<float>, std::size_t, std::size_t); template <class T, std::size_t Rank> static void launch_permute_kernel( const Stream& stream, const std::vector<std::size_t>& order, Span<T> output, const std::vector<std::size_t>& outStride, View<T> input, const std::vector<std::size_t>& inStride) { CV_Assert(order.size() == Rank); CV_Assert(outStride.size() == Rank); CV_Assert(inStride.size() == Rank); array<index_type, Rank> order_k; order_k.assign(std::begin(order), std::end(order)); array<size_type, Rank> outStride_k, inStride_k; outStride_k.assign(std::begin(outStride), std::end(outStride)); inStride_k.assign(std::begin(inStride), std::end(inStride)); auto kernel = raw::permute<T, Rank>; auto policy = make_policy(kernel, input.size(), 0, stream); launch_kernel(kernel, policy, order_k, output, outStride_k, input, inStride_k); } GENERATE_KERNEL_DISPATCHER(permute_dispatcher, launch_permute_kernel); template <class T> void permute( const Stream& stream, TensorSpan<T> output, TensorView<T> input, std::vector<std::size_t> order) { CV_Assert(output.rank() == input.rank()); CV_Assert(input.rank() == order.size()); CV_Assert(input.size() == output.size()); auto rank = output.rank(); auto inShape = input.shape_as_vector(); auto outShape = output.shape_as_vector(); /* singleton axes do not contribute towards address calculation * * Reasoning: * ---------- * Suppose an item's indices in the input tensor is [i1, i2, ...]. The indices in the * output tensor will be some permutation of the input tensor indices. Let the output * tensor indices be [o1, o2, ...]. The permutation operation essentially copies items * from the input tensor to new locations in the output tensor as dictated by the indices. * * If the size of the nth axis (say i2) of the input is one the input and output indicies for * all the elements will be of the form be [i1, 0, ...] and [..., 0, ...] respectively. * The index does not contribute to the element's address calculation and hence would give * identical result if it weren't there. 
*/ for (int i = 0; i < rank; i++) { /* index `i` corresponds to the axis index in the output; order[i] has the corresponding axis index in the input */ while (i < rank && outShape[i] == 1) { int in_i = order[i]; CV_Assert(inShape[in_i] == 1); /* delete axis `i` */ inShape.erase(std::begin(inShape) + in_i); outShape.erase(std::begin(outShape) + i); /* deletion of an axis reduces an axis in the input tensor which would cause the indices * of the axes that come after the deleted axis to reduce by one */ order.erase(order.begin() + i); for (auto& axis : order) if (axis > in_i) axis--; rank--; /* optimizations should not break the invariants */ CV_Assert(rank == order.size()); CV_Assert(inShape.size() == order.size()); CV_Assert(outShape.size() == order.size()); CV_Assert(input.size() == output.size()); } } /* contiguous axes whose relative ordering stays same before and after permutation can be merged into one axis * example: in permute order 0 2 3 1, axes 2 and 3 can be grouped into a single axis * * Reasoning: * ---------- * Suppose an item's indices in the input tensor is [i0, i1, i2, i3, ...]. Let the permutation order be [0, 3, 1, 2, ...]. * Note that i1 and i2 are adjacent axes in the same order in input as well as output. The indices in the output tensor * will be [i0, i3, i1, i2, ...]. * * Each axis in the contiguous axes sequence will add an offset of iN * strideN. In the above example, * the two axes add a total offset of `i1 * (size2 * stride2) + i2 * stride2` which is `(i1 * size2 + i2) * stride2`, * in both input and output. Note stride2 can be different in the input and output. We can merge the two axes into one axis * with a size of `size1 * size2`. The new offset added will be `i12 * stride12` as the kernel iterates through `i12`. Note * that `i12` is actually `(i1 * size2 + i2)` and `stride12` is `stride2`. 
*/ for (int i = 0; i < rank; i++) { /* the indices used in the loops such as `i` and `j` are axis indices in the output tensor */ /* the corresponding input axis indices are `order[i]` and `order[j]`*/ /* loop invariant: `i` is the first axis in the contiguous unpermuted axis sequence */ int j = i + 1; /* `j` is the axis which we will attempt to merge */ while (j < rank && (order[i] + 1) == order[j]) { /* axis `i` and axis `j` do not change relative order */ auto in_i = order[i], in_j = order[j]; auto new_size = inShape[in_i] * inShape[in_j]; inShape[in_i] = new_size; outShape[i] = new_size; /* delete axis `j` */ inShape.erase(std::begin(inShape) + in_j); outShape.erase(std::begin(outShape) + j); /* deletion of an axis reduces an axis in the input tensor which would cause the indices * of the axes that come after the deleted axis to reduce by one */ order.erase(order.begin() + j); for (auto& axis : order) if (axis > order[i]) axis--; rank--; /* optimizations should not break the invariants */ CV_Assert(rank == order.size()); CV_Assert(inShape.size() == order.size()); CV_Assert(outShape.size() == order.size()); CV_Assert(input.size() == output.size()); } } std::vector<std::size_t> inStride(rank), outStride(rank); inStride.back() = 1; outStride.back() = 1; /* garbage, ..., garbage, 1 */ std::copy(std::begin(inShape) + 1, std::end(inShape), std::begin(inStride)); std::copy(std::begin(outShape) + 1, std::end(outShape), std::begin(outStride)); /* dim[0], dim[1], ..., dim[-1], 1 */ std::partial_sum(inStride.rbegin(), inStride.rend(), inStride.rbegin(), std::multiplies<std::size_t>()); std::partial_sum(outStride.rbegin(), outStride.rend(), outStride.rbegin(), std::multiplies<std::size_t>()); /* stride[0], stride[1], ..., stride[-2], 1 */ const bool is_in_order = [&order] { for (int i = 0; i < order.size(); i++) if (order[i] != i) return false; return true; }(); if (is_in_order) { kernels::copy<T>(stream, output, input); } else if(rank == 2) { /* use the more efficient transpose kernel */ transpose<T>(stream, output, input, inShape[1], outShape[1]); } else { CV_Assert(3 <= rank && rank <= CSL_MAX_TENSOR_RANK); permute_dispatcher<T, 3, CSL_MAX_TENSOR_RANK>(rank, stream, order, output, outStride, input, inStride); } } #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) template void permute(const Stream&, TensorSpan<__half>, TensorView<__half>, std::vector<std::size_t>); #endif template void permute(const Stream&, TensorSpan<float>, TensorView<float>, std::vector<std::size_t>); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
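The transpose kernel in the file above stages a tile in shared memory, padding each row by one element so the transposed reads do not collide on the same bank, and swaps block coordinates so both the load and the store stay coalesced. A stripped-down one-element-per-thread version of the same idea is sketched here; assuming width and height are multiples of the tile size keeps the bounds checks out.

#define TILE 32

// in: height rows of width floats (row-major); out: width rows of height floats.
__global__ void transpose_tiled(float* out, const float* in, int width, int height) {
  __shared__ float tile[TILE][TILE + 1];   // +1 pad avoids shared-memory bank conflicts

  int x = blockIdx.x * TILE + threadIdx.x;
  int y = blockIdx.y * TILE + threadIdx.y;
  tile[threadIdx.y][threadIdx.x] = in[y * width + x];    // coalesced load
  __syncthreads();

  // Swap block coordinates and thread roles so the store is also coalesced.
  x = blockIdx.y * TILE + threadIdx.x;
  y = blockIdx.x * TILE + threadIdx.y;
  out[y * height + x] = tile[threadIdx.x][threadIdx.y];
}

// Launch sketch: dim3 grid(width / TILE, height / TILE), block(TILE, TILE);
// transpose_tiled<<<grid, block>>>(d_out, d_in, width, height);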
aafc11177e300badb577fa4a4859855df3440d5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // CrossCorrelation.cu // CrossCorrelation // // Created by Vivek Sridhar on 29/06/17. // Copyright 2017 Vivek Sridhar. All rights reserved. // #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <algorithm> #include <stdio.h> #include <stdlib.h> #include <dirent.h> template <typename T> std::string to_string(const T& value) { std::stringstream ss; ss << value; return ss.str(); } long factorial(long val) { std::cout << val << "\n"; long result = 1; for (long i = 1; i <= val; ++i) { result *= i; } return result; } long combination(long n, long r) { return (factorial(n)) / ((factorial(n - r)) * factorial(r)); } __global__ void kernel(float *x1, float *y1, float *x2, float *y2, float *res, int tau, int na_frames, long nElements) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (tau < 0) { if (index >= -tau+na_frames) { res[index] = x1[index] * x2[index + tau] + y1[index] * y2[index + tau]; } else res[index] = 0.0; } else { if (index < nElements - tau) { res[index] = x1[index] * x2[index + tau] + y1[index] * y2[index + tau]; } else res[index] = 0.0; } } // total measurement points in the time series is defined by nElements #define M 1024 // number of threads per block #define fps 10 // frames per second of input video (used to determine tau) #define time 5 // time in seconds within which time delayed cross correlation is calculated (tau ranges from -time*fps to time*fps) #define n_inds 10 int na_frames = 0; // number of frames in the start with nas int scale = 1; // time window for analysis in seconds; varying this allows us to examine dynamics of leadership across varying timescales; setting scale larger than the entire time series or -1 gives aggregated statistics across the entire duration (otherwise, timescale of analysis is scale*fps) //const int pairs = combination(n_inds, 2); const bool aggregate = false; // this boolean decides whether you output a dynamic time variable leadership network or a static time aggregated network; scale is set to -1 if aggregate is true std::ofstream outputFile1; int main () { DIR *dir; FILE *pFile_x1; FILE *pFile_y1; FILE *pFile_x2; FILE *pFile_y2; long lSize; long nElements; struct dirent *file; float *d_x1, *d_y1, *d_x2, *d_y2, *d_res; float *x1, *y1, *x2, *y2, *res; size_t result_x1, result_y1, result_x2, result_y2; if (aggregate) scale = -1; std::vector<std::string> files; std::string directory = "/home/user/Documents/Vivek/cuda/DirectionalCorrelation/Data/Input/pigeons/10_birds/ffA3/cross_correlation/"; dir = opendir(directory.c_str()); int idx = 0; while ((file = readdir(dir)) != NULL) { if (file->d_name[0] == 'd') { files.push_back(file->d_name); ++idx; } } std::sort(files.begin(), files.begin()+2*n_inds); closedir(dir); // Open output file std::string filename_cc; if (scale != -1) filename_cc = "cross_correlation_01.csv"; else filename_cc = "avgd_cross_correlation.csv"; outputFile1.open(filename_cc.c_str()); // Output file headers if (aggregate || scale == -1) outputFile1 << "id1"<< ", " << "id2" << ", " << "tau" << ", " << "cc" << "\n"; else outputFile1 << "time" << ", " << "id1" << ", " << "id2" << ", " << "tau" << ", " << "cc" << "\n"; //files = {"dir_x00", "dir_x01", "dir_y00", "dir_y01"} for (int a = 0; a < n_inds; ++a) { for (int b = 0; b < n_inds; ++b) { if (b != a) { pFile_x1 = fopen ((directory + files[a]).c_str(), "rb"); pFile_y1 = fopen ((directory + files[a+n_inds]).c_str(), "rb"); pFile_x2 = fopen 
((directory + files[b]).c_str(), "rb"); pFile_y2 = fopen ((directory + files[b+n_inds]).c_str(), "rb"); if (pFile_x1==NULL || pFile_y1==NULL || pFile_x2==NULL || pFile_y2==NULL) { fputs ("File error",stderr); exit (1); } // obtain file size fseek (pFile_x1 , 0 , SEEK_END); lSize = ftell (pFile_x1); rewind (pFile_x1); nElements = lSize / sizeof(float); // allocate memory to contain the whole file // device memory hipMalloc((void **) &d_x1, lSize); hipMalloc((void **) &d_y1, lSize); hipMalloc((void **) &d_x2, lSize); hipMalloc((void **) &d_y2, lSize); hipMalloc((void **) &d_res, lSize); // host memory x1 = (float*) malloc(lSize); y1 = (float*) malloc(lSize); x2 = (float*) malloc(lSize); y2 = (float*) malloc(lSize); res = (float*) malloc(lSize); if (x1 == NULL || y1==NULL || x2==NULL || y2==NULL || res==NULL) { fputs ("Memory error",stderr); exit (2); } // copy the file into the respective float pointers result_x1 = fread (x1, sizeof(float), nElements, pFile_x1); result_y1 = fread (y1, sizeof(float), nElements, pFile_y1); result_x2 = fread (x2, sizeof(float), nElements, pFile_x2); result_y2 = fread (y2, sizeof(float), nElements, pFile_y2); if (result_x1 != nElements || result_y1 != nElements || result_x2 != nElements || result_y2 != nElements) { fputs ("Reading error",stderr); exit (3); } // the whole files are now loaded in the memory x1, y1, x2 and y2 respectively hipMemcpy(d_x1, x1, lSize, hipMemcpyHostToDevice); hipMemcpy(d_y1, y1, lSize, hipMemcpyHostToDevice); hipMemcpy(d_x2, x2, lSize, hipMemcpyHostToDevice); hipMemcpy(d_y2, y2, lSize, hipMemcpyHostToDevice); if (scale*fps > nElements) scale = -1; int tau_max[nElements - scale*fps]; float res_tmp[nElements - scale*fps]; float res_max[nElements - scale*fps]; std::fill_n(tau_max, nElements - scale*fps, 0); std::fill_n(res_tmp, nElements - scale*fps, 0.0); std::fill_n(res_max, nElements - scale*fps, -1.0); for (int tau = -time*fps; tau <= time*fps; ++tau) { hipLaunchKernelGGL(( kernel), dim3((nElements + M - 1) / M), dim3(M), 0, 0, d_x1, d_y1, d_x2, d_y2, d_res, tau, na_frames, nElements); hipMemcpy(res, d_res, lSize, hipMemcpyDeviceToHost); if (scale == -1) { float res_now = -1.0f; for (int i = na_frames; i < nElements; ++i) { if (res[i] != res[i]) std::cout << x1[i] << " " << y1[i] << " " << i << " " << tau << "\n"; // if nans res_now += res[i]; } outputFile1 << (to_string(files[a][5])).c_str() << (to_string(files[a][6])).c_str() << (to_string(files[a][7])).c_str() << ", " << (to_string(files[b][5])).c_str() << (to_string(files[b][6])).c_str() << (to_string(files[b][7])).c_str() << ", " << tau << ", " << res_now / nElements << "\n"; } else { std::fill_n(res_tmp, nElements - scale*fps, 0.0); for (int i = na_frames; i < nElements - scale*fps; ++i) { for (int j = i; j < i + scale*fps; ++j) { res_tmp[i] += res[j]; if (j == i + scale*fps - 1 && res_max[i] < res_tmp[i]) { res_max[i] = res_tmp[i]; tau_max[i] = tau; } } } } } if (scale != -1) { for (int t = 0; t < nElements - scale*fps; ++t) { outputFile1 << t + scale*fps/2 << ", " << (to_string(files[a][5])).c_str() << (to_string(files[a][6])).c_str() << (to_string(files[a][7])).c_str() << ", " << (to_string(files[b][5])).c_str() << (to_string(files[b][6])).c_str() << (to_string(files[b][7])).c_str() << ", " << tau_max[t] << ", " << res_max[t] / (scale*fps) << "\n"; } } fclose(pFile_x1); fclose(pFile_x2); fclose(pFile_y1); fclose(pFile_y2); hipFree(d_x1); hipFree(d_y1); hipFree(d_x2); hipFree(d_y2); hipFree(d_res); free(x1); free(y1); free(x2); free(y2); } } } // terminate fclose(pFile_x1); 
fclose(pFile_y1); fclose(pFile_x2); fclose(pFile_y2); return 0; }
aafc11177e300badb577fa4a4859855df3440d5c.cu
// // CrossCorrelation.cu // CrossCorrelation // // Created by Vivek Sridhar on 29/06/17. // Copyright © 2017 Vivek Sridhar. All rights reserved. // #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <algorithm> #include <stdio.h> #include <stdlib.h> #include <dirent.h> template <typename T> std::string to_string(const T& value) { std::stringstream ss; ss << value; return ss.str(); } long factorial(long val) { std::cout << val << "\n"; long result = 1; for (long i = 1; i <= val; ++i) { result *= i; } return result; } long combination(long n, long r) { return (factorial(n)) / ((factorial(n - r)) * factorial(r)); } __global__ void kernel(float *x1, float *y1, float *x2, float *y2, float *res, int tau, int na_frames, long nElements) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (tau < 0) { if (index >= -tau+na_frames) { res[index] = x1[index] * x2[index + tau] + y1[index] * y2[index + tau]; } else res[index] = 0.0; } else { if (index < nElements - tau) { res[index] = x1[index] * x2[index + tau] + y1[index] * y2[index + tau]; } else res[index] = 0.0; } } // total measurement points in the time series is defined by nElements #define M 1024 // number of threads per block #define fps 10 // frames per second of input video (used to determine tau) #define time 5 // time in seconds within which time delayed cross correlation is calculated (tau ranges from -time*fps to time*fps) #define n_inds 10 int na_frames = 0; // number of frames in the start with nas int scale = 1; // time window for analysis in seconds; varying this allows us to examine dynamics of leadership across varying timescales; setting scale larger than the entire time series or -1 gives aggregated statistics across the entire duration (otherwise, timescale of analysis is scale*fps) //const int pairs = combination(n_inds, 2); const bool aggregate = false; // this boolean decides whether you output a dynamic time variable leadership network or a static time aggregated network; scale is set to -1 if aggregate is true std::ofstream outputFile1; int main () { DIR *dir; FILE *pFile_x1; FILE *pFile_y1; FILE *pFile_x2; FILE *pFile_y2; long lSize; long nElements; struct dirent *file; float *d_x1, *d_y1, *d_x2, *d_y2, *d_res; float *x1, *y1, *x2, *y2, *res; size_t result_x1, result_y1, result_x2, result_y2; if (aggregate) scale = -1; std::vector<std::string> files; std::string directory = "/home/user/Documents/Vivek/cuda/DirectionalCorrelation/Data/Input/pigeons/10_birds/ffA3/cross_correlation/"; dir = opendir(directory.c_str()); int idx = 0; while ((file = readdir(dir)) != NULL) { if (file->d_name[0] == 'd') { files.push_back(file->d_name); ++idx; } } std::sort(files.begin(), files.begin()+2*n_inds); closedir(dir); // Open output file std::string filename_cc; if (scale != -1) filename_cc = "cross_correlation_01.csv"; else filename_cc = "avgd_cross_correlation.csv"; outputFile1.open(filename_cc.c_str()); // Output file headers if (aggregate || scale == -1) outputFile1 << "id1"<< ", " << "id2" << ", " << "tau" << ", " << "cc" << "\n"; else outputFile1 << "time" << ", " << "id1" << ", " << "id2" << ", " << "tau" << ", " << "cc" << "\n"; //files = {"dir_x00", "dir_x01", "dir_y00", "dir_y01"} for (int a = 0; a < n_inds; ++a) { for (int b = 0; b < n_inds; ++b) { if (b != a) { pFile_x1 = fopen ((directory + files[a]).c_str(), "rb"); pFile_y1 = fopen ((directory + files[a+n_inds]).c_str(), "rb"); pFile_x2 = fopen ((directory + files[b]).c_str(), "rb"); pFile_y2 = fopen ((directory + files[b+n_inds]).c_str(), 
"rb"); if (pFile_x1==NULL || pFile_y1==NULL || pFile_x2==NULL || pFile_y2==NULL) { fputs ("File error",stderr); exit (1); } // obtain file size fseek (pFile_x1 , 0 , SEEK_END); lSize = ftell (pFile_x1); rewind (pFile_x1); nElements = lSize / sizeof(float); // allocate memory to contain the whole file // device memory cudaMalloc((void **) &d_x1, lSize); cudaMalloc((void **) &d_y1, lSize); cudaMalloc((void **) &d_x2, lSize); cudaMalloc((void **) &d_y2, lSize); cudaMalloc((void **) &d_res, lSize); // host memory x1 = (float*) malloc(lSize); y1 = (float*) malloc(lSize); x2 = (float*) malloc(lSize); y2 = (float*) malloc(lSize); res = (float*) malloc(lSize); if (x1 == NULL || y1==NULL || x2==NULL || y2==NULL || res==NULL) { fputs ("Memory error",stderr); exit (2); } // copy the file into the respective float pointers result_x1 = fread (x1, sizeof(float), nElements, pFile_x1); result_y1 = fread (y1, sizeof(float), nElements, pFile_y1); result_x2 = fread (x2, sizeof(float), nElements, pFile_x2); result_y2 = fread (y2, sizeof(float), nElements, pFile_y2); if (result_x1 != nElements || result_y1 != nElements || result_x2 != nElements || result_y2 != nElements) { fputs ("Reading error",stderr); exit (3); } // the whole files are now loaded in the memory x1, y1, x2 and y2 respectively cudaMemcpy(d_x1, x1, lSize, cudaMemcpyHostToDevice); cudaMemcpy(d_y1, y1, lSize, cudaMemcpyHostToDevice); cudaMemcpy(d_x2, x2, lSize, cudaMemcpyHostToDevice); cudaMemcpy(d_y2, y2, lSize, cudaMemcpyHostToDevice); if (scale*fps > nElements) scale = -1; int tau_max[nElements - scale*fps]; float res_tmp[nElements - scale*fps]; float res_max[nElements - scale*fps]; std::fill_n(tau_max, nElements - scale*fps, 0); std::fill_n(res_tmp, nElements - scale*fps, 0.0); std::fill_n(res_max, nElements - scale*fps, -1.0); for (int tau = -time*fps; tau <= time*fps; ++tau) { kernel<<<(nElements + M - 1) / M, M>>>(d_x1, d_y1, d_x2, d_y2, d_res, tau, na_frames, nElements); cudaMemcpy(res, d_res, lSize, cudaMemcpyDeviceToHost); if (scale == -1) { float res_now = -1.0f; for (int i = na_frames; i < nElements; ++i) { if (res[i] != res[i]) std::cout << x1[i] << " " << y1[i] << " " << i << " " << tau << "\n"; // if nans res_now += res[i]; } outputFile1 << (to_string(files[a][5])).c_str() << (to_string(files[a][6])).c_str() << (to_string(files[a][7])).c_str() << ", " << (to_string(files[b][5])).c_str() << (to_string(files[b][6])).c_str() << (to_string(files[b][7])).c_str() << ", " << tau << ", " << res_now / nElements << "\n"; } else { std::fill_n(res_tmp, nElements - scale*fps, 0.0); for (int i = na_frames; i < nElements - scale*fps; ++i) { for (int j = i; j < i + scale*fps; ++j) { res_tmp[i] += res[j]; if (j == i + scale*fps - 1 && res_max[i] < res_tmp[i]) { res_max[i] = res_tmp[i]; tau_max[i] = tau; } } } } } if (scale != -1) { for (int t = 0; t < nElements - scale*fps; ++t) { outputFile1 << t + scale*fps/2 << ", " << (to_string(files[a][5])).c_str() << (to_string(files[a][6])).c_str() << (to_string(files[a][7])).c_str() << ", " << (to_string(files[b][5])).c_str() << (to_string(files[b][6])).c_str() << (to_string(files[b][7])).c_str() << ", " << tau_max[t] << ", " << res_max[t] / (scale*fps) << "\n"; } } fclose(pFile_x1); fclose(pFile_x2); fclose(pFile_y1); fclose(pFile_y2); cudaFree(d_x1); cudaFree(d_y1); cudaFree(d_x2); cudaFree(d_y2); cudaFree(d_res); free(x1); free(y1); free(x2); free(y2); } } } // terminate fclose(pFile_x1); fclose(pFile_y1); fclose(pFile_x2); fclose(pFile_y2); return 0; }
e1f74af34272824b4042327a125f332d6f22c669.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <omp.h> #include "node.h" #include "point.h" #include "color.h" #include "global.h" #include "settings.h" #include "real.h" #include "raycreator.h" #define NODE_CACHE_SIZE 128 //#define TEXTURE //Preallocating for up to 4 devices, a little wasteful if fewer are used... Node* nodes_d[4]; Point* points_d[4]; Display_color* image_d[4]; Ray* rays_d[4]; Color* colors_d[4]; unsigned int * stack_d[4]; __device__ __constant__ real_t STEP_SIZE_D; __device__ __constant__ real_t INTERPOLATION_RADIUS_D; __device__ __constant__ real_t X_RADIUS; __device__ __constant__ real_t Y_RADIUS; __device__ __constant__ real_t Z_RADIUS; __device__ __constant__ real_t STEP_FACTOR_D; __device__ __constant__ real_t STEP_LIMIT_D; __device__ __constant__ real_t MAX_INTENSITY_D; __device__ __constant__ Ranges top_ranges; __device__ __constant__ Node root_node; __device__ __constant__ real_t aniso_x; __device__ __constant__ real_t aniso_y; __device__ __constant__ real_t aniso_z; //From raycreator __device__ __constant__ Coord up; __device__ __constant__ Coord eye; __device__ __constant__ Coord screen_center_d; __device__ __constant__ Coord right; __device__ __constant__ Coord top_d; __device__ __constant__ Coord bottom_d; __device__ __constant__ int RESOLUTION_D; #ifdef TEXTURE texture<float4, hipTextureType1D, hipReadModeElementType> pointTexture; #endif inline __device__ Coord make_Coord(real_t x, real_t y, real_t z){ Coord c = {x,y,z}; return c; } inline __device__ Coord operator+(Coord a, Coord b){ return make_Coord(a.x + b.x, a.y + b.y, a.z + b.z); } inline __device__ Coord operator-(Coord a, Coord b){ return make_Coord(a.x - b.x, a.y - b.y, a.z - b.z); } inline __device__ Coord operator*(Coord a, real_t s){ return make_Coord(a.x * s, a.y * s, a.z * s); } inline __device__ Coord operator/(Coord a, Coord b){ return make_Coord(a.x / b.x, a.y / b.y, a.z / b.z); } static __inline__ __device__ Coord fminf(Coord a, Coord b){ return make_Coord(fminf(a.x,b.x), fminf(a.y,b.y), fminf(a.z,b.z)); } static __inline__ __device__ Coord fmaxf(Coord a, Coord b){ return make_Coord(fmaxf(a.x,b.x), fmaxf(a.y,b.y), fmaxf(a.z,b.z)); } __device__ Ray normalize_ray(Ray r){ Ray t = r; real_t d = sqrt(r.dir.x*r.dir.x + r.dir.y*r.dir.y + r.dir.z*r.dir.z); t.dir.x = t.dir.x / d; t.dir.y = t.dir.y / d; t.dir.z = t.dir.z / d; return t; } __device__ Color blend_d(Color c, real_t intensity, Color* colors){ Color new_color, output; new_color = colors[(int)((intensity/MAX_INTENSITY_D)*10000)]; output.r = c.r + (1-c.a)*new_color.a*new_color.r; output.g = c.g + (1-c.a)*new_color.a*new_color.g; output.b = c.b + (1-c.a)*new_color.a*new_color.b; output.a = c.a + (1-c.a)*new_color.a; return output; } __device__ Display_color to_display_color_d(Color c){ Display_color dc; if(c.r > 1.0f){ c.r = 1.0; } if(c.b > 1.0f){ c.b = 1.0; } if(c.g > 1.0f){ c.g = 1.0; } dc.a = (unsigned char)(c.a*255); dc.r = (unsigned char)(c.r*255); dc.g = (unsigned char)(c.g*255); dc.b = (unsigned char)(c.b*255); return dc; } __device__ int get_index_d(Coord pos, Ranges r){ char a = pos.x < r.xmin + (r.xmax - r.xmin)/2.0f; char b = pos.y < r.ymin + (r.ymax - r.ymin)/2.0f; char c = pos.z < r.zmin + (r.zmax - r.zmin)/2.0f; return a*4 + b*2 + c*1; } __device__ Ranges get_ranges_for_index_d(Ranges r, int index){ Ranges nr = r; if(index == 0){ nr.xmin += (r.xmax -r.xmin)/2.0f; nr.ymin += (r.ymax -r.ymin)/2.0f; nr.zmin += (r.zmax -r.zmin)/2.0f; } if(index == 1){ nr.xmin += 
(r.xmax -r.xmin)/2.0f; nr.ymin += (r.ymax -r.ymin)/2.0f; nr.zmax -= (r.zmax -r.zmin)/2.0f; } if(index == 2){ nr.xmin += (r.xmax -r.xmin)/2.0f; nr.ymax -= (r.ymax -r.ymin)/2.0f; nr.zmin += (r.zmax -r.zmin)/2.0f; } if(index == 3){ nr.xmin += (r.xmax -r.xmin)/2.0f; nr.ymax -= (r.ymax -r.ymin)/2.0f; nr.zmax -= (r.zmax -r.zmin)/2.0f; } if(index == 4){ nr.xmax -= (r.xmax -r.xmin)/2.0f; nr.ymin += (r.ymax -r.ymin)/2.0f; nr.zmin += (r.zmax -r.zmin)/2.0f; } if(index == 5){ nr.xmax -= (r.xmax -r.xmin)/2.0f; nr.ymin += (r.ymax -r.ymin)/2.0f; nr.zmax -= (r.zmax -r.zmin)/2.0f; } if(index == 6){ nr.xmax -= (r.xmax -r.xmin)/2.0f; nr.ymax -= (r.ymax -r.ymin)/2.0f; nr.zmin += (r.zmax -r.zmin)/2.0f; } if(index == 7){ nr.xmax -= (r.xmax -r.xmin)/2.0f; nr.ymax -= (r.ymax -r.ymin)/2.0f; nr.zmax -= (r.zmax -r.zmin)/2.0f; } return nr; } __device__ real_t interpolate(Node n, Coord pos, Point* points){ real_t weight = 0; real_t intensity = 0; for(int c = 0; c < n.num_children; c++){ real_t dx = pos.x - points[(long int)n.pointer + c].x; real_t dy = pos.y - points[(long int)n.pointer + c].y; real_t dz = pos.z - points[(long int)n.pointer + c].z; real_t distance = sqrt(dx*dx + dy*dy + dz*dz); if(distance < INTERPOLATION_RADIUS_D){ intensity += (1/distance)*points[(long int)n.pointer + c].intensity; weight += (1/distance); } } if(intensity <= 0){ return 0; } real_t ratio = intensity/weight; if(ratio <= 1){ return 0; } return log(ratio); } inline __device__ unsigned int get_covered_subnodes(Coord pos, Node n){ // real_t X_RADIUS = INTERPOLATION_RADIUS_D; // real_t Y_RADIUS = INTERPOLATION_RADIUS_D; // real_t Z_RADIUS = INTERPOLATION_RADIUS_D; char xmin = pos.x - X_RADIUS < n.x; char ymin = pos.y - Y_RADIUS < n.y; char zmin = pos.z - Z_RADIUS < n.z; char xmax = pos.x + X_RADIUS > n.x; char ymax = pos.y + Y_RADIUS > n.y; char zmax = pos.z + Z_RADIUS > n.z; unsigned char b = 0; b = b | (unsigned int)(128 * (xmin && ymin && zmin)); b = b | (unsigned int)(64 * (xmin && ymin && zmax)); b = b | (unsigned int)(32 * (xmin && ymax && zmin)); b = b | (unsigned int)(16 * (xmin && ymax && zmax)); b = b | (unsigned int)(8 * (xmax && ymin && zmin)); b = b | (unsigned int)(4 * (xmax && ymin && zmax)); b = b | (unsigned int)(2 * (xmax && ymax && zmin)); b = b | (unsigned int)(1 * (xmax && ymax && zmax)); return b; } #ifdef TEXTURE __device__ real_t get_intensity_for_pos_full(Coord pos, Node* nodes, Node* node_cache, unsigned int* stack, int base){ #else __device__ real_t get_intensity_for_pos_full(Coord pos, const __restrict__ Point* points, Node* nodes, Node* node_cache, unsigned int* stack, int base){ #endif real_t intensity = 0; real_t weight = 0; short tos = 0; unsigned char inc = 1; unsigned int current_node = 0; unsigned int b = get_covered_subnodes(pos, root_node); stack[base + tos] = current_node | (b << 24); tos += inc; while(tos > 0){ current_node = stack[base + tos -inc] & 0x00ffffff; b = (stack[base + tos -inc] & 0xff000000) >> 24; if(b > 255){ return 0; } unsigned int t = 1; while((b & t) == 0){ t = t << 1; } current_node += (unsigned int)log2((float)t); b = b ^ t; if(b == 0){ tos -= inc; } else{ stack[base + tos-inc] = (stack[base + tos -inc] & 0x00ffffff) | (b << 24); } Node n; if(current_node < NODE_CACHE_SIZE){ n = node_cache[current_node]; } else{ n = nodes[current_node]; } if(n.is_leaf){ for(short c = 0; c < n.num_children; c++){ #ifdef TEXTURE float4 point = tex1Dfetch(pointTexture, (long int)n.pointer + c); #else //float4 p = __ldg((const float4*)(&points[(long int)n.pointer + c])); //Point point; point.x = p.x; 
point.y = p.y; point.z = p.z; point.intensity = p.w; Point point = points[(long int)n.pointer + c]; #endif real_t dx = pos.x - point.x; real_t dy = pos.y - point.y; real_t dz = pos.z - point.z; real_t distance = sqrt(dx*dx*aniso_x + dy*dy*aniso_y + dz*dz*aniso_z); if(distance < INTERPOLATION_RADIUS_D){ #ifdef TEXTURE intensity += (1/distance)*point.w; #else intensity += (1/distance)*point.intensity; #endif weight += (1/distance); } } } else{ b = get_covered_subnodes(pos, nodes[current_node]); unsigned int temp = (unsigned int)nodes[current_node].pointer; stack[base + tos] = (temp) | (b << 24); tos += inc; } } if(intensity <= 0){ return 0; } real_t ratio = intensity/weight; if(ratio <= 1){ return 0; } return log(ratio); } __device__ real_t get_intensity_for_pos_d(Coord pos, Node* nodes, Point* points){ Node n = root_node; Ranges ranges = top_ranges; real_t d = 0; while(n.is_leaf == 0){ d++; int index = get_index_d(pos, ranges); ranges = get_ranges_for_index_d(ranges, index); n = nodes[((long int)n.pointer) + index]; } return interpolate(n, pos, points); } __device__ void create_rays(int index, Ray* rays, real_t pixel_width, int res){ short c = (index % res) - (res/2); short d = (index / res) - (res/2); Coord start = screen_center_d + up * (d*pixel_width) + right * (c*pixel_width); Coord dir = start - eye; Coord t1 = (top_d - start)/dir; Coord t2 = (bottom_d - start)/dir; Coord tmin = fminf(t1, t2); Coord tmax = fmaxf(t1, t2); real_t tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); real_t tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); if(tfar <= tnear){ rays[index].distance = 0; return; } Coord far = start + dir * tfar; tnear = fmax(0, tnear); start = start + dir * tnear; Coord diff = start - far; rays[index].distance = sqrt(diff.x*diff.x + diff.y*diff.y + diff.z*diff.z); rays[index].start = start; rays[index].dir = dir; rays[index].color.r = -1; rays[index].color.g = -1; rays[index].color.b = -1; rays[index].color.a = -1; } #ifdef TEXTURE __global__ void kernel(Node* nodes, Display_color* image, Ray* rays, Color* colors, unsigned int* stack, int stack_size, real_t pixel_width, int offset, int multigpu){ #else __global__ void kernel(const __restrict__ Point* points, Node* nodes, Display_color* image, Ray* rays, Color* colors, unsigned int* stack, int stack_size, real_t pixel_width, int offset, int multigpu){ #endif /* int virtualBlockId = blockIdx.x * 10; while(virtualBlockId >= gridDim.x){ virtualBlockId = virtualBlockId - (gridDim.x -1); } */ int i = (blockIdx.x * blockDim.x + threadIdx.x) + offset; if(multigpu == 0){ create_rays(i, rays, pixel_width, RESOLUTION_D); } __shared__ Node node_cache[NODE_CACHE_SIZE]; if(threadIdx.x < NODE_CACHE_SIZE){ node_cache[threadIdx.x] = nodes[threadIdx.x]; } __syncthreads(); if(rays[i].distance <= 0){ Display_color b = {0,0,0,0}; image[i -offset] = b; return; } rays[i] = normalize_ray(rays[i]); Coord pos = rays[i].start; real_t acc_distance = 0; Color output = {0.0,0.0,0.0,0.0}; real_t local_step_size = STEP_SIZE_D; while(acc_distance < rays[i].distance){ #ifdef TEXTURE real_t intensity = get_intensity_for_pos_full(pos, nodes, node_cache, stack, i*stack_size); #else real_t intensity = get_intensity_for_pos_full(pos, points, nodes, node_cache, stack, i*stack_size); #endif if(intensity > 0 && local_step_size > STEP_SIZE_D){ acc_distance -= local_step_size; pos = pos + (rays[i].dir*(-1*local_step_size)); local_step_size = STEP_SIZE_D; } else if(intensity == 0 && (local_step_size * STEP_FACTOR_D) <= STEP_LIMIT_D){ local_step_size *= 
STEP_FACTOR_D; } else{ output = blend_d(output, intensity, colors); if(output.a > 0.99f){ break; } } pos = pos + (rays[i].dir*local_step_size); acc_distance += local_step_size; } image[i - offset] = to_display_color_d(output); } int getPowerfullness(hipDeviceProp_t* p){ int cudaCores = 0; if(p->major == 1){ cudaCores = 8; } if(p->major == 2 && p->minor == 0){ cudaCores = 32; } if(p->major == 2 && p->minor == 1){ cudaCores = 48; } if(p->major == 3){ cudaCores = 192; } cudaCores = cudaCores*p->multiProcessorCount; return cudaCores*p->clockRate; } float logit(float x){ float y = log(x/(1-x)); y = y + 5; y = y / 10; return y; } void printError(hipError_t error, char* message){ if(error != hipSuccess){ printf("%s\n", hipGetErrorString(error)); printf("%s\n", message); } } extern "C" void copy_to_devices_invariant(int deviceCount, Tree* tree, Raycreator* rc){ for(int device = 0; device < deviceCount; device++){ hipSetDevice(device); hipMalloc((void**)&nodes_d[device], sizeof(Node)*tree->node_list->current_pos); hipMalloc((void**)&points_d[device], sizeof(Point)*tree->total_num_points); hipMalloc((void**)&image_d[device], sizeof(Display_color)*RESOLUTION*RESOLUTION); hipMalloc((void**)&rays_d[device], sizeof(Ray)*RESOLUTION*RESOLUTION); hipMalloc((void**)&stack_d[device], sizeof(unsigned int)*RESOLUTION*RESOLUTION*10);//tree->max_depth + 2); hipMalloc((void**)&colors_d[device], sizeof(Color)*transfer_overlay->color_table_size); printError(hipGetLastError(), "Problem mallocing"); hipMemcpy(nodes_d[device], tree->node_list->nodes, sizeof(Node)*tree->node_list->current_pos, hipMemcpyHostToDevice); hipMemcpy(points_d[device], tree->point_list->points, sizeof(Point)*tree->total_num_points, hipMemcpyHostToDevice); hipMemcpy(colors_d[device], transfer_overlay->color_table, sizeof(Color)*transfer_overlay->color_table_size, hipMemcpyHostToDevice); printError(hipGetLastError(), "Memcpy"); #ifdef TEXTURE hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32,32,32,32, hipChannelFormatKindFloat); pointTexture.filterMode = hipFilterModePoint; pointTexture.normalized = false; hipBindTexture(NULL, pointTexture, points_d[device], channelDesc, sizeof(Point)*tree->total_num_points); printError(hipGetLastError(), "texture"); #endif hipMemcpyToSymbol(top_ranges, &tree->ranges, sizeof(Ranges)); hipMemcpyToSymbol(root_node, &tree->root, sizeof(Node)); hipMemcpyToSymbol(MAX_INTENSITY_D, &MAX_INTENSITY, sizeof(real_t)); hipMemcpyToSymbol(RESOLUTION_D, &RESOLUTION, sizeof(int)); printError(hipGetLastError(), "memcpy to symbol"); } } void copy_to_devices(int deviceCount, Tree* tree, Raycreator* rc){ Coord top = {rc->ranges->xmax, rc->ranges->ymax, rc->ranges->zmax}; Coord bottom = {rc->ranges->xmin, rc->ranges->ymin, rc->ranges->zmin}; real_t x_radius = INTERPOLATION_RADIUS; real_t y_radius = INTERPOLATION_RADIUS; real_t z_radius = INTERPOLATION_RADIUS; if(ANISOTROPIC){ x_radius *= (1/sqrt(ANISO_MATRIX[0])); y_radius *= (1/sqrt(ANISO_MATRIX[4])); z_radius *= (1/sqrt(ANISO_MATRIX[8])); } for(int device = 0; device < deviceCount; device++){ hipSetDevice(device); hipMemcpy(rays_d[device], rays, sizeof(Ray)*RESOLUTION*RESOLUTION, hipMemcpyHostToDevice); printError(hipGetLastError(), "Memcpy"); hipMemcpyToSymbol(STEP_SIZE_D, &STEP_SIZE, sizeof(real_t)); hipMemcpyToSymbol(STEP_FACTOR_D, &STEP_FACTOR, sizeof(real_t)); hipMemcpyToSymbol(STEP_LIMIT_D, &STEP_LIMIT, sizeof(real_t)); hipMemcpyToSymbol(INTERPOLATION_RADIUS_D, &INTERPOLATION_RADIUS, sizeof(real_t)); hipMemcpyToSymbol(X_RADIUS, &x_radius, sizeof(real_t)); 
hipMemcpyToSymbol(Y_RADIUS, &y_radius, sizeof(real_t)); hipMemcpyToSymbol(Z_RADIUS, &z_radius, sizeof(real_t)); hipMemcpyToSymbol(aniso_x, &ANISO_MATRIX[0], sizeof(real_t)); hipMemcpyToSymbol(aniso_y, &ANISO_MATRIX[4], sizeof(real_t)); hipMemcpyToSymbol(aniso_z, &ANISO_MATRIX[8], sizeof(real_t)); hipMemcpyToSymbol(up, &rc->up, sizeof(Coord)); hipMemcpyToSymbol(top_d, &top, sizeof(Coord)); hipMemcpyToSymbol(bottom_d, &bottom, sizeof(Coord)); hipMemcpyToSymbol(eye, &rc->eye, sizeof(Coord)); hipMemcpyToSymbol(right, &rc->right, sizeof(Coord)); hipMemcpyToSymbol(screen_center_d, &rc->screen_center, sizeof(Coord)); printError(hipGetLastError(), "memcpy to symbol"); } } float* get_work_fractions(int deviceCount, int useFiftyFifty){ float* workFractions = (float*) malloc(sizeof(float) * deviceCount); if(stored_work_fractions[0] != 0){ for (int d = 0; d < deviceCount; ++d) { workFractions[d] = stored_work_fractions[d]; } } else{ if(useFiftyFifty != 0){ workFractions[0] = 0.5; workFractions[1] = 0.5; } float sum = 0; for(int d = 0; d < deviceCount; d++){ hipDeviceProp_t p; hipGetDeviceProperties(&p, d); workFractions[d] = getPowerfullness(&p); sum += workFractions[d]; } for(int d = 0; d < deviceCount; d++){ workFractions[d] = workFractions[d]/sum; } } return workFractions; } void get_work_fractions_logit(float* workFractions){ workFractions[0] = logit(workFractions[0]); workFractions[1] = 1- workFractions[0]; } void get_work_fractions_ray_length(float* workFractions){ real_t totalLength = 0; for(int i = 0; i < RESOLUTION*RESOLUTION; i++){ totalLength += rays[i].distance; } int i = 0; real_t partial_length = 0; real_t target_partial_length = workFractions[0] * totalLength; while(partial_length < target_partial_length){ partial_length += rays[i].distance; i++; } workFractions[0] = (real_t)i/(real_t)(RESOLUTION*RESOLUTION); workFractions[1] = 1 - workFractions[0]; } int* get_blocks_pr_device(int deviceCount, float* originalWorkFractions, int nBlocks, int useLogit, int useRayLength){ float workFractions[deviceCount]; for (int i = 0; i < deviceCount; ++i) { workFractions[i] = originalWorkFractions[i]; } if(useLogit != 0){ get_work_fractions_logit(workFractions); } if(useRayLength != 0){ get_work_fractions_ray_length(workFractions); } int* blocksPrDevice = (int*)malloc(sizeof(int) * deviceCount); for (int d = 0; d < deviceCount; ++d) { blocksPrDevice[d] = (int)(workFractions[d] * nBlocks); printf("Work for device %d: %f, %d/%d\n", d, workFractions[d], blocksPrDevice[d], nBlocks); } int sum = 0; do{ sum = 0; for (int d = 0; d < deviceCount; ++d) { sum += blocksPrDevice[d]; } if(sum < nBlocks){ blocksPrDevice[0]++; } else if(sum > nBlocks){ blocksPrDevice[0]--; } }while(sum != nBlocks); return blocksPrDevice; } extern "C" void launch_ray_trace_kernel(void* v, Raycreator* rc){ Tree* tree = (Tree*)v; int deviceCount = 0; float* workFractions; if(MULTIGPU == 1){ hipGetDeviceCount(&deviceCount); int useFiftyFifty = 0; workFractions = get_work_fractions(deviceCount, useFiftyFifty); } else{ deviceCount = 1; workFractions = (float*)malloc(sizeof(float)*1); workFractions[0] = 1.0; } printf("Using %d devices\n", deviceCount); copy_to_devices(deviceCount, tree, rc); int nThreads=128; int totalThreads = RESOLUTION*RESOLUTION; int nBlocks = totalThreads/nThreads; nBlocks += ((totalThreads%nThreads)>0)?1:0; int* blocksPrDevice; if(MULTIGPU == 1){ int useLogit = 0; int useRayLength = 1; blocksPrDevice = get_blocks_pr_device(deviceCount, workFractions, nBlocks, useLogit, useRayLength); } else{ blocksPrDevice = 
(int*)malloc(sizeof(int)*1); blocksPrDevice[0] = nBlocks; } hipEvent_t startEvents[deviceCount]; hipEvent_t endEvents[deviceCount]; hipStream_t streams[deviceCount]; for(int device = 0; device < deviceCount; device++){ int start = 0; if(device > 0){ start = blocksPrDevice[device -1] * nThreads; } hipSetDevice(device); hipEventCreate(&startEvents[device]); hipEventCreate(&endEvents[device]); hipStreamCreate(&streams[device]); #ifdef TEXTURE hipEventRecord(startEvents[device]); hipLaunchKernelGGL(( kernel), dim3(blocksPrDevice[device]), dim3(nThreads),0, streams[device], nodes_d[device], image_d[device], rays_d[device], colors_d[device], stack_d[device], 10, rc->pixel_width, start, MULTIGPU);//tree->max_depth + 2); hipEventRecord(endEvents[device]); #else hipEventRecord(startEvents[device]); printf("start %d blocks: %d\n", start, blocksPrDevice[device]); hipLaunchKernelGGL(( kernel), dim3(blocksPrDevice[device]), dim3(nThreads),0, streams[device], points_d[device], nodes_d[device], image_d[device], rays_d[device], colors_d[device], stack_d[device], 10, rc->pixel_width, start, MULTIGPU);//tree->max_depth + 2); hipEventRecord(endEvents[device]); #endif } float r[deviceCount]; for (int d = 0; d < deviceCount; ++d) { hipEventSynchronize(endEvents[d]); float time; hipEventElapsedTime(&time,startEvents[d], endEvents[d]); r[d] = time/workFractions[d]; printf("Time for device %d: %f\n", d, time); } float total = r[0] + r[1]; stored_work_fractions[0] = r[1]/total; stored_work_fractions[1] = r[0]/total; int previousStart = 0; for(int device = 0; device < deviceCount; device++){ hipSetDevice(device); int images_index = log2((real_t)RESOLUTION) - 4; int offset = previousStart; previousStart = offset + blocksPrDevice[device]*nThreads; hipMemcpy(&images[images_index][offset], image_d[device], sizeof(Display_color)*blocksPrDevice[device]*nThreads, hipMemcpyDeviceToHost); } //freeAndReset(deviceCount); } extern "C" void freeAndReset(){ int deviceCount = 0; if(MULTIGPU == 1){ hipGetDeviceCount(&deviceCount); } else{ deviceCount = 1; } for(int device = 0; device < deviceCount; device++){ hipSetDevice(device); hipFree(nodes_d[device]); hipFree(points_d[device]); hipFree(image_d[device]); hipFree(rays_d[device]); hipFree(colors_d[device]); hipFree(stack_d[device]); hipDeviceReset(); printError(hipGetLastError(), "Free"); } } extern "C" void initDevice(Tree* tree, Raycreator* rc){ int deviceCount = 0; if(MULTIGPU == 1){ hipGetDeviceCount(&deviceCount); } else{ deviceCount = 1; } copy_to_devices_invariant(deviceCount, tree, rc); } extern "C" long int get_memory_size(){ hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, 0); return devProp.totalGlobalMem; }
e1f74af34272824b4042327a125f332d6f22c669.cu
#include <stdio.h> #include <cuda.h> #include <omp.h> #include "node.h" #include "point.h" #include "color.h" #include "global.h" #include "settings.h" #include "real.h" #include "raycreator.h" #define NODE_CACHE_SIZE 128 //#define TEXTURE //Preallocating for up to 4 devices, a little wasteful if fewer are used... Node* nodes_d[4]; Point* points_d[4]; Display_color* image_d[4]; Ray* rays_d[4]; Color* colors_d[4]; unsigned int * stack_d[4]; __device__ __constant__ real_t STEP_SIZE_D; __device__ __constant__ real_t INTERPOLATION_RADIUS_D; __device__ __constant__ real_t X_RADIUS; __device__ __constant__ real_t Y_RADIUS; __device__ __constant__ real_t Z_RADIUS; __device__ __constant__ real_t STEP_FACTOR_D; __device__ __constant__ real_t STEP_LIMIT_D; __device__ __constant__ real_t MAX_INTENSITY_D; __device__ __constant__ Ranges top_ranges; __device__ __constant__ Node root_node; __device__ __constant__ real_t aniso_x; __device__ __constant__ real_t aniso_y; __device__ __constant__ real_t aniso_z; //From raycreator __device__ __constant__ Coord up; __device__ __constant__ Coord eye; __device__ __constant__ Coord screen_center_d; __device__ __constant__ Coord right; __device__ __constant__ Coord top_d; __device__ __constant__ Coord bottom_d; __device__ __constant__ int RESOLUTION_D; #ifdef TEXTURE texture<float4, cudaTextureType1D, cudaReadModeElementType> pointTexture; #endif inline __device__ Coord make_Coord(real_t x, real_t y, real_t z){ Coord c = {x,y,z}; return c; } inline __device__ Coord operator+(Coord a, Coord b){ return make_Coord(a.x + b.x, a.y + b.y, a.z + b.z); } inline __device__ Coord operator-(Coord a, Coord b){ return make_Coord(a.x - b.x, a.y - b.y, a.z - b.z); } inline __device__ Coord operator*(Coord a, real_t s){ return make_Coord(a.x * s, a.y * s, a.z * s); } inline __device__ Coord operator/(Coord a, Coord b){ return make_Coord(a.x / b.x, a.y / b.y, a.z / b.z); } static __inline__ __device__ Coord fminf(Coord a, Coord b){ return make_Coord(fminf(a.x,b.x), fminf(a.y,b.y), fminf(a.z,b.z)); } static __inline__ __device__ Coord fmaxf(Coord a, Coord b){ return make_Coord(fmaxf(a.x,b.x), fmaxf(a.y,b.y), fmaxf(a.z,b.z)); } __device__ Ray normalize_ray(Ray r){ Ray t = r; real_t d = sqrt(r.dir.x*r.dir.x + r.dir.y*r.dir.y + r.dir.z*r.dir.z); t.dir.x = t.dir.x / d; t.dir.y = t.dir.y / d; t.dir.z = t.dir.z / d; return t; } __device__ Color blend_d(Color c, real_t intensity, Color* colors){ Color new_color, output; new_color = colors[(int)((intensity/MAX_INTENSITY_D)*10000)]; output.r = c.r + (1-c.a)*new_color.a*new_color.r; output.g = c.g + (1-c.a)*new_color.a*new_color.g; output.b = c.b + (1-c.a)*new_color.a*new_color.b; output.a = c.a + (1-c.a)*new_color.a; return output; } __device__ Display_color to_display_color_d(Color c){ Display_color dc; if(c.r > 1.0f){ c.r = 1.0; } if(c.b > 1.0f){ c.b = 1.0; } if(c.g > 1.0f){ c.g = 1.0; } dc.a = (unsigned char)(c.a*255); dc.r = (unsigned char)(c.r*255); dc.g = (unsigned char)(c.g*255); dc.b = (unsigned char)(c.b*255); return dc; } __device__ int get_index_d(Coord pos, Ranges r){ char a = pos.x < r.xmin + (r.xmax - r.xmin)/2.0f; char b = pos.y < r.ymin + (r.ymax - r.ymin)/2.0f; char c = pos.z < r.zmin + (r.zmax - r.zmin)/2.0f; return a*4 + b*2 + c*1; } __device__ Ranges get_ranges_for_index_d(Ranges r, int index){ Ranges nr = r; if(index == 0){ nr.xmin += (r.xmax -r.xmin)/2.0f; nr.ymin += (r.ymax -r.ymin)/2.0f; nr.zmin += (r.zmax -r.zmin)/2.0f; } if(index == 1){ nr.xmin += (r.xmax -r.xmin)/2.0f; nr.ymin += (r.ymax -r.ymin)/2.0f; nr.zmax -= 
(r.zmax -r.zmin)/2.0f; } if(index == 2){ nr.xmin += (r.xmax -r.xmin)/2.0f; nr.ymax -= (r.ymax -r.ymin)/2.0f; nr.zmin += (r.zmax -r.zmin)/2.0f; } if(index == 3){ nr.xmin += (r.xmax -r.xmin)/2.0f; nr.ymax -= (r.ymax -r.ymin)/2.0f; nr.zmax -= (r.zmax -r.zmin)/2.0f; } if(index == 4){ nr.xmax -= (r.xmax -r.xmin)/2.0f; nr.ymin += (r.ymax -r.ymin)/2.0f; nr.zmin += (r.zmax -r.zmin)/2.0f; } if(index == 5){ nr.xmax -= (r.xmax -r.xmin)/2.0f; nr.ymin += (r.ymax -r.ymin)/2.0f; nr.zmax -= (r.zmax -r.zmin)/2.0f; } if(index == 6){ nr.xmax -= (r.xmax -r.xmin)/2.0f; nr.ymax -= (r.ymax -r.ymin)/2.0f; nr.zmin += (r.zmax -r.zmin)/2.0f; } if(index == 7){ nr.xmax -= (r.xmax -r.xmin)/2.0f; nr.ymax -= (r.ymax -r.ymin)/2.0f; nr.zmax -= (r.zmax -r.zmin)/2.0f; } return nr; } __device__ real_t interpolate(Node n, Coord pos, Point* points){ real_t weight = 0; real_t intensity = 0; for(int c = 0; c < n.num_children; c++){ real_t dx = pos.x - points[(long int)n.pointer + c].x; real_t dy = pos.y - points[(long int)n.pointer + c].y; real_t dz = pos.z - points[(long int)n.pointer + c].z; real_t distance = sqrt(dx*dx + dy*dy + dz*dz); if(distance < INTERPOLATION_RADIUS_D){ intensity += (1/distance)*points[(long int)n.pointer + c].intensity; weight += (1/distance); } } if(intensity <= 0){ return 0; } real_t ratio = intensity/weight; if(ratio <= 1){ return 0; } return log(ratio); } inline __device__ unsigned int get_covered_subnodes(Coord pos, Node n){ // real_t X_RADIUS = INTERPOLATION_RADIUS_D; // real_t Y_RADIUS = INTERPOLATION_RADIUS_D; // real_t Z_RADIUS = INTERPOLATION_RADIUS_D; char xmin = pos.x - X_RADIUS < n.x; char ymin = pos.y - Y_RADIUS < n.y; char zmin = pos.z - Z_RADIUS < n.z; char xmax = pos.x + X_RADIUS > n.x; char ymax = pos.y + Y_RADIUS > n.y; char zmax = pos.z + Z_RADIUS > n.z; unsigned char b = 0; b = b | (unsigned int)(128 * (xmin && ymin && zmin)); b = b | (unsigned int)(64 * (xmin && ymin && zmax)); b = b | (unsigned int)(32 * (xmin && ymax && zmin)); b = b | (unsigned int)(16 * (xmin && ymax && zmax)); b = b | (unsigned int)(8 * (xmax && ymin && zmin)); b = b | (unsigned int)(4 * (xmax && ymin && zmax)); b = b | (unsigned int)(2 * (xmax && ymax && zmin)); b = b | (unsigned int)(1 * (xmax && ymax && zmax)); return b; } #ifdef TEXTURE __device__ real_t get_intensity_for_pos_full(Coord pos, Node* nodes, Node* node_cache, unsigned int* stack, int base){ #else __device__ real_t get_intensity_for_pos_full(Coord pos, const __restrict__ Point* points, Node* nodes, Node* node_cache, unsigned int* stack, int base){ #endif real_t intensity = 0; real_t weight = 0; short tos = 0; unsigned char inc = 1; unsigned int current_node = 0; unsigned int b = get_covered_subnodes(pos, root_node); stack[base + tos] = current_node | (b << 24); tos += inc; while(tos > 0){ current_node = stack[base + tos -inc] & 0x00ffffff; b = (stack[base + tos -inc] & 0xff000000) >> 24; if(b > 255){ return 0; } unsigned int t = 1; while((b & t) == 0){ t = t << 1; } current_node += (unsigned int)log2((float)t); b = b ^ t; if(b == 0){ tos -= inc; } else{ stack[base + tos-inc] = (stack[base + tos -inc] & 0x00ffffff) | (b << 24); } Node n; if(current_node < NODE_CACHE_SIZE){ n = node_cache[current_node]; } else{ n = nodes[current_node]; } if(n.is_leaf){ for(short c = 0; c < n.num_children; c++){ #ifdef TEXTURE float4 point = tex1Dfetch(pointTexture, (long int)n.pointer + c); #else //float4 p = __ldg((const float4*)(&points[(long int)n.pointer + c])); //Point point; point.x = p.x; point.y = p.y; point.z = p.z; point.intensity = p.w; Point point = 
points[(long int)n.pointer + c]; #endif real_t dx = pos.x - point.x; real_t dy = pos.y - point.y; real_t dz = pos.z - point.z; real_t distance = sqrt(dx*dx*aniso_x + dy*dy*aniso_y + dz*dz*aniso_z); if(distance < INTERPOLATION_RADIUS_D){ #ifdef TEXTURE intensity += (1/distance)*point.w; #else intensity += (1/distance)*point.intensity; #endif weight += (1/distance); } } } else{ b = get_covered_subnodes(pos, nodes[current_node]); unsigned int temp = (unsigned int)nodes[current_node].pointer; stack[base + tos] = (temp) | (b << 24); tos += inc; } } if(intensity <= 0){ return 0; } real_t ratio = intensity/weight; if(ratio <= 1){ return 0; } return log(ratio); } __device__ real_t get_intensity_for_pos_d(Coord pos, Node* nodes, Point* points){ Node n = root_node; Ranges ranges = top_ranges; real_t d = 0; while(n.is_leaf == 0){ d++; int index = get_index_d(pos, ranges); ranges = get_ranges_for_index_d(ranges, index); n = nodes[((long int)n.pointer) + index]; } return interpolate(n, pos, points); } __device__ void create_rays(int index, Ray* rays, real_t pixel_width, int res){ short c = (index % res) - (res/2); short d = (index / res) - (res/2); Coord start = screen_center_d + up * (d*pixel_width) + right * (c*pixel_width); Coord dir = start - eye; Coord t1 = (top_d - start)/dir; Coord t2 = (bottom_d - start)/dir; Coord tmin = fminf(t1, t2); Coord tmax = fmaxf(t1, t2); real_t tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); real_t tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); if(tfar <= tnear){ rays[index].distance = 0; return; } Coord far = start + dir * tfar; tnear = fmax(0, tnear); start = start + dir * tnear; Coord diff = start - far; rays[index].distance = sqrt(diff.x*diff.x + diff.y*diff.y + diff.z*diff.z); rays[index].start = start; rays[index].dir = dir; rays[index].color.r = -1; rays[index].color.g = -1; rays[index].color.b = -1; rays[index].color.a = -1; } #ifdef TEXTURE __global__ void kernel(Node* nodes, Display_color* image, Ray* rays, Color* colors, unsigned int* stack, int stack_size, real_t pixel_width, int offset, int multigpu){ #else __global__ void kernel(const __restrict__ Point* points, Node* nodes, Display_color* image, Ray* rays, Color* colors, unsigned int* stack, int stack_size, real_t pixel_width, int offset, int multigpu){ #endif /* int virtualBlockId = blockIdx.x * 10; while(virtualBlockId >= gridDim.x){ virtualBlockId = virtualBlockId - (gridDim.x -1); } */ int i = (blockIdx.x * blockDim.x + threadIdx.x) + offset; if(multigpu == 0){ create_rays(i, rays, pixel_width, RESOLUTION_D); } __shared__ Node node_cache[NODE_CACHE_SIZE]; if(threadIdx.x < NODE_CACHE_SIZE){ node_cache[threadIdx.x] = nodes[threadIdx.x]; } __syncthreads(); if(rays[i].distance <= 0){ Display_color b = {0,0,0,0}; image[i -offset] = b; return; } rays[i] = normalize_ray(rays[i]); Coord pos = rays[i].start; real_t acc_distance = 0; Color output = {0.0,0.0,0.0,0.0}; real_t local_step_size = STEP_SIZE_D; while(acc_distance < rays[i].distance){ #ifdef TEXTURE real_t intensity = get_intensity_for_pos_full(pos, nodes, node_cache, stack, i*stack_size); #else real_t intensity = get_intensity_for_pos_full(pos, points, nodes, node_cache, stack, i*stack_size); #endif if(intensity > 0 && local_step_size > STEP_SIZE_D){ acc_distance -= local_step_size; pos = pos + (rays[i].dir*(-1*local_step_size)); local_step_size = STEP_SIZE_D; } else if(intensity == 0 && (local_step_size * STEP_FACTOR_D) <= STEP_LIMIT_D){ local_step_size *= STEP_FACTOR_D; } else{ output = blend_d(output, intensity, colors); 
if(output.a > 0.99f){ break; } } pos = pos + (rays[i].dir*local_step_size); acc_distance += local_step_size; } image[i - offset] = to_display_color_d(output); } int getPowerfullness(cudaDeviceProp* p){ int cudaCores = 0; if(p->major == 1){ cudaCores = 8; } if(p->major == 2 && p->minor == 0){ cudaCores = 32; } if(p->major == 2 && p->minor == 1){ cudaCores = 48; } if(p->major == 3){ cudaCores = 192; } cudaCores = cudaCores*p->multiProcessorCount; return cudaCores*p->clockRate; } float logit(float x){ float y = log(x/(1-x)); y = y + 5; y = y / 10; return y; } void printError(cudaError_t error, char* message){ if(error != cudaSuccess){ printf("%s\n", cudaGetErrorString(error)); printf("%s\n", message); } } extern "C" void copy_to_devices_invariant(int deviceCount, Tree* tree, Raycreator* rc){ for(int device = 0; device < deviceCount; device++){ cudaSetDevice(device); cudaMalloc((void**)&nodes_d[device], sizeof(Node)*tree->node_list->current_pos); cudaMalloc((void**)&points_d[device], sizeof(Point)*tree->total_num_points); cudaMalloc((void**)&image_d[device], sizeof(Display_color)*RESOLUTION*RESOLUTION); cudaMalloc((void**)&rays_d[device], sizeof(Ray)*RESOLUTION*RESOLUTION); cudaMalloc((void**)&stack_d[device], sizeof(unsigned int)*RESOLUTION*RESOLUTION*10);//tree->max_depth + 2); cudaMalloc((void**)&colors_d[device], sizeof(Color)*transfer_overlay->color_table_size); printError(cudaGetLastError(), "Problem mallocing"); cudaMemcpy(nodes_d[device], tree->node_list->nodes, sizeof(Node)*tree->node_list->current_pos, cudaMemcpyHostToDevice); cudaMemcpy(points_d[device], tree->point_list->points, sizeof(Point)*tree->total_num_points, cudaMemcpyHostToDevice); cudaMemcpy(colors_d[device], transfer_overlay->color_table, sizeof(Color)*transfer_overlay->color_table_size, cudaMemcpyHostToDevice); printError(cudaGetLastError(), "Memcpy"); #ifdef TEXTURE cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32,32,32,32, cudaChannelFormatKindFloat); pointTexture.filterMode = cudaFilterModePoint; pointTexture.normalized = false; cudaBindTexture(NULL, pointTexture, points_d[device], channelDesc, sizeof(Point)*tree->total_num_points); printError(cudaGetLastError(), "texture"); #endif cudaMemcpyToSymbol(top_ranges, &tree->ranges, sizeof(Ranges)); cudaMemcpyToSymbol(root_node, &tree->root, sizeof(Node)); cudaMemcpyToSymbol(MAX_INTENSITY_D, &MAX_INTENSITY, sizeof(real_t)); cudaMemcpyToSymbol(RESOLUTION_D, &RESOLUTION, sizeof(int)); printError(cudaGetLastError(), "memcpy to symbol"); } } void copy_to_devices(int deviceCount, Tree* tree, Raycreator* rc){ Coord top = {rc->ranges->xmax, rc->ranges->ymax, rc->ranges->zmax}; Coord bottom = {rc->ranges->xmin, rc->ranges->ymin, rc->ranges->zmin}; real_t x_radius = INTERPOLATION_RADIUS; real_t y_radius = INTERPOLATION_RADIUS; real_t z_radius = INTERPOLATION_RADIUS; if(ANISOTROPIC){ x_radius *= (1/sqrt(ANISO_MATRIX[0])); y_radius *= (1/sqrt(ANISO_MATRIX[4])); z_radius *= (1/sqrt(ANISO_MATRIX[8])); } for(int device = 0; device < deviceCount; device++){ cudaSetDevice(device); cudaMemcpy(rays_d[device], rays, sizeof(Ray)*RESOLUTION*RESOLUTION, cudaMemcpyHostToDevice); printError(cudaGetLastError(), "Memcpy"); cudaMemcpyToSymbol(STEP_SIZE_D, &STEP_SIZE, sizeof(real_t)); cudaMemcpyToSymbol(STEP_FACTOR_D, &STEP_FACTOR, sizeof(real_t)); cudaMemcpyToSymbol(STEP_LIMIT_D, &STEP_LIMIT, sizeof(real_t)); cudaMemcpyToSymbol(INTERPOLATION_RADIUS_D, &INTERPOLATION_RADIUS, sizeof(real_t)); cudaMemcpyToSymbol(X_RADIUS, &x_radius, sizeof(real_t)); cudaMemcpyToSymbol(Y_RADIUS, &y_radius, 
sizeof(real_t)); cudaMemcpyToSymbol(Z_RADIUS, &z_radius, sizeof(real_t)); cudaMemcpyToSymbol(aniso_x, &ANISO_MATRIX[0], sizeof(real_t)); cudaMemcpyToSymbol(aniso_y, &ANISO_MATRIX[4], sizeof(real_t)); cudaMemcpyToSymbol(aniso_z, &ANISO_MATRIX[8], sizeof(real_t)); cudaMemcpyToSymbol(up, &rc->up, sizeof(Coord)); cudaMemcpyToSymbol(top_d, &top, sizeof(Coord)); cudaMemcpyToSymbol(bottom_d, &bottom, sizeof(Coord)); cudaMemcpyToSymbol(eye, &rc->eye, sizeof(Coord)); cudaMemcpyToSymbol(right, &rc->right, sizeof(Coord)); cudaMemcpyToSymbol(screen_center_d, &rc->screen_center, sizeof(Coord)); printError(cudaGetLastError(), "memcpy to symbol"); } } float* get_work_fractions(int deviceCount, int useFiftyFifty){ float* workFractions = (float*) malloc(sizeof(float) * deviceCount); if(stored_work_fractions[0] != 0){ for (int d = 0; d < deviceCount; ++d) { workFractions[d] = stored_work_fractions[d]; } } else{ if(useFiftyFifty != 0){ workFractions[0] = 0.5; workFractions[1] = 0.5; } float sum = 0; for(int d = 0; d < deviceCount; d++){ cudaDeviceProp p; cudaGetDeviceProperties(&p, d); workFractions[d] = getPowerfullness(&p); sum += workFractions[d]; } for(int d = 0; d < deviceCount; d++){ workFractions[d] = workFractions[d]/sum; } } return workFractions; } void get_work_fractions_logit(float* workFractions){ workFractions[0] = logit(workFractions[0]); workFractions[1] = 1- workFractions[0]; } void get_work_fractions_ray_length(float* workFractions){ real_t totalLength = 0; for(int i = 0; i < RESOLUTION*RESOLUTION; i++){ totalLength += rays[i].distance; } int i = 0; real_t partial_length = 0; real_t target_partial_length = workFractions[0] * totalLength; while(partial_length < target_partial_length){ partial_length += rays[i].distance; i++; } workFractions[0] = (real_t)i/(real_t)(RESOLUTION*RESOLUTION); workFractions[1] = 1 - workFractions[0]; } int* get_blocks_pr_device(int deviceCount, float* originalWorkFractions, int nBlocks, int useLogit, int useRayLength){ float workFractions[deviceCount]; for (int i = 0; i < deviceCount; ++i) { workFractions[i] = originalWorkFractions[i]; } if(useLogit != 0){ get_work_fractions_logit(workFractions); } if(useRayLength != 0){ get_work_fractions_ray_length(workFractions); } int* blocksPrDevice = (int*)malloc(sizeof(int) * deviceCount); for (int d = 0; d < deviceCount; ++d) { blocksPrDevice[d] = (int)(workFractions[d] * nBlocks); printf("Work for device %d: %f, %d/%d\n", d, workFractions[d], blocksPrDevice[d], nBlocks); } int sum = 0; do{ sum = 0; for (int d = 0; d < deviceCount; ++d) { sum += blocksPrDevice[d]; } if(sum < nBlocks){ blocksPrDevice[0]++; } else if(sum > nBlocks){ blocksPrDevice[0]--; } }while(sum != nBlocks); return blocksPrDevice; } extern "C" void launch_ray_trace_kernel(void* v, Raycreator* rc){ Tree* tree = (Tree*)v; int deviceCount = 0; float* workFractions; if(MULTIGPU == 1){ cudaGetDeviceCount(&deviceCount); int useFiftyFifty = 0; workFractions = get_work_fractions(deviceCount, useFiftyFifty); } else{ deviceCount = 1; workFractions = (float*)malloc(sizeof(float)*1); workFractions[0] = 1.0; } printf("Using %d devices\n", deviceCount); copy_to_devices(deviceCount, tree, rc); int nThreads=128; int totalThreads = RESOLUTION*RESOLUTION; int nBlocks = totalThreads/nThreads; nBlocks += ((totalThreads%nThreads)>0)?1:0; int* blocksPrDevice; if(MULTIGPU == 1){ int useLogit = 0; int useRayLength = 1; blocksPrDevice = get_blocks_pr_device(deviceCount, workFractions, nBlocks, useLogit, useRayLength); } else{ blocksPrDevice = (int*)malloc(sizeof(int)*1); 
blocksPrDevice[0] = nBlocks; } cudaEvent_t startEvents[deviceCount]; cudaEvent_t endEvents[deviceCount]; cudaStream_t streams[deviceCount]; for(int device = 0; device < deviceCount; device++){ int start = 0; if(device > 0){ start = blocksPrDevice[device -1] * nThreads; } cudaSetDevice(device); cudaEventCreate(&startEvents[device]); cudaEventCreate(&endEvents[device]); cudaStreamCreate(&streams[device]); #ifdef TEXTURE cudaEventRecord(startEvents[device]); kernel<<<blocksPrDevice[device], nThreads,0, streams[device]>>>(nodes_d[device], image_d[device], rays_d[device], colors_d[device], stack_d[device], 10, rc->pixel_width, start, MULTIGPU);//tree->max_depth + 2); cudaEventRecord(endEvents[device]); #else cudaEventRecord(startEvents[device]); printf("start %d blocks: %d\n", start, blocksPrDevice[device]); kernel<<<blocksPrDevice[device], nThreads,0, streams[device]>>>(points_d[device], nodes_d[device], image_d[device], rays_d[device], colors_d[device], stack_d[device], 10, rc->pixel_width, start, MULTIGPU);//tree->max_depth + 2); cudaEventRecord(endEvents[device]); #endif } float r[deviceCount]; for (int d = 0; d < deviceCount; ++d) { cudaEventSynchronize(endEvents[d]); float time; cudaEventElapsedTime(&time,startEvents[d], endEvents[d]); r[d] = time/workFractions[d]; printf("Time for device %d: %f\n", d, time); } float total = r[0] + r[1]; stored_work_fractions[0] = r[1]/total; stored_work_fractions[1] = r[0]/total; int previousStart = 0; for(int device = 0; device < deviceCount; device++){ cudaSetDevice(device); int images_index = log2((real_t)RESOLUTION) - 4; int offset = previousStart; previousStart = offset + blocksPrDevice[device]*nThreads; cudaMemcpy(&images[images_index][offset], image_d[device], sizeof(Display_color)*blocksPrDevice[device]*nThreads, cudaMemcpyDeviceToHost); } //freeAndReset(deviceCount); } extern "C" void freeAndReset(){ int deviceCount = 0; if(MULTIGPU == 1){ cudaGetDeviceCount(&deviceCount); } else{ deviceCount = 1; } for(int device = 0; device < deviceCount; device++){ cudaSetDevice(device); cudaFree(nodes_d[device]); cudaFree(points_d[device]); cudaFree(image_d[device]); cudaFree(rays_d[device]); cudaFree(colors_d[device]); cudaFree(stack_d[device]); cudaDeviceReset(); printError(cudaGetLastError(), "Free"); } } extern "C" void initDevice(Tree* tree, Raycreator* rc){ int deviceCount = 0; if(MULTIGPU == 1){ cudaGetDeviceCount(&deviceCount); } else{ deviceCount = 1; } copy_to_devices_invariant(deviceCount, tree, rc); } extern "C" long int get_memory_size(){ cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, 0); return devProp.totalGlobalMem; }
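The ray-casting pair above shows the main syntactic change hipify makes to kernel launches: CUDA's triple-chevron launch becomes a call to the hipLaunchKernelGGL macro, with the grid, block, shared-memory and stream configuration passed explicitly before the kernel arguments. A minimal sketch of the two equivalent forms; the kernel scale and the wrapper launch_scale are made up for illustration and are not part of the files above:

__global__ void scale(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // one element per thread
    if (i < n) data[i] *= factor;
}

void launch_scale(float* d_data, int n) {
    int nThreads = 128;
    int nBlocks = (n + nThreads - 1) / nThreads;
    // CUDA form, as in the .cu files of this collection:
    scale<<<nBlocks, nThreads, 0, 0>>>(d_data, 2.0f, n);
    // Equivalent HIP form emitted by hipify, as in the .hip files
    // (launch configuration first, then the kernel arguments):
    //   hipLaunchKernelGGL(scale, dim3(nBlocks), dim3(nThreads), 0, 0, d_data, 2.0f, n);
}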
fa5b1e329ae28a6bcef53ebd30b5225a021be9dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from ztranspose_inplace.cu normal z -> s, Sat Nov 15 19:53:59 2014 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_s #define NB 16 //////////////////////////////////////////////////////////////////////////////// // grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd. // lower indicates blocks in lower triangle of grid, including diagonal. // lower blocks cover left side of matrix, including diagonal. // upper blocks swap block indices (x,y) and shift by grid width (or width-1) // to cover right side of matrix. // [ A00 A01 A02 ] [ A00 . . | . . ] // [ A10 A11 A12 ] [ A10 A11 . | . . ] // grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ] // [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ] // [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ] // // See stranspose_inplace_even for description of threads. __global__ void stranspose_inplace_odd( int n, float *matrix, int lda ) { __shared__ float sA[ NB ][ NB+1 ]; __shared__ float sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x >= blockIdx.y); int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1)); int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y )); ii *= NB; jj *= NB; float *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { float *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } //////////////////////////////////////////////////////////////////////////////// // grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even. // lower indicates blocks in strictly lower triangle of grid, excluding diagonal. // lower blocks shift up by one to cover left side of matrix including diagonal. // upper blocks swap block indices (x,y) and shift by grid width // to cover right side of matrix. // [ A00 A01 ] [ A10 . | . . ] // [ A10 A11 ] [ A20 A21 | . . ] // grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ] // [ A30 A31 ] [ A40 A41 | A01 A11 ] // [ A40 A41 ] // // Each block is NB x NB threads. // For non-diagonal block A, block B is symmetric block. // Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed, // syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j). // Threads outside the matrix do not touch memory. __global__ void stranspose_inplace_even( int n, float *matrix, int lda ) { __shared__ float sA[ NB ][ NB+1 ]; __shared__ float sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x > blockIdx.y); int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y)); int jj = (lower ? 
(blockIdx.y ) : (blockIdx.x + gridDim.y)); ii *= NB; jj *= NB; float *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { float *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } /** Purpose ------- stranspose_inplace_q transposes a square N-by-N matrix in-place. Same as stranspose_inplace, but adds queue argument. Arguments --------- @param[in] n INTEGER The number of rows & columns of the matrix dA. N >= 0. @param[in] dA REAL array, dimension (LDDA,N) The N-by-N matrix dA. On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= N. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_inplace_q( magma_int_t n, magmaFloat_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( ldda < n ) info = -3; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NB, NB ); int nblock = (n + NB - 1)/NB; // need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix. // block assignment differs depending on whether nblock is odd or even. if( nblock % 2 == 1 ) { dim3 grid( nblock, (nblock+1)/2 ); hipLaunchKernelGGL(( stranspose_inplace_odd), dim3(grid), dim3(threads), 0, queue , n, dA, ldda ); } else { dim3 grid( nblock+1, nblock/2 ); hipLaunchKernelGGL(( stranspose_inplace_even), dim3(grid), dim3(threads), 0, queue , n, dA, ldda ); } } /** @see magmablas_stranspose_inplace_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_inplace( magma_int_t n, magmaFloat_ptr dA, magma_int_t ldda ) { magmablas_stranspose_inplace_q( n, dA, ldda, magma_stream ); }
fa5b1e329ae28a6bcef53ebd30b5225a021be9dc.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from ztranspose_inplace.cu normal z -> s, Sat Nov 15 19:53:59 2014 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_s #define NB 16 //////////////////////////////////////////////////////////////////////////////// // grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd. // lower indicates blocks in lower triangle of grid, including diagonal. // lower blocks cover left side of matrix, including diagonal. // upper blocks swap block indices (x,y) and shift by grid width (or width-1) // to cover right side of matrix. // [ A00 A01 A02 ] [ A00 . . | . . ] // [ A10 A11 A12 ] [ A10 A11 . | . . ] // grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ] // [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ] // [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ] // // See stranspose_inplace_even for description of threads. __global__ void stranspose_inplace_odd( int n, float *matrix, int lda ) { __shared__ float sA[ NB ][ NB+1 ]; __shared__ float sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x >= blockIdx.y); int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1)); int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y )); ii *= NB; jj *= NB; float *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { float *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } //////////////////////////////////////////////////////////////////////////////// // grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even. // lower indicates blocks in strictly lower triangle of grid, excluding diagonal. // lower blocks shift up by one to cover left side of matrix including diagonal. // upper blocks swap block indices (x,y) and shift by grid width // to cover right side of matrix. // [ A00 A01 ] [ A10 . | . . ] // [ A10 A11 ] [ A20 A21 | . . ] // grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ] // [ A30 A31 ] [ A40 A41 | A01 A11 ] // [ A40 A41 ] // // Each block is NB x NB threads. // For non-diagonal block A, block B is symmetric block. // Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed, // syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j). // Threads outside the matrix do not touch memory. __global__ void stranspose_inplace_even( int n, float *matrix, int lda ) { __shared__ float sA[ NB ][ NB+1 ]; __shared__ float sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x > blockIdx.y); int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y)); int jj = (lower ? (blockIdx.y ) : (blockIdx.x + gridDim.y)); ii *= NB; jj *= NB; float *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { float *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } /** Purpose ------- stranspose_inplace_q transposes a square N-by-N matrix in-place. 
Same as stranspose_inplace, but adds queue argument. Arguments --------- @param[in] n INTEGER The number of rows & columns of the matrix dA. N >= 0. @param[in] dA REAL array, dimension (LDDA,N) The N-by-N matrix dA. On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= N. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_inplace_q( magma_int_t n, magmaFloat_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( ldda < n ) info = -3; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NB, NB ); int nblock = (n + NB - 1)/NB; // need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix. // block assignment differs depending on whether nblock is odd or even. if( nblock % 2 == 1 ) { dim3 grid( nblock, (nblock+1)/2 ); stranspose_inplace_odd<<< grid, threads, 0, queue >>>( n, dA, ldda ); } else { dim3 grid( nblock+1, nblock/2 ); stranspose_inplace_even<<< grid, threads, 0, queue >>>( n, dA, ldda ); } } /** @see magmablas_stranspose_inplace_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_inplace( magma_int_t n, magmaFloat_ptr dA, magma_int_t ldda ) { magmablas_stranspose_inplace_q( n, dA, ldda, magma_stream ); }
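The MAGMA in-place transpose above stages NB x NB tiles through shared-memory arrays declared as sA[NB][NB+1]; the extra column is the customary padding that keeps column-wise accesses to the tile from repeatedly hitting the same shared-memory bank. For context, a hedged sketch of the same tile-through-shared-memory idea in its simpler out-of-place form (the kernel name and TILE size are illustrative; the MAGMA kernels additionally pair each off-diagonal tile with its symmetric partner so the transpose can happen in place):

#define TILE 16

__global__ void transpose_tile_sketch(const float* in, float* out, int n) {
    __shared__ float tile[TILE][TILE + 1];  // +1 padding to avoid shared-memory bank conflicts
    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    if (x < n && y < n) {
        tile[threadIdx.y][threadIdx.x] = in[y * n + x];  // coalesced load of one tile
    }
    __syncthreads();
    // Write the tile back transposed: block (bx,by) lands at block position (by,bx).
    x = blockIdx.y * TILE + threadIdx.x;
    y = blockIdx.x * TILE + threadIdx.y;
    if (x < n && y < n) {
        out[y * n + x] = tile[threadIdx.x][threadIdx.y];  // coalesced store of the transposed tile
    }
}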
4311023868ba45b4da903412e5a4e1db8a1e16e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/max_pool_with_index.h" #include "caffe2/utils/conversions.h" namespace caffe2 { namespace { /*** * Note: CUDA kernels are minor changes from those at: * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/pooling_layer.cu * Originally licensed under BSD **/ template <typename Dtype> __global__ void MaxPoolForward( const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask) { CUDA_1D_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); float maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (convert::To<Dtype, float>(bottom_slice[h * width + w]) > maxval) { maxidx = h * width + w; maxval = convert::To<Dtype, float>(bottom_slice[maxidx]); } } } top_data[index] = convert::To<float, Dtype>(maxval); mask[index] = maxidx; } } template <typename Dtype> __global__ void MaxPoolBackward( const int nthreads, const Dtype* const top_diff, const int* const mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); float gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += convert::To<Dtype, float>(top_diff_slice[ph * pooled_width + pw]); } } } bottom_diff[index] = convert::To<float, Dtype>(gradient); } } }; template <typename T> bool MaxPoolWithIndexOp::DoRunWithType() { auto& X = Input(0); auto* Y = Output(0); auto* mask = Output(1); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1)); int output_size = Y->size(); mask->Resize(output_size); hipLaunchKernelGGL(( MaxPoolForward<T>), dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<T>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->mutable_data<T>(), mask->mutable_data<int>()); return true; } bool MaxPoolWithIndexOp::RunOnDevice() { auto& X = Input(0); CAFFE_ENFORCE(X.ndim() == 4, "Operator only supports 4D tensors"); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if (X.IsType<float16>()) { return DoRunWithType<float16>(); } else { CAFFE_THROW("Unsupported input type"); } } template <typename T> bool MaxPoolWithIndexGradientOp::DoRunWithType() { auto& X = Input(0); auto& dY = Input(1); auto& mask = Input(2); auto* dX = Output(0); CAFFE_ENFORCE(X.ndim() == 4, "Operator only supports 4D tensors"); dX->ResizeLike(X); ConvPoolOpBase<CUDAContext>::ComputePads(vector<int>{X.dim32(2), X.dim32(3)}); hipLaunchKernelGGL(( MaxPoolBackward<T>), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<T>(), mask.data<int>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<T>()); return true; } bool MaxPoolWithIndexGradientOp::RunOnDevice() { auto& X = Input(0); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if (X.IsType<float16>()) { return DoRunWithType<float16>(); } else { CAFFE_THROW("Unsupported input type"); } } namespace { REGISTER_CUDA_OPERATOR(MaxPoolWithIndex, MaxPoolWithIndexOp); REGISTER_CUDA_OPERATOR(MaxPoolWithIndexGradient, MaxPoolWithIndexGradientOp); class GetMaxPoolWithIndexGradient : public GradientMakerBase { using GradientMakerBase::GradientMakerBase; vector<OperatorDef> GetGradientDefs() override { return SingleGradientDef( "MaxPoolWithIndexGradient", "", vector<string>{I(0), GO(0), O(1)}, vector<string>{GI(0)}); } }; REGISTER_GRADIENT(MaxPoolWithIndex, GetMaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndex) .NumInputs(1) .NumOutputs(2) .TensorInferenceFunction(ConvPoolOpBase<CPUContext>::TensorInferenceForPool) .SetDoc(R"DOC( MaxPoolWithIndex consumes an input blob X and applies max pooling across the blob according to kernel sizes, stride sizes and pad lengths defined by the ConvPoolOpBase operator. It also produces an explicit mask that defines the location that all maximum values were found, which is re-used in the gradient pass. This op is deterministic. 
)DOC") .Input( 0, "X", "Input data tensor from the previous operator; dimensions " "depend on whether the NCHW or NHWC operators are being used. For " "example, in the former, the input has size (N x C x H x W), where N is" " the batch size, C is the number of channels, and H and W are the " "height and the width of the data. The corresponding permutation of " "dimensions is used in the latter case. ") .Output( 0, "Y", "Output data tensor from average pooling across the input " "tensor. Dimensions will vary based on various kernel, stride, and pad " "sizes.") .Output( 1, "Index", "Mask of location indices of the found maximum values, " " used in the gradient operator to accumulate dY values to the " "appropriate locations in Y"); }; }; // namespace caffe2
4311023868ba45b4da903412e5a4e1db8a1e16e2.cu
#include "caffe2/operators/max_pool_with_index.h" #include "caffe2/utils/conversions.h" namespace caffe2 { namespace { /*** * Note: CUDA kernels are minor changes from those at: * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/pooling_layer.cu * Originally licensed under BSD **/ template <typename Dtype> __global__ void MaxPoolForward( const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask) { CUDA_1D_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); float maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (convert::To<Dtype, float>(bottom_slice[h * width + w]) > maxval) { maxidx = h * width + w; maxval = convert::To<Dtype, float>(bottom_slice[maxidx]); } } } top_data[index] = convert::To<float, Dtype>(maxval); mask[index] = maxidx; } } template <typename Dtype> __global__ void MaxPoolBackward( const int nthreads, const Dtype* const top_diff, const int* const mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); float gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += convert::To<Dtype, float>(top_diff_slice[ph * pooled_width + pw]); } } } bottom_diff[index] = convert::To<float, Dtype>(gradient); } } }; template <typename T> bool MaxPoolWithIndexOp::DoRunWithType() { auto& X = Input(0); auto* Y = Output(0); auto* mask = Output(1); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1)); int output_size = Y->size(); mask->Resize(output_size); MaxPoolForward<T><<< CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<T>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->mutable_data<T>(), mask->mutable_data<int>()); return true; } bool MaxPoolWithIndexOp::RunOnDevice() { auto& X = Input(0); CAFFE_ENFORCE(X.ndim() == 4, "Operator only supports 4D tensors"); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if (X.IsType<float16>()) { return DoRunWithType<float16>(); } else { CAFFE_THROW("Unsupported input type"); } } template <typename T> bool MaxPoolWithIndexGradientOp::DoRunWithType() { auto& X = Input(0); auto& dY = Input(1); auto& mask = Input(2); auto* dX = Output(0); CAFFE_ENFORCE(X.ndim() == 4, "Operator only supports 4D tensors"); dX->ResizeLike(X); ConvPoolOpBase<CUDAContext>::ComputePads(vector<int>{X.dim32(2), X.dim32(3)}); MaxPoolBackward<T><<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<T>(), mask.data<int>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<T>()); return true; } bool MaxPoolWithIndexGradientOp::RunOnDevice() { auto& X = Input(0); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if (X.IsType<float16>()) { return DoRunWithType<float16>(); } else { CAFFE_THROW("Unsupported input type"); } } namespace { REGISTER_CUDA_OPERATOR(MaxPoolWithIndex, MaxPoolWithIndexOp); REGISTER_CUDA_OPERATOR(MaxPoolWithIndexGradient, MaxPoolWithIndexGradientOp); class GetMaxPoolWithIndexGradient : public GradientMakerBase { using GradientMakerBase::GradientMakerBase; vector<OperatorDef> GetGradientDefs() override { return SingleGradientDef( "MaxPoolWithIndexGradient", "", vector<string>{I(0), GO(0), O(1)}, vector<string>{GI(0)}); } }; REGISTER_GRADIENT(MaxPoolWithIndex, GetMaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndex) .NumInputs(1) .NumOutputs(2) .TensorInferenceFunction(ConvPoolOpBase<CPUContext>::TensorInferenceForPool) .SetDoc(R"DOC( MaxPoolWithIndex consumes an input blob X and applies max pooling across the blob according to kernel sizes, stride sizes and pad lengths defined by the ConvPoolOpBase operator. It also produces an explicit mask that defines the location that all maximum values were found, which is re-used in the gradient pass. This op is deterministic. 
)DOC") .Input( 0, "X", "Input data tensor from the previous operator; dimensions " "depend on whether the NCHW or NHWC operators are being used. For " "example, in the former, the input has size (N x C x H x W), where N is" " the batch size, C is the number of channels, and H and W are the " "height and the width of the data. The corresponding permutation of " "dimensions is used in the latter case. ") .Output( 0, "Y", "Output data tensor from average pooling across the input " "tensor. Dimensions will vary based on various kernel, stride, and pad " "sizes.") .Output( 1, "Index", "Mask of location indices of the found maximum values, " " used in the gradient operator to accumulate dY values to the " "appropriate locations in Y"); }; }; // namespace caffe2
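Both pooling kernels above walk their index space with caffe2's CUDA_1D_KERNEL_LOOP macro rather than assuming one thread per output element. The macro lives in caffe2's GPU utility headers; the sketch below writes the underlying grid-stride pattern out by hand (the kernel name copy_kernel is illustrative only) so the loop structure is visible:

__global__ void copy_kernel(const float* in, float* out, int nthreads) {
    // Grid-stride loop: each thread starts at its global index and advances by the total
    // number of launched threads, so any grid size covers all nthreads elements.
    for (int index = blockIdx.x * blockDim.x + threadIdx.x;
         index < nthreads;
         index += blockDim.x * gridDim.x) {
        out[index] = in[index];  // the pooling kernels do their max / gradient work here
    }
}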
2f3d2c163e8aa41253529cfc45689425830d0159.hip
// !!! This is a file automatically generated by hipify!!! /* * @Author: Xu.Wang * @Date: 2020-04-24 00:06:07 * @Last Modified by: Xu.Wang * @Last Modified time: 2020-04-24 01:14:48 */ #include <hip/hip_runtime.h> #include <cuda_double.h> #include<iostream> __global__ void kernel_double(int *in, int *out, const int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int i=index;i<n;i+=stride){ out[i] = in[i]*2; } } void cudaDouble(int *hIn, int *hOut,const int n) { int *dIn; int *dOut; hipHostMalloc((void**)&dIn, n * sizeof(int)); hipHostMalloc((void**)&dOut, n * sizeof(int)); hipMemcpy(dIn, hIn, n * sizeof(int), hipMemcpyHostToDevice); int blockSize = 256; int numBlocks = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( kernel_double), dim3(numBlocks),dim3(blockSize), 0, 0, dIn, dOut, n); hipDeviceSynchronize(); hipMemcpy(hOut, dOut, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(dIn); hipFree(dOut); }
2f3d2c163e8aa41253529cfc45689425830d0159.cu
/* * @Author: Xu.Wang * @Date: 2020-04-24 00:06:07 * @Last Modified by: Xu.Wang * @Last Modified time: 2020-04-24 01:14:48 */ #include <cuda_runtime.h> #include <cuda_double.h> #include<iostream> __global__ void kernel_double(int *in, int *out, const int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int i=index;i<n;i+=stride){ out[i] = in[i]*2; } } void cudaDouble(int *hIn, int *hOut,const int n) { int *dIn; int *dOut; cudaMallocHost((void**)&dIn, n * sizeof(int)); cudaMallocHost((void**)&dOut, n * sizeof(int)); cudaMemcpy(dIn, hIn, n * sizeof(int), cudaMemcpyHostToDevice); int blockSize = 256; int numBlocks = (n + blockSize - 1) / blockSize; kernel_double<<<numBlocks,blockSize>>>(dIn, dOut, n); cudaDeviceSynchronize(); cudaMemcpy(hOut, dOut, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dIn); cudaFree(dOut); }
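Beyond the launch syntax, this small pair is a compact catalogue of the one-to-one runtime renames hipify applies: cudaMallocHost -> hipHostMalloc, cudaMemcpy -> hipMemcpy, cudaMemcpyHostToDevice -> hipMemcpyHostToDevice, cudaDeviceSynchronize -> hipDeviceSynchronize, and cudaFree -> hipFree. Note that both versions allocate dIn/dOut as pinned host memory yet treat them as device buffers and release them with cudaFree/hipFree. The hedged sketch below shows the same double-the-input flow written with ordinary device allocations instead (the wrapper name is illustrative; it reuses the kernel_double kernel defined above, error handling is omitted, and the HIP version would substitute the hip* names exactly as in the list above):

void doubleOnDeviceSketch(const int* hIn, int* hOut, const int n) {
    int* dIn = nullptr;
    int* dOut = nullptr;
    cudaMalloc((void**)&dIn, n * sizeof(int));   // plain device buffers instead of pinned host memory
    cudaMalloc((void**)&dOut, n * sizeof(int));
    cudaMemcpy(dIn, hIn, n * sizeof(int), cudaMemcpyHostToDevice);

    int blockSize = 256;
    int numBlocks = (n + blockSize - 1) / blockSize;
    kernel_double<<<numBlocks, blockSize>>>(dIn, dOut, n);  // kernel_double comes from the file above

    cudaDeviceSynchronize();
    cudaMemcpy(hOut, dOut, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dIn);
    cudaFree(dOut);
}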
29ce69c924fa0a13db43fcf4ad8364252b3c28e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CCD_CUDA.h" __device__ float RankOneUpdate_dev(const unsigned* Rcol_ptr, const unsigned* Rrow_idx, const float* Rval, const unsigned j, const float* u_vec_t, const float lambda ) { float g = 0, h = lambda; if (Rcol_ptr[j + 1] == Rcol_ptr[j]) { return 0; } for (unsigned idx = Rcol_ptr[j]; idx < Rcol_ptr[j + 1]; ++idx) { unsigned i = Rrow_idx[idx]; g += u_vec_t[i] * Rval[idx]; h += u_vec_t[i] * u_vec_t[i]; } float newvj = g / h; return newvj; } __global__ void RankOneUpdate_v_kernel(const unsigned Rcols, const unsigned* Rcol_ptr, const unsigned* Rrow_idx, const float* Rval, float* u, float* v, const float lambda ) { long thread_id = threadIdx.x + blockIdx.x * blockDim.x; long total_threads = blockDim.x * gridDim.x; for (long c = thread_id; c < Rcols; c += total_threads) { v[c] = RankOneUpdate_dev(Rcol_ptr, Rrow_idx, Rval, c, u, lambda * (Rcol_ptr[c + 1] - Rcol_ptr[c])); } } __global__ void RankOneUpdate_u_kernel(const unsigned Rcols_t, const unsigned* Rcol_ptr_t, const unsigned* Rrow_idx_t, const float* Rval_t, float* u, float* v, const float lambda ) { long thread_id = threadIdx.x + blockIdx.x * blockDim.x; long total_threads = blockDim.x * gridDim.x; for (long c = thread_id; c < Rcols_t; c += total_threads) { u[c] = RankOneUpdate_dev(Rcol_ptr_t, Rrow_idx_t, Rval_t, c, v, lambda * (Rcol_ptr_t[c + 1] - Rcol_ptr_t[c])); } } __global__ void UpdateRating_DUAL_kernel_NoLoss(const unsigned Rcols, const unsigned* Rcol_ptr, const unsigned* Rrow_idx, float* Rval, const float* Wt_vec_t, const float* Ht_vec_t, const bool add, const unsigned Rcols_t, const unsigned* Rcol_ptr_t, const unsigned* Rrow_idx_t, float* Rval_t, const bool add_t ) { long thread_id = threadIdx.x + blockIdx.x * blockDim.x; long total_threads = blockDim.x * gridDim.x; for (long i = thread_id; i < Rcols; i += total_threads) { if (add) { float Htc = Ht_vec_t[i]; for (unsigned idx = Rcol_ptr[i]; idx < Rcol_ptr[i + 1]; ++idx) { Rval[idx] += Wt_vec_t[Rrow_idx[idx]] * Htc; //change R.val } } else { float Htc = Ht_vec_t[i]; for (unsigned idx = Rcol_ptr[i]; idx < Rcol_ptr[i + 1]; ++idx) { Rval[idx] -= Wt_vec_t[Rrow_idx[idx]] * Htc; //change R.val } } } for (long i = thread_id; i < Rcols_t; i += total_threads) { if (add_t) { float Htc = Wt_vec_t[i]; for (unsigned idx = Rcol_ptr_t[i]; idx < Rcol_ptr_t[i + 1]; ++idx) { Rval_t[idx] += Ht_vec_t[Rrow_idx_t[idx]] * Htc; //change R.val } } else { float Htc = Wt_vec_t[i]; for (unsigned idx = Rcol_ptr_t[i]; idx < Rcol_ptr_t[i + 1]; ++idx) { Rval_t[idx] -= Ht_vec_t[Rrow_idx_t[idx]] * Htc; //change R.val } } } } __global__ void UpdateRating_W_kernel(const unsigned Rcols, const unsigned* Rcol_ptr, const unsigned* Rrow_idx, float* Rval, const float* Wt_vec_t, const float* Ht_vec_t, const bool add ) { long thread_id = threadIdx.x + blockIdx.x * blockDim.x; long total_threads = blockDim.x * gridDim.x; for (long i = thread_id; i < Rcols; i += total_threads) { if (add) { float Htc = Ht_vec_t[i]; for (unsigned idx = Rcol_ptr[i]; idx < Rcol_ptr[i + 1]; ++idx) { Rval[idx] += Wt_vec_t[Rrow_idx[idx]] * Htc; //change R.val } } else { float Htc = Ht_vec_t[i]; for (unsigned idx = Rcol_ptr[i]; idx < Rcol_ptr[i + 1]; ++idx) { Rval[idx] -= Wt_vec_t[Rrow_idx[idx]] * Htc; //change R.val } } } } __global__ void UpdateRating_H_kernel(const unsigned Rcols_t, const unsigned* Rcol_ptr_t, const unsigned* Rrow_idx_t, float* Rval_t, const float* Wt_vec_t, const float* Ht_vec_t, const bool add_t ) { long thread_id = threadIdx.x + 
blockIdx.x * blockDim.x; long total_threads = blockDim.x * gridDim.x; for (long i = thread_id; i < Rcols_t; i += total_threads) { if (add_t) { float Htc = Wt_vec_t[i]; for (unsigned idx = Rcol_ptr_t[i]; idx < Rcol_ptr_t[i + 1]; ++idx) { Rval_t[idx] += Ht_vec_t[Rrow_idx_t[idx]] * Htc; //change R.val } } else { float Htc = Wt_vec_t[i]; for (unsigned idx = Rcol_ptr_t[i]; idx < Rcol_ptr_t[i + 1]; ++idx) { Rval_t[idx] -= Ht_vec_t[Rrow_idx_t[idx]] * Htc; //change R.val } } } } void kernel_wrapper_ccdpp_NV(SparseMatrix& R, TestData& T, MatData& W, MatData& H, parameter& parameters) { hipError_t cudaStatus; // Reset GPU. cudaStatus = hipDeviceReset(); gpuErrchk(cudaStatus); // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); gpuErrchk(cudaStatus); cudaStatus = ccdpp_NV(R, T, W, H, parameters); if (cudaStatus != hipSuccess) { fprintf(stderr, "CCD FAILED: %s\n", hipGetErrorString(cudaStatus)); } cudaStatus = hipDeviceReset(); gpuErrchk(cudaStatus); } inline hipError_t updateRating(unsigned int nThreadsPerBlock, unsigned int nBlocks, const SparseMatrix& R_C, const SparseMatrix& Rt, const unsigned* dev_Rcol_ptr, const unsigned* dev_Rrow_idx, const unsigned* dev_Rcol_ptr_T, const unsigned* dev_Rrow_idx_T, float* dev_Rval, float* dev_Rval_t, const float* dev_Wt_vec_t, const float* dev_Ht_vec_t, const bool add, hipError_t& cudaStatus) { hipLaunchKernelGGL(( UpdateRating_DUAL_kernel_NoLoss), dim3(nBlocks), dim3(nThreadsPerBlock), 0, 0, R_C.cols, dev_Rcol_ptr, dev_Rrow_idx, dev_Rval, dev_Wt_vec_t, dev_Ht_vec_t, add, Rt.cols, dev_Rcol_ptr_T, dev_Rrow_idx_T, dev_Rval_t, add); // UpdateRating_W_kernel<<<nBlocks, nThreadsPerBlock>>>(R_C.cols, dev_Rcol_ptr, dev_Rrow_idx, // dev_Rval, dev_Wt_vec_t, dev_Ht_vec_t, add); // UpdateRating_H_kernel<<<nBlocks, nThreadsPerBlock>>>(Rt.cols, dev_Rcol_ptr_T, dev_Rrow_idx_T, // dev_Rval_t, dev_Wt_vec_t, dev_Ht_vec_t, add); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); gpuErrchk(cudaStatus); cudaStatus = hipDeviceSynchronize(); gpuErrchk(cudaStatus); return cudaStatus; } inline hipError_t RankOneUpdate(unsigned int nThreadsPerBlock, unsigned int nBlocks, float lambda, const SparseMatrix& R_C, const SparseMatrix& Rt, const unsigned* dev_Rcol_ptr, const unsigned* dev_Rrow_idx, const unsigned* dev_Rcol_ptr_T, const unsigned* dev_Rrow_idx_T, const float* dev_Rval, const float* dev_Rval_t, float* dev_Wt_vec_t,float* dev_Ht_vec_t, hipError_t& cudaStatus) { hipLaunchKernelGGL(( RankOneUpdate_v_kernel), dim3(nBlocks), dim3(nThreadsPerBlock), 0, 0, R_C.cols, dev_Rcol_ptr, dev_Rrow_idx, dev_Rval, dev_Wt_vec_t, dev_Ht_vec_t, lambda); hipLaunchKernelGGL(( RankOneUpdate_u_kernel), dim3(nBlocks), dim3(nThreadsPerBlock), 0, 0, Rt.cols, dev_Rcol_ptr_T, dev_Rrow_idx_T, dev_Rval_t, dev_Wt_vec_t, dev_Ht_vec_t, lambda); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); gpuErrchk(cudaStatus); cudaStatus = hipDeviceSynchronize(); gpuErrchk(cudaStatus); return cudaStatus; } hipError_t ccdpp_NV(SparseMatrix& R_C, TestData& T, MatData& W, MatData& H, parameter& parameters) { unsigned nThreadsPerBlock = parameters.nThreadsPerBlock; unsigned nBlocks = parameters.nBlocks; hipError_t cudaStatus; int k = parameters.k; float lambda = parameters.lambda; // Create transpose view of R SparseMatrix Rt; Rt = R_C.get_shallow_transpose(); unsigned* dev_Rcol_ptr = nullptr; unsigned* dev_Rrow_idx = nullptr; unsigned* dev_Rcol_ptr_T = nullptr; unsigned* dev_Rrow_idx_T = nullptr; float* dev_Rval = nullptr; float* 
dev_Rval_t = nullptr; float* dev_Wt_vec_t = nullptr; //u float* dev_Ht_vec_t = nullptr; //v float* dev_W_ = nullptr; float* dev_H_ = nullptr; size_t nbits_W_ = R_C.rows * k * sizeof(float); float* W_ = (float*) malloc(nbits_W_); size_t nbits_H_ = R_C.cols * k * sizeof(float); float* H_ = (float*) malloc(nbits_H_); int indexPosition = 0; for (long i = 0; i < k; ++i) { for (int j = 0; j < R_C.rows; ++j) { W_[indexPosition] = W[i][j]; ++indexPosition; } } // indexPosition = 0; // for (long i = 0; i < k; ++i) { // for (int j = 0; j < R_C.cols; ++j) { // H_[indexPosition] = H[i][j]; // ++indexPosition; // } // } size_t nbits_col_ptr = (R_C.cols + 1) * sizeof(unsigned); size_t nbits_row_ptr = (R_C.rows + 1) * sizeof(unsigned); size_t nbits_idx = R_C.nnz * sizeof(unsigned); size_t nbits_val = R_C.nnz * sizeof(DTYPE); cudaStatus = hipMalloc((void**) &dev_W_, nbits_W_); gpuErrchk(cudaStatus); cudaStatus = hipMalloc((void**) &dev_H_, nbits_H_); gpuErrchk(cudaStatus); cudaStatus = hipMemcpy(dev_W_, W_, nbits_W_, hipMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = hipMemset(dev_H_, 0, nbits_H_); // cudaStatus = hipMemcpy(dev_H_, H_, nbits_H_, hipMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = hipMalloc((void**) &dev_Rcol_ptr, nbits_col_ptr); gpuErrchk(cudaStatus); cudaStatus = hipMalloc((void**) &dev_Rrow_idx, nbits_idx); gpuErrchk(cudaStatus); cudaStatus = hipMalloc((void**) &dev_Rcol_ptr_T, nbits_row_ptr); gpuErrchk(cudaStatus); cudaStatus = hipMalloc((void**) &dev_Rrow_idx_T, nbits_idx); gpuErrchk(cudaStatus); cudaStatus = hipMalloc((void**) &dev_Rval, nbits_val); gpuErrchk(cudaStatus); cudaStatus = hipMalloc((void**) &dev_Rval_t, nbits_val); gpuErrchk(cudaStatus); cudaStatus = hipMemcpy(dev_Rcol_ptr, R_C.get_csc_col_ptr(), nbits_col_ptr, hipMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = hipMemcpy(dev_Rrow_idx, R_C.get_csc_row_indx(), nbits_idx, hipMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = hipMemcpy(dev_Rcol_ptr_T, Rt.get_csc_col_ptr(), nbits_row_ptr, hipMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = hipMemcpy(dev_Rrow_idx_T, Rt.get_csc_row_indx(), nbits_idx, hipMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = hipMemcpy(dev_Rval, R_C.get_csc_val(), nbits_val, hipMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = hipMemcpy(dev_Rval_t, Rt.get_csc_val(), nbits_val, hipMemcpyHostToDevice); gpuErrchk(cudaStatus); float* rmse = (float*) malloc((T.nnz + 1) * sizeof(float)); unsigned* d_test_row; unsigned* d_test_col; float* d_test_val; float* d_pred_v; float* d_rmse; gpuErrchk(hipMalloc((void**) &d_test_row, (T.nnz + 1) * sizeof(unsigned))); gpuErrchk(hipMalloc((void**) &d_test_col, (T.nnz + 1) * sizeof(unsigned))); gpuErrchk(hipMalloc((void**) &d_test_val, (T.nnz + 1) * sizeof(float))); gpuErrchk(hipMalloc((void**) &d_pred_v, (T.nnz + 1) * sizeof(float))); gpuErrchk(hipMalloc((void**) &d_rmse, (T.nnz + 1) * sizeof(float))); gpuErrchk(hipMemcpy(d_test_row, T.getTestRow(), (T.nnz + 1) * sizeof(unsigned), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_test_col, T.getTestCol(), (T.nnz + 1) * sizeof(unsigned), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_test_val, T.getTestVal(), (T.nnz + 1) * sizeof(float), hipMemcpyHostToDevice)); float update_time_acc = 0; float rank_time_acc = 0; for (int oiter = 1; oiter <= parameters.maxiter; ++oiter) { float update_time = 0; float rank_time = 0; GpuTimer update_timer; GpuTimer rmse_timer; GpuTimer rank_timer; for (int t = 0; t < k; ++t) { dev_Wt_vec_t = dev_W_ + t * R_C.rows; //u dev_Ht_vec_t = dev_H_ 
+ t * R_C.cols; //v if (oiter > 1) { update_timer.Start(); cudaStatus = updateRating(nThreadsPerBlock, nBlocks, R_C, Rt, dev_Rcol_ptr, dev_Rrow_idx, dev_Rcol_ptr_T, dev_Rrow_idx_T, dev_Rval, dev_Rval_t, dev_Wt_vec_t, dev_Ht_vec_t, true, cudaStatus); update_timer.Stop(); update_time += update_timer.Elapsed(); } rank_timer.Start(); for (int iter = 1; iter <= parameters.maxinneriter; ++iter) { cudaStatus = RankOneUpdate(nThreadsPerBlock, nBlocks, lambda, R_C, Rt, dev_Rcol_ptr, dev_Rrow_idx, dev_Rcol_ptr_T, dev_Rrow_idx_T, dev_Rval, dev_Rval_t, dev_Wt_vec_t, dev_Ht_vec_t, cudaStatus); } rank_timer.Stop(); rank_time += rank_timer.Elapsed(); update_timer.Start(); cudaStatus = updateRating(nThreadsPerBlock, nBlocks, R_C, Rt, dev_Rcol_ptr, dev_Rrow_idx, dev_Rcol_ptr_T, dev_Rrow_idx_T, dev_Rval, dev_Rval_t, dev_Wt_vec_t, dev_Ht_vec_t, false, cudaStatus); update_timer.Stop(); update_time += update_timer.Elapsed(); } update_time_acc += update_time; rank_time_acc += rank_time; /*********************Check RMSE*********************/ rmse_timer.Start(); gpuErrchk(hipMemset(d_rmse, 0, (T.nnz + 1) * sizeof(float))); gpuErrchk(hipMemset(d_pred_v, 0, (T.nnz + 1) * sizeof(float))); hipLaunchKernelGGL(( GPU_rmse), dim3((T.nnz + 1023) / 1024), dim3(1024), 0, 0, d_test_row, d_test_col, d_test_val, d_pred_v, d_rmse, dev_W_, dev_H_, T.nnz, k, R_C.rows, R_C.cols, false); cudaStatus = hipGetLastError(); gpuErrchk(cudaStatus); cudaStatus = hipDeviceSynchronize(); gpuErrchk(cudaStatus); double tot_rmse = 0; double f_rmse = 0; gpuErrchk(hipMemcpy(rmse, d_rmse, (T.nnz + 1) * sizeof(float), hipMemcpyDeviceToHost)); for (long i = 0; i < T.nnz; ++i) { tot_rmse += rmse[i]; } f_rmse = sqrtf(tot_rmse / T.nnz); rmse_timer.Stop(); float rmse_time = rmse_timer.Elapsed(); printf("[-INFO-] iteration num %d \trank_time %.4lf|%.4lf s \tupdate_time %.4lf|%.4lfs \tRMSE=%lf time:%fs\n", oiter, rank_time, rank_time_acc, update_time, update_time_acc, f_rmse, rmse_time); } cudaStatus = hipMemcpy(H_, dev_H_, nbits_H_, hipMemcpyDeviceToHost); gpuErrchk(cudaStatus); cudaStatus = hipMemcpy(W_, dev_W_, nbits_W_, hipMemcpyDeviceToHost); gpuErrchk(cudaStatus); indexPosition = 0; for (long i = 0; i < k; ++i) { for (int j = 0; j < R_C.rows; ++j) { W[i][j] = W_[indexPosition]; ++indexPosition; } } indexPosition = 0; for (long i = 0; i < k; ++i) { for (int j = 0; j < R_C.cols; ++j) { H[i][j] = H_[indexPosition]; ++indexPosition; } } free(W_); free(H_); free(rmse); gpuErrchk(hipFree(dev_W_)); gpuErrchk(hipFree(dev_H_)); gpuErrchk(hipFree(dev_Rcol_ptr)); gpuErrchk(hipFree(dev_Rrow_idx)); gpuErrchk(hipFree(dev_Rcol_ptr_T)); gpuErrchk(hipFree(dev_Rrow_idx_T)); gpuErrchk(hipFree(dev_Rval)); gpuErrchk(hipFree(dev_Rval_t)); gpuErrchk(hipFree(d_test_row)); gpuErrchk(hipFree(d_test_col)); gpuErrchk(hipFree(d_test_val)); gpuErrchk(hipFree(d_pred_v)); gpuErrchk(hipFree(d_rmse)); return cudaStatus; }
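Illustrative sketch (editorial addition, not one of the dataset files): a host-side reference for the closed-form CCD++ coordinate update that RankOneUpdate_v_kernel / RankOneUpdate_u_kernel evaluate per column on the device. The array names col_ptr / row_idx / val stand in for the CSC buffers that ccdpp_NV copies to the GPU.

// Host reference for one CCD++ coordinate update, mirroring RankOneUpdate_dev:
//   v_j = (sum_i u_i * R_ij) / (lambda * nnz_j + sum_i u_i * u_i)
// The kernels pass lambda already scaled by nnz_j = col_ptr[j+1] - col_ptr[j].
static float rank_one_update_host(const unsigned* col_ptr, const unsigned* row_idx,
                                  const float* val, unsigned j,
                                  const float* u, float lambda) {
    const unsigned nnz_j = col_ptr[j + 1] - col_ptr[j];
    if (nnz_j == 0) return 0.0f;              // empty column: keep v_j at zero
    float g = 0.0f;                           // numerator:   sum_i u_i * R_ij
    float h = lambda * nnz_j;                 // denominator: lambda*nnz_j + sum_i u_i^2
    for (unsigned idx = col_ptr[j]; idx < col_ptr[j + 1]; ++idx) {
        g += u[row_idx[idx]] * val[idx];
        h += u[row_idx[idx]] * u[row_idx[idx]];
    }
    return g / h;
}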
29ce69c924fa0a13db43fcf4ad8364252b3c28e0.cu
#include "CCD_CUDA.h" __device__ float RankOneUpdate_dev(const unsigned* Rcol_ptr, const unsigned* Rrow_idx, const float* Rval, const unsigned j, const float* u_vec_t, const float lambda ) { float g = 0, h = lambda; if (Rcol_ptr[j + 1] == Rcol_ptr[j]) { return 0; } for (unsigned idx = Rcol_ptr[j]; idx < Rcol_ptr[j + 1]; ++idx) { unsigned i = Rrow_idx[idx]; g += u_vec_t[i] * Rval[idx]; h += u_vec_t[i] * u_vec_t[i]; } float newvj = g / h; return newvj; } __global__ void RankOneUpdate_v_kernel(const unsigned Rcols, const unsigned* Rcol_ptr, const unsigned* Rrow_idx, const float* Rval, float* u, float* v, const float lambda ) { long thread_id = threadIdx.x + blockIdx.x * blockDim.x; long total_threads = blockDim.x * gridDim.x; for (long c = thread_id; c < Rcols; c += total_threads) { v[c] = RankOneUpdate_dev(Rcol_ptr, Rrow_idx, Rval, c, u, lambda * (Rcol_ptr[c + 1] - Rcol_ptr[c])); } } __global__ void RankOneUpdate_u_kernel(const unsigned Rcols_t, const unsigned* Rcol_ptr_t, const unsigned* Rrow_idx_t, const float* Rval_t, float* u, float* v, const float lambda ) { long thread_id = threadIdx.x + blockIdx.x * blockDim.x; long total_threads = blockDim.x * gridDim.x; for (long c = thread_id; c < Rcols_t; c += total_threads) { u[c] = RankOneUpdate_dev(Rcol_ptr_t, Rrow_idx_t, Rval_t, c, v, lambda * (Rcol_ptr_t[c + 1] - Rcol_ptr_t[c])); } } __global__ void UpdateRating_DUAL_kernel_NoLoss(const unsigned Rcols, const unsigned* Rcol_ptr, const unsigned* Rrow_idx, float* Rval, const float* Wt_vec_t, const float* Ht_vec_t, const bool add, const unsigned Rcols_t, const unsigned* Rcol_ptr_t, const unsigned* Rrow_idx_t, float* Rval_t, const bool add_t ) { long thread_id = threadIdx.x + blockIdx.x * blockDim.x; long total_threads = blockDim.x * gridDim.x; for (long i = thread_id; i < Rcols; i += total_threads) { if (add) { float Htc = Ht_vec_t[i]; for (unsigned idx = Rcol_ptr[i]; idx < Rcol_ptr[i + 1]; ++idx) { Rval[idx] += Wt_vec_t[Rrow_idx[idx]] * Htc; //change R.val } } else { float Htc = Ht_vec_t[i]; for (unsigned idx = Rcol_ptr[i]; idx < Rcol_ptr[i + 1]; ++idx) { Rval[idx] -= Wt_vec_t[Rrow_idx[idx]] * Htc; //change R.val } } } for (long i = thread_id; i < Rcols_t; i += total_threads) { if (add_t) { float Htc = Wt_vec_t[i]; for (unsigned idx = Rcol_ptr_t[i]; idx < Rcol_ptr_t[i + 1]; ++idx) { Rval_t[idx] += Ht_vec_t[Rrow_idx_t[idx]] * Htc; //change R.val } } else { float Htc = Wt_vec_t[i]; for (unsigned idx = Rcol_ptr_t[i]; idx < Rcol_ptr_t[i + 1]; ++idx) { Rval_t[idx] -= Ht_vec_t[Rrow_idx_t[idx]] * Htc; //change R.val } } } } __global__ void UpdateRating_W_kernel(const unsigned Rcols, const unsigned* Rcol_ptr, const unsigned* Rrow_idx, float* Rval, const float* Wt_vec_t, const float* Ht_vec_t, const bool add ) { long thread_id = threadIdx.x + blockIdx.x * blockDim.x; long total_threads = blockDim.x * gridDim.x; for (long i = thread_id; i < Rcols; i += total_threads) { if (add) { float Htc = Ht_vec_t[i]; for (unsigned idx = Rcol_ptr[i]; idx < Rcol_ptr[i + 1]; ++idx) { Rval[idx] += Wt_vec_t[Rrow_idx[idx]] * Htc; //change R.val } } else { float Htc = Ht_vec_t[i]; for (unsigned idx = Rcol_ptr[i]; idx < Rcol_ptr[i + 1]; ++idx) { Rval[idx] -= Wt_vec_t[Rrow_idx[idx]] * Htc; //change R.val } } } } __global__ void UpdateRating_H_kernel(const unsigned Rcols_t, const unsigned* Rcol_ptr_t, const unsigned* Rrow_idx_t, float* Rval_t, const float* Wt_vec_t, const float* Ht_vec_t, const bool add_t ) { long thread_id = threadIdx.x + blockIdx.x * blockDim.x; long total_threads = blockDim.x * gridDim.x; for (long i = 
thread_id; i < Rcols_t; i += total_threads) { if (add_t) { float Htc = Wt_vec_t[i]; for (unsigned idx = Rcol_ptr_t[i]; idx < Rcol_ptr_t[i + 1]; ++idx) { Rval_t[idx] += Ht_vec_t[Rrow_idx_t[idx]] * Htc; //change R.val } } else { float Htc = Wt_vec_t[i]; for (unsigned idx = Rcol_ptr_t[i]; idx < Rcol_ptr_t[i + 1]; ++idx) { Rval_t[idx] -= Ht_vec_t[Rrow_idx_t[idx]] * Htc; //change R.val } } } } void kernel_wrapper_ccdpp_NV(SparseMatrix& R, TestData& T, MatData& W, MatData& H, parameter& parameters) { cudaError_t cudaStatus; // Reset GPU. cudaStatus = cudaDeviceReset(); gpuErrchk(cudaStatus); // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); gpuErrchk(cudaStatus); cudaStatus = ccdpp_NV(R, T, W, H, parameters); if (cudaStatus != cudaSuccess) { fprintf(stderr, "CCD FAILED: %s\n", cudaGetErrorString(cudaStatus)); } cudaStatus = cudaDeviceReset(); gpuErrchk(cudaStatus); } inline cudaError_t updateRating(unsigned int nThreadsPerBlock, unsigned int nBlocks, const SparseMatrix& R_C, const SparseMatrix& Rt, const unsigned* dev_Rcol_ptr, const unsigned* dev_Rrow_idx, const unsigned* dev_Rcol_ptr_T, const unsigned* dev_Rrow_idx_T, float* dev_Rval, float* dev_Rval_t, const float* dev_Wt_vec_t, const float* dev_Ht_vec_t, const bool add, cudaError_t& cudaStatus) { UpdateRating_DUAL_kernel_NoLoss<<<nBlocks, nThreadsPerBlock>>>(R_C.cols, dev_Rcol_ptr, dev_Rrow_idx, dev_Rval, dev_Wt_vec_t, dev_Ht_vec_t, add, Rt.cols, dev_Rcol_ptr_T, dev_Rrow_idx_T, dev_Rval_t, add); // UpdateRating_W_kernel<<<nBlocks, nThreadsPerBlock>>>(R_C.cols, dev_Rcol_ptr, dev_Rrow_idx, // dev_Rval, dev_Wt_vec_t, dev_Ht_vec_t, add); // UpdateRating_H_kernel<<<nBlocks, nThreadsPerBlock>>>(Rt.cols, dev_Rcol_ptr_T, dev_Rrow_idx_T, // dev_Rval_t, dev_Wt_vec_t, dev_Ht_vec_t, add); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); gpuErrchk(cudaStatus); cudaStatus = cudaDeviceSynchronize(); gpuErrchk(cudaStatus); return cudaStatus; } inline cudaError_t RankOneUpdate(unsigned int nThreadsPerBlock, unsigned int nBlocks, float lambda, const SparseMatrix& R_C, const SparseMatrix& Rt, const unsigned* dev_Rcol_ptr, const unsigned* dev_Rrow_idx, const unsigned* dev_Rcol_ptr_T, const unsigned* dev_Rrow_idx_T, const float* dev_Rval, const float* dev_Rval_t, float* dev_Wt_vec_t,float* dev_Ht_vec_t, cudaError_t& cudaStatus) { RankOneUpdate_v_kernel<<<nBlocks, nThreadsPerBlock>>>(R_C.cols, dev_Rcol_ptr, dev_Rrow_idx, dev_Rval, dev_Wt_vec_t, dev_Ht_vec_t, lambda); RankOneUpdate_u_kernel<<<nBlocks, nThreadsPerBlock>>>(Rt.cols, dev_Rcol_ptr_T, dev_Rrow_idx_T, dev_Rval_t, dev_Wt_vec_t, dev_Ht_vec_t, lambda); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); gpuErrchk(cudaStatus); cudaStatus = cudaDeviceSynchronize(); gpuErrchk(cudaStatus); return cudaStatus; } cudaError_t ccdpp_NV(SparseMatrix& R_C, TestData& T, MatData& W, MatData& H, parameter& parameters) { unsigned nThreadsPerBlock = parameters.nThreadsPerBlock; unsigned nBlocks = parameters.nBlocks; cudaError_t cudaStatus; int k = parameters.k; float lambda = parameters.lambda; // Create transpose view of R SparseMatrix Rt; Rt = R_C.get_shallow_transpose(); unsigned* dev_Rcol_ptr = nullptr; unsigned* dev_Rrow_idx = nullptr; unsigned* dev_Rcol_ptr_T = nullptr; unsigned* dev_Rrow_idx_T = nullptr; float* dev_Rval = nullptr; float* dev_Rval_t = nullptr; float* dev_Wt_vec_t = nullptr; //u float* dev_Ht_vec_t = nullptr; //v float* dev_W_ = nullptr; float* dev_H_ = nullptr; size_t nbits_W_ = R_C.rows * k * 
sizeof(float); float* W_ = (float*) malloc(nbits_W_); size_t nbits_H_ = R_C.cols * k * sizeof(float); float* H_ = (float*) malloc(nbits_H_); int indexPosition = 0; for (long i = 0; i < k; ++i) { for (int j = 0; j < R_C.rows; ++j) { W_[indexPosition] = W[i][j]; ++indexPosition; } } // indexPosition = 0; // for (long i = 0; i < k; ++i) { // for (int j = 0; j < R_C.cols; ++j) { // H_[indexPosition] = H[i][j]; // ++indexPosition; // } // } size_t nbits_col_ptr = (R_C.cols + 1) * sizeof(unsigned); size_t nbits_row_ptr = (R_C.rows + 1) * sizeof(unsigned); size_t nbits_idx = R_C.nnz * sizeof(unsigned); size_t nbits_val = R_C.nnz * sizeof(DTYPE); cudaStatus = cudaMalloc((void**) &dev_W_, nbits_W_); gpuErrchk(cudaStatus); cudaStatus = cudaMalloc((void**) &dev_H_, nbits_H_); gpuErrchk(cudaStatus); cudaStatus = cudaMemcpy(dev_W_, W_, nbits_W_, cudaMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = cudaMemset(dev_H_, 0, nbits_H_); // cudaStatus = cudaMemcpy(dev_H_, H_, nbits_H_, cudaMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = cudaMalloc((void**) &dev_Rcol_ptr, nbits_col_ptr); gpuErrchk(cudaStatus); cudaStatus = cudaMalloc((void**) &dev_Rrow_idx, nbits_idx); gpuErrchk(cudaStatus); cudaStatus = cudaMalloc((void**) &dev_Rcol_ptr_T, nbits_row_ptr); gpuErrchk(cudaStatus); cudaStatus = cudaMalloc((void**) &dev_Rrow_idx_T, nbits_idx); gpuErrchk(cudaStatus); cudaStatus = cudaMalloc((void**) &dev_Rval, nbits_val); gpuErrchk(cudaStatus); cudaStatus = cudaMalloc((void**) &dev_Rval_t, nbits_val); gpuErrchk(cudaStatus); cudaStatus = cudaMemcpy(dev_Rcol_ptr, R_C.get_csc_col_ptr(), nbits_col_ptr, cudaMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = cudaMemcpy(dev_Rrow_idx, R_C.get_csc_row_indx(), nbits_idx, cudaMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = cudaMemcpy(dev_Rcol_ptr_T, Rt.get_csc_col_ptr(), nbits_row_ptr, cudaMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = cudaMemcpy(dev_Rrow_idx_T, Rt.get_csc_row_indx(), nbits_idx, cudaMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = cudaMemcpy(dev_Rval, R_C.get_csc_val(), nbits_val, cudaMemcpyHostToDevice); gpuErrchk(cudaStatus); cudaStatus = cudaMemcpy(dev_Rval_t, Rt.get_csc_val(), nbits_val, cudaMemcpyHostToDevice); gpuErrchk(cudaStatus); float* rmse = (float*) malloc((T.nnz + 1) * sizeof(float)); unsigned* d_test_row; unsigned* d_test_col; float* d_test_val; float* d_pred_v; float* d_rmse; gpuErrchk(cudaMalloc((void**) &d_test_row, (T.nnz + 1) * sizeof(unsigned))); gpuErrchk(cudaMalloc((void**) &d_test_col, (T.nnz + 1) * sizeof(unsigned))); gpuErrchk(cudaMalloc((void**) &d_test_val, (T.nnz + 1) * sizeof(float))); gpuErrchk(cudaMalloc((void**) &d_pred_v, (T.nnz + 1) * sizeof(float))); gpuErrchk(cudaMalloc((void**) &d_rmse, (T.nnz + 1) * sizeof(float))); gpuErrchk(cudaMemcpy(d_test_row, T.getTestRow(), (T.nnz + 1) * sizeof(unsigned), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_test_col, T.getTestCol(), (T.nnz + 1) * sizeof(unsigned), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_test_val, T.getTestVal(), (T.nnz + 1) * sizeof(float), cudaMemcpyHostToDevice)); float update_time_acc = 0; float rank_time_acc = 0; for (int oiter = 1; oiter <= parameters.maxiter; ++oiter) { float update_time = 0; float rank_time = 0; GpuTimer update_timer; GpuTimer rmse_timer; GpuTimer rank_timer; for (int t = 0; t < k; ++t) { dev_Wt_vec_t = dev_W_ + t * R_C.rows; //u dev_Ht_vec_t = dev_H_ + t * R_C.cols; //v if (oiter > 1) { update_timer.Start(); cudaStatus = updateRating(nThreadsPerBlock, nBlocks, R_C, Rt, dev_Rcol_ptr, 
dev_Rrow_idx, dev_Rcol_ptr_T, dev_Rrow_idx_T, dev_Rval, dev_Rval_t, dev_Wt_vec_t, dev_Ht_vec_t, true, cudaStatus); update_timer.Stop(); update_time += update_timer.Elapsed(); } rank_timer.Start(); for (int iter = 1; iter <= parameters.maxinneriter; ++iter) { cudaStatus = RankOneUpdate(nThreadsPerBlock, nBlocks, lambda, R_C, Rt, dev_Rcol_ptr, dev_Rrow_idx, dev_Rcol_ptr_T, dev_Rrow_idx_T, dev_Rval, dev_Rval_t, dev_Wt_vec_t, dev_Ht_vec_t, cudaStatus); } rank_timer.Stop(); rank_time += rank_timer.Elapsed(); update_timer.Start(); cudaStatus = updateRating(nThreadsPerBlock, nBlocks, R_C, Rt, dev_Rcol_ptr, dev_Rrow_idx, dev_Rcol_ptr_T, dev_Rrow_idx_T, dev_Rval, dev_Rval_t, dev_Wt_vec_t, dev_Ht_vec_t, false, cudaStatus); update_timer.Stop(); update_time += update_timer.Elapsed(); } update_time_acc += update_time; rank_time_acc += rank_time; /*********************Check RMSE*********************/ rmse_timer.Start(); gpuErrchk(cudaMemset(d_rmse, 0, (T.nnz + 1) * sizeof(float))); gpuErrchk(cudaMemset(d_pred_v, 0, (T.nnz + 1) * sizeof(float))); GPU_rmse<<<(T.nnz + 1023) / 1024, 1024>>>(d_test_row, d_test_col, d_test_val, d_pred_v, d_rmse, dev_W_, dev_H_, T.nnz, k, R_C.rows, R_C.cols, false); cudaStatus = cudaGetLastError(); gpuErrchk(cudaStatus); cudaStatus = cudaDeviceSynchronize(); gpuErrchk(cudaStatus); double tot_rmse = 0; double f_rmse = 0; gpuErrchk(cudaMemcpy(rmse, d_rmse, (T.nnz + 1) * sizeof(float), cudaMemcpyDeviceToHost)); for (long i = 0; i < T.nnz; ++i) { tot_rmse += rmse[i]; } f_rmse = sqrtf(tot_rmse / T.nnz); rmse_timer.Stop(); float rmse_time = rmse_timer.Elapsed(); printf("[-INFO-] iteration num %d \trank_time %.4lf|%.4lf s \tupdate_time %.4lf|%.4lfs \tRMSE=%lf time:%fs\n", oiter, rank_time, rank_time_acc, update_time, update_time_acc, f_rmse, rmse_time); } cudaStatus = cudaMemcpy(H_, dev_H_, nbits_H_, cudaMemcpyDeviceToHost); gpuErrchk(cudaStatus); cudaStatus = cudaMemcpy(W_, dev_W_, nbits_W_, cudaMemcpyDeviceToHost); gpuErrchk(cudaStatus); indexPosition = 0; for (long i = 0; i < k; ++i) { for (int j = 0; j < R_C.rows; ++j) { W[i][j] = W_[indexPosition]; ++indexPosition; } } indexPosition = 0; for (long i = 0; i < k; ++i) { for (int j = 0; j < R_C.cols; ++j) { H[i][j] = H_[indexPosition]; ++indexPosition; } } free(W_); free(H_); free(rmse); gpuErrchk(cudaFree(dev_W_)); gpuErrchk(cudaFree(dev_H_)); gpuErrchk(cudaFree(dev_Rcol_ptr)); gpuErrchk(cudaFree(dev_Rrow_idx)); gpuErrchk(cudaFree(dev_Rcol_ptr_T)); gpuErrchk(cudaFree(dev_Rrow_idx_T)); gpuErrchk(cudaFree(dev_Rval)); gpuErrchk(cudaFree(dev_Rval_t)); gpuErrchk(cudaFree(d_test_row)); gpuErrchk(cudaFree(d_test_col)); gpuErrchk(cudaFree(d_test_val)); gpuErrchk(cudaFree(d_pred_v)); gpuErrchk(cudaFree(d_rmse)); return cudaStatus; }
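Illustrative sketch (editorial addition): a host-side reference for the RMSE check that ccdpp_NV drives through GPU_rmse. GPU_rmse itself is not shown in this file, so the factor-major indexing of W_ and H_ below is an assumption inferred from how dev_W_ + t * R_C.rows and dev_H_ + t * R_C.cols are sliced per factor.

#include <math.h>
// Host reference: pred(i) = sum_t W_[t*rows + row_i] * H_[t*cols + col_i],
// then RMSE = sqrt(sum_i (pred - val)^2 / nnz), matching the host-side
// reduction of rmse[] at the end of each outer iteration.
static double rmse_host(const unsigned* test_row, const unsigned* test_col,
                        const float* test_val, long nnz, int k,
                        const float* W_, const float* H_,
                        long rows, long cols) {
    double tot = 0.0;
    for (long i = 0; i < nnz; ++i) {
        float pred = 0.0f;
        for (int t = 0; t < k; ++t)
            pred += W_[t * rows + test_row[i]] * H_[t * cols + test_col[i]];
        const float e = pred - test_val[i];
        tot += (double)e * e;
    }
    return sqrt(tot / (double)nnz);
}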
93778d1e193bb010534105d709bdf63f0f4ace4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. 
extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_sinhf (size_t n, float *result, float *x) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = sinhf(x[id]); } }
93778d1e193bb010534105d709bdf63f0f4ace4b.cu
#include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. 
extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_sinhf (size_t n, float *result, float *x) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = sinhf(x[id]); } }
4f37c9e0a3358982460288fb2e5d6051fb351b75.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <cfloat> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define cfd_NBLOCKS 16*2*6 //#define cfd_SUPER_BLOCKS_PER_SM 5 #define cfd_BLOCK_SIZE 256 //const int cfd_BLOCK_SIZE = 256; const int cfd_nBlksPerCluster = 16; const int cfd_nAtom = cfd_BLOCK_SIZE * cfd_NBLOCKS; const int cfd_maxNeighbors = 8; texture<float,1,hipReadModeElementType> tex_mx; texture<float,1,hipReadModeElementType> tex_my; texture<float,1,hipReadModeElementType> tex_mz; texture<float,1,hipReadModeElementType> tex_energy; texture<float,1,hipReadModeElementType> tex_density; texture<int,1,hipReadModeElementType> tex_neighbor; texture<float,1,hipReadModeElementType> tex_normals; inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom, int* neighborList, int blockSz) { //create non-uniform data sharing //but avoid that tasks sharing the same data are neighbor tasks by randomization vector<int> atomInds(nAtom); vector<int> blkInds((nAtom+blockSz-1)/blockSz); for(int i=0; i<blkInds.size(); ++i) blkInds[i] = i; random_shuffle(blkInds.begin(), blkInds.end()); int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int)); for(int i=0; i<blkInds.size(); ++i) blkOrder[i] = blkInds[i]; int j=0; for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it) { int blkInd = *it; for(int i=0; i<blockSz; ++i) atomInds[j++] = blkInd*blockSz + i; } int superBlockSz = blockSz * cfd_nBlksPerCluster; // Build Neighbor List for (int i = 0; i < nAtom; i++) { int start = i - i%superBlockSz; //difference is here //int end = i + (superBlockSz - i%superBlockSz)-1; int nNeighbors = 0; do { int j = start + rand() % superBlockSz; if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j]; nNeighbors ++; } while(nNeighbors<cfd_maxNeighbors); } return blkOrder; } #define GAMMA 1.4f #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define NDIM 3 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) __host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __host__ __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __host__ __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = 
velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } void check_cfd(int nelr, int* elements_surrounding_elements, float* normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes) { const float smoothing_coefficient = float(0.2f); //const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int i=0;i<cfd_NBLOCKS*cfd_BLOCK_SIZE;i++){ int j, nb; float3 normal; float normal_len; float factor; //float density_i = variables[i + VAR_DENSITY*nelr]; float density_i = density[i]; float3 momentum_i; //momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; //momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; //momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; momentum_i.x = mx[i]; momentum_i.y = my[i]; momentum_i.z = mz[i]; //float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float density_energy_i = density_energy[i]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); //float flux_i_density = float(0.0f); float flux_i_density = 0.0; float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < cfd_maxNeighbors; j++) { nb = elements_surrounding_elements[i + j*nelr]; //optimal layout already // |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ... // |Z for neighbor 0, Z for neighbor 1, ... 
| normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr]; normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr]; normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { //density_nb = variables[nb + VAR_DENSITY*nelr]; //momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; //momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; //momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_nb = density[nb]; momentum_nb.x = mx[nb]; momentum_nb.y = my[nb]; momentum_nb.z = mz[nb]; //density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; density_energy_nb = density_energy[nb]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity //factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); factor = 1.3; flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } } /*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\ ((pow((fluxes[i + 
VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/ if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\ ((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01))) {printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\ fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\ fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\ fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\ fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy); return;} } printf("GOOD! passed!\n"); return; } __global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, const float* normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes,int *d_flag) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; //float density_i = variables[i + VAR_DENSITY*nelr]; float density_i = density[i];//tex1Dfetch(tex_density,i); float3 momentum_i; //momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; //momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; //momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; momentum_i.x = mx[i]; momentum_i.y = my[i];//tex1Dfetch(tex_my,i);//my[i]; momentum_i.z = mz[i];//tex1Dfetch(tex_mz,i); //mz[i]; //float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float density_energy_i = tex1Dfetch(tex_energy,i);//density_energy[i]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); //float flux_i_density = float(0.0f); float flux_i_density = 0.0; float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < cfd_maxNeighbors; j++) { nb = tex1Dfetch(tex_neighbor,i+j*nelr);//elements_surrounding_elements[i + j*nelr]; //optimal layout already // |X for neighbor 0, X for 
neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ... // |Z for neighbor 0, Z for neighbor 1, ... | normal.x = tex1Dfetch(tex_normals,i + (j + 0*cfd_maxNeighbors)*nelr);//normals[i + (j + 0*cfd_maxNeighbors)*nelr]; normal.y = tex1Dfetch(tex_normals,i + (j + 1*cfd_maxNeighbors)*nelr);//normals[i + (j + 1*cfd_maxNeighbors)*nelr]; normal.z = tex1Dfetch(tex_normals,i + (j + 2*cfd_maxNeighbors)*nelr);//normals[i + (j + 2*cfd_maxNeighbors)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { //density_nb = variables[nb + VAR_DENSITY*nelr]; //momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; //momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; //momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_nb = density[nb];//tex1Dfetch(tex_density,nb); momentum_nb.x = mx[nb];//tex1Dfetch(tex_mx,nb);//mx[nb]; momentum_nb.y = my[nb];//tex1Dfetch(tex_my,nb);//my[nb]; momentum_nb.z = mz[nb];//tex1Dfetch(tex_mz,nb);//mz[nb]; //density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; density_energy_nb = tex1Dfetch(tex_energy,nb);//density_energy[nb]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity //factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); factor = 1.3; flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += 
factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; //if (threadIdx.x==0) atomicAdd(d_flag,1); } int main(int argc, char **argv) { hipSetDevice(2); srand(2013); // Allocate problem data on host //posVecType* position; //forceVecType* force; float *density; float *mx; float *my; float *mz; float *density_energy; float *normals; float *fluxes; int* cfd_neighborList; hipHostMalloc((void**)&density, cfd_nAtom*sizeof(float)); hipHostMalloc((void**)&mx, cfd_nAtom*sizeof(float)); hipHostMalloc((void**)&my, cfd_nAtom*sizeof(float)); hipHostMalloc((void**)&mz, cfd_nAtom*sizeof(float)); hipHostMalloc((void**)&density_energy, cfd_nAtom*sizeof(float)); hipHostMalloc((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float)); hipHostMalloc((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float)); hipHostMalloc((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int)); // Allocate device memory for position and force //forceVecType* d_force; //posVecType* d_position; float *d_density; float *d_mx; float *d_my; float *d_mz; float *d_density_energy; float *d_normals; float *d_fluxes; hipMalloc((void**)&d_density, cfd_nAtom*sizeof(float)); hipMalloc((void**)&d_mx, cfd_nAtom*sizeof(float)); hipMalloc((void**)&d_my, cfd_nAtom*sizeof(float)); hipMalloc((void**)&d_mz, cfd_nAtom*sizeof(float)); hipMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float)); hipMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float)); hipMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float)); hipMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float)); //hipMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType)); // Allocate device memory for neighbor list int* d_cfd_neighborList; hipMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int)); //cout << "Initializing test problem (this can take several " // "minutes for large problems)\n"; // Initialize positions -- random distribution in cubic domain // domainEdge constant specifies edge length for (int i = 0; i < cfd_nAtom; i++) { density[i] = (float)(drand48()); density_energy[i] = (float)(drand48() ); mx[i] = (float)(drand48() ); my[i] = (float)(drand48() ); mz[i] = (float)(drand48() ); /* density[i] = 1.1+i*0.01; density_energy[i] = 1.1+i*0.01; mx[i] = 1.1+i*0.01; my[i] = 1.1+i*0.01; mz[i] = 1.1+i*0.01; */ } for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i) normals[i] = (float)(drand48()); cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, cfd_BLOCK_SIZE); hipMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), hipMemcpyHostToDevice); // Copy data to GPU hipMemcpy(d_density, density, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_my, my, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), hipMemcpyHostToDevice); hipSetDeviceFlags(hipDeviceMapHost); int *flag_cfd,*d_flag_cfd; hipHostMalloc((void**)&flag_cfd,sizeof( int),hipHostMallocMapped); 
hipHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0); hipBindTexture(0,tex_mx,d_mx,cfd_nAtom*sizeof(float)); hipBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float)); hipBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float)); hipBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float)); hipBindTexture(0,tex_density,d_density,cfd_nAtom*sizeof(float)); hipBindTexture(0,tex_neighbor,d_cfd_neighborList,cfd_maxNeighbors*cfd_nAtom*sizeof(int)); hipBindTexture(0,tex_normals,d_normals,cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float)); hipEvent_t kernel_start, kernel_stop; hipEventCreate(&kernel_start); hipEventCreate(&kernel_stop); float kernel_time = 0.0f; hipEventRecord(kernel_start, 0); int cfd_gridSize = (cfd_nAtom-1+cfd_BLOCK_SIZE) / cfd_BLOCK_SIZE; hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(cfd_BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy, d_fluxes,d_flag_cfd); hipDeviceSynchronize(); hipEventRecord(kernel_stop, 0); hipEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; hipMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), hipMemcpyDeviceToHost); check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes); //TODO:verified on small inputs /* ifstream fluxesF("../org/fluxes.txt"); for(int i=0; i<cfd_nAtom*NVAR; ++i) { float f; fluxesF >> f; if(abs(f - fluxes[i]) > 0.001) { fprintf(stderr, "Test failed! i = %d\n", i); return 1; } }*/ // printf("Test passed!\n"); // fluxesF.close(); return 0; }
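Illustrative sketch (editorial addition): a host reference for the perfect-gas relations that cfd_kernel evaluates per cell via compute_pressure and compute_speed_of_sound, with GAMMA = 1.4f as defined above. Note the kernel hard-codes the artificial-viscosity factor to 1.3 and leaves the -0.5 * |normal| * 0.2 * (speed_i + speed_nb + c_i + c_nb) expression commented out.

#include <math.h>
// pressure = (gamma - 1) * (E - 0.5 * rho * |v|^2),  c = sqrt(gamma * p / rho)
static void gas_state_host(float density, float mx, float my, float mz,
                           float density_energy,
                           float* pressure, float* speed_of_sound) {
    const float gamma = 1.4f;                        // GAMMA in the file above
    const float vx = mx / density, vy = my / density, vz = mz / density;
    const float speed_sqd = vx * vx + vy * vy + vz * vz;
    *pressure = (gamma - 1.0f) * (density_energy - 0.5f * density * speed_sqd);
    *speed_of_sound = sqrtf(gamma * (*pressure) / density);
}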
4f37c9e0a3358982460288fb2e5d6051fb351b75.cu
#include <cassert> #include <cfloat> #include <cuda_runtime_api.h> #include <cuda.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define cfd_NBLOCKS 16*2*6 //#define cfd_SUPER_BLOCKS_PER_SM 5 #define cfd_BLOCK_SIZE 256 //const int cfd_BLOCK_SIZE = 256; const int cfd_nBlksPerCluster = 16; const int cfd_nAtom = cfd_BLOCK_SIZE * cfd_NBLOCKS; const int cfd_maxNeighbors = 8; texture<float,1,cudaReadModeElementType> tex_mx; texture<float,1,cudaReadModeElementType> tex_my; texture<float,1,cudaReadModeElementType> tex_mz; texture<float,1,cudaReadModeElementType> tex_energy; texture<float,1,cudaReadModeElementType> tex_density; texture<int,1,cudaReadModeElementType> tex_neighbor; texture<float,1,cudaReadModeElementType> tex_normals; inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom, int* neighborList, int blockSz) { //create non-uniform data sharing //but avoid that tasks sharing the same data are neighbor tasks by randomization vector<int> atomInds(nAtom); vector<int> blkInds((nAtom+blockSz-1)/blockSz); for(int i=0; i<blkInds.size(); ++i) blkInds[i] = i; random_shuffle(blkInds.begin(), blkInds.end()); int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int)); for(int i=0; i<blkInds.size(); ++i) blkOrder[i] = blkInds[i]; int j=0; for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it) { int blkInd = *it; for(int i=0; i<blockSz; ++i) atomInds[j++] = blkInd*blockSz + i; } int superBlockSz = blockSz * cfd_nBlksPerCluster; // Build Neighbor List for (int i = 0; i < nAtom; i++) { int start = i - i%superBlockSz; //difference is here //int end = i + (superBlockSz - i%superBlockSz)-1; int nNeighbors = 0; do { int j = start + rand() % superBlockSz; if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j]; nNeighbors ++; } while(nNeighbors<cfd_maxNeighbors); } return blkOrder; } #define GAMMA 1.4f #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define NDIM 3 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) __host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __host__ __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __host__ __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = 
fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } void check_cfd(int nelr, int* elements_surrounding_elements, float* normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes) { const float smoothing_coefficient = float(0.2f); //const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int i=0;i<cfd_NBLOCKS*cfd_BLOCK_SIZE;i++){ int j, nb; float3 normal; float normal_len; float factor; //float density_i = variables[i + VAR_DENSITY*nelr]; float density_i = density[i]; float3 momentum_i; //momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; //momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; //momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; momentum_i.x = mx[i]; momentum_i.y = my[i]; momentum_i.z = mz[i]; //float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float density_energy_i = density_energy[i]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); //float flux_i_density = float(0.0f); float flux_i_density = 0.0; float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < cfd_maxNeighbors; j++) { nb = elements_surrounding_elements[i + j*nelr]; //optimal layout already // |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ... // |Z for neighbor 0, Z for neighbor 1, ... 
| normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr]; normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr]; normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { //density_nb = variables[nb + VAR_DENSITY*nelr]; //momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; //momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; //momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_nb = density[nb]; momentum_nb.x = mx[nb]; momentum_nb.y = my[nb]; momentum_nb.z = mz[nb]; //density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; density_energy_nb = density_energy[nb]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity //factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); factor = 1.3; flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } } /*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\ ((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\ ((pow((fluxes[i + 
VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/ if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\ ((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\ ((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01))) {printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\ fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\ fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\ fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\ fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy); return;} } printf("GOOD! passed!\n"); return; } __global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, const float* normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes,int *d_flag) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; //float density_i = variables[i + VAR_DENSITY*nelr]; float density_i = density[i];//tex1Dfetch(tex_density,i); float3 momentum_i; //momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; //momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; //momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; momentum_i.x = mx[i]; momentum_i.y = my[i];//tex1Dfetch(tex_my,i);//my[i]; momentum_i.z = mz[i];//tex1Dfetch(tex_mz,i); //mz[i]; //float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float density_energy_i = tex1Dfetch(tex_energy,i);//density_energy[i]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); //float flux_i_density = float(0.0f); float flux_i_density = 0.0; float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < cfd_maxNeighbors; j++) { nb = tex1Dfetch(tex_neighbor,i+j*nelr);//elements_surrounding_elements[i + j*nelr]; //optimal layout already // |X for neighbor 0, X for 
neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ... // |Z for neighbor 0, Z for neighbor 1, ... | normal.x = tex1Dfetch(tex_normals,i + (j + 0*cfd_maxNeighbors)*nelr);//normals[i + (j + 0*cfd_maxNeighbors)*nelr]; normal.y = tex1Dfetch(tex_normals,i + (j + 1*cfd_maxNeighbors)*nelr);//normals[i + (j + 1*cfd_maxNeighbors)*nelr]; normal.z = tex1Dfetch(tex_normals,i + (j + 2*cfd_maxNeighbors)*nelr);//normals[i + (j + 2*cfd_maxNeighbors)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { //density_nb = variables[nb + VAR_DENSITY*nelr]; //momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; //momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; //momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_nb = density[nb];//tex1Dfetch(tex_density,nb); momentum_nb.x = mx[nb];//tex1Dfetch(tex_mx,nb);//mx[nb]; momentum_nb.y = my[nb];//tex1Dfetch(tex_my,nb);//my[nb]; momentum_nb.z = mz[nb];//tex1Dfetch(tex_mz,nb);//mz[nb]; //density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; density_energy_nb = tex1Dfetch(tex_energy,nb);//density_energy[nb]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity //factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); factor = 1.3; flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += 
factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; //if (threadIdx.x==0) atomicAdd(d_flag,1); } int main(int argc, char **argv) { cudaSetDevice(2); srand(2013); // Allocate problem data on host //posVecType* position; //forceVecType* force; float *density; float *mx; float *my; float *mz; float *density_energy; float *normals; float *fluxes; int* cfd_neighborList; cudaMallocHost((void**)&density, cfd_nAtom*sizeof(float)); cudaMallocHost((void**)&mx, cfd_nAtom*sizeof(float)); cudaMallocHost((void**)&my, cfd_nAtom*sizeof(float)); cudaMallocHost((void**)&mz, cfd_nAtom*sizeof(float)); cudaMallocHost((void**)&density_energy, cfd_nAtom*sizeof(float)); cudaMallocHost((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float)); cudaMallocHost((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float)); cudaMallocHost((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int)); // Allocate device memory for position and force //forceVecType* d_force; //posVecType* d_position; float *d_density; float *d_mx; float *d_my; float *d_mz; float *d_density_energy; float *d_normals; float *d_fluxes; cudaMalloc((void**)&d_density, cfd_nAtom*sizeof(float)); cudaMalloc((void**)&d_mx, cfd_nAtom*sizeof(float)); cudaMalloc((void**)&d_my, cfd_nAtom*sizeof(float)); cudaMalloc((void**)&d_mz, cfd_nAtom*sizeof(float)); cudaMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float)); cudaMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float)); cudaMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float)); cudaMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float)); //cudaMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType)); // Allocate device memory for neighbor list int* d_cfd_neighborList; cudaMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int)); //cout << "Initializing test problem (this can take several " // "minutes for large problems)\n"; // Initialize positions -- random distribution in cubic domain // domainEdge constant specifies edge length for (int i = 0; i < cfd_nAtom; i++) { density[i] = (float)(drand48()); density_energy[i] = (float)(drand48() ); mx[i] = (float)(drand48() ); my[i] = (float)(drand48() ); mz[i] = (float)(drand48() ); /* density[i] = 1.1+i*0.01; density_energy[i] = 1.1+i*0.01; mx[i] = 1.1+i*0.01; my[i] = 1.1+i*0.01; mz[i] = 1.1+i*0.01; */ } for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i) normals[i] = (float)(drand48()); cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, cfd_BLOCK_SIZE); cudaMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), cudaMemcpyHostToDevice); // Copy data to GPU cudaMemcpy(d_density, density, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_my, my, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), cudaMemcpyHostToDevice); cudaSetDeviceFlags(cudaDeviceMapHost); int *flag_cfd,*d_flag_cfd; cudaHostAlloc((void**)&flag_cfd,sizeof( int),cudaHostAllocMapped); 
cudaHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0); cudaBindTexture(0,tex_mx,d_mx,cfd_nAtom*sizeof(float)); cudaBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float)); cudaBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float)); cudaBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float)); cudaBindTexture(0,tex_density,d_density,cfd_nAtom*sizeof(float)); cudaBindTexture(0,tex_neighbor,d_cfd_neighborList,cfd_maxNeighbors*cfd_nAtom*sizeof(int)); cudaBindTexture(0,tex_normals,d_normals,cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float)); cudaEvent_t kernel_start, kernel_stop; cudaEventCreate(&kernel_start); cudaEventCreate(&kernel_stop); float kernel_time = 0.0f; cudaEventRecord(kernel_start, 0); int cfd_gridSize = (cfd_nAtom-1+cfd_BLOCK_SIZE) / cfd_BLOCK_SIZE; cfd_kernel<<<cfd_gridSize, cfd_BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy, d_fluxes,d_flag_cfd); cudaDeviceSynchronize(); cudaEventRecord(kernel_stop, 0); cudaEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; cudaMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), cudaMemcpyDeviceToHost); check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes); //TODO:verified on small inputs /* ifstream fluxesF("../org/fluxes.txt"); for(int i=0; i<cfd_nAtom*NVAR; ++i) { float f; fluxesF >> f; if(abs(f - fluxes[i]) > 0.001) { fprintf(stderr, "Test failed! i = %d\n", i); return 1; } }*/ // printf("Test passed!\n"); // fluxesF.close(); return 0; }
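The check_cfd routine in the file above only reports a failure when a flux entry misses both a relative and an absolute 0.01 tolerance against the host recomputation. A minimal host-only sketch of that acceptance rule; flux_mismatch is an illustrative name of mine, not something from the file:

#include <cmath>
#include <cstdio>

// Illustrative helper mirroring the test in check_cfd: a value is flagged only
// if BOTH the relative error and the absolute error exceed the 0.01 threshold.
static bool flux_mismatch(float gpu, float cpu, float tol = 0.01f)
{
    float abs_err = std::fabs(gpu - cpu);
    float rel_err = std::fabs(abs_err / cpu); // cpu assumed non-zero, as in check_cfd
    return (rel_err > tol) && (abs_err > tol);
}

int main()
{
    printf("%d\n", flux_mismatch(101.5f, 100.0f)); // 1: both errors exceed 0.01
    printf("%d\n", flux_mismatch(1.005f, 1.0f));   // 0: absolute error is only 0.005
    return 0;
}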
1a2b84fc2881480aacaf06d151cd192fa88e1935.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <opencv2/opencv.hpp> #include "../include/slic.h" #include <sys/time.h> int main(int argc, char** argv) { if(argc !=2) { printf("Invalid number of parameters (please provide image filename)\n"); return -1; } char* imageName = argv[1]; cv::Mat m_image; m_image = cv::imread(imageName, cv::IMREAD_COLOR); if(!m_image.data) { printf("Could not open image\n"); return -2; } cv::Size size(pix_width, pix_height); cv::Mat m_resized; cv::resize(m_image, m_resized, size); if(!m_resized.isContinuous()) { printf("OpenCV is being difficult. Sorry :,(. Suiciding.\n"); return -3; } cv::imwrite("./resized_image.tif", m_resized); cv::Mat m_lab_image; cv::cvtColor(m_resized, m_lab_image, cv::COLOR_BGR2Lab); printf("Bytes: %lu\n", m_lab_image.total()*m_lab_image.channels()); // Preparations for Kernel invocation pix_data* d_pix_data; own_data* d_own_data; spx_data* d_spx_data; int pix_byte_size = pix_width * pix_height * sizeof(pix_data); int own_byte_size = pix_width * pix_height * sizeof(own_data); int spx_byte_size = spx_width * spx_height * sizeof(spx_data); // Error code to check return values for CUDA calls hipError_t err = hipSuccess; err = hipMalloc(&d_pix_data, pix_byte_size); reportError(err, __FILE__, __LINE__); err = hipMalloc(&d_own_data, own_byte_size); reportError(err, __FILE__, __LINE__); err = hipMalloc(&d_spx_data, spx_byte_size); reportError(err, __FILE__, __LINE__); pix_data* h_pix_data = (pix_data*)malloc(pix_byte_size); for (int x=0; x<pix_width; x++) for (int y=0; y<pix_height; y++) { int pix_idx = x + pix_width*y; h_pix_data[pix_idx].l = ((pix_original_data*)m_lab_image.data)[pix_idx].l; h_pix_data[pix_idx].a = ((pix_original_data*)m_lab_image.data)[pix_idx].a; h_pix_data[pix_idx].b = ((pix_original_data*)m_lab_image.data)[pix_idx].b; } err = hipMemcpy(d_pix_data, h_pix_data, pix_byte_size, hipMemcpyHostToDevice); reportError(err, __FILE__, __LINE__); own_data* h_own_data = (own_data*)malloc(own_byte_size); initialize_own(h_own_data); err = hipMemcpy(d_own_data, h_own_data, own_byte_size, hipMemcpyHostToDevice); reportError(err, __FILE__, __LINE__); own_data* h_n_own_data = (own_data*)malloc(own_byte_size); initialize_n_own(h_n_own_data); spx_data* h_spx_data = (spx_data*)malloc(spx_byte_size); initialize_spx(h_spx_data); err = hipMemcpy(d_spx_data, h_spx_data, spx_byte_size, hipMemcpyHostToDevice); reportError(err, __FILE__, __LINE__); initializeSlicFactor(); // = (float)slic_m / slic_s // std::numeric_limits<float>::max() // -------------------- The Kernel magic -------------------- //k_measure<<<dim3(10,10), dim3(32,32)>>>(0,1234); // Original cumulativeSum kernel dim3 pix_threadsPerBlock( 32, 8 ) ; int pix_blockPerGridX = (pix_width + pix_threadsPerBlock.x-1)/pix_threadsPerBlock.x; int pix_blockPerGridY = (pix_height + pix_threadsPerBlock.y-1)/pix_threadsPerBlock.y; dim3 pix_blocksPerGrid(pix_blockPerGridX, pix_blockPerGridY, 1); // Ownership kernel - TODO: Optimize //dim3 pix_threadsPerBlockOwn( 32, 32 ) ; // Original #define horiz 32 #define vert 1 dim3 pix_threadsPerBlockOwn( horiz, vert ) ; // Optimized int pix_blockPerGridXOwn = 4096 / horiz;//(pix_width + pix_threadsPerBlockOwn.x-1)/pix_threadsPerBlockOwn.x; //32 int pix_blockPerGridYOwn = 2048 / (vert * pix_per_thread);//(pix_height + pix_threadsPerBlockOwn.y-1)/pix_threadsPerBlockOwn.y; //16 printf("%i || %i\n",pix_blockPerGridXOwn, pix_blockPerGridYOwn); dim3 pix_blocksPerGridOwn(pix_blockPerGridXOwn, 
pix_blockPerGridYOwn, 1); // Optimized cumulativeSum kernel dim3 pix_threadsPerBlockOpt( 128, 1*OPT6 ); int pix_blockPerGridXOpt = (pix_width + pix_threadsPerBlockOpt.x-1)/pix_threadsPerBlockOpt.x; int pix_blockPerGridYOpt = (pix_height + pix_threadsPerBlockOpt.y-1)/pix_threadsPerBlockOpt.y; dim3 pix_blocksPerGridOpt(pix_blockPerGridXOpt, OPT6*(pix_blockPerGridYOpt+pix_at_a_time-1)/pix_at_a_time, 1); printf("Y:%d\n", pix_blocksPerGridOpt.y); //k_ownership<<<pix_blocksPerGridOwn, pix_threadsPerBlockOwn>>>(d_pix_data, d_own_data, d_spx_data); hipLaunchKernelGGL(( k_cumulativeCount), dim3(pix_blocksPerGridOpt), dim3(pix_threadsPerBlockOpt), 0, 0, d_pix_data, d_own_data, d_spx_data #ifdef BANKDEBUG , true #endif ); printf("1\n"); hipDeviceSynchronize(); //TODO // Reset and Averaging kernels dim3 spx_threadsPerBlock(32, 32); int spx_blockPerGridX = (spx_width + spx_threadsPerBlock.x-1)/spx_threadsPerBlock.x; int spx_blockPerGridY = (spx_height + spx_threadsPerBlock.y-1)/spx_threadsPerBlock.y; dim3 spx_blocksPerGrid(spx_blockPerGridX, spx_blockPerGridY, 1); hipLaunchKernelGGL(( k_averaging), dim3(spx_blocksPerGrid), dim3(spx_threadsPerBlock), 0, 0, d_spx_data); const int iterations = 10; hipDeviceSynchronize(); double ts_start = getTimestamp(); for (int i = 0 ; i<iterations; i++) { hipLaunchKernelGGL(( k_reset), dim3(spx_blocksPerGrid), dim3(spx_threadsPerBlock), 0, 0, d_spx_data); err = hipGetLastError(); reportError(err, __FILE__, __LINE__); hipLaunchKernelGGL(( k_ownership), dim3(pix_blocksPerGridOwn), dim3(pix_threadsPerBlockOwn), 0, 0, d_pix_data, d_own_data, d_spx_data); err = hipGetLastError(); reportError(err, __FILE__, __LINE__); hipLaunchKernelGGL(( k_cumulativeCount), dim3(pix_blocksPerGridOpt), dim3(pix_threadsPerBlockOpt), 0, 0, d_pix_data, d_own_data, d_spx_data #ifdef BANKDEBUG , false #endif ); err = hipGetLastError(); reportError(err, __FILE__, __LINE__); //printf("REMOVE THIS BEFORE MEASURING\n"); hipDeviceSynchronize(); //TODO hipLaunchKernelGGL(( k_averaging), dim3(spx_blocksPerGrid), dim3(spx_threadsPerBlock), 0, 0, d_spx_data); err = hipGetLastError(); reportError(err, __FILE__, __LINE__); } hipDeviceSynchronize(); double ts_end = getTimestamp(); printf("Average time %0.9f, total %0.9f iters %d\n", (ts_end - ts_start)/iterations, (ts_end - ts_start), iterations); err = hipMemcpy(h_pix_data, d_pix_data, pix_byte_size, hipMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); err = hipMemcpy(h_own_data, d_own_data, own_byte_size, hipMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); err = hipMemcpy(h_spx_data, d_spx_data, spx_byte_size, hipMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); const bool doConnectivity = true; if (doConnectivity) { enforce_label_connectivity(h_own_data, pix_width, pix_height, h_n_own_data, spx_width * spx_height); err = hipMemcpy(d_own_data, h_n_own_data, own_byte_size, hipMemcpyHostToDevice); reportError(err, __FILE__, __LINE__); } else { // NO-OP } hipLaunchKernelGGL(( k_reset), dim3(spx_blocksPerGrid), dim3(spx_threadsPerBlock), 0, 0, d_spx_data); err = hipGetLastError(); reportError(err, __FILE__, __LINE__); // Has to be original cumulativeCount, because we can't assume window size of 1 after conn. 
enforcement hipLaunchKernelGGL(( k_cumulativeCountOrig), dim3(pix_blocksPerGrid), dim3(pix_threadsPerBlock), 0, 0, d_pix_data, d_own_data, d_spx_data); err = hipGetLastError(); reportError(err, __FILE__, __LINE__); printf("3\n"); hipDeviceSynchronize(); //TODO hipLaunchKernelGGL(( k_averaging), dim3(spx_blocksPerGrid), dim3(spx_threadsPerBlock), 0, 0, d_spx_data); err = hipMemcpy(h_pix_data, d_pix_data, pix_byte_size, hipMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); err = hipMemcpy(h_own_data, d_own_data, own_byte_size, hipMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); err = hipMemcpy(h_spx_data, d_spx_data, spx_byte_size, hipMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); color_solid(h_pix_data, h_own_data, h_spx_data); //color_borders(h_pix_data, h_own_data, h_spx_data); //test_color_own(h_pix_data, h_own_data, h_spx_data); for (int x=0; x<pix_width; x++) for (int y=0; y<pix_height; y++) { int pix_idx = x + pix_width*y; ((pix_original_data*)m_lab_image.data)[pix_idx].l = h_pix_data[pix_idx].l; ((pix_original_data*)m_lab_image.data)[pix_idx].a = h_pix_data[pix_idx].a; ((pix_original_data*)m_lab_image.data)[pix_idx].b = h_pix_data[pix_idx].b; } cv::Mat m_rgb_result_image; cv::cvtColor(m_lab_image, m_rgb_result_image, cv::COLOR_Lab2BGR); cv::imwrite("./processed_image.jpg", m_rgb_result_image); hipDeviceSynchronize(); hipDeviceReset(); printf("SUCCESS!\n"); } // Initializes superpixel centers to be distributed evenly void initialize_spx(spx_data* h_spx_data) { for (int i = 0; i < spx_width; i++) { for(int j = 0; j < spx_height; j++) { int spx_index = j * spx_width + i; h_spx_data[spx_index].l = 127; h_spx_data[spx_index].a = 127; h_spx_data[spx_index].b = 127; h_spx_data[spx_index].x = (2 * spx_size * i + spx_size)/2; h_spx_data[spx_index].y = (2 * spx_size * j + spx_size)/2; for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx) { h_spx_data[spx_index].accum[ny][nx][0] = 0; h_spx_data[spx_index].accum[ny][nx][1] = 0; h_spx_data[spx_index].accum[ny][nx][2] = 0; h_spx_data[spx_index].accum[ny][nx][3] = 0; h_spx_data[spx_index].accum[ny][nx][4] = 0; h_spx_data[spx_index].accum[ny][nx][5] = 0; } } } } // Initializes superpixel ownership to regular squares void initialize_own(own_data* h_own_data) { for (int x = 0; x < pix_width; x++) { for(int y = 0; y < pix_height; y++) { int own_index = y * pix_width + x; int i = x/spx_size; int j = y/spx_size; h_own_data[own_index].i = i; h_own_data[own_index].j = j; } } } // Initializes superpixel ownership for continuity enforcement void initialize_n_own(own_data* h_n_own_data) { for (int x = 0; x < pix_width; x++) { for(int y = 0; y < pix_height; y++) { int own_index = y * pix_width + x; h_n_own_data[own_index].i = -1; h_n_own_data[own_index].j = -1; } } } // Solid colorizer: Paints each superpixel with its average color void color_solid(pix_data* h_pix_data, const own_data* h_own_data, const spx_data* h_spx_data) { for (int x = 0; x < pix_width; x++) { for(int y = 0; y < pix_height; y++) { int pix_index = y * pix_width + x; int spx_index = h_own_data[pix_index].j * spx_width + h_own_data[pix_index].i; h_pix_data[pix_index].l = h_spx_data[spx_index].l; h_pix_data[pix_index].a = h_spx_data[spx_index].a; h_pix_data[pix_index].b = h_spx_data[spx_index].b; } } } int get_spx_id(const own_data* h_own_data, int x, int y) { int pix_index = y * pix_width + x; return h_own_data[pix_index].j * spx_width + h_own_data[pix_index].i; } // Colors the border of superpixels to make it easier to visualize them void color_borders(pix_data* 
h_pix_data, const own_data* h_own_data, const spx_data* h_spx_data) { for (int x = 0; x < pix_width; x++) { for(int y = 0; y < pix_height; y++) { int pix_index = y * pix_width + x; int spx_id = get_spx_id(h_own_data, x, y); bool border = false; border = border || (x == 0) || spx_id != get_spx_id(h_own_data, x-1, y); border = border || (x == pix_width-1) || spx_id != get_spx_id(h_own_data, x+1, y); border = border || (y == 0) || spx_id != get_spx_id(h_own_data, x, y-1); border = border || (y == pix_height-1) || spx_id != get_spx_id(h_own_data, x, y+1); border = border || (x == 0) || (y==0) || spx_id != get_spx_id(h_own_data, x-1, y-1); border = border || (x == 0) || (y == pix_height-1) || spx_id != get_spx_id(h_own_data, x-1, y+1); border = border || (x == pix_width-1) || (y==0) || spx_id != get_spx_id(h_own_data, x+1, y-1); border = border || (x == pix_width-1) || (y == pix_height-1) || spx_id != get_spx_id(h_own_data, x+1, y+1); if(border) { h_pix_data[pix_index].l = 0; h_pix_data[pix_index].a = 0; h_pix_data[pix_index].b = 0; } } } } double getTimestamp() { struct timeval tv; gettimeofday(&tv, NULL); return (double) tv.tv_usec/1000000.0 + tv.tv_sec; } void reportError(hipError_t err, const char* file, int line) { if (err != hipSuccess) { printf("%s failed at %i\n", file, line); printf("%s\n", hipGetErrorString(err)); exit(-1); } }
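The block-count calculations in the file above (pix_blockPerGridX, spx_blockPerGridX, and their y counterparts) all use the same ceiling-division idiom so that the final, partially filled block is still launched. A small host-only sketch of that idiom; blocks_for is an illustrative name, not part of the file:

#include <cstdio>

// Illustrative helper for the grid-sizing idiom used above: ceiling division,
// so blocks * threads_per_block always covers every element.
static unsigned int blocks_for(unsigned int elements, unsigned int threads_per_block)
{
    return (elements + threads_per_block - 1) / threads_per_block;
}

int main()
{
    printf("%u\n", blocks_for(4096, 32)); // 128: divides evenly
    printf("%u\n", blocks_for(4097, 32)); // 129: one extra block for the remainder
    return 0;
}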
1a2b84fc2881480aacaf06d151cd192fa88e1935.cu
#include <stdio.h> #include <opencv2/opencv.hpp> #include "../include/slic.h" #include <sys/time.h> int main(int argc, char** argv) { if(argc !=2) { printf("Invalid number of parameters (please provide image filename)\n"); return -1; } char* imageName = argv[1]; cv::Mat m_image; m_image = cv::imread(imageName, cv::IMREAD_COLOR); if(!m_image.data) { printf("Could not open image\n"); return -2; } cv::Size size(pix_width, pix_height); cv::Mat m_resized; cv::resize(m_image, m_resized, size); if(!m_resized.isContinuous()) { printf("OpenCV is being difficult. Sorry :,(. Suiciding.\n"); return -3; } cv::imwrite("./resized_image.tif", m_resized); cv::Mat m_lab_image; cv::cvtColor(m_resized, m_lab_image, cv::COLOR_BGR2Lab); printf("Bytes: %lu\n", m_lab_image.total()*m_lab_image.channels()); // Preparations for Kernel invocation pix_data* d_pix_data; own_data* d_own_data; spx_data* d_spx_data; int pix_byte_size = pix_width * pix_height * sizeof(pix_data); int own_byte_size = pix_width * pix_height * sizeof(own_data); int spx_byte_size = spx_width * spx_height * sizeof(spx_data); // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; err = cudaMalloc(&d_pix_data, pix_byte_size); reportError(err, __FILE__, __LINE__); err = cudaMalloc(&d_own_data, own_byte_size); reportError(err, __FILE__, __LINE__); err = cudaMalloc(&d_spx_data, spx_byte_size); reportError(err, __FILE__, __LINE__); pix_data* h_pix_data = (pix_data*)malloc(pix_byte_size); for (int x=0; x<pix_width; x++) for (int y=0; y<pix_height; y++) { int pix_idx = x + pix_width*y; h_pix_data[pix_idx].l = ((pix_original_data*)m_lab_image.data)[pix_idx].l; h_pix_data[pix_idx].a = ((pix_original_data*)m_lab_image.data)[pix_idx].a; h_pix_data[pix_idx].b = ((pix_original_data*)m_lab_image.data)[pix_idx].b; } err = cudaMemcpy(d_pix_data, h_pix_data, pix_byte_size, cudaMemcpyHostToDevice); reportError(err, __FILE__, __LINE__); own_data* h_own_data = (own_data*)malloc(own_byte_size); initialize_own(h_own_data); err = cudaMemcpy(d_own_data, h_own_data, own_byte_size, cudaMemcpyHostToDevice); reportError(err, __FILE__, __LINE__); own_data* h_n_own_data = (own_data*)malloc(own_byte_size); initialize_n_own(h_n_own_data); spx_data* h_spx_data = (spx_data*)malloc(spx_byte_size); initialize_spx(h_spx_data); err = cudaMemcpy(d_spx_data, h_spx_data, spx_byte_size, cudaMemcpyHostToDevice); reportError(err, __FILE__, __LINE__); initializeSlicFactor(); // = (float)slic_m / slic_s // std::numeric_limits<float>::max() // -------------------- The Kernel magic -------------------- //k_measure<<<dim3(10,10), dim3(32,32)>>>(0,1234); // Original cumulativeSum kernel dim3 pix_threadsPerBlock( 32, 8 ) ; int pix_blockPerGridX = (pix_width + pix_threadsPerBlock.x-1)/pix_threadsPerBlock.x; int pix_blockPerGridY = (pix_height + pix_threadsPerBlock.y-1)/pix_threadsPerBlock.y; dim3 pix_blocksPerGrid(pix_blockPerGridX, pix_blockPerGridY, 1); // Ownership kernel - TODO: Optimize //dim3 pix_threadsPerBlockOwn( 32, 32 ) ; // Original #define horiz 32 #define vert 1 dim3 pix_threadsPerBlockOwn( horiz, vert ) ; // Optimized int pix_blockPerGridXOwn = 4096 / horiz;//(pix_width + pix_threadsPerBlockOwn.x-1)/pix_threadsPerBlockOwn.x; //32 int pix_blockPerGridYOwn = 2048 / (vert * pix_per_thread);//(pix_height + pix_threadsPerBlockOwn.y-1)/pix_threadsPerBlockOwn.y; //16 printf("%i || %i\n",pix_blockPerGridXOwn, pix_blockPerGridYOwn); dim3 pix_blocksPerGridOwn(pix_blockPerGridXOwn, pix_blockPerGridYOwn, 1); // Optimized cumulativeSum kernel dim3 
pix_threadsPerBlockOpt( 128, 1*OPT6 ); int pix_blockPerGridXOpt = (pix_width + pix_threadsPerBlockOpt.x-1)/pix_threadsPerBlockOpt.x; int pix_blockPerGridYOpt = (pix_height + pix_threadsPerBlockOpt.y-1)/pix_threadsPerBlockOpt.y; dim3 pix_blocksPerGridOpt(pix_blockPerGridXOpt, OPT6*(pix_blockPerGridYOpt+pix_at_a_time-1)/pix_at_a_time, 1); printf("Y:%d\n", pix_blocksPerGridOpt.y); //k_ownership<<<pix_blocksPerGridOwn, pix_threadsPerBlockOwn>>>(d_pix_data, d_own_data, d_spx_data); k_cumulativeCount<<<pix_blocksPerGridOpt, pix_threadsPerBlockOpt>>>(d_pix_data, d_own_data, d_spx_data #ifdef BANKDEBUG , true #endif ); printf("1\n"); cudaDeviceSynchronize(); //TODO // Reset and Averaging kernels dim3 spx_threadsPerBlock(32, 32); int spx_blockPerGridX = (spx_width + spx_threadsPerBlock.x-1)/spx_threadsPerBlock.x; int spx_blockPerGridY = (spx_height + spx_threadsPerBlock.y-1)/spx_threadsPerBlock.y; dim3 spx_blocksPerGrid(spx_blockPerGridX, spx_blockPerGridY, 1); k_averaging<<<spx_blocksPerGrid, spx_threadsPerBlock>>>(d_spx_data); const int iterations = 10; cudaDeviceSynchronize(); double ts_start = getTimestamp(); for (int i = 0 ; i<iterations; i++) { k_reset<<<spx_blocksPerGrid, spx_threadsPerBlock>>>(d_spx_data); err = cudaGetLastError(); reportError(err, __FILE__, __LINE__); k_ownership<<<pix_blocksPerGridOwn, pix_threadsPerBlockOwn>>>(d_pix_data, d_own_data, d_spx_data); err = cudaGetLastError(); reportError(err, __FILE__, __LINE__); k_cumulativeCount<<<pix_blocksPerGridOpt, pix_threadsPerBlockOpt>>>(d_pix_data, d_own_data, d_spx_data #ifdef BANKDEBUG , false #endif ); err = cudaGetLastError(); reportError(err, __FILE__, __LINE__); //printf("REMOVE THIS BEFORE MEASURING\n"); cudaDeviceSynchronize(); //TODO k_averaging<<<spx_blocksPerGrid, spx_threadsPerBlock>>>(d_spx_data); err = cudaGetLastError(); reportError(err, __FILE__, __LINE__); } cudaDeviceSynchronize(); double ts_end = getTimestamp(); printf("Average time %0.9f, total %0.9f iters %d\n", (ts_end - ts_start)/iterations, (ts_end - ts_start), iterations); err = cudaMemcpy(h_pix_data, d_pix_data, pix_byte_size, cudaMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); err = cudaMemcpy(h_own_data, d_own_data, own_byte_size, cudaMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); err = cudaMemcpy(h_spx_data, d_spx_data, spx_byte_size, cudaMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); const bool doConnectivity = true; if (doConnectivity) { enforce_label_connectivity(h_own_data, pix_width, pix_height, h_n_own_data, spx_width * spx_height); err = cudaMemcpy(d_own_data, h_n_own_data, own_byte_size, cudaMemcpyHostToDevice); reportError(err, __FILE__, __LINE__); } else { // NO-OP } k_reset<<<spx_blocksPerGrid, spx_threadsPerBlock>>>(d_spx_data); err = cudaGetLastError(); reportError(err, __FILE__, __LINE__); // Has to be original cumulativeCount, because we can't assume window size of 1 after conn. 
enforcement k_cumulativeCountOrig<<<pix_blocksPerGrid, pix_threadsPerBlock>>>(d_pix_data, d_own_data, d_spx_data); err = cudaGetLastError(); reportError(err, __FILE__, __LINE__); printf("3\n"); cudaDeviceSynchronize(); //TODO k_averaging<<<spx_blocksPerGrid, spx_threadsPerBlock>>>(d_spx_data); err = cudaMemcpy(h_pix_data, d_pix_data, pix_byte_size, cudaMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); err = cudaMemcpy(h_own_data, d_own_data, own_byte_size, cudaMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); err = cudaMemcpy(h_spx_data, d_spx_data, spx_byte_size, cudaMemcpyDeviceToHost); reportError(err, __FILE__, __LINE__); color_solid(h_pix_data, h_own_data, h_spx_data); //color_borders(h_pix_data, h_own_data, h_spx_data); //test_color_own(h_pix_data, h_own_data, h_spx_data); for (int x=0; x<pix_width; x++) for (int y=0; y<pix_height; y++) { int pix_idx = x + pix_width*y; ((pix_original_data*)m_lab_image.data)[pix_idx].l = h_pix_data[pix_idx].l; ((pix_original_data*)m_lab_image.data)[pix_idx].a = h_pix_data[pix_idx].a; ((pix_original_data*)m_lab_image.data)[pix_idx].b = h_pix_data[pix_idx].b; } cv::Mat m_rgb_result_image; cv::cvtColor(m_lab_image, m_rgb_result_image, cv::COLOR_Lab2BGR); cv::imwrite("./processed_image.jpg", m_rgb_result_image); cudaDeviceSynchronize(); cudaDeviceReset(); printf("SUCCESS!\n"); } // Initializes superpixel centers to be distributed evenly void initialize_spx(spx_data* h_spx_data) { for (int i = 0; i < spx_width; i++) { for(int j = 0; j < spx_height; j++) { int spx_index = j * spx_width + i; h_spx_data[spx_index].l = 127; h_spx_data[spx_index].a = 127; h_spx_data[spx_index].b = 127; h_spx_data[spx_index].x = (2 * spx_size * i + spx_size)/2; h_spx_data[spx_index].y = (2 * spx_size * j + spx_size)/2; for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx) { h_spx_data[spx_index].accum[ny][nx][0] = 0; h_spx_data[spx_index].accum[ny][nx][1] = 0; h_spx_data[spx_index].accum[ny][nx][2] = 0; h_spx_data[spx_index].accum[ny][nx][3] = 0; h_spx_data[spx_index].accum[ny][nx][4] = 0; h_spx_data[spx_index].accum[ny][nx][5] = 0; } } } } // Initializes superpixel ownership to regular squares void initialize_own(own_data* h_own_data) { for (int x = 0; x < pix_width; x++) { for(int y = 0; y < pix_height; y++) { int own_index = y * pix_width + x; int i = x/spx_size; int j = y/spx_size; h_own_data[own_index].i = i; h_own_data[own_index].j = j; } } } // Initializes superpixel ownership for continuity enforcement void initialize_n_own(own_data* h_n_own_data) { for (int x = 0; x < pix_width; x++) { for(int y = 0; y < pix_height; y++) { int own_index = y * pix_width + x; h_n_own_data[own_index].i = -1; h_n_own_data[own_index].j = -1; } } } // Solid colorizer: Paints each superpixel with its average color void color_solid(pix_data* h_pix_data, const own_data* h_own_data, const spx_data* h_spx_data) { for (int x = 0; x < pix_width; x++) { for(int y = 0; y < pix_height; y++) { int pix_index = y * pix_width + x; int spx_index = h_own_data[pix_index].j * spx_width + h_own_data[pix_index].i; h_pix_data[pix_index].l = h_spx_data[spx_index].l; h_pix_data[pix_index].a = h_spx_data[spx_index].a; h_pix_data[pix_index].b = h_spx_data[spx_index].b; } } } int get_spx_id(const own_data* h_own_data, int x, int y) { int pix_index = y * pix_width + x; return h_own_data[pix_index].j * spx_width + h_own_data[pix_index].i; } // Colors the border of superpixels to make it easier to visualize them void color_borders(pix_data* h_pix_data, const own_data* h_own_data, const spx_data* 
h_spx_data) { for (int x = 0; x < pix_width; x++) { for(int y = 0; y < pix_height; y++) { int pix_index = y * pix_width + x; int spx_id = get_spx_id(h_own_data, x, y); bool border = false; border = border || (x == 0) || spx_id != get_spx_id(h_own_data, x-1, y); border = border || (x == pix_width-1) || spx_id != get_spx_id(h_own_data, x+1, y); border = border || (y == 0) || spx_id != get_spx_id(h_own_data, x, y-1); border = border || (y == pix_height-1) || spx_id != get_spx_id(h_own_data, x, y+1); border = border || (x == 0) || (y==0) || spx_id != get_spx_id(h_own_data, x-1, y-1); border = border || (x == 0) || (y == pix_height-1) || spx_id != get_spx_id(h_own_data, x-1, y+1); border = border || (x == pix_width-1) || (y==0) || spx_id != get_spx_id(h_own_data, x+1, y-1); border = border || (x == pix_width-1) || (y == pix_height-1) || spx_id != get_spx_id(h_own_data, x+1, y+1); if(border) { h_pix_data[pix_index].l = 0; h_pix_data[pix_index].a = 0; h_pix_data[pix_index].b = 0; } } } } double getTimestamp() { struct timeval tv; gettimeofday(&tv, NULL); return (double) tv.tv_usec/1000000.0 + tv.tv_sec; } void reportError(cudaError_t err, const char* file, int line) { if (err != cudaSuccess) { printf("%s failed at %i\n", file, line); printf("%s\n", cudaGetErrorString(err)); exit(-1); } }
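reportError() in the file above is applied after each launch together with cudaGetLastError() and a later cudaDeviceSynchronize(). A standalone sketch of that checking pattern, assuming only the standard CUDA runtime API; noop_kernel is a placeholder of mine:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void noop_kernel() {}

// Sketch of the two-stage check: cudaGetLastError() catches launch-configuration
// failures, and the status returned by cudaDeviceSynchronize() catches faults
// raised while the kernel was running.
int main()
{
    noop_kernel<<<1, 1>>>();
    cudaError_t launch_err = cudaGetLastError();
    cudaError_t run_err = cudaDeviceSynchronize();
    if (launch_err != cudaSuccess || run_err != cudaSuccess) {
        printf("launch: %s, run: %s\n",
               cudaGetErrorString(launch_err), cudaGetErrorString(run_err));
        return 1;
    }
    printf("kernel completed\n");
    return 0;
}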
82256e871e28366b4eab8859c5d580a5987e7dbb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if GOOGLE_CUDA #include <iostream> #include <stdio.h> #define EIGEN_USE_GPU #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" // CUDA: index helpers #define idx4_5(index, d1, d2, d3, d4, d5) (index % d5) #define idx4_4(index, d1, d2, d3, d4, d5) ((index / d5) % d4) #define idx4_3(index, d1, d2, d3, d4, d5) ((index / d5 / d4) % d3) #define idx4_2(index, d1, d2, d3, d4, d5) ((index / d5 / d4 / d3) % d2) #define idx4_1(index, d1, d2, d3, d4, d5) ((index / d5 / d4 / d3 / d2) %d1) // CUDA: various checks for different function calls. #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ return 1; \ } \ } while (0) // CUDA: grid stride looping #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) // CUDA: use 512 threads per block const int CAFFE_CUDA_NUM_THREADS = 512; // CUDA: number of blocks for threads. inline int CAFFE_GET_BLOCKS(const int N) { // TODO rewrite this part to be consistent with tf conventions int optimal_number_of_blocks = (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; int max_number_of_blocks = 65000; return ::min(optimal_number_of_blocks, max_number_of_blocks); } #define Dtype float __global__ void RoiPooling3DKernel(const Dtype* input, const int* rois, int n_rois, int channels, int height, int width, int depth, int pooled_height, int pooled_width, int pooled_depth, Dtype* output, int* argmax_output) { int output_size = n_rois * channels * pooled_height * pooled_width * pooled_depth; CUDA_KERNEL_LOOP(index, output_size) { // (n, c, ph, pw, pd) is an element in the pooled output int pd = idx4_5(index, n_rois, channels, pooled_height, pooled_width, pooled_depth); int pw = idx4_4(index, n_rois, channels, pooled_height, pooled_width, pooled_depth); int ph = idx4_3(index, n_rois, channels, pooled_height, pooled_width, pooled_depth); int c = idx4_2(index, n_rois, channels, pooled_height, pooled_width, pooled_depth); int n = idx4_1(index, n_rois, channels, pooled_height, pooled_width, pooled_depth); auto bottom_rois_act = rois + n * 7; int roi_batch_ind = bottom_rois_act[0]; int roi_start_d = bottom_rois_act[1]; int roi_start_w = bottom_rois_act[2]; int roi_start_h = bottom_rois_act[3]; int roi_end_d = bottom_rois_act[4]; int roi_end_w = bottom_rois_act[5]; int roi_end_h = bottom_rois_act[6]; // Force malformed ROIs to be 1x1 // NOTE(maciek): roi_start, roi_end seems to be inclusive int roi_depth = max(roi_end_d - roi_start_d + 1, 1); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); // divide the ROIs into smaller regions for max pooling Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype bin_size_d = static_cast<Dtype>(roi_depth) / static_cast<Dtype>(pooled_depth); // compute the precise coordinates of each pooling subregion of the ROIs int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int dstart = static_cast<int>(floor(static_cast<Dtype>(pd) * bin_size_d)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); int dend 
= static_cast<int>(ceil(static_cast<Dtype>(pd + 1) * bin_size_d)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); dstart = min(max(dstart + roi_start_d, 0), depth); dend = min(max(dend + roi_start_d, 0), depth); //printf("%d %d %d %d %d %d %d %d\n", n, c, pw, ph, hstart, hend, wstart, wend); bool is_empty = (hend <= hstart) || (wend <= wstart) || (dend <= dstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 0 : -999999999.0; //Dtype maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; auto input_act = input + (roi_batch_ind * height * width * depth * channels); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { int bottom_index = (h * width * depth + w * depth + d) * channels + c; // bottom index is relative to 2d image only if (input_act[bottom_index] > maxval) { maxval = input_act[bottom_index]; maxidx = bottom_index; } } } } output[index] = maxval; argmax_output[index] = maxidx; } } void RoiPooling3DKernelLauncher(const float* input, const int* rois, int n_rois, int channels, int height, int width, int depth, int pooled_height, int pooled_width, int pooled_depth, Dtype* output, int* argmax_output) { int out_size = n_rois * channels * pooled_height * pooled_width * pooled_depth; hipLaunchKernelGGL(( RoiPooling3DKernel), dim3(CAFFE_GET_BLOCKS(out_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, input, rois, n_rois, channels, height, width, depth, pooled_height, pooled_width, pooled_depth, output, argmax_output); } /////////////// Grad __global__ void RoiPooling3DGradKernel(const Dtype* orig_input, const int* orig_rois, int mb_size, int n_rois, int channels, int height, int width, int depth, int pooled_height, int pooled_width, int pooled_depth, const Dtype* orig_output, const int* orig_argmax_output, const Dtype* orig_output_grad, Dtype* output) { int orig_input_size = mb_size * height * width * depth * channels; CUDA_KERNEL_LOOP(index, orig_input_size) { // (n, h, w, d, c) coords in bottom data int c = idx4_5(index, mb_size, height, width, depth, channels); int d = idx4_4(index, mb_size, height, width, depth, channels); int w = idx4_3(index, mb_size, height, width, depth, channels); int h = idx4_2(index, mb_size, height, width, depth, channels); int n = idx4_1(index, mb_size, height, width, depth, channels); Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < n_rois; ++roi_n) { const int* offset_bottom_rois = orig_rois + roi_n * 7; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_d = offset_bottom_rois[1]; int roi_start_w = offset_bottom_rois[2]; int roi_start_h = offset_bottom_rois[3]; int roi_end_d = offset_bottom_rois[4]; int roi_end_w = offset_bottom_rois[5]; int roi_end_h = offset_bottom_rois[6]; // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h && d >= roi_start_d && d <= roi_end_d); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width * pooled_depth; const Dtype* offset_top_diff = orig_output_grad + offset; const int* offset_argmax_data = orig_argmax_output + offset; // 
Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_depth = max(roi_end_d - roi_start_d + 1, 1); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype bin_size_d = static_cast<Dtype>(roi_depth) / static_cast<Dtype>(pooled_depth); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); int pdstart = floor(static_cast<Dtype>(d - roi_start_d) / bin_size_d); int pdend = ceil(static_cast<Dtype>(d - roi_start_d + 1) / bin_size_d); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); pdstart = min(max(pdstart, 0), pooled_depth); pdend = min(max(pdend, 0), pooled_depth); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { if (offset_argmax_data[ph * pooled_width * pooled_depth + pw * pooled_depth + pd] == (h * width * depth + w * depth + d)) { gradient += offset_top_diff[ph * pooled_width * pooled_depth + pw * pooled_depth + pd]; } } } } } output[index] = gradient; } } void RoiPooling3DGradKernelLauncher(const Dtype* orig_input, const int* orig_rois, int mb_size, int n_rois, int channels, int height, int width, int depth, int pooled_height, int pooled_width, int pooled_depth, const Dtype* orig_output, const int* orig_argmax_output, const Dtype* orig_output_grad, Dtype* output) { int out_size = mb_size * height * width * depth * channels; hipLaunchKernelGGL(( RoiPooling3DGradKernel), dim3(CAFFE_GET_BLOCKS(out_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, orig_input, orig_rois, mb_size, n_rois, channels, height, width, depth, pooled_height, pooled_width, pooled_depth, orig_output, orig_argmax_output, orig_output_grad, output); } #endif
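CUDA_KERNEL_LOOP in the file above is the usual grid-stride loop: each thread starts at its global index and advances by the total number of launched threads, so a grid capped at 65000 blocks still covers any output size. A minimal HIP sketch of the same pattern outside this file; scale_kernel and its launch parameters are illustrative choices of mine, not part of the ROI-pooling code:

#include <algorithm>
#include <hip/hip_runtime.h>

// Illustrative grid-stride kernel: every thread walks the range [gid, n) in
// steps of blockDim.x * gridDim.x, which is exactly what CUDA_KERNEL_LOOP
// expands to.
__global__ void scale_kernel(float* data, int n, float factor)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        data[i] *= factor;
    }
}

int main()
{
    const int n = 1 << 20;
    float* d_data;
    hipMalloc((void**)&d_data, n * sizeof(float));
    hipMemset(d_data, 0, n * sizeof(float));
    // 512 threads per block and a capped block count, as CAFFE_GET_BLOCKS does.
    int blocks = std::min((n + 511) / 512, 65000);
    hipLaunchKernelGGL(scale_kernel, dim3(blocks), dim3(512), 0, 0, d_data, n, 2.0f);
    hipDeviceSynchronize();
    hipFree(d_data);
    return 0;
}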
82256e871e28366b4eab8859c5d580a5987e7dbb.cu
#if GOOGLE_CUDA #include <iostream> #include <stdio.h> #define EIGEN_USE_GPU #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" // CUDA: index helpers #define idx4_5(index, d1, d2, d3, d4, d5) (index % d5) #define idx4_4(index, d1, d2, d3, d4, d5) ((index / d5) % d4) #define idx4_3(index, d1, d2, d3, d4, d5) ((index / d5 / d4) % d3) #define idx4_2(index, d1, d2, d3, d4, d5) ((index / d5 / d4 / d3) % d2) #define idx4_1(index, d1, d2, d3, d4, d5) ((index / d5 / d4 / d3 / d2) %d1) // CUDA: various checks for different function calls. #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ return 1; \ } \ } while (0) // CUDA: grid stride looping #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) // CUDA: use 512 threads per block const int CAFFE_CUDA_NUM_THREADS = 512; // CUDA: number of blocks for threads. inline int CAFFE_GET_BLOCKS(const int N) { // TODO rewrite this part to be consistent with tf conventions int optimal_number_of_blocks = (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; int max_number_of_blocks = 65000; return std::min(optimal_number_of_blocks, max_number_of_blocks); } #define Dtype float __global__ void RoiPooling3DKernel(const Dtype* input, const int* rois, int n_rois, int channels, int height, int width, int depth, int pooled_height, int pooled_width, int pooled_depth, Dtype* output, int* argmax_output) { int output_size = n_rois * channels * pooled_height * pooled_width * pooled_depth; CUDA_KERNEL_LOOP(index, output_size) { // (n, c, ph, pw, pd) is an element in the pooled output int pd = idx4_5(index, n_rois, channels, pooled_height, pooled_width, pooled_depth); int pw = idx4_4(index, n_rois, channels, pooled_height, pooled_width, pooled_depth); int ph = idx4_3(index, n_rois, channels, pooled_height, pooled_width, pooled_depth); int c = idx4_2(index, n_rois, channels, pooled_height, pooled_width, pooled_depth); int n = idx4_1(index, n_rois, channels, pooled_height, pooled_width, pooled_depth); auto bottom_rois_act = rois + n * 7; int roi_batch_ind = bottom_rois_act[0]; int roi_start_d = bottom_rois_act[1]; int roi_start_w = bottom_rois_act[2]; int roi_start_h = bottom_rois_act[3]; int roi_end_d = bottom_rois_act[4]; int roi_end_w = bottom_rois_act[5]; int roi_end_h = bottom_rois_act[6]; // Force malformed ROIs to be 1x1 // NOTE(maciek): roi_start, roi_end seems to be inclusive int roi_depth = max(roi_end_d - roi_start_d + 1, 1); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); // divide the ROIs into smaller regions for max pooling Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype bin_size_d = static_cast<Dtype>(roi_depth) / static_cast<Dtype>(pooled_depth); // compute the precise coordinates of each pooling subregion of the ROIs int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int dstart = static_cast<int>(floor(static_cast<Dtype>(pd) * bin_size_d)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); int dend = static_cast<int>(ceil(static_cast<Dtype>(pd + 1) * bin_size_d)); // Add roi 
offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); dstart = min(max(dstart + roi_start_d, 0), depth); dend = min(max(dend + roi_start_d, 0), depth); //printf("%d %d %d %d %d %d %d %d\n", n, c, pw, ph, hstart, hend, wstart, wend); bool is_empty = (hend <= hstart) || (wend <= wstart) || (dend <= dstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 0 : -999999999.0; //Dtype maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; auto input_act = input + (roi_batch_ind * height * width * depth * channels); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { int bottom_index = (h * width * depth + w * depth + d) * channels + c; // bottom index is relative to 2d image only if (input_act[bottom_index] > maxval) { maxval = input_act[bottom_index]; maxidx = bottom_index; } } } } output[index] = maxval; argmax_output[index] = maxidx; } } void RoiPooling3DKernelLauncher(const float* input, const int* rois, int n_rois, int channels, int height, int width, int depth, int pooled_height, int pooled_width, int pooled_depth, Dtype* output, int* argmax_output) { int out_size = n_rois * channels * pooled_height * pooled_width * pooled_depth; RoiPooling3DKernel<<<CAFFE_GET_BLOCKS(out_size), CAFFE_CUDA_NUM_THREADS>>>(input, rois, n_rois, channels, height, width, depth, pooled_height, pooled_width, pooled_depth, output, argmax_output); } /////////////// Grad __global__ void RoiPooling3DGradKernel(const Dtype* orig_input, const int* orig_rois, int mb_size, int n_rois, int channels, int height, int width, int depth, int pooled_height, int pooled_width, int pooled_depth, const Dtype* orig_output, const int* orig_argmax_output, const Dtype* orig_output_grad, Dtype* output) { int orig_input_size = mb_size * height * width * depth * channels; CUDA_KERNEL_LOOP(index, orig_input_size) { // (n, h, w, d, c) coords in bottom data int c = idx4_5(index, mb_size, height, width, depth, channels); int d = idx4_4(index, mb_size, height, width, depth, channels); int w = idx4_3(index, mb_size, height, width, depth, channels); int h = idx4_2(index, mb_size, height, width, depth, channels); int n = idx4_1(index, mb_size, height, width, depth, channels); Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < n_rois; ++roi_n) { const int* offset_bottom_rois = orig_rois + roi_n * 7; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_d = offset_bottom_rois[1]; int roi_start_w = offset_bottom_rois[2]; int roi_start_h = offset_bottom_rois[3]; int roi_end_d = offset_bottom_rois[4]; int roi_end_w = offset_bottom_rois[5]; int roi_end_h = offset_bottom_rois[6]; // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h && d >= roi_start_d && d <= roi_end_d); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width * pooled_depth; const Dtype* offset_top_diff = orig_output_grad + offset; const int* offset_argmax_data = orig_argmax_output + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 
int roi_depth = max(roi_end_d - roi_start_d + 1, 1); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype bin_size_d = static_cast<Dtype>(roi_depth) / static_cast<Dtype>(pooled_depth); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); int pdstart = floor(static_cast<Dtype>(d - roi_start_d) / bin_size_d); int pdend = ceil(static_cast<Dtype>(d - roi_start_d + 1) / bin_size_d); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); pdstart = min(max(pdstart, 0), pooled_depth); pdend = min(max(pdend, 0), pooled_depth); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { if (offset_argmax_data[ph * pooled_width * pooled_depth + pw * pooled_depth + pd] == (h * width * depth + w * depth + d)) { gradient += offset_top_diff[ph * pooled_width * pooled_depth + pw * pooled_depth + pd]; } } } } } output[index] = gradient; } } void RoiPooling3DGradKernelLauncher(const Dtype* orig_input, const int* orig_rois, int mb_size, int n_rois, int channels, int height, int width, int depth, int pooled_height, int pooled_width, int pooled_depth, const Dtype* orig_output, const int* orig_argmax_output, const Dtype* orig_output_grad, Dtype* output) { int out_size = mb_size * height * width * depth * channels; RoiPooling3DGradKernel<<<CAFFE_GET_BLOCKS(out_size), CAFFE_CUDA_NUM_THREADS>>>(orig_input, orig_rois, mb_size, n_rois, channels, height, width, depth, pooled_height, pooled_width, pooled_depth, orig_output, orig_argmax_output, orig_output_grad, output); } #endif
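The idx4_1..idx4_5 macros in the file above peel a flat index over an (n, c, ph, pw, pd) volume apart from the innermost dimension outwards. A host-only round-trip sketch with made-up extents, showing that the decomposition and re-flattening agree:

#include <cstdio>

// Host-side sketch of the index decomposition done by idx4_1..idx4_5.
int main()
{
    const int d1 = 2, d2 = 3, d3 = 4, d4 = 5, d5 = 6; // example extents
    const int index = 517;                            // any flat index < d1*d2*d3*d4*d5
    int pd = index % d5;
    int pw = (index / d5) % d4;
    int ph = (index / d5 / d4) % d3;
    int c  = (index / d5 / d4 / d3) % d2;
    int n  = (index / d5 / d4 / d3 / d2) % d1;
    // Re-flatten and confirm the round trip.
    int back = (((n * d2 + c) * d3 + ph) * d4 + pw) * d5 + pd;
    printf("%d == %d\n", index, back); // 517 == 517
    return 0;
}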
c0d9adeb1015312fb642bc8b3dc22f3dbb4f2502.hip
// !!! This is a file automatically generated by hipify!!!
#include "head.h"

//variable for cublas
hipblasStatus_t stat;
hipblasHandle_t handle=0;

float * A;
float * d_A;
float * b;
float * d_b;
float * C;
float * d_C;
float * x;
float * d_x;
float alpha, beta;

#define batchsize 1

int * PivotA;
int * infoA;
int * h_PivotA;
int * h_infoA;

float ** h_Apoint;
float ** d_Apoint;
float ** h_Cpoint;
float ** d_Cpoint;

void Allocate_Memory(){
    hipError_t Error;
    A = (float *)malloc(n*n*sizeof(float));
    b = (float *)malloc(n*sizeof(float));
    x = (float *)malloc(n*sizeof(float));
    C = (float *)malloc(n*n*sizeof(float));
    Error = hipMalloc((void **)&d_A, n*n*sizeof(float));
    printf("CUDA error(malloc d_A) = %s\n",hipGetErrorString(Error));
    Error = hipMalloc((void **)&d_b, n*sizeof(float));
    printf("CUDA error(malloc d_b) = %s\n",hipGetErrorString(Error));
    Error = hipMalloc((void **)&d_x, n*sizeof(float));
    printf("CUDA error(malloc d_x) = %s\n",hipGetErrorString(Error));
    Error = hipMalloc((void **)&d_C, n*n*sizeof(float));
    printf("CUDA error(malloc d_C) = %s\n",hipGetErrorString(Error));
    h_PivotA = (int *)malloc(n*batchsize*sizeof(int));
    h_infoA = (int *)malloc(batchsize*sizeof(int));
    Error = hipMalloc((void **)&PivotA, n*batchsize*sizeof(int));
    printf("CUDA error(malloc PivotA) = %s\n",hipGetErrorString(Error));
    Error = hipMalloc((void **)&infoA, batchsize*sizeof(int));
    printf("CUDA error(malloc infoA) = %s\n",hipGetErrorString(Error));
    h_Apoint = (float **)malloc(batchsize*sizeof(float*));
    Error = hipMalloc((void **)&d_Apoint, batchsize*sizeof(float*));
    printf("CUDA error(malloc d_Apoint) = %s\n",hipGetErrorString(Error));
    h_Cpoint = (float **)malloc(batchsize*sizeof(float*));
    Error = hipMalloc((void **)&d_Cpoint, batchsize*sizeof(float*));
    printf("CUDA error(malloc d_Cpoint) = %s\n",hipGetErrorString(Error));
}

void Init(){
    int i, j;
    for(j=0;j<n;j++){
        for(i=0;i<n;i++){
            A[i+j*n] = int(rand()%10);
        }
    }
    for(i=0;i<n;i++){
        b[i] = int(rand()%10);
        x[i] = 0.0;
    }
    for(i=0;i<n*n;i++){
        C[i] = 0.0;
    }
    alpha = 1.0, beta = 0.0;
    for(i=0;i<batchsize;i++){
        h_Apoint[0] = d_A + i*n*n;
        h_Cpoint[0] = (float *)((char*)d_C+i*((size_t)n*n)*sizeof(float));
    }
}

void Send_To_Device(){
    hipError_t Error;
    Error = hipMemcpy(d_A, A, n*n*sizeof(float), hipMemcpyHostToDevice);
    printf("CUDA error(memcpy A) = %s\n",hipGetErrorString(Error));
    Error = hipMemcpy(d_b, b, n*sizeof(float), hipMemcpyHostToDevice);
    printf("CUDA error(memcpy b) = %s\n",hipGetErrorString(Error));
    Error = hipMemcpy(d_C, C, n*n*sizeof(float), hipMemcpyHostToDevice);
    printf("CUDA error(memcpy C) = %s\n",hipGetErrorString(Error));
    Error = hipMemcpy(d_Apoint, h_Apoint, batchsize*sizeof(float*), hipMemcpyHostToDevice);
    printf("CUDA error(memcpy Apoint) = %s\n",hipGetErrorString(Error));
    Error = hipMemcpy(d_Cpoint, h_Cpoint, batchsize*sizeof(float*), hipMemcpyHostToDevice);
    printf("CUDA error(memcpy Cpoint) = %s\n",hipGetErrorString(Error));
}

void Call_GPUFunction(){
    stat = hipblasCreate(&handle);
    if (stat != HIPBLAS_STATUS_SUCCESS) {
        printf ("CUBLAS initialization failed\n");
    }
    hipblasSgetrfBatched( handle, n, d_Apoint, n, PivotA, infoA, batchsize);
    hipblasSgetriBatched( handle, n, (const float **)d_Apoint, n, PivotA, d_Cpoint, n, infoA, batchsize);
    hipblasSgemv( handle, HIPBLAS_OP_N, n, n, &alpha, d_C, n, d_b, 1, &beta, d_x, 1);
    hipDeviceSynchronize();
}

void Send_To_Host(){
    hipError_t Error;
    Error = hipMemcpy(x, d_x, n*sizeof(float), hipMemcpyDeviceToHost);
    printf("CUDA error(memcpy d_x->x) = %s\n",hipGetErrorString(Error));
    Error = hipMemcpy(C, d_C, n*n*sizeof(float), hipMemcpyDeviceToHost);
    printf("CUDA error(memcpy d_C->C) = %s\n",hipGetErrorString(Error));
    Error = hipMemcpy(A, d_A, n*n*sizeof(float), hipMemcpyDeviceToHost);
    printf("CUDA error(memcpy d_A->A) = %s\n",hipGetErrorString(Error));
    Error = hipMemcpy(h_PivotA, PivotA, n*batchsize*sizeof(int), hipMemcpyDeviceToHost);
    printf("CUDA error(memcpy PivotA) = %s\n",hipGetErrorString(Error));
    Error = hipMemcpy(h_infoA, infoA, batchsize*sizeof(int), hipMemcpyDeviceToHost);
    printf("CUDA error(memcpy infoA) = %s\n",hipGetErrorString(Error));
}

void Free_Memory(){
    if (A) free(A);
    if (d_A) hipFree(d_A);
    if (b) free(b);
    if (d_b) hipFree(d_b);
    if (x) free(x);
    if (d_x) hipFree(d_x);
    if (C) free(C);
    if (d_C) hipFree(d_C);
    if (h_PivotA) free(h_PivotA);
    if (PivotA) hipFree(PivotA);
    if (h_infoA) free(h_infoA);
    if (infoA) hipFree(infoA);
    if (h_Apoint) free(h_Apoint);
    if (d_Apoint) hipFree(d_Apoint);
    if (h_Cpoint) free(h_Cpoint);
    if (d_Cpoint) hipFree(d_Cpoint);
    if (handle) hipblasDestroy(handle);
}

void Save_Result() {
    FILE *pFile;
    int i;
    // Save the matrix
    pFile = fopen("x.txt","w");
    // Save the vector x
    for (i = 0; i < n; i++) {
        fprintf(pFile, "%g\n", x[i]);
    }
    fclose(pFile);
}
c0d9adeb1015312fb642bc8b3dc22f3dbb4f2502.cu
#include "head.h" //variable for cublas cublasStatus_t stat; cublasHandle_t handle=0; float * A; float * d_A; float * b; float * d_b; float * C; float * d_C; float * x; float * d_x; float alpha, beta; #define batchsize 1 int * PivotA; int * infoA; int * h_PivotA; int * h_infoA; float ** h_Apoint; float ** d_Apoint; float ** h_Cpoint; float ** d_Cpoint; void Allocate_Memory(){ cudaError_t Error; A = (float *)malloc(n*n*sizeof(float)); b = (float *)malloc(n*sizeof(float)); x = (float *)malloc(n*sizeof(float)); C = (float *)malloc(n*n*sizeof(float)); Error = cudaMalloc((void **)&d_A, n*n*sizeof(float)); printf("CUDA error(malloc d_A) = %s\n",cudaGetErrorString(Error)); Error = cudaMalloc((void **)&d_b, n*sizeof(float)); printf("CUDA error(malloc d_b) = %s\n",cudaGetErrorString(Error)); Error = cudaMalloc((void **)&d_x, n*sizeof(float)); printf("CUDA error(malloc d_x) = %s\n",cudaGetErrorString(Error)); Error = cudaMalloc((void **)&d_C, n*n*sizeof(float)); printf("CUDA error(malloc d_C) = %s\n",cudaGetErrorString(Error)); h_PivotA = (int *)malloc(n*batchsize*sizeof(int)); h_infoA = (int *)malloc(batchsize*sizeof(int)); Error = cudaMalloc((void **)&PivotA, n*batchsize*sizeof(int)); printf("CUDA error(malloc PivotA) = %s\n",cudaGetErrorString(Error)); Error = cudaMalloc((void **)&infoA, batchsize*sizeof(int)); printf("CUDA error(malloc infoA) = %s\n",cudaGetErrorString(Error)); h_Apoint = (float **)malloc(batchsize*sizeof(float*)); Error = cudaMalloc((void **)&d_Apoint, batchsize*sizeof(float*)); printf("CUDA error(malloc d_Apoint) = %s\n",cudaGetErrorString(Error)); h_Cpoint = (float **)malloc(batchsize*sizeof(float*)); Error = cudaMalloc((void **)&d_Cpoint, batchsize*sizeof(float*)); printf("CUDA error(malloc d_Cpoint) = %s\n",cudaGetErrorString(Error)); } void Init(){ int i, j; for(j=0;j<n;j++){ for(i=0;i<n;i++){ A[i+j*n] = int(rand()%10); } } for(i=0;i<n;i++){ b[i] = int(rand()%10); x[i] = 0.0; } for(i=0;i<n*n;i++){ C[i] = 0.0; } alpha = 1.0, beta = 0.0; for(i=0;i<batchsize;i++){ h_Apoint[0] = d_A + i*n*n; h_Cpoint[0] = (float *)((char*)d_C+i*((size_t)n*n)*sizeof(float)); } } void Send_To_Device(){ cudaError_t Error; Error = cudaMemcpy(d_A, A, n*n*sizeof(float), cudaMemcpyHostToDevice); printf("CUDA error(memcpy A) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(d_b, b, n*sizeof(float), cudaMemcpyHostToDevice); printf("CUDA error(memcpy b) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(d_C, C, n*n*sizeof(float), cudaMemcpyHostToDevice); printf("CUDA error(memcpy C) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(d_Apoint, h_Apoint, batchsize*sizeof(float*), cudaMemcpyHostToDevice); printf("CUDA error(memcpy Apoint) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(d_Cpoint, h_Cpoint, batchsize*sizeof(float*), cudaMemcpyHostToDevice); printf("CUDA error(memcpy Cpoint) = %s\n",cudaGetErrorString(Error)); } void Call_GPUFunction(){ stat = cublasCreate(&handle); if (stat != CUBLAS_STATUS_SUCCESS) { printf ("CUBLAS initialization failed\n"); } cublasSgetrfBatched( handle, n, d_Apoint, n, PivotA, infoA, batchsize); cublasSgetriBatched( handle, n, (const float **)d_Apoint, n, PivotA, d_Cpoint, n, infoA, batchsize); cublasSgemv( handle, CUBLAS_OP_N, n, n, &alpha, d_C, n, d_b, 1, &beta, d_x, 1); cudaDeviceSynchronize(); } void Send_To_Host(){ cudaError_t Error; Error = cudaMemcpy(x, d_x, n*sizeof(float), cudaMemcpyDeviceToHost); printf("CUDA error(memcpy d_x->x) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(C, d_C, n*n*sizeof(float), cudaMemcpyDeviceToHost); 
printf("CUDA error(memcpy d_C->C) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(A, d_A, n*n*sizeof(float), cudaMemcpyDeviceToHost); printf("CUDA error(memcpy d_A->A) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(h_PivotA, PivotA, n*batchsize*sizeof(int), cudaMemcpyDeviceToHost); printf("CUDA error(memcpy PivotA) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(h_infoA, infoA, batchsize*sizeof(int), cudaMemcpyDeviceToHost); printf("CUDA error(memcpy infoA) = %s\n",cudaGetErrorString(Error)); } void Free_Memory(){ if (A) free(A); if (d_A) cudaFree(d_A); if (b) free(b); if (d_b) cudaFree(d_b); if (x) free(x); if (d_x) cudaFree(d_x); if (C) free(C); if (d_C) cudaFree(d_C); if (h_PivotA) free(h_PivotA); if (PivotA) cudaFree(PivotA); if (h_infoA) free(h_infoA); if (infoA) cudaFree(infoA); if (h_Apoint) free(h_Apoint); if (d_Apoint) cudaFree(d_Apoint); if (h_Cpoint) free(h_Cpoint); if (d_Cpoint) cudaFree(d_Cpoint); if (handle) cublasDestroy(handle); } void Save_Result() { FILE *pFile; int i; // Save the matrix pFile = fopen("x.txt","w"); // Save the vector x for (i = 0; i < n; i++) { fprintf(pFile, "%g\n", x[i]); } fclose(pFile); }
ead344054be73be59da05ed46b45325f10de69c0.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <Eigen/Core>
#include <Eigen/Dense>
#include <tdp/data/image.h>
#include <tdp/eigen/dense.h>
#include <tdp/cuda/cuda.h>
#include <tdp/camera/camera_base.h>
#include <tdp/camera/camera.h>
#include <tdp/camera/camera_poly.h>

namespace tdp {

// populate rays given a pinhole camera parameterization
template<int D, typename Derived>
__global__ void KernelRay(
    CameraBase<float,D,Derived> cam,
    Image<Vector3fda> ray) {
  const int idx = threadIdx.x + blockDim.x * blockIdx.x;
  const int idy = threadIdx.y + blockDim.y * blockIdx.y;
  if (idx < ray.w_ && idy < ray.h_) {
    ray(idx, idy) = cam.Unproject(idx,idy,1.);
  }
}

template<int D, typename Derived>
void ComputeCameraRays(
    const CameraBase<float,D,Derived>& cam,
    Image<Vector3fda>& ray
    ) {
  dim3 threads, blocks;
  ComputeKernelParamsForImage(blocks,threads,ray,32,32);
  hipLaunchKernelGGL(( KernelRay<D,Derived>), dim3(blocks),dim3(threads), 0, 0, cam,ray);
  checkCudaErrors(hipDeviceSynchronize());
}

template void ComputeCameraRays(
    const BaseCameraf& cam,
    Image<Vector3fda>& ray
    );
template void ComputeCameraRays(
    const BaseCameraPoly3f& cam,
    Image<Vector3fda>& ray
    );

}
ead344054be73be59da05ed46b45325f10de69c0.cu
#include <cuda.h>
#include <Eigen/Core>
#include <Eigen/Dense>
#include <tdp/data/image.h>
#include <tdp/eigen/dense.h>
#include <tdp/cuda/cuda.h>
#include <tdp/camera/camera_base.h>
#include <tdp/camera/camera.h>
#include <tdp/camera/camera_poly.h>

namespace tdp {

// populate rays given a pinhole camera parameterization
template<int D, typename Derived>
__global__ void KernelRay(
    CameraBase<float,D,Derived> cam,
    Image<Vector3fda> ray) {
  const int idx = threadIdx.x + blockDim.x * blockIdx.x;
  const int idy = threadIdx.y + blockDim.y * blockIdx.y;
  if (idx < ray.w_ && idy < ray.h_) {
    ray(idx, idy) = cam.Unproject(idx,idy,1.);
  }
}

template<int D, typename Derived>
void ComputeCameraRays(
    const CameraBase<float,D,Derived>& cam,
    Image<Vector3fda>& ray
    ) {
  dim3 threads, blocks;
  ComputeKernelParamsForImage(blocks,threads,ray,32,32);
  KernelRay<D,Derived><<<blocks,threads>>>(cam,ray);
  checkCudaErrors(cudaDeviceSynchronize());
}

template void ComputeCameraRays(
    const BaseCameraf& cam,
    Image<Vector3fda>& ray
    );
template void ComputeCameraRays(
    const BaseCameraPoly3f& cam,
    Image<Vector3fda>& ray
    );

}
642bc35dbf32acfc748d2797fa08f6a3c50c3491.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include "common.h" #include "cuda_common.cuh" #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define BDIMX 64 #define BDIMY 8 #define IPAD 2 __global__ void transpose_read_raw_write_column_benchmark(int * mat, int* transpose, int nx, int ny) { int ix = blockDim.x * blockIdx.x + threadIdx.x; int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { //read by row, write by col transpose[ix * ny + iy] = mat[iy * nx + ix]; } } __global__ void transpose_smem(int * in, int* out, int nx, int ny) { __shared__ int tile[BDIMY][BDIMX]; //input index int ix, iy, in_index; //output index int i_row, i_col, _1d_index, out_ix, out_iy, out_index; //ix and iy calculation for input index ix = blockDim.x * blockIdx.x + threadIdx.x; iy = blockDim.y * blockIdx.y + threadIdx.y; //input index in_index = iy * nx + ix; //1D index calculation fro shared memory _1d_index = threadIdx.y * blockDim.x + threadIdx.x; //col major row and col index calcuation i_row = _1d_index / blockDim.y; i_col = _1d_index % blockDim.y; //coordinate for transpose matrix out_ix = blockIdx.y * blockDim.y + i_col; out_iy = blockIdx.x * blockDim.x + i_row; //output array access in row major format out_index = out_iy * ny + out_ix; if (ix < nx && iy < ny) { //load from in array in row major and store to shared memory in row major tile[threadIdx.y][threadIdx.x] = in[in_index]; //wait untill all the threads load the values __syncthreads(); out[out_index] = tile[i_col][i_row]; } } __global__ void transpose_smem_pad(int * in, int* out, int nx, int ny) { __shared__ int tile[BDIMY][BDIMX + IPAD]; //input index int ix, iy, in_index; //output index int i_row, i_col, _1d_index, out_ix, out_iy, out_index; //ix and iy calculation for input index ix = blockDim.x * blockIdx.x + threadIdx.x; iy = blockDim.y * blockIdx.y + threadIdx.y; //input index in_index = iy * nx + ix; //1D index calculation fro shared memory _1d_index = threadIdx.y * blockDim.x + threadIdx.x; //col major row and col index calcuation i_row = _1d_index / blockDim.y; i_col = _1d_index % blockDim.y; //coordinate for transpose matrix out_ix = blockIdx.y * blockDim.y + i_col; out_iy = blockIdx.x * blockDim.x + i_row; //output array access in row major format out_index = out_iy * ny + out_ix; if (ix < nx && iy < ny) { //load from in array in row major and store to shared memory in row major tile[threadIdx.y][threadIdx.x] = in[in_index]; //wait untill all the threads load the values __syncthreads(); out[out_index] = tile[i_col][i_row]; } } __global__ void transpose_smem_pad_unrolling(int * in, int* out, int nx, int ny) { __shared__ int tile[BDIMY * (2 * BDIMX + IPAD)]; //input index int ix, iy, in_index; //output index int i_row, i_col, _1d_index, out_ix, out_iy, out_index; //ix and iy calculation for input index ix = 2 * blockDim.x * blockIdx.x + threadIdx.x; iy = blockDim.y * blockIdx.y + threadIdx.y; //input index in_index = iy * nx + ix; //1D index calculation fro shared memory _1d_index = threadIdx.y * blockDim.x + threadIdx.x; //col major row and col index calcuation i_row = _1d_index / blockDim.y; i_col = _1d_index % blockDim.y; //coordinate for transpose matrix out_ix = blockIdx.y * blockDim.y + i_col; out_iy = 2 * blockIdx.x * blockDim.x + i_row; //output array access in row major format out_index = out_iy * ny + out_ix; if (ix < nx && iy < ny) { int row_idx = threadIdx.y * (2 * blockDim.x + IPAD) + 
threadIdx.x; //load from in array in row major and store to shared memory in row major tile[row_idx] = in[in_index]; tile[row_idx+ BDIMX] = in[in_index + BDIMX]; //wait untill all the threads load the values __syncthreads(); int col_idx = i_col * (2 * blockDim.x + IPAD) + i_row; out[out_index] = tile[col_idx]; out[out_index + ny* BDIMX] = tile[col_idx + BDIMX]; } } //int main(int argc, char** argv) //{ // //default values for variabless // int nx = 1024; // int ny = 1024; // int block_x = BDIMX; // int block_y = BDIMY; // int kernel_num = 0; // // //set the variable based on arguments // if (argc > 1) // nx = 1 << atoi(argv[1]); // if (argc > 2) // ny = 1 << atoi(argv[2]); // if (argc > 3) // block_x = 1 << atoi(argv[3]); // if (argc > 4) // block_y = 1 <<atoi(argv[4]); // // int size = nx * ny; // int byte_size = sizeof(int*) * size; // // printf("Matrix transpose for %d X % d matrix with block size %d X %d \n",nx,ny,block_x,block_y); // // int * h_mat_array = (int*)malloc(byte_size); // int * h_trans_array = (int*)malloc(byte_size); // int * h_ref = (int*)malloc(byte_size); // // initialize(h_mat_array,size ,INIT_ONE_TO_TEN); // // //matirx transpose in CPU // mat_transpose_cpu(h_mat_array, h_trans_array, nx, ny); // // int * d_mat_array, *d_trans_array; // // gpuErrchk(hipMalloc((void**)&d_mat_array, byte_size)); // gpuErrchk(hipMalloc((void**)&d_trans_array, byte_size)); // // gpuErrchk(hipMemcpy(d_mat_array, h_mat_array, byte_size, hipMemcpyHostToDevice)); // gpuErrchk(hipMemset(d_trans_array, 0, byte_size)); // // dim3 blocks(block_x, block_y); // dim3 grid(nx/block_x, ny/block_y); // // printf("Launching smem kernel \n"); // transpose_smem <<< grid, blocks>> > (d_mat_array,d_trans_array,nx, ny); // gpuErrchk(hipDeviceSynchronize()); // // gpuErrchk(hipMemcpy(h_ref, d_trans_array, byte_size, hipMemcpyDeviceToHost)); // compare_arrays(h_ref, h_trans_array,size); // // printf("Launching benchmark kernel \n"); // hipMemset(d_trans_array,0, byte_size); // transpose_read_raw_write_column_benchmark << < grid, blocks >> > (d_mat_array, d_trans_array, nx, ny); // gpuErrchk(hipDeviceSynchronize()); // // gpuErrchk(hipMemcpy(h_ref, d_trans_array, byte_size, hipMemcpyDeviceToHost)); // compare_arrays(h_ref, h_trans_array, size); // // printf("Launching smem padding kernel \n"); // hipMemset(d_trans_array, 0, byte_size); // transpose_smem_pad << < grid, blocks >> > (d_mat_array, d_trans_array, nx, ny); // gpuErrchk(hipDeviceSynchronize()); // // gpuErrchk(hipMemcpy(h_ref, d_trans_array, byte_size, hipMemcpyDeviceToHost)); // compare_arrays(h_ref, h_trans_array, size); // // printf("Launching smem padding and unrolling kernel \n"); // hipMemset(d_trans_array, 0, byte_size); // // grid.x = grid.x / 2; // // transpose_smem_pad_unrolling << < grid, blocks >> > (d_mat_array, d_trans_array, nx, ny); // gpuErrchk(hipDeviceSynchronize()); // // gpuErrchk(hipMemcpy(h_ref, d_trans_array, byte_size, hipMemcpyDeviceToHost)); // compare_arrays(h_ref, h_trans_array, size); // // hipFree(d_trans_array); // hipFree(d_mat_array); // free(h_ref); // free(h_trans_array); // free(h_mat_array); // // gpuErrchk(hipDeviceReset()); // return EXIT_SUCCESS; //}
642bc35dbf32acfc748d2797fa08f6a3c50c3491.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "common.h" #include "cuda_common.cuh" #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #define BDIMX 64 #define BDIMY 8 #define IPAD 2 __global__ void transpose_read_raw_write_column_benchmark(int * mat, int* transpose, int nx, int ny) { int ix = blockDim.x * blockIdx.x + threadIdx.x; int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { //read by row, write by col transpose[ix * ny + iy] = mat[iy * nx + ix]; } } __global__ void transpose_smem(int * in, int* out, int nx, int ny) { __shared__ int tile[BDIMY][BDIMX]; //input index int ix, iy, in_index; //output index int i_row, i_col, _1d_index, out_ix, out_iy, out_index; //ix and iy calculation for input index ix = blockDim.x * blockIdx.x + threadIdx.x; iy = blockDim.y * blockIdx.y + threadIdx.y; //input index in_index = iy * nx + ix; //1D index calculation fro shared memory _1d_index = threadIdx.y * blockDim.x + threadIdx.x; //col major row and col index calcuation i_row = _1d_index / blockDim.y; i_col = _1d_index % blockDim.y; //coordinate for transpose matrix out_ix = blockIdx.y * blockDim.y + i_col; out_iy = blockIdx.x * blockDim.x + i_row; //output array access in row major format out_index = out_iy * ny + out_ix; if (ix < nx && iy < ny) { //load from in array in row major and store to shared memory in row major tile[threadIdx.y][threadIdx.x] = in[in_index]; //wait untill all the threads load the values __syncthreads(); out[out_index] = tile[i_col][i_row]; } } __global__ void transpose_smem_pad(int * in, int* out, int nx, int ny) { __shared__ int tile[BDIMY][BDIMX + IPAD]; //input index int ix, iy, in_index; //output index int i_row, i_col, _1d_index, out_ix, out_iy, out_index; //ix and iy calculation for input index ix = blockDim.x * blockIdx.x + threadIdx.x; iy = blockDim.y * blockIdx.y + threadIdx.y; //input index in_index = iy * nx + ix; //1D index calculation fro shared memory _1d_index = threadIdx.y * blockDim.x + threadIdx.x; //col major row and col index calcuation i_row = _1d_index / blockDim.y; i_col = _1d_index % blockDim.y; //coordinate for transpose matrix out_ix = blockIdx.y * blockDim.y + i_col; out_iy = blockIdx.x * blockDim.x + i_row; //output array access in row major format out_index = out_iy * ny + out_ix; if (ix < nx && iy < ny) { //load from in array in row major and store to shared memory in row major tile[threadIdx.y][threadIdx.x] = in[in_index]; //wait untill all the threads load the values __syncthreads(); out[out_index] = tile[i_col][i_row]; } } __global__ void transpose_smem_pad_unrolling(int * in, int* out, int nx, int ny) { __shared__ int tile[BDIMY * (2 * BDIMX + IPAD)]; //input index int ix, iy, in_index; //output index int i_row, i_col, _1d_index, out_ix, out_iy, out_index; //ix and iy calculation for input index ix = 2 * blockDim.x * blockIdx.x + threadIdx.x; iy = blockDim.y * blockIdx.y + threadIdx.y; //input index in_index = iy * nx + ix; //1D index calculation fro shared memory _1d_index = threadIdx.y * blockDim.x + threadIdx.x; //col major row and col index calcuation i_row = _1d_index / blockDim.y; i_col = _1d_index % blockDim.y; //coordinate for transpose matrix out_ix = blockIdx.y * blockDim.y + i_col; out_iy = 2 * blockIdx.x * blockDim.x + i_row; //output array access in row major format out_index = out_iy * ny + out_ix; if (ix < nx && iy < ny) { int row_idx = threadIdx.y * (2 * blockDim.x + IPAD) + threadIdx.x; //load from in array in row major and store to shared memory in 
row major tile[row_idx] = in[in_index]; tile[row_idx+ BDIMX] = in[in_index + BDIMX]; //wait untill all the threads load the values __syncthreads(); int col_idx = i_col * (2 * blockDim.x + IPAD) + i_row; out[out_index] = tile[col_idx]; out[out_index + ny* BDIMX] = tile[col_idx + BDIMX]; } } //int main(int argc, char** argv) //{ // //default values for variabless // int nx = 1024; // int ny = 1024; // int block_x = BDIMX; // int block_y = BDIMY; // int kernel_num = 0; // // //set the variable based on arguments // if (argc > 1) // nx = 1 << atoi(argv[1]); // if (argc > 2) // ny = 1 << atoi(argv[2]); // if (argc > 3) // block_x = 1 << atoi(argv[3]); // if (argc > 4) // block_y = 1 <<atoi(argv[4]); // // int size = nx * ny; // int byte_size = sizeof(int*) * size; // // printf("Matrix transpose for %d X % d matrix with block size %d X %d \n",nx,ny,block_x,block_y); // // int * h_mat_array = (int*)malloc(byte_size); // int * h_trans_array = (int*)malloc(byte_size); // int * h_ref = (int*)malloc(byte_size); // // initialize(h_mat_array,size ,INIT_ONE_TO_TEN); // // //matirx transpose in CPU // mat_transpose_cpu(h_mat_array, h_trans_array, nx, ny); // // int * d_mat_array, *d_trans_array; // // gpuErrchk(cudaMalloc((void**)&d_mat_array, byte_size)); // gpuErrchk(cudaMalloc((void**)&d_trans_array, byte_size)); // // gpuErrchk(cudaMemcpy(d_mat_array, h_mat_array, byte_size, cudaMemcpyHostToDevice)); // gpuErrchk(cudaMemset(d_trans_array, 0, byte_size)); // // dim3 blocks(block_x, block_y); // dim3 grid(nx/block_x, ny/block_y); // // printf("Launching smem kernel \n"); // transpose_smem <<< grid, blocks>> > (d_mat_array,d_trans_array,nx, ny); // gpuErrchk(cudaDeviceSynchronize()); // // gpuErrchk(cudaMemcpy(h_ref, d_trans_array, byte_size, cudaMemcpyDeviceToHost)); // compare_arrays(h_ref, h_trans_array,size); // // printf("Launching benchmark kernel \n"); // cudaMemset(d_trans_array,0, byte_size); // transpose_read_raw_write_column_benchmark << < grid, blocks >> > (d_mat_array, d_trans_array, nx, ny); // gpuErrchk(cudaDeviceSynchronize()); // // gpuErrchk(cudaMemcpy(h_ref, d_trans_array, byte_size, cudaMemcpyDeviceToHost)); // compare_arrays(h_ref, h_trans_array, size); // // printf("Launching smem padding kernel \n"); // cudaMemset(d_trans_array, 0, byte_size); // transpose_smem_pad << < grid, blocks >> > (d_mat_array, d_trans_array, nx, ny); // gpuErrchk(cudaDeviceSynchronize()); // // gpuErrchk(cudaMemcpy(h_ref, d_trans_array, byte_size, cudaMemcpyDeviceToHost)); // compare_arrays(h_ref, h_trans_array, size); // // printf("Launching smem padding and unrolling kernel \n"); // cudaMemset(d_trans_array, 0, byte_size); // // grid.x = grid.x / 2; // // transpose_smem_pad_unrolling << < grid, blocks >> > (d_mat_array, d_trans_array, nx, ny); // gpuErrchk(cudaDeviceSynchronize()); // // gpuErrchk(cudaMemcpy(h_ref, d_trans_array, byte_size, cudaMemcpyDeviceToHost)); // compare_arrays(h_ref, h_trans_array, size); // // cudaFree(d_trans_array); // cudaFree(d_mat_array); // free(h_ref); // free(h_trans_array); // free(h_mat_array); // // gpuErrchk(cudaDeviceReset()); // return EXIT_SUCCESS; //}
4f7ad7dbb7b97f8100a0246f2bf5fbf3a05daaf2.hip
// !!! This is a file automatically generated by hipify!!!
/**
   Sample for Mobile CUDA Simple Adding Vectors Application.
   Author @ Taichirou Suzuki
 **/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>

__global__ void ___add(float* a,float* b,unsigned long size){
  unsigned int _x = blockDim.x * blockIdx.x + threadIdx.x;
  a[_x] += b[_x];
}

static float elapsed(struct timeval tv0,struct timeval tv1){
  return (float)(tv1.tv_sec - tv0.tv_sec)
    + (float)(tv1.tv_usec - tv0.tv_usec) * 0.000001f;
}

int main(void){
  struct timeval t0,t1;
  gettimeofday(&t0,NULL);

  unsigned long _hen = 16000;
  // unsigned long size = _hen * _hen;
  unsigned long size = 16000*16000;
  printf("matrix size : %lu\n",size);

  int numOfLaunchKernel = 1;

  hipSetDevice(0);

  float* d_a = NULL;
  float* d_b = NULL;
  // float* d_c = NULL;

  hipMalloc((void**)&d_a,sizeof(float)*size);
  hipMalloc((void**)&d_b,sizeof(float)*size);

  float* h_a = NULL;
  float* h_b = NULL;

  hipError_t res;
  res = hipHostMalloc((void **)&h_a,sizeof(float)*size,0);
  printf("hipHostMalloc : %d\n",res);
  res = hipHostMalloc((void **)&h_b,sizeof(float)*size,0);
  printf("hipHostMalloc : %d\n",res);

  printf("This Sample Application Uses %zu[Mbyte] per vector.(Total : %zu[Mbyte])\n",
         sizeof(float)*size >> 20, sizeof(float)*size*2 >> 20);

  for(int i = 0 ; i < size ; i ++){
    h_a[i] = 0.0f;
    h_b[i] = 1.0f;
  }

  int ite = 190;
  int _size = 10;
  // dim3 threads(_size,_size,1);
  // dim3 grid(_hen/_size,_hen/_size,1);
  dim3 threads(10,1,1);
  dim3 grid(size,1,1);
  printf("SIZE : %lu\n",size);

  for(int j = 0 ; j < ite ; j ++){
    hipMemcpy(d_a,h_a,sizeof(float)*size,hipMemcpyHostToDevice);
    hipMemcpy(d_b,h_b,sizeof(float)*size,hipMemcpyHostToDevice);

    // hipLaunchKernelGGL(( ___add), dim3(grid),dim3(threads), 0, 0, d_a,d_b,_hen);
    hipLaunchKernelGGL(( ___add), dim3(grid),dim3(threads), 0, 0, d_a,d_b,1);

    hipMemcpy(h_a,d_a,sizeof(float)*size,hipMemcpyDeviceToHost);

    printf("Add[%d]\n",j);
    printf("\tH_A[0] : %f\n",h_a[0]);
    sleep(5);
  }

  int pass = 1;
  for(int i = 0 ; i < size ; i ++){
    // printf("H_A[%d] : %d\n",i,h_a[i]);
  }

  if(pass){
    printf(">Result TEST : PASS\n");
  }else{
    printf(">Result TEST : FAILED\n");
  }

  hipFree(d_a);
  hipFree(d_b);
  // hipFree(d_c);

  // pinned host memory from hipHostMalloc must be released with hipHostFree, not free()
  hipHostFree(h_a);
  hipHostFree(h_b);
  // free(h_c);

  printf("Application Closed...\n");

  gettimeofday(&t1,NULL);
  printf("My RESULT : %f\n",elapsed(t0,t1));

  return 0;
}
4f7ad7dbb7b97f8100a0246f2bf5fbf3a05daaf2.cu
/**
   Sample for Mobile CUDA Simple Adding Vectors Application.
   Author @ Taichirou Suzuki
 **/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>

__global__ void ___add(float* a,float* b,unsigned long size){
  unsigned int _x = blockDim.x * blockIdx.x + threadIdx.x;
  a[_x] += b[_x];
}

static float elapsed(struct timeval tv0,struct timeval tv1){
  return (float)(tv1.tv_sec - tv0.tv_sec)
    + (float)(tv1.tv_usec - tv0.tv_usec) * 0.000001f;
}

int main(void){
  struct timeval t0,t1;
  gettimeofday(&t0,NULL);

  unsigned long _hen = 16000;
  // unsigned long size = _hen * _hen;
  unsigned long size = 16000*16000;
  printf("matrix size : %lu\n",size);

  int numOfLaunchKernel = 1;

  cudaSetDevice(0);

  float* d_a = NULL;
  float* d_b = NULL;
  // float* d_c = NULL;

  cudaMalloc((void**)&d_a,sizeof(float)*size);
  cudaMalloc((void**)&d_b,sizeof(float)*size);

  float* h_a = NULL;
  float* h_b = NULL;

  cudaError_t res;
  res = cudaHostAlloc((void **)&h_a,sizeof(float)*size,0);
  printf("cudaHostAlloc : %d\n",res);
  res = cudaHostAlloc((void **)&h_b,sizeof(float)*size,0);
  printf("cudaHostAlloc : %d\n",res);

  printf("This Sample Application Uses %zu[Mbyte] per vector.(Total : %zu[Mbyte])\n",
         sizeof(float)*size >> 20, sizeof(float)*size*2 >> 20);

  for(int i = 0 ; i < size ; i ++){
    h_a[i] = 0.0f;
    h_b[i] = 1.0f;
  }

  int ite = 190;
  int _size = 10;
  // dim3 threads(_size,_size,1);
  // dim3 grid(_hen/_size,_hen/_size,1);
  dim3 threads(10,1,1);
  dim3 grid(size,1,1);
  printf("SIZE : %lu\n",size);

  for(int j = 0 ; j < ite ; j ++){
    cudaMemcpy(d_a,h_a,sizeof(float)*size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,h_b,sizeof(float)*size,cudaMemcpyHostToDevice);

    // ___add<<<grid,threads>>>(d_a,d_b,_hen);
    ___add<<<grid,threads>>>(d_a,d_b,1);

    cudaMemcpy(h_a,d_a,sizeof(float)*size,cudaMemcpyDeviceToHost);

    printf("Add[%d]\n",j);
    printf("\tH_A[0] : %f\n",h_a[0]);
    sleep(5);
  }

  int pass = 1;
  for(int i = 0 ; i < size ; i ++){
    // printf("H_A[%d] : %d\n",i,h_a[i]);
  }

  if(pass){
    printf(">Result TEST : PASS\n");
  }else{
    printf(">Result TEST : FAILED\n");
  }

  cudaFree(d_a);
  cudaFree(d_b);
  // cudaFree(d_c);

  // pinned host memory from cudaHostAlloc must be released with cudaFreeHost, not free()
  cudaFreeHost(h_a);
  cudaFreeHost(h_b);
  // free(h_c);

  printf("Application Closed...\n");

  gettimeofday(&t1,NULL);
  printf("My RESULT : %f\n",elapsed(t0,t1));

  return 0;
}
f17ef1daed85f04069f06fe780776403c5cf5d87.hip
// !!! This is a file automatically generated by hipify!!!
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/mpitask.hpp"

namespace caffe {

template <typename Dtype>
void ScatterLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  hipDeviceSynchronize();
  for (int i = 0; i < bottom.size(); ++i) {
    MPI_Scatter(bottom[i]->gpu_data(),top[i]->count(),
        MPI_FLOAT,top[i]->mutable_gpu_data(),
        top[i]->count(),MPI_FLOAT,0,MPI_COMM_WORLD);
    hipDeviceSynchronize();
  }
}

template <typename Dtype>
void ScatterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    hipDeviceSynchronize();
    MpiTaskList<Dtype> *task_list = (MpiTaskList<Dtype> *)Caffe::getTaskList();
    task_list->wait_all_task();
    for (int i = 0; i < top.size(); ++i) {
      MPI_Gather(top[i]->gpu_diff(),top[i]->count(),MPI_FLOAT,
          bottom[i]->mutable_gpu_diff(),top[i]->count(),
          MPI_FLOAT,0,MPI_COMM_WORLD);
      hipDeviceSynchronize();
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(ScatterLayer);

}  // namespace caffe
f17ef1daed85f04069f06fe780776403c5cf5d87.cu
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/mpitask.hpp"

namespace caffe {

template <typename Dtype>
void ScatterLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  cudaDeviceSynchronize();
  for (int i = 0; i < bottom.size(); ++i) {
    MPI_Scatter(bottom[i]->gpu_data(),top[i]->count(),
        MPI_FLOAT,top[i]->mutable_gpu_data(),
        top[i]->count(),MPI_FLOAT,0,MPI_COMM_WORLD);
    cudaDeviceSynchronize();
  }
}

template <typename Dtype>
void ScatterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    cudaDeviceSynchronize();
    MpiTaskList<Dtype> *task_list = (MpiTaskList<Dtype> *)Caffe::getTaskList();
    task_list->wait_all_task();
    for (int i = 0; i < top.size(); ++i) {
      MPI_Gather(top[i]->gpu_diff(),top[i]->count(),MPI_FLOAT,
          bottom[i]->mutable_gpu_diff(),top[i]->count(),
          MPI_FLOAT,0,MPI_COMM_WORLD);
      cudaDeviceSynchronize();
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(ScatterLayer);

}  // namespace caffe
bc14676c892e12ce17bdceed450192f4b3735f78.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <chrono> #include <hip/hip_runtime.h> template<typename T> void verify(const T* cpu_out, const T* gpu_out, int n) { int error = memcmp(cpu_out, gpu_out, n * sizeof(T)); printf("%s\n", error ? "FAIL" : "PASS"); } // bank conflict aware optimization #define LOG_MEM_BANKS 5 #define OFFSET(n) ((n) >> LOG_MEM_BANKS) // N is the number of elements to scan in a thread block template<typename T, int N> __global__ void scan_bcao ( T *__restrict__ g_odata, const T *__restrict__ g_idata) { __shared__ T temp[2*N]; int bid = blockIdx.x; g_idata += bid * N; g_odata += bid * N; int thid = threadIdx.x; int a = thid; int b = a + (N/2); int oa = OFFSET(a); int ob = OFFSET(b); temp[a + oa] = g_idata[a]; temp[b + ob] = g_idata[b]; int offset = 1; for (int d = N >> 1; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; ai += OFFSET(ai); bi += OFFSET(bi); temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) temp[N-1+OFFSET(N-1)] = 0; // clear the last elem for (int d = 1; d < N; d *= 2) // traverse down { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; ai += OFFSET(ai); bi += OFFSET(bi); T t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); // required g_odata[a] = temp[a + oa]; g_odata[b] = temp[b + ob]; } template<typename T, int N> __global__ void scan( T *__restrict__ g_odata, const T *__restrict__ g_idata) { __shared__ T temp[N]; int bid = blockIdx.x; g_idata += bid * N; g_odata += bid * N; int thid = threadIdx.x; int offset = 1; temp[2*thid] = g_idata[2*thid]; temp[2*thid+1] = g_idata[2*thid+1]; for (int d = N >> 1; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) temp[N-1] = 0; // clear the last elem for (int d = 1; d < N; d *= 2) // traverse down { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } g_odata[2*thid] = temp[2*thid]; g_odata[2*thid+1] = temp[2*thid+1]; } template <typename T, int N> void runTest (const size_t n, const int repeat, bool timing = false) { const size_t num_blocks = (n + N - 1) / N; const size_t nelems = num_blocks * N; // actual total number of elements size_t bytes = nelems * sizeof(T); T *in = (T*) malloc (bytes); T *cpu_out = (T*) malloc (bytes); T *gpu_out = (T*) malloc (bytes); srand(123); for (size_t n = 0; n < nelems; n++) in[n] = rand() % 5 + 1; T *t_in = in; T *t_out = cpu_out; for (size_t n = 0; n < num_blocks; n++) { t_out[0] = 0; for (int i = 1; i < N; i++) t_out[i] = t_out[i-1] + t_in[i-1]; t_out += N; t_in += N; } T *d_in, *d_out; hipMalloc((void**)&d_in, bytes); hipMemcpy(d_in, in, bytes, hipMemcpyHostToDevice); hipMalloc((void**)&d_out, bytes); dim3 grids (num_blocks); dim3 blocks (N/2); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( scan<T, N>), dim3(grids), dim3(blocks), 0, 0, d_out, d_in); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); if (timing) { printf("Element size in bytes is %zu. 
Average execution time of scan (w/ bank conflicts): %f (us)\n", sizeof(T), (time * 1e-3f) / repeat); } hipMemcpy(gpu_out, d_out, bytes, hipMemcpyDeviceToHost); if (!timing) verify(cpu_out, gpu_out, nelems); // bcao start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( scan_bcao<T, N>), dim3(grids), dim3(blocks), 0, 0, d_out, d_in); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); auto bcao_time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); if (timing) { printf("Element size in bytes is %zu. Average execution time of scan (w/o bank conflicts): %f (us). ", sizeof(T), (bcao_time * 1e-3f) / repeat); printf("Reduce the time by %.1f%%\n", (time - bcao_time) * 1.0 / time * 100); } hipMemcpy(gpu_out, d_out, bytes, hipMemcpyDeviceToHost); if (!timing) verify(cpu_out, gpu_out, nelems); hipFree(d_in); hipFree(d_out); free(in); free(cpu_out); free(gpu_out); } template<int N> void run (const int n, const int repeat) { for (int i = 0; i < 2; i++) { bool report_timing = i > 0; printf("\nThe number of elements to scan in a thread block: %d\n", N); runTest< char, N>(n, repeat, report_timing); runTest<short, N>(n, repeat, report_timing); runTest< int, N>(n, repeat, report_timing); runTest< long, N>(n, repeat, report_timing); } } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: %s <number of elements> <repeat>\n", argv[0]); return 1; } const int n = atoi(argv[1]); const int repeat = atoi(argv[2]); run< 128>(n, repeat); run< 256>(n, repeat); run< 512>(n, repeat); run<1024>(n, repeat); run<2048>(n, repeat); return 0; }
bc14676c892e12ce17bdceed450192f4b3735f78.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <chrono> #include <hip/hip_runtime.h> template<typename T> void verify(const T* cpu_out, const T* gpu_out, int n) { int error = memcmp(cpu_out, gpu_out, n * sizeof(T)); printf("%s\n", error ? "FAIL" : "PASS"); } // bank conflict aware optimization #define LOG_MEM_BANKS 5 #define OFFSET(n) ((n) >> LOG_MEM_BANKS) // N is the number of elements to scan in a thread block template<typename T, int N> __global__ void scan_bcao ( T *__restrict__ g_odata, const T *__restrict__ g_idata) { __shared__ T temp[2*N]; int bid = blockIdx.x; g_idata += bid * N; g_odata += bid * N; int thid = threadIdx.x; int a = thid; int b = a + (N/2); int oa = OFFSET(a); int ob = OFFSET(b); temp[a + oa] = g_idata[a]; temp[b + ob] = g_idata[b]; int offset = 1; for (int d = N >> 1; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; ai += OFFSET(ai); bi += OFFSET(bi); temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) temp[N-1+OFFSET(N-1)] = 0; // clear the last elem for (int d = 1; d < N; d *= 2) // traverse down { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; ai += OFFSET(ai); bi += OFFSET(bi); T t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); // required g_odata[a] = temp[a + oa]; g_odata[b] = temp[b + ob]; } template<typename T, int N> __global__ void scan( T *__restrict__ g_odata, const T *__restrict__ g_idata) { __shared__ T temp[N]; int bid = blockIdx.x; g_idata += bid * N; g_odata += bid * N; int thid = threadIdx.x; int offset = 1; temp[2*thid] = g_idata[2*thid]; temp[2*thid+1] = g_idata[2*thid+1]; for (int d = N >> 1; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) temp[N-1] = 0; // clear the last elem for (int d = 1; d < N; d *= 2) // traverse down { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } g_odata[2*thid] = temp[2*thid]; g_odata[2*thid+1] = temp[2*thid+1]; } template <typename T, int N> void runTest (const size_t n, const int repeat, bool timing = false) { const size_t num_blocks = (n + N - 1) / N; const size_t nelems = num_blocks * N; // actual total number of elements size_t bytes = nelems * sizeof(T); T *in = (T*) malloc (bytes); T *cpu_out = (T*) malloc (bytes); T *gpu_out = (T*) malloc (bytes); srand(123); for (size_t n = 0; n < nelems; n++) in[n] = rand() % 5 + 1; T *t_in = in; T *t_out = cpu_out; for (size_t n = 0; n < num_blocks; n++) { t_out[0] = 0; for (int i = 1; i < N; i++) t_out[i] = t_out[i-1] + t_in[i-1]; t_out += N; t_in += N; } T *d_in, *d_out; hipMalloc((void**)&d_in, bytes); hipMemcpy(d_in, in, bytes, hipMemcpyHostToDevice); hipMalloc((void**)&d_out, bytes); dim3 grids (num_blocks); dim3 blocks (N/2); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { scan<T, N><<<grids, blocks>>>(d_out, d_in); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); if (timing) { printf("Element size in bytes is %zu. 
Average execution time of scan (w/ bank conflicts): %f (us)\n", sizeof(T), (time * 1e-3f) / repeat); } hipMemcpy(gpu_out, d_out, bytes, hipMemcpyDeviceToHost); if (!timing) verify(cpu_out, gpu_out, nelems); // bcao start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { scan_bcao<T, N><<<grids, blocks>>>(d_out, d_in); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); auto bcao_time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); if (timing) { printf("Element size in bytes is %zu. Average execution time of scan (w/o bank conflicts): %f (us). ", sizeof(T), (bcao_time * 1e-3f) / repeat); printf("Reduce the time by %.1f%%\n", (time - bcao_time) * 1.0 / time * 100); } hipMemcpy(gpu_out, d_out, bytes, hipMemcpyDeviceToHost); if (!timing) verify(cpu_out, gpu_out, nelems); hipFree(d_in); hipFree(d_out); free(in); free(cpu_out); free(gpu_out); } template<int N> void run (const int n, const int repeat) { for (int i = 0; i < 2; i++) { bool report_timing = i > 0; printf("\nThe number of elements to scan in a thread block: %d\n", N); runTest< char, N>(n, repeat, report_timing); runTest<short, N>(n, repeat, report_timing); runTest< int, N>(n, repeat, report_timing); runTest< long, N>(n, repeat, report_timing); } } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: %s <number of elements> <repeat>\n", argv[0]); return 1; } const int n = atoi(argv[1]); const int repeat = atoi(argv[2]); run< 128>(n, repeat); run< 256>(n, repeat); run< 512>(n, repeat); run<1024>(n, repeat); run<2048>(n, repeat); return 0; }
7ddfd9b3572ff1f3fa44eb6628d4d7470d344607.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> # include "hip/hip_runtime.h" #define SIZE 50 __global__ void VectorAdd(int a[], int b[], int c[], int n){ int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n){ c[i] = a[i] + b[i]; } } int main(){ int *a, *b, *c; a = (int*)malloc(SIZE * sizeof(int)); b = (int*)malloc(SIZE * sizeof(int)); c = (int*)malloc(SIZE * sizeof(int)); for (int i = 0; i < SIZE; i++) { a[i] = i+1; b[i] = i; } int *d_a, *d_b, *d_c; hipMalloc(&d_a, SIZE * sizeof(int)); hipMalloc(&d_b, SIZE * sizeof(int)); hipMalloc(&d_c, SIZE * sizeof(int)); hipMemcpy(d_a, a, SIZE * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, b, SIZE * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( VectorAdd) , dim3(2), dim3(SIZE/2) , 0, 0, d_a, d_b, d_c, SIZE); hipDeviceSynchronize(); // jab tak saare threads ka kaam nahi hota.... tab tak ruko hipMemcpy(c, d_c, SIZE * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < SIZE; i++) printf("%d + %d = %d\n", a[i], b[i], c[i]); hipFree(d_a); hipFree(d_b); hipFree(d_c); free(a); free(b); free(c); return 0; }
7ddfd9b3572ff1f3fa44eb6628d4d7470d344607.cu
#include <stdio.h>
# include "cuda_runtime.h"

#define SIZE 50

__global__ void VectorAdd(int a[], int b[], int c[], int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n){
        c[i] = a[i] + b[i];
    }
}

int main(){
    int *a, *b, *c;

    a = (int*)malloc(SIZE * sizeof(int));
    b = (int*)malloc(SIZE * sizeof(int));
    c = (int*)malloc(SIZE * sizeof(int));

    for (int i = 0; i < SIZE; i++) {
        a[i] = i+1;
        b[i] = i;
    }

    int *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, SIZE * sizeof(int));
    cudaMalloc(&d_b, SIZE * sizeof(int));
    cudaMalloc(&d_c, SIZE * sizeof(int));

    cudaMemcpy(d_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);

    VectorAdd <<< 2, SIZE/2 >>> (d_a, d_b, d_c, SIZE);
    cudaDeviceSynchronize(); // wait until every thread has finished its work

    cudaMemcpy(c, d_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < SIZE; i++)
        printf("%d + %d = %d\n", a[i], b[i], c[i]);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    free(a);
    free(b);
    free(c);

    return 0;
}
9c1356b281f452a2445b48996117b0c361460546.hip
// !!! This is a file automatically generated by hipify!!! #include "common_cuda.h" #include "hip/hip_runtime.h" #include "executor.h" #include <chrono> #include <iostream> #include <thread> #include <tuple> using namespace std; void Executor::register_queue(string model_name, PhysicalOpQueue queue) { hipStream_t s; hipStreamCreate(&s); ExecutorCtx ctx = {model_name, s, queue}; executor_queues.emplace_back(ctx); } void Executor::stop() { wait(); should_stop = true; } void Executor::start() { CHECK_CUDEVICE(hipCtxSetCurrent(*ctx)); while (true) { if (should_stop) { break; } shared_ptr<PhysicalOperator> op = nullptr; for (ExecutorCtx &ctx_struct : executor_queues) { while (ctx_struct.queue->try_dequeue(op)) { op->dispatch(ctx_struct.stream); // events_registrar.insert(ctx_struct.model_name, events); } } } } void Executor::wait() { for (ExecutorCtx &ctx_struct : executor_queues) { while (ctx_struct.queue->size_approx() != 0) { std::this_thread::sleep_for(10ms); } } hipDeviceSynchronize(); }
9c1356b281f452a2445b48996117b0c361460546.cu
#include "common_cuda.h" #include "cuda.h" #include "executor.h" #include <chrono> #include <iostream> #include <thread> #include <tuple> using namespace std; void Executor::register_queue(string model_name, PhysicalOpQueue queue) { cudaStream_t s; cudaStreamCreate(&s); ExecutorCtx ctx = {model_name, s, queue}; executor_queues.emplace_back(ctx); } void Executor::stop() { wait(); should_stop = true; } void Executor::start() { CHECK_CUDEVICE(cuCtxSetCurrent(*ctx)); while (true) { if (should_stop) { break; } shared_ptr<PhysicalOperator> op = nullptr; for (ExecutorCtx &ctx_struct : executor_queues) { while (ctx_struct.queue->try_dequeue(op)) { op->dispatch(ctx_struct.stream); // events_registrar.insert(ctx_struct.model_name, events); } } } } void Executor::wait() { for (ExecutorCtx &ctx_struct : executor_queues) { while (ctx_struct.queue->size_approx() != 0) { std::this_thread::sleep_for(10ms); } } cudaDeviceSynchronize(); }
04a2529fb44af00988742147c8e0af220b6c5ef0.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime_api.h> #include <chrono> __constant__ float const_stencilWeight[21]; // base case __global__ void stencil(float *src, float *dst, int size, float *stencilWeight) { int idx = blockIdx.x * blockDim.x + threadIdx.x; idx += 11; if (idx >= size) return; float out = 0; #pragma unroll for(int i = -10;i < 10; i++) { out += src[idx+i] * stencilWeight[i+10]; } dst[idx] = out; } // read only cache stencil coefficients __global__ void stencilReadOnly1(float *src, float *dst, int size, float* stencilWeight) { int idx = blockIdx.x * blockDim.x + threadIdx.x; idx += 11; if (idx >= size) return; float out = 0; #pragma unroll for(int i = -10;i < 10; i++) { out += src[idx+i] * stencilWeight[i+10]; } dst[idx] = out; } // read only data __global__ void stencilReadOnly2(float *src, float *dst, int size, float* stencilWeight) { int idx = blockIdx.x * blockDim.x + threadIdx.x; idx += 11; if (idx >= size) return; float out = 0; #pragma unroll for(int i = -10;i < 10; i++) { out += src[idx+i] * stencilWeight[i+10]; } dst[idx] = out; } // read only coefficients and data __global__ void stencilReadOnly3(float *src, float *dst, int size, float* stencilWeight) { int idx = blockIdx.x * blockDim.x + threadIdx.x; idx += 11; if (idx >= size) return; float out = 0; #pragma unroll for(int i = -10;i < 10; i++) { out += src[idx+i] * stencilWeight[i+10]; } dst[idx] = out; } // constat memory coefficients __global__ void stencilConst1(float *src, float *dst, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; idx += 11; if (idx >= size) return; float out = 0; #pragma unroll for(int i = -10;i < 10; i++) { out += src[idx+i] * const_stencilWeight[i+10]; } dst[idx] = out; } // constant memory coefficients and data through read only cache __global__ void stencilConst2(float *src, float *dst, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; idx += 11; if (idx >= size) return; float out = 0; #pragma unroll for(int i = -10;i < 10; i++) { out += src[idx+i] * const_stencilWeight[i+10]; } dst[idx] = out; } // constant memory coefficients and data from shared __global__ void stencilShared1(float *src, float *dst, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float buffer[1024+21]; for(int i = threadIdx.x; i < 1024+21; i = i + 1024) { buffer[i] = src[idx+i]; } idx += 11; if (idx >= size) return; __syncthreads(); float out = 0; #pragma unroll for(int i = -10;i < 10; i++) { out += buffer[threadIdx.x+10+i] * const_stencilWeight[i+10]; } dst[idx] = out; } // constant memory coefficients and data from shared thorugh read only __global__ void stencilShared2(float *src, float *dst, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float buffer[1024+21]; for(int i = threadIdx.x; i < 1024+21; i = i + 1024) { buffer[i] = src[idx+i]; } idx += 11; if (idx >= size) return; __syncthreads(); float out = 0; #pragma unroll for(int i = -10;i < 10; i++) { out += buffer[threadIdx.x+10+i] * const_stencilWeight[i+10]; } dst[idx] = out; } bool verify(float *arr, float *corr, int count) { // skip the first elements since they may be wrong for(int i = 11; i < count; i++) { if(arr[i] != corr[i]) { std::cout << "check failed" << i << " " << arr[i] << " != " << corr[i] << std::endl; exit(1); } } return true; } int main() { float *a; float *b; float *bOut; float *bCorr; float *weights; (hipMalloc(&a, sizeof(float)*102400000)); (hipMalloc(&b, sizeof(float)*102400000)); (hipHostMalloc(&bOut, 
sizeof(float)*102400000)); (hipMallocManaged(&bCorr, sizeof(float)*102400000)); (hipMallocManaged(&weights, sizeof(float)*21)); hipDeviceSynchronize(); for(int i = 0; i < 102400000;i++) { //a[i] = 0; //b[i] = 0; bCorr[i] = 0; } hipMemset(a, 1, 102400000); hipMemset(b, 1, 102400000); hipMemset(bCorr, 1, 102400000); hipMemset(bOut, 1, 102400000); hipDeviceSynchronize(); int blockSize = 1024; int blocks = 10000; for(int i = 0; i < 21;i++) weights[i] = i-10; hipDeviceSynchronize(); // copy to constant memory hipMemcpyToSymbol(const_stencilWeight, weights, sizeof(float)*21); // run the basic case once to get the "correct" results hipLaunchKernelGGL(( ((stencil), dim3(blocks), dim3(blockSize), 0, 0, a, bCorr, 10240000, weights))); hipDeviceSynchronize(); hipLaunchKernelGGL(( ((stencil), dim3(blocks), dim3(blockSize), 0, 0, a, b, 10240000, weights))); hipDeviceSynchronize(); (hipMemcpy(bOut, b, sizeof(float)*10240000, hipMemcpyDefault)); verify(bOut, bCorr, 1000); hipSetDevice(0); float minTime = 10000; for(int i = 0; i < 10; i++) { std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); hipLaunchKernelGGL(( ((stencil), dim3(blocks), dim3(blockSize), 0, 0, a, b, 10240000, weights))); hipDeviceSynchronize(); end = std::chrono::system_clock::now(); (hipMemcpy(bOut, b, sizeof(float)*10240000, hipMemcpyDefault)); verify(bOut, bCorr, 1000); std::chrono::duration<float> elapsed_seconds = end-start; minTime = ::min(elapsed_seconds.count(), minTime); } std::cout << "Non optimized " << (blockSize*blocks)/minTime << " updates/s" << std::endl; minTime = 10000; std::cout << std::endl; for(int i = 0; i < 10; i++) { hipDeviceSynchronize(); std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); hipLaunchKernelGGL(( ((stencilReadOnly1), dim3(blocks), dim3(blockSize), 0, 0, a, b, 10240000, weights))); hipDeviceSynchronize(); end = std::chrono::system_clock::now(); (hipMemcpy(bOut, b, sizeof(float)*10240000, hipMemcpyDefault)); verify(bOut, bCorr, 1000); std::chrono::duration<float> elapsed_seconds = end-start; minTime = ::min(elapsed_seconds.count(), minTime); } std::cout << "read only cache stencil coefficients " <<(blockSize*blocks)/minTime << " updates/s" << std::endl; minTime = 10000; for(int i = 0; i < 10; i++) { hipDeviceSynchronize(); std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); hipLaunchKernelGGL(( ((stencilReadOnly2), dim3(blocks), dim3(blockSize), 0, 0, a, b, 10240000, weights))); hipDeviceSynchronize(); end = std::chrono::system_clock::now(); (hipMemcpy(bOut, b, sizeof(float)*10240000, hipMemcpyDefault)); verify(bOut, bCorr, 1000); std::chrono::duration<float> elapsed_seconds = end-start; minTime = ::min(elapsed_seconds.count(), minTime); } std::cout << "read only data " << (blockSize*blocks)/minTime << " updates/s" << std::endl; minTime = 10000; for(int i = 0; i < 10; i++) { hipDeviceSynchronize(); std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); hipLaunchKernelGGL(( ((stencilReadOnly3), dim3(blocks), dim3(blockSize), 0, 0, a, b, 10240000, weights))); hipDeviceSynchronize(); end = std::chrono::system_clock::now(); (hipMemcpy(bOut, b, sizeof(float)*10240000, hipMemcpyDefault)); verify(bOut, bCorr, 1000); std::chrono::duration<float> elapsed_seconds = end-start; minTime = ::min(elapsed_seconds.count(), minTime); } std::cout << "read only coefficients and data " << (blockSize*blocks)/minTime 
<< " updates/s" << std::endl; minTime = 10000; std::cout << std::endl; for(int i = 0; i < 10; i++) { hipDeviceSynchronize(); std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); hipLaunchKernelGGL(( ((stencilConst1), dim3(blocks), dim3(blockSize), 0, 0, a, b, 10240000))); hipDeviceSynchronize(); end = std::chrono::system_clock::now(); (hipMemcpy(bOut, b, sizeof(float)*10240000, hipMemcpyDefault)); verify(bOut, bCorr, 1000); std::chrono::duration<float> elapsed_seconds = end-start; minTime = ::min(elapsed_seconds.count(), minTime); } std::cout << "constant memory coefficients " << (blockSize*blocks)/minTime << " updates/s" << std::endl; minTime = 10000; for(int i = 0; i < 10; i++) { hipDeviceSynchronize(); std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); hipLaunchKernelGGL(( ((stencilConst2), dim3(blocks), dim3(blockSize), 0, 0, a, b, 10240000))); hipDeviceSynchronize(); end = std::chrono::system_clock::now(); (hipMemcpy(bOut, b, sizeof(float)*10240000, hipMemcpyDefault)); verify(bOut, bCorr, 1000); std::chrono::duration<float> elapsed_seconds = end-start; minTime = ::min(elapsed_seconds.count(), minTime); } std::cout << "constant memory coefficients and data through read only cache " << (blockSize*blocks)/minTime << " updates/s" << std::endl; std::cout << std::endl; minTime = 10000; for(int i = 0; i < 10; i++) { hipDeviceSynchronize(); std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); hipLaunchKernelGGL(( ((stencilShared1), dim3(blocks), dim3(blockSize), 0, 0, a, b, 10240000))); hipDeviceSynchronize(); end = std::chrono::system_clock::now(); (hipMemcpy(bOut, b, sizeof(float)*10240000, hipMemcpyDefault)); verify(bOut, bCorr, 1000); std::chrono::duration<float> elapsed_seconds = end-start; minTime = ::min(elapsed_seconds.count(), minTime); } std::cout << "constant memory coefficients and data from shared " << (blockSize*blocks)/minTime << " updates/s" << std::endl; minTime = 10000; minTime = 10000; for(int i = 0; i < 10; i++) { hipDeviceSynchronize(); std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); hipLaunchKernelGGL(( ((stencilShared2), dim3(blocks), dim3(blockSize), 0, 0, a, b, 10240000))); hipDeviceSynchronize(); end = std::chrono::system_clock::now(); (hipMemcpy(bOut, b, sizeof(float)*10240000, hipMemcpyDefault)); verify(bOut, bCorr, 1000); std::chrono::duration<float> elapsed_seconds = end-start; minTime = ::min(elapsed_seconds.count(), minTime); } std::cout << "constant memory coefficients and data from shared thorugh read only " << (blockSize*blocks)/minTime << " updates/s" << std::endl; minTime = 10000; }
04a2529fb44af00988742147c8e0af220b6c5ef0.cu
#include <iostream>
#include <cuda_runtime_api.h>
#include <chrono>

__constant__ float const_stencilWeight[21];

// base case
__global__ void stencil(float *src, float *dst, int size, float *stencilWeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
    // note: the loop covers taps -10..9, so weight index 20 is never used
#pragma unroll
    for (int i = -10; i < 10; i++)
    {
        out += src[idx + i] * stencilWeight[i + 10];
    }
    dst[idx] = out;
}

// read only cache stencil coefficients
// (as written, identical to the base kernel: nothing marks the loads as read-only)
__global__ void stencilReadOnly1(float *src, float *dst, int size, float *stencilWeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
#pragma unroll
    for (int i = -10; i < 10; i++)
    {
        out += src[idx + i] * stencilWeight[i + 10];
    }
    dst[idx] = out;
}

// read only data
__global__ void stencilReadOnly2(float *src, float *dst, int size, float *stencilWeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
#pragma unroll
    for (int i = -10; i < 10; i++)
    {
        out += src[idx + i] * stencilWeight[i + 10];
    }
    dst[idx] = out;
}

// read only coefficients and data
__global__ void stencilReadOnly3(float *src, float *dst, int size, float *stencilWeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
#pragma unroll
    for (int i = -10; i < 10; i++)
    {
        out += src[idx + i] * stencilWeight[i + 10];
    }
    dst[idx] = out;
}

// constant memory coefficients
__global__ void stencilConst1(float *src, float *dst, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
#pragma unroll
    for (int i = -10; i < 10; i++)
    {
        out += src[idx + i] * const_stencilWeight[i + 10];
    }
    dst[idx] = out;
}

// constant memory coefficients and data through read only cache
__global__ void stencilConst2(float *src, float *dst, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;
    float out = 0;
#pragma unroll
    for (int i = -10; i < 10; i++)
    {
        out += src[idx + i] * const_stencilWeight[i + 10];
    }
    dst[idx] = out;
}

// constant memory coefficients and data from shared
__global__ void stencilShared1(float *src, float *dst, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ float buffer[1024 + 21];
    // stage data into shared memory (offsets are taken relative to this thread's global index)
    for (int i = threadIdx.x; i < 1024 + 21; i = i + 1024)
    {
        buffer[i] = src[idx + i];
    }
    idx += 11;
    if (idx >= size) return;
    __syncthreads();
    float out = 0;
#pragma unroll
    for (int i = -10; i < 10; i++)
    {
        out += buffer[threadIdx.x + 10 + i] * const_stencilWeight[i + 10];
    }
    dst[idx] = out;
}

// constant memory coefficients and data from shared through read only
__global__ void stencilShared2(float *src, float *dst, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ float buffer[1024 + 21];
    for (int i = threadIdx.x; i < 1024 + 21; i = i + 1024)
    {
        buffer[i] = src[idx + i];
    }
    idx += 11;
    if (idx >= size) return;
    __syncthreads();
    float out = 0;
#pragma unroll
    for (int i = -10; i < 10; i++)
    {
        out += buffer[threadIdx.x + 10 + i] * const_stencilWeight[i + 10];
    }
    dst[idx] = out;
}

bool verify(float *arr, float *corr, int count)
{
    // skip the first elements since they may be wrong
    for (int i = 11; i < count; i++)
    {
        if (arr[i] != corr[i])
        {
            std::cout << "check failed " << i << " " << arr[i] << " != " << corr[i] << std::endl;
            exit(1);
        }
    }
    return true;
}

int main()
{
    float *a;
    float *b;
    float *bOut;
    float *bCorr;
    float *weights;

    (cudaMalloc(&a, sizeof(float) * 102400000));
    (cudaMalloc(&b, sizeof(float) * 102400000));
    (cudaMallocHost(&bOut, sizeof(float) * 102400000));
    (cudaMallocManaged(&bCorr, sizeof(float) * 102400000));
    (cudaMallocManaged(&weights, sizeof(float) * 21));
    cudaDeviceSynchronize();

    for (int i = 0; i < 102400000; i++)
    {
        //a[i] = 0;
        //b[i] = 0;
        bCorr[i] = 0;
    }

    cudaMemset(a, 1, 102400000);
    cudaMemset(b, 1, 102400000);
    cudaMemset(bCorr, 1, 102400000);
    cudaMemset(bOut, 1, 102400000);
    cudaDeviceSynchronize();

    int blockSize = 1024;
    int blocks = 10000;

    for (int i = 0; i < 21; i++)
        weights[i] = i - 10;
    cudaDeviceSynchronize();

    // copy to constant memory
    cudaMemcpyToSymbol(const_stencilWeight, weights, sizeof(float) * 21);

    // run the basic case once to get the "correct" results
    ((stencil<<<blocks, blockSize>>>(a, bCorr, 10240000, weights)));
    cudaDeviceSynchronize();
    ((stencil<<<blocks, blockSize>>>(a, b, 10240000, weights)));
    cudaDeviceSynchronize();
    (cudaMemcpy(bOut, b, sizeof(float) * 10240000, cudaMemcpyDefault));
    verify(bOut, bCorr, 1000);

    cudaSetDevice(0);

    float minTime = 10000;
    for (int i = 0; i < 10; i++)
    {
        std::chrono::time_point<std::chrono::system_clock> start, end;
        start = std::chrono::system_clock::now();
        ((stencil<<<blocks, blockSize>>>(a, b, 10240000, weights)));
        cudaDeviceSynchronize();
        end = std::chrono::system_clock::now();
        (cudaMemcpy(bOut, b, sizeof(float) * 10240000, cudaMemcpyDefault));
        verify(bOut, bCorr, 1000);
        std::chrono::duration<float> elapsed_seconds = end - start;
        minTime = std::min(elapsed_seconds.count(), minTime);
    }
    std::cout << "Non optimized " << (blockSize * blocks) / minTime << " updates/s" << std::endl;
    minTime = 10000;

    std::cout << std::endl;

    for (int i = 0; i < 10; i++)
    {
        cudaDeviceSynchronize();
        std::chrono::time_point<std::chrono::system_clock> start, end;
        start = std::chrono::system_clock::now();
        ((stencilReadOnly1<<<blocks, blockSize>>>(a, b, 10240000, weights)));
        cudaDeviceSynchronize();
        end = std::chrono::system_clock::now();
        (cudaMemcpy(bOut, b, sizeof(float) * 10240000, cudaMemcpyDefault));
        verify(bOut, bCorr, 1000);
        std::chrono::duration<float> elapsed_seconds = end - start;
        minTime = std::min(elapsed_seconds.count(), minTime);
    }
    std::cout << "read only cache stencil coefficients " << (blockSize * blocks) / minTime << " updates/s" << std::endl;
    minTime = 10000;

    for (int i = 0; i < 10; i++)
    {
        cudaDeviceSynchronize();
        std::chrono::time_point<std::chrono::system_clock> start, end;
        start = std::chrono::system_clock::now();
        ((stencilReadOnly2<<<blocks, blockSize>>>(a, b, 10240000, weights)));
        cudaDeviceSynchronize();
        end = std::chrono::system_clock::now();
        (cudaMemcpy(bOut, b, sizeof(float) * 10240000, cudaMemcpyDefault));
        verify(bOut, bCorr, 1000);
        std::chrono::duration<float> elapsed_seconds = end - start;
        minTime = std::min(elapsed_seconds.count(), minTime);
    }
    std::cout << "read only data " << (blockSize * blocks) / minTime << " updates/s" << std::endl;
    minTime = 10000;

    for (int i = 0; i < 10; i++)
    {
        cudaDeviceSynchronize();
        std::chrono::time_point<std::chrono::system_clock> start, end;
        start = std::chrono::system_clock::now();
        ((stencilReadOnly3<<<blocks, blockSize>>>(a, b, 10240000, weights)));
        cudaDeviceSynchronize();
        end = std::chrono::system_clock::now();
        (cudaMemcpy(bOut, b, sizeof(float) * 10240000, cudaMemcpyDefault));
        verify(bOut, bCorr, 1000);
        std::chrono::duration<float> elapsed_seconds = end - start;
        minTime = std::min(elapsed_seconds.count(), minTime);
    }
    std::cout << "read only coefficients and data " << (blockSize * blocks) / minTime << " updates/s" << std::endl;
    minTime = 10000;

    std::cout << std::endl;

    for (int i = 0; i < 10; i++)
    {
        cudaDeviceSynchronize();
        std::chrono::time_point<std::chrono::system_clock> start, end;
        start = std::chrono::system_clock::now();
        ((stencilConst1<<<blocks, blockSize>>>(a, b, 10240000)));
        cudaDeviceSynchronize();
        end = std::chrono::system_clock::now();
        (cudaMemcpy(bOut, b, sizeof(float) * 10240000, cudaMemcpyDefault));
        verify(bOut, bCorr, 1000);
        std::chrono::duration<float> elapsed_seconds = end - start;
        minTime = std::min(elapsed_seconds.count(), minTime);
    }
    std::cout << "constant memory coefficients " << (blockSize * blocks) / minTime << " updates/s" << std::endl;
    minTime = 10000;

    for (int i = 0; i < 10; i++)
    {
        cudaDeviceSynchronize();
        std::chrono::time_point<std::chrono::system_clock> start, end;
        start = std::chrono::system_clock::now();
        ((stencilConst2<<<blocks, blockSize>>>(a, b, 10240000)));
        cudaDeviceSynchronize();
        end = std::chrono::system_clock::now();
        (cudaMemcpy(bOut, b, sizeof(float) * 10240000, cudaMemcpyDefault));
        verify(bOut, bCorr, 1000);
        std::chrono::duration<float> elapsed_seconds = end - start;
        minTime = std::min(elapsed_seconds.count(), minTime);
    }
    std::cout << "constant memory coefficients and data through read only cache " << (blockSize * blocks) / minTime << " updates/s" << std::endl;

    std::cout << std::endl;

    minTime = 10000;
    for (int i = 0; i < 10; i++)
    {
        cudaDeviceSynchronize();
        std::chrono::time_point<std::chrono::system_clock> start, end;
        start = std::chrono::system_clock::now();
        ((stencilShared1<<<blocks, blockSize>>>(a, b, 10240000)));
        cudaDeviceSynchronize();
        end = std::chrono::system_clock::now();
        (cudaMemcpy(bOut, b, sizeof(float) * 10240000, cudaMemcpyDefault));
        verify(bOut, bCorr, 1000);
        std::chrono::duration<float> elapsed_seconds = end - start;
        minTime = std::min(elapsed_seconds.count(), minTime);
    }
    std::cout << "constant memory coefficients and data from shared " << (blockSize * blocks) / minTime << " updates/s" << std::endl;
    minTime = 10000;

    minTime = 10000;
    for (int i = 0; i < 10; i++)
    {
        cudaDeviceSynchronize();
        std::chrono::time_point<std::chrono::system_clock> start, end;
        start = std::chrono::system_clock::now();
        ((stencilShared2<<<blocks, blockSize>>>(a, b, 10240000)));
        cudaDeviceSynchronize();
        end = std::chrono::system_clock::now();
        (cudaMemcpy(bOut, b, sizeof(float) * 10240000, cudaMemcpyDefault));
        verify(bOut, bCorr, 1000);
        std::chrono::duration<float> elapsed_seconds = end - start;
        minTime = std::min(elapsed_seconds.count(), minTime);
    }
    std::cout << "constant memory coefficients and data from shared through read only " << (blockSize * blocks) / minTime << " updates/s" << std::endl;
    minTime = 10000;
}
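// ---------------------------------------------------------------------------
// Editor's sketch (not part of the benchmark file above; the kernel name is
// made up): the stencilReadOnly* kernels above are textually identical to the
// base kernel -- nothing in their signatures marks src or stencilWeight as
// read-only, so the compiler is not entitled to route those loads through the
// read-only (texture) data cache. A variant that actually requests that path
// is usually written with const __restrict__ pointers or explicit __ldg()
// loads (__ldg requires compute capability 3.5 or newer). A minimal sketch:
__global__ void stencilReadOnlySketch(const float* __restrict__ src,
                                      float* __restrict__ dst,
                                      int size,
                                      const float* __restrict__ stencilWeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x + 11;
    if (idx >= size) return;

    float out = 0.0f;
#pragma unroll
    for (int i = -10; i < 10; i++)   // same tap range as the kernels above
    {
        // __ldg issues the loads through the read-only data cache explicitly
        out += __ldg(&src[idx + i]) * __ldg(&stencilWeight[i + 10]);
    }
    dst[idx] = out;
}
// It would be timed exactly like the other variants, e.g.
//   stencilReadOnlySketch<<<blocks, blockSize>>>(a, b, 10240000, weights);
// ---------------------------------------------------------------------------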
d6ca92adc88c825c0ae9e0c690336b83c44e8dec.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> #ifndef MAX #define MAX(a,b) (a > b ? a : b) #endif /* Add two vectors on the GPU */ __global__ void vectorAddGPU(float *a, float *b, float *c, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } // Allocate generic memory with malloc() and pin it laster instead of using hipHostMalloc() bool bPinGenericMemory = false; // Macro to aligned up to the memory size in question #define MEMORY_ALIGNMENT 4096 #define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) ) int main(int argc, char **argv) { int n, nelem, deviceCount; int idev = 0; // use default device 0 char *device = NULL; unsigned int flags; size_t bytes; float *a, *b, *c; // Pinned memory allocated on the CPU float *a_UA, *b_UA, *c_UA; // Non-4K Aligned Pinned memory on the CPU float *d_a, *d_b, *d_c; // Device pointers for mapped memory float errorNorm, refNorm, ref, diff; hipDeviceProp_t deviceProp; if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printf("Usage: simpleZeroCopy [OPTION]\n\n"); printf("Options:\n"); printf(" --device=[device #] Specify the device to be used\n"); printf(" --use_generic_memory (optional) use generic page-aligned for system memory\n"); return EXIT_SUCCESS; } /* Get the device selected by the user or default to 0, and then set it. */ if (getCmdLineArgumentString(argc, (const char **)argv, "device", &device)) { hipGetDeviceCount(&deviceCount); idev = atoi(device); if (idev >= deviceCount || idev < 0) { fprintf(stderr, "Device number %d is invalid, will use default CUDA device 0.\n", idev); idev = 0; } } // if GPU found supports SM 1.2, then continue, otherwise we exit if (!checkCudaCapabilities(1, 2)) { exit(EXIT_SUCCESS); } if (checkCmdLineFlag(argc, (const char **)argv, "use_generic_memory")) { #if defined(__APPLE__) || defined(MACOSX) bPinGenericMemory = false; // Generic Pinning of System Paged memory is not currently supported on Mac OSX #else bPinGenericMemory = true; #endif } if (bPinGenericMemory) { printf("> Using Generic System Paged Memory (malloc)\n"); } else { printf("> Using CUDA Host Allocated (hipHostMalloc)\n"); } checkCudaErrors(hipSetDevice(idev)); /* Verify the selected device supports mapped memory and set the device flags for mapping host memory. 
*/ checkCudaErrors(hipGetDeviceProperties(&deviceProp, idev)); #if CUDART_VERSION >= 2020 if (!deviceProp.canMapHostMemory) { fprintf(stderr, "Device %d does not support mapping CPU host memory!\n", idev); exit(EXIT_SUCCESS); } checkCudaErrors(hipSetDeviceFlags(hipDeviceMapHost)); #else fprintf(stderr, "CUDART version %d.%d does not support <hipDeviceProp_t.canMapHostMemory> field\n", , CUDART_VERSION/1000, (CUDART_VERSION%100)/10); exit(EXIT_SUCCESS); #endif #if CUDART_VERSION < 4000 if (bPinGenericMemory) { fprintf(stderr, "CUDART version %d.%d does not support <hipHostRegister> function\n", CUDART_VERSION/1000, (CUDART_VERSION%100)/10); exit(EXIT_SUCCESS); } #endif /* Allocate mapped CPU memory. */ nelem = 1048576; bytes = nelem*sizeof(float); if (bPinGenericMemory) { #if CUDART_VERSION >= 4000 a_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT); b_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT); c_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT); // We need to ensure memory is aligned to 4K (so we will need to padd memory accordingly) a = (float *) ALIGN_UP(a_UA, MEMORY_ALIGNMENT); b = (float *) ALIGN_UP(b_UA, MEMORY_ALIGNMENT); c = (float *) ALIGN_UP(c_UA, MEMORY_ALIGNMENT); checkCudaErrors(hipHostRegister(a, bytes, HIP_MEMHOSTALLOC_DEVICEMAP)); checkCudaErrors(hipHostRegister(b, bytes, HIP_MEMHOSTALLOC_DEVICEMAP)); checkCudaErrors(hipHostRegister(c, bytes, HIP_MEMHOSTALLOC_DEVICEMAP)); #endif } else { #if CUDART_VERSION >= 2020 flags = hipHostMallocMapped; checkCudaErrors(hipHostMalloc((void **)&a, bytes, flags)); checkCudaErrors(hipHostMalloc((void **)&b, bytes, flags)); checkCudaErrors(hipHostMalloc((void **)&c, bytes, flags)); #endif } /* Initialize the vectors. */ for (n = 0; n < nelem; n++) { a[n] = rand() / (float)RAND_MAX; b[n] = rand() / (float)RAND_MAX; } /* Get the device pointers for the pinned CPU memory mapped into the GPU memory space. */ #if CUDART_VERSION >= 2020 checkCudaErrors(hipHostGetDevicePointer((void **)&d_a, (void *)a, 0)); checkCudaErrors(hipHostGetDevicePointer((void **)&d_b, (void *)b, 0)); checkCudaErrors(hipHostGetDevicePointer((void **)&d_c, (void *)c, 0)); #endif /* Call the GPU kernel using the CPU pointers residing in CPU mapped memory. */ printf("> vectorAddGPU kernel will add vectors using mapped CPU memory...\n"); dim3 block(256); dim3 grid((unsigned int)ceil(nelem/(float)block.x)); hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, nelem); checkCudaErrors(hipDeviceSynchronize()); getLastCudaError("vectorAddGPU() execution failed"); /* Compare the results */ printf("> Checking the results from vectorAddGPU() ...\n"); errorNorm = 0.f; refNorm = 0.f; for (n = 0; n < nelem; n++) { ref = a[n] + b[n]; diff = c[n] - ref; errorNorm += diff*diff; refNorm += ref*ref; } errorNorm = (float)sqrt((double)errorNorm); refNorm = (float)sqrt((double)refNorm); /* Memory clean up */ printf("> Releasing CPU memory...\n"); if (bPinGenericMemory) { #if CUDART_VERSION >= 4000 checkCudaErrors(hipHostUnregister(a)); checkCudaErrors(hipHostUnregister(b)); checkCudaErrors(hipHostUnregister(c)); free(a_UA); free(b_UA); free(c_UA); #endif } else { #if CUDART_VERSION >= 2020 checkCudaErrors(hipHostFree(a)); checkCudaErrors(hipHostFree(b)); checkCudaErrors(hipHostFree(c)); #endif } exit(errorNorm/refNorm < 1.e-6f ? EXIT_SUCCESS : EXIT_FAILURE); }
d6ca92adc88c825c0ae9e0c690336b83c44e8dec.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda.h> #include <cuda_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> #ifndef MAX #define MAX(a,b) (a > b ? a : b) #endif /* Add two vectors on the GPU */ __global__ void vectorAddGPU(float *a, float *b, float *c, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } // Allocate generic memory with malloc() and pin it laster instead of using cudaHostAlloc() bool bPinGenericMemory = false; // Macro to aligned up to the memory size in question #define MEMORY_ALIGNMENT 4096 #define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) ) int main(int argc, char **argv) { int n, nelem, deviceCount; int idev = 0; // use default device 0 char *device = NULL; unsigned int flags; size_t bytes; float *a, *b, *c; // Pinned memory allocated on the CPU float *a_UA, *b_UA, *c_UA; // Non-4K Aligned Pinned memory on the CPU float *d_a, *d_b, *d_c; // Device pointers for mapped memory float errorNorm, refNorm, ref, diff; cudaDeviceProp deviceProp; if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printf("Usage: simpleZeroCopy [OPTION]\n\n"); printf("Options:\n"); printf(" --device=[device #] Specify the device to be used\n"); printf(" --use_generic_memory (optional) use generic page-aligned for system memory\n"); return EXIT_SUCCESS; } /* Get the device selected by the user or default to 0, and then set it. */ if (getCmdLineArgumentString(argc, (const char **)argv, "device", &device)) { cudaGetDeviceCount(&deviceCount); idev = atoi(device); if (idev >= deviceCount || idev < 0) { fprintf(stderr, "Device number %d is invalid, will use default CUDA device 0.\n", idev); idev = 0; } } // if GPU found supports SM 1.2, then continue, otherwise we exit if (!checkCudaCapabilities(1, 2)) { exit(EXIT_SUCCESS); } if (checkCmdLineFlag(argc, (const char **)argv, "use_generic_memory")) { #if defined(__APPLE__) || defined(MACOSX) bPinGenericMemory = false; // Generic Pinning of System Paged memory is not currently supported on Mac OSX #else bPinGenericMemory = true; #endif } if (bPinGenericMemory) { printf("> Using Generic System Paged Memory (malloc)\n"); } else { printf("> Using CUDA Host Allocated (cudaHostAlloc)\n"); } checkCudaErrors(cudaSetDevice(idev)); /* Verify the selected device supports mapped memory and set the device flags for mapping host memory. 
*/ checkCudaErrors(cudaGetDeviceProperties(&deviceProp, idev)); #if CUDART_VERSION >= 2020 if (!deviceProp.canMapHostMemory) { fprintf(stderr, "Device %d does not support mapping CPU host memory!\n", idev); exit(EXIT_SUCCESS); } checkCudaErrors(cudaSetDeviceFlags(cudaDeviceMapHost)); #else fprintf(stderr, "CUDART version %d.%d does not support <cudaDeviceProp.canMapHostMemory> field\n", , CUDART_VERSION/1000, (CUDART_VERSION%100)/10); exit(EXIT_SUCCESS); #endif #if CUDART_VERSION < 4000 if (bPinGenericMemory) { fprintf(stderr, "CUDART version %d.%d does not support <cudaHostRegister> function\n", CUDART_VERSION/1000, (CUDART_VERSION%100)/10); exit(EXIT_SUCCESS); } #endif /* Allocate mapped CPU memory. */ nelem = 1048576; bytes = nelem*sizeof(float); if (bPinGenericMemory) { #if CUDART_VERSION >= 4000 a_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT); b_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT); c_UA = (float *) malloc(bytes + MEMORY_ALIGNMENT); // We need to ensure memory is aligned to 4K (so we will need to padd memory accordingly) a = (float *) ALIGN_UP(a_UA, MEMORY_ALIGNMENT); b = (float *) ALIGN_UP(b_UA, MEMORY_ALIGNMENT); c = (float *) ALIGN_UP(c_UA, MEMORY_ALIGNMENT); checkCudaErrors(cudaHostRegister(a, bytes, CU_MEMHOSTALLOC_DEVICEMAP)); checkCudaErrors(cudaHostRegister(b, bytes, CU_MEMHOSTALLOC_DEVICEMAP)); checkCudaErrors(cudaHostRegister(c, bytes, CU_MEMHOSTALLOC_DEVICEMAP)); #endif } else { #if CUDART_VERSION >= 2020 flags = cudaHostAllocMapped; checkCudaErrors(cudaHostAlloc((void **)&a, bytes, flags)); checkCudaErrors(cudaHostAlloc((void **)&b, bytes, flags)); checkCudaErrors(cudaHostAlloc((void **)&c, bytes, flags)); #endif } /* Initialize the vectors. */ for (n = 0; n < nelem; n++) { a[n] = rand() / (float)RAND_MAX; b[n] = rand() / (float)RAND_MAX; } /* Get the device pointers for the pinned CPU memory mapped into the GPU memory space. */ #if CUDART_VERSION >= 2020 checkCudaErrors(cudaHostGetDevicePointer((void **)&d_a, (void *)a, 0)); checkCudaErrors(cudaHostGetDevicePointer((void **)&d_b, (void *)b, 0)); checkCudaErrors(cudaHostGetDevicePointer((void **)&d_c, (void *)c, 0)); #endif /* Call the GPU kernel using the CPU pointers residing in CPU mapped memory. */ printf("> vectorAddGPU kernel will add vectors using mapped CPU memory...\n"); dim3 block(256); dim3 grid((unsigned int)ceil(nelem/(float)block.x)); vectorAddGPU<<<grid, block>>>(d_a, d_b, d_c, nelem); checkCudaErrors(cudaDeviceSynchronize()); getLastCudaError("vectorAddGPU() execution failed"); /* Compare the results */ printf("> Checking the results from vectorAddGPU() ...\n"); errorNorm = 0.f; refNorm = 0.f; for (n = 0; n < nelem; n++) { ref = a[n] + b[n]; diff = c[n] - ref; errorNorm += diff*diff; refNorm += ref*ref; } errorNorm = (float)sqrt((double)errorNorm); refNorm = (float)sqrt((double)refNorm); /* Memory clean up */ printf("> Releasing CPU memory...\n"); if (bPinGenericMemory) { #if CUDART_VERSION >= 4000 checkCudaErrors(cudaHostUnregister(a)); checkCudaErrors(cudaHostUnregister(b)); checkCudaErrors(cudaHostUnregister(c)); free(a_UA); free(b_UA); free(c_UA); #endif } else { #if CUDART_VERSION >= 2020 checkCudaErrors(cudaFreeHost(a)); checkCudaErrors(cudaFreeHost(b)); checkCudaErrors(cudaFreeHost(c)); #endif } exit(errorNorm/refNorm < 1.e-6f ? EXIT_SUCCESS : EXIT_FAILURE); }
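// ---------------------------------------------------------------------------
// Editor's sketch (not from the sample above; the CHECK macro and this main()
// are mine, and vectorAddGPU is restated with const on its inputs): the core
// of the simpleZeroCopy pair above, reduced to the mapped/zero-copy path only.
// Pin host memory as mapped, ask the runtime for the matching device pointer,
// and launch with no cudaMemcpy. The API calls used here (cudaSetDeviceFlags,
// cudaHostAlloc, cudaHostGetDevicePointer, cudaFreeHost) are the same ones the
// sample uses.
#include <cstdio>
#include <cuda_runtime.h>

#define CHECK(call) do { cudaError_t e = (call); if (e != cudaSuccess) { \
    printf("CUDA error: %s\n", cudaGetErrorString(e)); return 1; } } while (0)

__global__ void vectorAddGPU(const float *a, const float *b, float *c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

int main()
{
    const int n = 1 << 20;
    const size_t bytes = n * sizeof(float);

    // must be set before the mapped allocations below are made
    CHECK(cudaSetDeviceFlags(cudaDeviceMapHost));

    float *a, *b, *c;                           // pinned + mapped host memory
    CHECK(cudaHostAlloc((void **)&a, bytes, cudaHostAllocMapped));
    CHECK(cudaHostAlloc((void **)&b, bytes, cudaHostAllocMapped));
    CHECK(cudaHostAlloc((void **)&c, bytes, cudaHostAllocMapped));
    for (int i = 0; i < n; i++) { a[i] = 1.0f; b[i] = 2.0f; }

    float *d_a, *d_b, *d_c;                     // device views of the same memory
    CHECK(cudaHostGetDevicePointer((void **)&d_a, (void *)a, 0));
    CHECK(cudaHostGetDevicePointer((void **)&d_b, (void *)b, 0));
    CHECK(cudaHostGetDevicePointer((void **)&d_c, (void *)c, 0));

    // no cudaMemcpy: the kernel reads and writes host memory directly
    vectorAddGPU<<<(n + 255) / 256, 256>>>(d_a, d_b, d_c, n);
    CHECK(cudaDeviceSynchronize());

    printf("c[0] = %f\n", c[0]);                // result is already visible on the host

    CHECK(cudaFreeHost(a));
    CHECK(cudaFreeHost(b));
    CHECK(cudaFreeHost(c));
    return 0;
}
// ---------------------------------------------------------------------------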
5cc6ff591d85445f9c70cc29d50eecd2f9bfae95.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPContext.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include "type_shim.h" __device__ __forceinline__ int lastpow2(int n) { int out = 1 << (31 - __clz(n)); if(n == out) out >>= 1; return out; } __host__ __forceinline__ int h_next_pow2(unsigned int n) { n--; n |= (n >> 1); n |= (n >> 2); n |= (n >> 4); n |= (n >> 8); n |= (n >> 16); return ++n; } __host__ __forceinline__ int h_last_pow2(unsigned int n) { n |= (n >> 1); n |= (n >> 2); n |= (n >> 4); n |= (n >> 8); n |= (n >> 16); return n - (n >> 1); } #define WARP_SIZE 32 template<typename T> __device__ __forceinline__ T warp_reduce_sum(T val) { #pragma unroll for(int i = WARP_SIZE/2; i > 0; i >>= 1) val = val + __shfl_down_sync(0xffffffff, val, i); return val; } template<typename T> __device__ __forceinline__ T reduce_block(T *x, T val) { int tid = threadIdx.y*blockDim.x + threadIdx.x; int blockSize = blockDim.x * blockDim.y; if (blockSize > 32) { val = warp_reduce_sum(val); if (tid % WARP_SIZE == 0) x[tid/WARP_SIZE] = val; __syncthreads(); val = (tid < blockSize / WARP_SIZE? x[tid%WARP_SIZE] : T(0)); } if(tid/WARP_SIZE==0) val = warp_reduce_sum(val); return val; } #define ELEMENTS_PER_ITER 4 // enables concurrency within each thread to hide latency #define ELEMENTS_PER_THREAD 16 #define OPTIMAL_TILE_W 32 #define MAX_H_BLOCK 128 #define MAX_BLOCK_SIZE 512 __host__ int div_ru(int x, int y) { return h_last_pow2(1 + (x-1)/y); } __host__ void flexible_launch_configs( const int reduction, const int stride, dim3 &block, dim3 &grid, const bool coop_flag = false) { int block_x = ::min(h_last_pow2(stride), OPTIMAL_TILE_W); int block_y = ::min(h_last_pow2(div_ru(reduction , ELEMENTS_PER_THREAD)), MAX_BLOCK_SIZE / block_x); if (block_x * block_y != MAX_BLOCK_SIZE) { block_x = ::min(h_last_pow2(stride), MAX_BLOCK_SIZE / block_y); } int grid_x = div_ru(stride, block_x); int grid_y = ::min(div_ru(reduction, block_y * ELEMENTS_PER_THREAD), MAX_H_BLOCK); if (coop_flag) { // it's not worth having a grid reduction if the reduction dimension is not big enough grid_y = grid_y < 8 ? 
1 : grid_y; } block.x = block_x; block.y = block_y; block.z = 1; grid.x = grid_x; grid.y = grid_y; grid.z = 1; } template<typename T, typename C> __device__ __forceinline__ void welford_merge_element(C& count, T& mean, T& m2n, const C& num_new, const T& mean_new, const T& m2n_new) { T factor = T(1.0) / max(1, (count + num_new)); T delta0 = mean - mean_new; mean = (mean_new * num_new + mean * count) * factor; m2n += m2n_new + delta0 * delta0 * num_new * count * factor; count += num_new; } template<typename T> __device__ __forceinline__ void warp_reduce_mean_m2n(T &mean, T &m2n, int &num) { #pragma unroll for(int i = WARP_SIZE/2; i > 0; i >>= 1) { auto num_new = __shfl_down_sync(0xffffffff, num, i); auto mean_new = __shfl_down_sync(0xffffffff, mean, i); auto m2n_new = __shfl_down_sync(0xffffffff, m2n, i); welford_merge_element(num, mean, m2n, num_new, mean_new, m2n_new); } } template <typename T> __device__ void welford_reduce_mean_m2n( T* __restrict__ x, int* __restrict__ count, T &mean, T &m2n, int &num, int block_size, int thread_id) { int lane = thread_id % WARP_SIZE; int wid = thread_id / WARP_SIZE; if (block_size > 32) { warp_reduce_mean_m2n(mean, m2n, num); if (lane == 0) { x[wid*2] = mean; x[wid*2+1] = m2n; count[wid] = num; } __syncthreads(); if (wid == 0) { mean = (thread_id < block_size / WARP_SIZE)? x[lane*2] : T(0); m2n = (thread_id < block_size / WARP_SIZE)? x[lane*2+1] : T(0); num = (thread_id < block_size / WARP_SIZE)? count[lane] : int(0); } } if (wid==0) warp_reduce_mean_m2n(mean, m2n, num); return; } // return spatial size for NC+ Tensors __host__ int get_tensor_spatial_size(const at::Tensor& input) { auto space_size = input.size(2); for (int i = 3; i < input.ndimension(); i++) { space_size *= input.size(i); } return space_size; } // promote accumulation scalar type. promote half to float. __host__ at::ScalarType promote_scalartype(const at::Tensor& input) { return input.scalar_type() == at::ScalarType::Half ? at::ScalarType::Float : input.scalar_type(); } // return single element size, optional accumulation type promotion. __host__ size_t get_element_data_size(const at::Tensor& input, bool accumulation = false) { auto scalar_type = accumulation ? 
promote_scalartype(input) : input.scalar_type(); return at::elementSize(scalar_type); } template<typename T, typename C> __device__ __forceinline__ void welford_merge_block_vertical(C& count, T& mean, T& m2n, C* shmem_count, T* shmem_mean, T* shmem_m2n) { // write to shared memory auto address_base = threadIdx.x + threadIdx.y * blockDim.x; shmem_mean[address_base] = mean; shmem_m2n[address_base] = m2n; shmem_count[address_base] = count; #pragma unroll for (int offset = blockDim.y/2; offset > 0; offset >>= 1) { __syncthreads(); if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) { auto address = address_base + offset * blockDim.x; // read shared memory back to register for reduction auto num_new = shmem_count[address]; auto mean_new = shmem_mean[address]; auto m2n_new = shmem_m2n[address]; welford_merge_element(count, mean, m2n, num_new, mean_new, m2n_new); // last write is not necessary shmem_mean[address_base] = mean; shmem_m2n[address_base] = m2n; shmem_count[address_base] = count; } } } template<typename T> __device__ __forceinline__ void merge_block_vertical(T& sum_dy, T& sum_dy_xmu, T* shmem_sum_dy, T* shmem_sum_dy_xmu) { // write to shared memory auto address_base = threadIdx.x + threadIdx.y * blockDim.x; shmem_sum_dy[address_base] = sum_dy; shmem_sum_dy_xmu[address_base] = sum_dy_xmu; #pragma unroll for (int offset = blockDim.y/2; offset > 0; offset >>= 1) { __syncthreads(); if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) { auto address = address_base + offset * blockDim.x; sum_dy += shmem_sum_dy[address]; sum_dy_xmu += shmem_sum_dy_xmu[address]; // last write is not necessary shmem_sum_dy[address_base] = sum_dy; shmem_sum_dy_xmu[address_base] = sum_dy_xmu; } } } // welford kernel calculating mean/biased_variance/unbiased_variance template <typename scalar_t, typename accscalar_t, typename outscalar_t> __global__ void welford_kernel( const scalar_t* __restrict__ input, outscalar_t* __restrict__ out_mean, outscalar_t* __restrict__ out_var_biased, const int bs, const int fs, const int ss) { int block_size = blockDim.x * blockDim.y; int count = 0; accscalar_t x_mean = accscalar_t(0); accscalar_t m_2_n = accscalar_t(0); int thread_id = threadIdx.y*blockDim.x + threadIdx.x; for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) { int input_base = blockIdx.x*ss + batch_id*ss*fs; // sequential welford for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) { count++; auto x_n = static_cast<accscalar_t>(input[offset+input_base]); auto d = x_n - x_mean; x_mean += d / count; m_2_n += d * (x_n - x_mean); } } static __shared__ int s_mem[160]; accscalar_t* s_mem_ac = (accscalar_t*) &s_mem[32]; welford_reduce_mean_m2n<accscalar_t>(s_mem_ac, s_mem, x_mean, m_2_n, count, block_size, thread_id); if (thread_id == 0) { out_mean[blockIdx.x] = static_cast<outscalar_t>(x_mean); out_var_biased[blockIdx.x] = static_cast<outscalar_t>(m_2_n/count); } } // elementwise BN kernel template <typename scalar_t, typename accscalar_t, typename layerscalar_t> __global__ void batchnorm_forward_kernel( const scalar_t* __restrict__ input, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, const layerscalar_t* __restrict__ weight, const layerscalar_t* __restrict__ shift, scalar_t* __restrict__ out, const int ss, const int bs) { auto m_c = mean[blockIdx.x]; auto inv_std_c = inv_std[blockIdx.x]; auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[blockIdx.x]); auto s_c = shift == NULL ? 
accscalar_t(0.0) : static_cast<accscalar_t>(shift[blockIdx.x]); for (int batch_offset = blockIdx.y*blockDim.y + threadIdx.y; batch_offset < bs; batch_offset += gridDim.y*blockDim.y) { int address_base = blockIdx.x*ss + batch_offset*gridDim.x*ss; for (int offset = threadIdx.x + blockIdx.z*blockDim.x; offset < ss ; offset+= gridDim.z*blockDim.x) { out[address_base+offset] = static_cast<scalar_t>(w_c * (static_cast<accscalar_t>(input[address_base+offset]) - m_c ) * inv_std_c + s_c); } } } // Backward BN kernel, calculates grad_bias, grad_weight as well as intermediate // results to calculating grad_input. // Breaking the grad_input to two step to support sync BN, which requires all // reduce of the intermediate results across processes. template <typename scalar_t, typename accscalar_t, typename layerscalar_t> __global__ void reduce_bn_kernel( const scalar_t* __restrict__ input, const scalar_t* __restrict__ grad_output, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, accscalar_t* __restrict__ mean_dy, accscalar_t* __restrict__ mean_dy_xmu, layerscalar_t* __restrict__ grad_weight, layerscalar_t* __restrict__ grad_bias, const int bs, const int fs, const int ss) { static __shared__ int s_mem[64]; int total_item_num = bs * ss; int thread_id = threadIdx.y*blockDim.x + threadIdx.x; auto r_mean = mean[blockIdx.x]; auto factor = inv_std[blockIdx.x]; // Kahan sum accscalar_t sum_dy = 0.0; accscalar_t sum_dy_xmu = 0.0; accscalar_t sum_dy_c = 0.0; accscalar_t sum_dy_xmu_c = 0.0; for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) { int input_base = blockIdx.x*ss + batch_id*ss*fs; for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) { auto e_grad = static_cast<accscalar_t>(grad_output[offset+input_base]); auto e_input = static_cast<accscalar_t>(input[offset+input_base]); // calculating sum_dy auto sum_dy_y = e_grad - sum_dy_c; auto sum_dy_t = sum_dy + sum_dy_y; sum_dy_c = (sum_dy_t - sum_dy) - sum_dy_y; sum_dy = sum_dy_t; // calculating sum_dy_xmu auto sum_dy_xmu_y = e_grad * (e_input - r_mean) - sum_dy_xmu_c; auto sum_dy_xmu_t = sum_dy_xmu + sum_dy_xmu_y; sum_dy_xmu_c = (sum_dy_xmu_t - sum_dy_xmu) - sum_dy_xmu_y; sum_dy_xmu = sum_dy_xmu_t; } } sum_dy = reduce_block((accscalar_t*)s_mem, sum_dy); __syncthreads(); sum_dy_xmu = reduce_block((accscalar_t*)s_mem, sum_dy_xmu); if (thread_id == 0) { if (grad_bias != NULL) { grad_bias[blockIdx.x] = static_cast<layerscalar_t>(sum_dy); } if (grad_weight != NULL) { grad_weight[blockIdx.x] = static_cast<layerscalar_t>(sum_dy_xmu * factor); } mean_dy[blockIdx.x] = sum_dy / total_item_num; mean_dy_xmu[blockIdx.x] = sum_dy_xmu / total_item_num; } } // elementwise backward BN kernel template <typename scalar_t, typename accscalar_t, typename layerscalar_t> __global__ void batchnorm_backward_kernel( const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, const layerscalar_t* __restrict__ weight, const accscalar_t* __restrict__ mean_dy, const accscalar_t* __restrict__ mean_dy_xmu, scalar_t* __restrict__ grad_input, const int ss, const int bs) { auto m_c = static_cast<accscalar_t>(mean[blockIdx.x]); auto m_dy_c = static_cast<accscalar_t>(mean_dy[blockIdx.x]); auto factor_1_c = inv_std[blockIdx.x]; auto factor_2_c = (weight == NULL ? 
accscalar_t(1.0) : static_cast<accscalar_t>(weight[blockIdx.x])) * factor_1_c; factor_1_c = factor_1_c * factor_1_c * mean_dy_xmu[blockIdx.x]; for (int batch_offset = blockIdx.y*blockDim.y+threadIdx.y; batch_offset < bs; batch_offset += gridDim.y*blockDim.y) { int address_base = blockIdx.x*ss + batch_offset*gridDim.x*ss; for (int offset = threadIdx.x + blockIdx.z*blockDim.x; offset < ss ; offset+= gridDim.z*blockDim.x) { grad_input[address_base+offset] = (static_cast<accscalar_t>(grad_output[address_base+offset]) - m_dy_c - (static_cast<accscalar_t>(input[address_base+offset]) - m_c) * factor_1_c) * factor_2_c; } } } // welford kernel for c last tensor calculating mean/biased_variance/unbiased_variance template <typename scalar_t, typename accscalar_t, typename outscalar_t, int PARALLEL_LOADS> __global__ void welford_kernel_c_last( const scalar_t* __restrict__ input, outscalar_t* __restrict__ out_mean, outscalar_t* __restrict__ out_var_biased, volatile accscalar_t* staging_data, int* semaphores, const int reduction_size, const int stride) { // hide latency with concurrency accscalar_t x_mean[PARALLEL_LOADS]; accscalar_t m_2_n[PARALLEL_LOADS]; int count[PARALLEL_LOADS]; #pragma unroll for (int i = 0; i < PARALLEL_LOADS; i++) { x_mean[i] = accscalar_t(0); m_2_n[i] = accscalar_t(0); count[i] = accscalar_t(0); } // tensor dimension (m,c) // loop along m dimension int inner_loop_stride = blockDim.y * gridDim.y; // offset along m dimension int m_offset = blockIdx.y * blockDim.y + threadIdx.y; int c_offset = blockIdx.x * blockDim.x + threadIdx.x; int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); int address_base = m_offset * stride + c_offset; int address_increment = inner_loop_stride * stride; for (int i = 0; i < loop_count; i++) { accscalar_t x_math[PARALLEL_LOADS]; accscalar_t x_count_inv[PARALLEL_LOADS]; accscalar_t is_valid[PARALLEL_LOADS]; // load multiple data in #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { if (c_offset < stride && m_offset < reduction_size) { x_math[j] = input[address_base]; count[j]++; x_count_inv[j] = accscalar_t(1) / count[j]; is_valid[j] = accscalar_t(1); } else { x_math[j] = accscalar_t(0); x_count_inv[j] = accscalar_t(0); is_valid[j] = accscalar_t(0); } m_offset += inner_loop_stride; address_base += address_increment; } // calculate mean/m2n with welford #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { accscalar_t delta0 = x_math[j] - x_mean[j]; x_mean[j] += delta0 * x_count_inv[j]; accscalar_t delta1 = x_math[j] - x_mean[j]; m_2_n[j] += delta0 * delta1 * is_valid[j]; } } // thread reduction to accumulate mean/m_2_n/count between PARALLEL_LOADS #pragma unroll for (int j = 1; j < PARALLEL_LOADS; j++) { welford_merge_element(count[0], x_mean[0], m_2_n[0], count[j], x_mean[j], m_2_n[j]); } // release x_mean / m_2_n auto mean_th = x_mean[0]; auto m2_th = m_2_n[0]; auto count_th = count[0]; // block-wise reduction with shared memory (since reduction cannot be done within a warp) static __shared__ accscalar_t shmem_mean[MAX_BLOCK_SIZE]; static __shared__ accscalar_t shmem_m2n[MAX_BLOCK_SIZE]; static __shared__ int shmem_count[MAX_BLOCK_SIZE]; welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n); // grid reduction if needed (coop launch used at the first place) if (gridDim.y > 1) { volatile accscalar_t* staging_mean = staging_data; volatile accscalar_t* staging_m2n = &staging_data[stride*gridDim.y]; volatile int* staging_count = reinterpret_cast<volatile 
int*>(&staging_m2n[stride*gridDim.y]); address_base = c_offset + blockIdx.y * stride; // write data to staging_data; if (threadIdx.y == 0 && c_offset < stride) { staging_mean[address_base] = mean_th; staging_m2n[address_base] = m2_th; staging_count[address_base] = count_th; } __threadfence(); __syncthreads(); // ensuring writes to staging_ is visible to all blocks __shared__ bool is_last_block_done; // mark block done if (threadIdx.x == 0 && threadIdx.y == 0) { int old = atomicAdd(&semaphores[blockIdx.x], 1); is_last_block_done = (old == (gridDim.y-1)); } __syncthreads(); // check that all data is now available in global memory if (is_last_block_done) { count_th = 0; mean_th = accscalar_t(0.0); m2_th = accscalar_t(0.0); for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) { address_base = c_offset + y * stride; int num_new = c_offset < stride ? staging_count[address_base] : 0; accscalar_t mean_new = c_offset < stride ? staging_mean[address_base] : accscalar_t(0.0); accscalar_t m2n_new = c_offset < stride ? staging_m2n[address_base] : accscalar_t(0.0); welford_merge_element(count_th, mean_th, m2_th, num_new, mean_new, m2n_new); } welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n); if (threadIdx.y == 0 && c_offset < stride) { out_mean[c_offset] = static_cast<outscalar_t>(mean_th); out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th); } } } else { if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) { out_mean[c_offset] = static_cast<outscalar_t>(mean_th); out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th); } } } // parallel welford kernel to further reduce mean / biased_var // into mean / unbiased_var / inv_std across multiple processes. template <typename scalar_t> __global__ void welford_kernel_parallel( const scalar_t* __restrict__ mean, const scalar_t* __restrict__ var_biased, scalar_t* __restrict__ out_mean, scalar_t* __restrict__ out_var, scalar_t* __restrict__ inv_std, const int world_size, const int feature_size, const float eps, const int numel) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < feature_size; i += gridDim.x * blockDim.x) { // load data; int address = i; scalar_t x_mean = 0; scalar_t m_2_n = 0; int count = 0; for (int j = 0; j < world_size; j++) { welford_merge_element(count, x_mean, m_2_n, numel, mean[address], var_biased[address]*numel); address += feature_size; } out_mean[i] = x_mean; out_var[i] = m_2_n/ (count - 1); inv_std[i] = scalar_t(1) / sqrt(m_2_n/count + eps); } } // elementwise BN kernel template < typename scalar_t, typename accscalar_t, typename layerscalar_t, int PARALLEL_LOADS> __global__ void batchnorm_forward_c_last_kernel( const scalar_t* __restrict__ input, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, const layerscalar_t* __restrict__ weight, const layerscalar_t* __restrict__ shift, scalar_t* __restrict__ out, const int reduction_size, const int stride) { // tensor dimension (m,c) // loop along m dimension int inner_loop_stride = blockDim.y * gridDim.y; // offset along m dimension int m_offset = blockIdx.y * blockDim.y + threadIdx.y; int c_offset = blockIdx.x * blockDim.x + threadIdx.x; auto m_c = mean[c_offset]; auto inv_std_c = static_cast<accscalar_t>(inv_std[c_offset]); auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset]); auto s_c = shift == NULL ? 
accscalar_t(0.0) : static_cast<accscalar_t>(shift[c_offset]); int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); int address_base = m_offset * stride + c_offset; int address_increment = inner_loop_stride * stride; for (int i = 0; i < loop_count; i++) { #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { if (c_offset < stride && m_offset < reduction_size) { out[address_base] = static_cast<scalar_t>( w_c * (static_cast<accscalar_t>(input[address_base]) - m_c ) * inv_std_c + s_c ); } m_offset += inner_loop_stride; address_base += address_increment; } } } // batchnorm backward kernel for c last tensor template <typename scalar_t, typename accscalar_t, typename layerscalar_t, int PARALLEL_LOADS> __global__ void reduce_bn_c_last_kernel( const scalar_t* __restrict__ input, const scalar_t* __restrict__ grad_output, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, accscalar_t* __restrict__ mean_dy, accscalar_t* __restrict__ mean_dy_xmu, layerscalar_t* __restrict__ grad_weight, layerscalar_t* __restrict__ grad_bias, volatile accscalar_t* staging_data, int* semaphores, const int reduction_size, const int stride) { // hide latency with concurrency accscalar_t sum_dy[PARALLEL_LOADS]; accscalar_t sum_dy_xmu[PARALLEL_LOADS]; #pragma unroll for (int i = 0; i < PARALLEL_LOADS; i++) { sum_dy[i] = accscalar_t(0); sum_dy_xmu[i] = accscalar_t(0); } // tensor dimension (m,c) // loop along m dimension int inner_loop_stride = blockDim.y * gridDim.y; // offset along m dimension int m_offset = blockIdx.y * blockDim.y + threadIdx.y; int c_offset = blockIdx.x * blockDim.x + threadIdx.x; int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); int address_base = m_offset * stride + c_offset; int address_increment = inner_loop_stride * stride; auto r_mean = mean[c_offset]; auto factor = inv_std[c_offset]; for (int i = 0; i < loop_count; i++) { accscalar_t x_input[PARALLEL_LOADS]; accscalar_t x_grad_output[PARALLEL_LOADS]; // load multiple data in #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { if (c_offset < stride && m_offset < reduction_size) { x_input[j] = input[address_base]; x_grad_output[j] = grad_output[address_base]; } else { x_input[j] = accscalar_t(0); x_grad_output[j] = accscalar_t(0); } m_offset += inner_loop_stride; address_base += address_increment; } // calculate sum_dy / sum_dy_xmu #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { sum_dy[j] += x_grad_output[j]; sum_dy_xmu[j] += x_grad_output[j] * (x_input[j] - r_mean); } } // thread reduction to accumulate sum_dy / sum_dy_xmu between PARALLEL_LOADS #pragma unroll for (int j = 1; j < PARALLEL_LOADS; j++) { sum_dy[0] += sum_dy[j]; sum_dy_xmu[0] += sum_dy_xmu[j]; } // release array of registers auto sum_dy_th = sum_dy[0]; auto sum_dy_xmu_th = sum_dy_xmu[0]; // block-wise reduction with shared memory (since reduction cannot be done within a warp) static __shared__ accscalar_t shmem_sum_dy[MAX_BLOCK_SIZE]; static __shared__ accscalar_t shmem_sum_dy_xmu[MAX_BLOCK_SIZE]; merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu); // grid reduction if needed (coop launch used at the first place) if (gridDim.y > 1) { volatile accscalar_t* staging_sum_dy = staging_data; volatile accscalar_t* staging_sum_dy_xmu = &staging_data[stride*gridDim.y]; address_base = c_offset + blockIdx.y * stride; // write data to staging_data; if (threadIdx.y == 0 && c_offset < stride) { staging_sum_dy[address_base] = sum_dy_th; 
staging_sum_dy_xmu[address_base] = sum_dy_xmu_th; } __threadfence(); __syncthreads(); // ensuring writes to staging_ is visible to all blocks __shared__ bool is_last_block_done; // mark block done if (threadIdx.x == 0 && threadIdx.y == 0) { int old = atomicAdd(&semaphores[blockIdx.x], 1); is_last_block_done = (old == (gridDim.y-1)); } __syncthreads(); // check that all data is now available in global memory if (is_last_block_done) { sum_dy_th = accscalar_t(0.0); sum_dy_xmu_th = accscalar_t(0.0); for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) { address_base = c_offset + y * stride; sum_dy_th += (c_offset < stride ? staging_sum_dy[address_base] : accscalar_t(0.0)); sum_dy_xmu_th += (c_offset < stride ? staging_sum_dy_xmu[address_base] : accscalar_t(0.0)); } merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu); if (threadIdx.y == 0 && c_offset < stride) { if (grad_bias != NULL) { grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th); } if (grad_weight != NULL) { grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor); } mean_dy[c_offset] = sum_dy_th / reduction_size; mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size; } } } else { if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) { if (grad_bias != NULL) { grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th); } if (grad_weight != NULL) { grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor); } mean_dy[c_offset] = sum_dy_th / reduction_size; mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size; } } } // elementwise BN kernel template < typename scalar_t, typename accscalar_t, typename layerscalar_t, int PARALLEL_LOADS> __global__ void batchnorm_backward_c_last_kernel( const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, const layerscalar_t* __restrict__ weight, const accscalar_t* __restrict__ mean_dy, const accscalar_t* __restrict__ mean_dy_xmu, scalar_t* __restrict__ grad_input, const int reduction_size, const int stride) { // tensor dimension (m,c) // loop along m dimension int inner_loop_stride = blockDim.y * gridDim.y; // offset along m dimension int m_offset = blockIdx.y * blockDim.y + threadIdx.y; int c_offset = blockIdx.x * blockDim.x + threadIdx.x; auto m_c = mean[c_offset]; auto m_dy_c = mean_dy[c_offset]; auto factor_1_c = inv_std[c_offset]; auto factor_2_c = (weight == NULL? 
accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset])) * factor_1_c; factor_1_c = factor_1_c * factor_1_c * mean_dy_xmu[c_offset]; int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); int address_base = m_offset * stride + c_offset; int address_increment = inner_loop_stride * stride; for (int i = 0; i < loop_count; i++) { #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { if (c_offset < stride && m_offset < reduction_size) { grad_input[address_base] = static_cast<scalar_t>( (static_cast<accscalar_t>(grad_output[address_base]) - m_dy_c - (static_cast<accscalar_t>(input[address_base]) - m_c) * factor_1_c) * factor_2_c); } m_offset += inner_loop_stride; address_base += address_increment; } } } std::vector<at::Tensor> welford_mean_var_CUDA(const at::Tensor input) { const auto batch_size = input.size(0); const auto feature_size = input.size(1); auto space_size = get_tensor_spatial_size(input); auto scalar_type = promote_scalartype(input); at::Tensor out_var_biased = at::empty({feature_size}, input.options().dtype(scalar_type)); at::Tensor out_mean = at::empty({feature_size}, input.options().dtype(scalar_type)); int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE / 32)); int block_x = max(1, min(MAX_BLOCK_SIZE / block_y, h_last_pow2(space_size))); const dim3 block(block_x, block_y); const dim3 grid(feature_size); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( welford_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream, input.data<scalar_t_0>(), out_mean.data<accscalar_t>(), out_var_biased.data<accscalar_t>(), batch_size, feature_size, space_size); ); } return {out_mean, out_var_biased}; } at::Tensor batchnorm_forward_CUDA( const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight, const at::optional<at::Tensor> shift) { const auto batch_size = input.size(0); const auto feature_size = input.size(1); at::Tensor out = at::empty_like(input); auto space_size = get_tensor_spatial_size(input); int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4)); int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4)); const dim3 block(block_x, block_y); int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x)); int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y)); const dim3 grid(feature_size, batch_group_size, grid_z); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( batchnorm_forward_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream, input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<accscalar_t>() : NULL, shift.has_value() ? 
shift.value().data<accscalar_t>() : NULL, out.data<scalar_t_0>(), space_size, batch_size); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( batchnorm_forward_kernel<scalar_t_0, accscalar_t, scalar_t_0>), dim3(grid), dim3(block), 0, stream, input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<scalar_t_0>() : NULL, shift.has_value() ? shift.value().data<scalar_t_0>() : NULL, out.data<scalar_t_0>(), space_size, batch_size); ); } return out; } std::vector<at::Tensor> reduce_bn_CUDA( const at::Tensor grad_output, const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight) { const auto batch_size = input.size(0); const auto feature_size = input.size(1); auto scalar_type = promote_scalartype(input); at::Tensor mean_dy = at::empty({feature_size}, mean.options()); at::Tensor mean_dy_xmu = at::empty({feature_size}, mean.options()); at::Tensor grad_weight; at::Tensor grad_bias; if (weight.has_value()) { grad_weight = at::empty({feature_size}, weight.value().options()); grad_bias = at::empty({feature_size}, weight.value().options()); } else { grad_weight = at::empty({0}, mean.options()); grad_bias = at::empty({0}, mean.options()); } auto space_size = get_tensor_spatial_size(input); int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE/ 32)); int block_x = max(1, min(MAX_BLOCK_SIZE/ block_y, h_last_pow2(space_size))); const dim3 block(block_x, block_y); const dim3 grid(feature_size); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( reduce_bn_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream, input.data<scalar_t_0>(), grad_output.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), weight.has_value() ? grad_weight.data<accscalar_t>() : NULL, weight.has_value() ? grad_bias.data<accscalar_t>() : NULL, batch_size, feature_size, space_size); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( reduce_bn_kernel<scalar_t_0, accscalar_t, scalar_t_0>), dim3(grid), dim3(block), 0, stream, input.data<scalar_t_0>(), grad_output.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL, weight.has_value() ? 
grad_bias.data<scalar_t_0>() : NULL, batch_size, feature_size, space_size); ); } return {mean_dy, mean_dy_xmu, grad_weight, grad_bias}; } at::Tensor batchnorm_backward_CUDA( const at::Tensor grad_output, const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight, const at::Tensor mean_dy, const at::Tensor mean_dy_xmu) { const auto batch_size = input.size(0); const auto feature_size = input.size(1); at::Tensor grad_input = at::empty_like(input); auto space_size = get_tensor_spatial_size(input); int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4)); int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4)); const dim3 block(block_x, block_y); int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x)); int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y)); const dim3 grid(feature_size, batch_group_size, grid_z); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( batchnorm_backward_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream, grad_output.data<scalar_t_0>(), input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<accscalar_t>() : NULL, mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), grad_input.data<scalar_t_0>(), space_size, batch_size); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( batchnorm_backward_kernel<scalar_t_0, accscalar_t, scalar_t_0>), dim3(grid), dim3(block), 0, stream, grad_output.data<scalar_t_0>(), input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<scalar_t_0>() : NULL, mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), grad_input.data<scalar_t_0>(), space_size, batch_size); ); } return grad_input; } std::vector<at::Tensor> welford_parallel_CUDA(const at::Tensor mean_feature_nodes, const at::Tensor var_biased, int numel, const float eps) { const auto world_size = mean_feature_nodes.size(0); const auto feature_size = mean_feature_nodes.size(1); at::Tensor out_var = at::empty({feature_size}, var_biased.options()); at::Tensor inv_std = at::empty_like(out_var); at::Tensor out_mean = at::empty_like(out_var); // TODO(jie): tile this for memory coalescing! 
const int block = ::min(h_last_pow2(feature_size), MAX_BLOCK_SIZE); const int grid = std::max<int>(1, feature_size / block); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); { using namespace at; DISPATCH_FLOAT_AND_HALF(mean_feature_nodes.scalar_type(), 0, "welford_parallel_kernel", hipLaunchKernelGGL(( welford_kernel_parallel<scalar_t_0>), dim3(grid), dim3(block), 0, stream, mean_feature_nodes.data<scalar_t_0>(), var_biased.data<scalar_t_0>(), out_mean.data<scalar_t_0>(), out_var.data<scalar_t_0>(), inv_std.data<scalar_t_0>(), world_size, feature_size, eps, numel); ); } return {out_mean, out_var, inv_std}; } std::vector<at::Tensor> welford_mean_var_c_last_CUDA(const at::Tensor input) { const auto stride = input.size(input.ndimension()-1); const auto reduction_size = input.numel() / stride; auto scalar_type = promote_scalartype(input); auto option = input.options().dtype(scalar_type); at::Tensor out_var_biased = at::empty({stride}, option); at::Tensor out_mean = at::empty({stride}, option); dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid, true); at::Tensor staging_data; at::Tensor semaphores; if (grid.y > 1) { staging_data = at::empty({4*stride*grid.y}, option); semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt)); } auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_c_last", using accscalar_t = at::acc_type<scalar_t_0, true>; accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr; int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr; hipLaunchKernelGGL(( welford_kernel_c_last<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>) , dim3(grid), dim3(block), 0, stream, input.data<scalar_t_0>(), out_mean.data<accscalar_t>(), out_var_biased.data<accscalar_t>(), staging_data_ptr, semaphores_ptr, reduction_size, stride); ); } return {out_mean, out_var_biased}; } at::Tensor batchnorm_forward_c_last_CUDA( const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight, const at::optional<at::Tensor> shift) { const auto stride = input.size(input.ndimension()-1); const auto reduction_size = input.numel() / stride; at::Tensor out = at::empty_like(input); dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>) , dim3(grid), dim3(block), 0, stream, input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<accscalar_t>() : NULL, shift.has_value() ? 
shift.value().data<accscalar_t>(): NULL, out.data<scalar_t_0>(), reduction_size, stride); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>) , dim3(grid), dim3(block), 0, stream, input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<scalar_t_0>() : NULL, shift.has_value() ? shift.value().data<scalar_t_0>(): NULL, out.data<scalar_t_0>(), reduction_size, stride); ); } return out; } std::vector<at::Tensor> reduce_bn_c_last_CUDA( const at::Tensor grad_output, const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight) { const auto stride = input.size(input.ndimension()-1); const auto reduction_size = input.numel() / stride; at::Tensor mean_dy = at::empty({stride}, mean.options()); at::Tensor mean_dy_xmu = at::empty({stride}, mean.options()); at::Tensor grad_weight; at::Tensor grad_bias; if (weight.has_value()) { grad_weight = at::empty({stride}, weight.value().options()); grad_bias = at::empty({stride}, weight.value().options()); } else { // because I cannot return an uninitialized at::Tensor grad_weight = at::empty({0}, mean.options()); grad_bias = at::empty({0}, mean.options()); } dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid, true); at::Tensor staging_data; at::Tensor semaphores; if (grid.y > 1) { staging_data = at::empty({2*stride*grid.y}, mean.options()); semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt)); } auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce", using accscalar_t = at::acc_type<scalar_t_0, true>; accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr; int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr; hipLaunchKernelGGL(( reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>) , dim3(grid), dim3(block), 0, stream, input.data<scalar_t_0>(), grad_output.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), weight.has_value() ? grad_weight.data<accscalar_t>() : NULL, weight.has_value() ?grad_bias.data<accscalar_t>() : NULL, staging_data_ptr, semaphores_ptr, reduction_size, stride); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce", using accscalar_t = at::acc_type<scalar_t_0, true>; accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr; int* semaphores_ptr = grid.y > 1 ? 
semaphores.data<int>() : nullptr; hipLaunchKernelGGL(( reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>) , dim3(grid), dim3(block), 0, stream, input.data<scalar_t_0>(), grad_output.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL, weight.has_value() ?grad_bias.data<scalar_t_0>() : NULL, staging_data_ptr, semaphores_ptr, reduction_size, stride); ); } return {mean_dy, mean_dy_xmu, grad_weight, grad_bias}; } at::Tensor batchnorm_backward_c_last_CUDA( const at::Tensor grad_output, const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight, const at::Tensor mean_dy, const at::Tensor mean_dy_xmu) { const auto stride = input.size(input.ndimension()-1); const auto reduction_size = input.numel() / stride; at::Tensor grad_input = at::empty_like(input); dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>) , dim3(grid), dim3(block), 0, stream, grad_output.data<scalar_t_0>(), input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<accscalar_t>() : NULL, mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), grad_input.data<scalar_t_0>(), reduction_size, stride); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>) , dim3(grid), dim3(block), 0, stream, grad_output.data<scalar_t_0>(), input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<scalar_t_0>() : NULL, mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), grad_input.data<scalar_t_0>(), reduction_size, stride); ); } return grad_input; }
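// --------------------------------------------------------------------------
// Illustrative sketch (editor addition, not part of either file): the pairwise
// Welford merge that welford_merge_element and welford_kernel_parallel apply,
// written as standalone host C++ so the arithmetic is easy to check. All names
// and the sample numbers below are made up for illustration.
#include <cmath>
#include <cstdio>

struct WelfordAgg { long long count; double mean; double m2n; };

// Fold a partial aggregate b into a, mirroring welford_merge_element.
static void welford_merge(WelfordAgg &a, const WelfordAgg &b) {
  if (b.count == 0) return;
  double total = static_cast<double>(a.count + b.count);
  double delta = a.mean - b.mean;
  a.mean = (b.mean * b.count + a.mean * a.count) / total;
  a.m2n += b.m2n + delta * delta * b.count * a.count / total;
  a.count += b.count;
}

int main() {
  // Two "ranks", each contributing mean / biased variance over `numel` elements,
  // which is exactly the per-feature data welford_kernel_parallel consumes.
  const long long numel = 4;
  WelfordAgg rank0{numel, 1.0, /*m2n = biased_var * numel =*/ 0.5 * numel};
  WelfordAgg rank1{numel, 3.0, /*m2n =*/ 1.5 * numel};
  WelfordAgg g{0, 0.0, 0.0};
  welford_merge(g, rank0);
  welford_merge(g, rank1);
  const double eps = 1e-5;
  double unbiased_var = g.m2n / (g.count - 1);              // matches out_var
  double inv_std = 1.0 / std::sqrt(g.m2n / g.count + eps);  // matches inv_std
  std::printf("mean=%g unbiased_var=%g inv_std=%g\n", g.mean, unbiased_var, inv_std);
  return 0;  // expected: mean = 2, biased var m2n/count = 2, unbiased var = 16/7
}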
5cc6ff591d85445f9c70cc29d50eecd2f9bfae95.cu
#include <iostream> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include "type_shim.h" __device__ __forceinline__ int lastpow2(int n) { int out = 1 << (31 - __clz(n)); if(n == out) out >>= 1; return out; } __host__ __forceinline__ int h_next_pow2(unsigned int n) { n--; n |= (n >> 1); n |= (n >> 2); n |= (n >> 4); n |= (n >> 8); n |= (n >> 16); return ++n; } __host__ __forceinline__ int h_last_pow2(unsigned int n) { n |= (n >> 1); n |= (n >> 2); n |= (n >> 4); n |= (n >> 8); n |= (n >> 16); return n - (n >> 1); } #define WARP_SIZE 32 template<typename T> __device__ __forceinline__ T warp_reduce_sum(T val) { #pragma unroll for(int i = WARP_SIZE/2; i > 0; i >>= 1) val = val + __shfl_down_sync(0xffffffff, val, i); return val; } template<typename T> __device__ __forceinline__ T reduce_block(T *x, T val) { int tid = threadIdx.y*blockDim.x + threadIdx.x; int blockSize = blockDim.x * blockDim.y; if (blockSize > 32) { val = warp_reduce_sum(val); if (tid % WARP_SIZE == 0) x[tid/WARP_SIZE] = val; __syncthreads(); val = (tid < blockSize / WARP_SIZE? x[tid%WARP_SIZE] : T(0)); } if(tid/WARP_SIZE==0) val = warp_reduce_sum(val); return val; } #define ELEMENTS_PER_ITER 4 // enables concurrency within each thread to hide latency #define ELEMENTS_PER_THREAD 16 #define OPTIMAL_TILE_W 32 #define MAX_H_BLOCK 128 #define MAX_BLOCK_SIZE 512 __host__ int div_ru(int x, int y) { return h_last_pow2(1 + (x-1)/y); } __host__ void flexible_launch_configs( const int reduction, const int stride, dim3 &block, dim3 &grid, const bool coop_flag = false) { int block_x = std::min(h_last_pow2(stride), OPTIMAL_TILE_W); int block_y = std::min(h_last_pow2(div_ru(reduction , ELEMENTS_PER_THREAD)), MAX_BLOCK_SIZE / block_x); if (block_x * block_y != MAX_BLOCK_SIZE) { block_x = std::min(h_last_pow2(stride), MAX_BLOCK_SIZE / block_y); } int grid_x = div_ru(stride, block_x); int grid_y = std::min(div_ru(reduction, block_y * ELEMENTS_PER_THREAD), MAX_H_BLOCK); if (coop_flag) { // it's not worth having a grid reduction if the reduction dimension is not big enough grid_y = grid_y < 8 ? 1 : grid_y; } block.x = block_x; block.y = block_y; block.z = 1; grid.x = grid_x; grid.y = grid_y; grid.z = 1; } template<typename T, typename C> __device__ __forceinline__ void welford_merge_element(C& count, T& mean, T& m2n, const C& num_new, const T& mean_new, const T& m2n_new) { T factor = T(1.0) / max(1, (count + num_new)); T delta0 = mean - mean_new; mean = (mean_new * num_new + mean * count) * factor; m2n += m2n_new + delta0 * delta0 * num_new * count * factor; count += num_new; } template<typename T> __device__ __forceinline__ void warp_reduce_mean_m2n(T &mean, T &m2n, int &num) { #pragma unroll for(int i = WARP_SIZE/2; i > 0; i >>= 1) { auto num_new = __shfl_down_sync(0xffffffff, num, i); auto mean_new = __shfl_down_sync(0xffffffff, mean, i); auto m2n_new = __shfl_down_sync(0xffffffff, m2n, i); welford_merge_element(num, mean, m2n, num_new, mean_new, m2n_new); } } template <typename T> __device__ void welford_reduce_mean_m2n( T* __restrict__ x, int* __restrict__ count, T &mean, T &m2n, int &num, int block_size, int thread_id) { int lane = thread_id % WARP_SIZE; int wid = thread_id / WARP_SIZE; if (block_size > 32) { warp_reduce_mean_m2n(mean, m2n, num); if (lane == 0) { x[wid*2] = mean; x[wid*2+1] = m2n; count[wid] = num; } __syncthreads(); if (wid == 0) { mean = (thread_id < block_size / WARP_SIZE)? 
x[lane*2] : T(0); m2n = (thread_id < block_size / WARP_SIZE)? x[lane*2+1] : T(0); num = (thread_id < block_size / WARP_SIZE)? count[lane] : int(0); } } if (wid==0) warp_reduce_mean_m2n(mean, m2n, num); return; } // return spatial size for NC+ Tensors __host__ int get_tensor_spatial_size(const at::Tensor& input) { auto space_size = input.size(2); for (int i = 3; i < input.ndimension(); i++) { space_size *= input.size(i); } return space_size; } // promote accumulation scalar type. promote half to float. __host__ at::ScalarType promote_scalartype(const at::Tensor& input) { return input.scalar_type() == at::ScalarType::Half ? at::ScalarType::Float : input.scalar_type(); } // return single element size, optional accumulation type promotion. __host__ size_t get_element_data_size(const at::Tensor& input, bool accumulation = false) { auto scalar_type = accumulation ? promote_scalartype(input) : input.scalar_type(); return at::elementSize(scalar_type); } template<typename T, typename C> __device__ __forceinline__ void welford_merge_block_vertical(C& count, T& mean, T& m2n, C* shmem_count, T* shmem_mean, T* shmem_m2n) { // write to shared memory auto address_base = threadIdx.x + threadIdx.y * blockDim.x; shmem_mean[address_base] = mean; shmem_m2n[address_base] = m2n; shmem_count[address_base] = count; #pragma unroll for (int offset = blockDim.y/2; offset > 0; offset >>= 1) { __syncthreads(); if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) { auto address = address_base + offset * blockDim.x; // read shared memory back to register for reduction auto num_new = shmem_count[address]; auto mean_new = shmem_mean[address]; auto m2n_new = shmem_m2n[address]; welford_merge_element(count, mean, m2n, num_new, mean_new, m2n_new); // last write is not necessary shmem_mean[address_base] = mean; shmem_m2n[address_base] = m2n; shmem_count[address_base] = count; } } } template<typename T> __device__ __forceinline__ void merge_block_vertical(T& sum_dy, T& sum_dy_xmu, T* shmem_sum_dy, T* shmem_sum_dy_xmu) { // write to shared memory auto address_base = threadIdx.x + threadIdx.y * blockDim.x; shmem_sum_dy[address_base] = sum_dy; shmem_sum_dy_xmu[address_base] = sum_dy_xmu; #pragma unroll for (int offset = blockDim.y/2; offset > 0; offset >>= 1) { __syncthreads(); if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) { auto address = address_base + offset * blockDim.x; sum_dy += shmem_sum_dy[address]; sum_dy_xmu += shmem_sum_dy_xmu[address]; // last write is not necessary shmem_sum_dy[address_base] = sum_dy; shmem_sum_dy_xmu[address_base] = sum_dy_xmu; } } } // welford kernel calculating mean/biased_variance/unbiased_variance template <typename scalar_t, typename accscalar_t, typename outscalar_t> __global__ void welford_kernel( const scalar_t* __restrict__ input, outscalar_t* __restrict__ out_mean, outscalar_t* __restrict__ out_var_biased, const int bs, const int fs, const int ss) { int block_size = blockDim.x * blockDim.y; int count = 0; accscalar_t x_mean = accscalar_t(0); accscalar_t m_2_n = accscalar_t(0); int thread_id = threadIdx.y*blockDim.x + threadIdx.x; for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) { int input_base = blockIdx.x*ss + batch_id*ss*fs; // sequential welford for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) { count++; auto x_n = static_cast<accscalar_t>(input[offset+input_base]); auto d = x_n - x_mean; x_mean += d / count; m_2_n += d * (x_n - x_mean); } } static __shared__ int s_mem[160]; accscalar_t* s_mem_ac = (accscalar_t*) 
&s_mem[32]; welford_reduce_mean_m2n<accscalar_t>(s_mem_ac, s_mem, x_mean, m_2_n, count, block_size, thread_id); if (thread_id == 0) { out_mean[blockIdx.x] = static_cast<outscalar_t>(x_mean); out_var_biased[blockIdx.x] = static_cast<outscalar_t>(m_2_n/count); } } // elementwise BN kernel template <typename scalar_t, typename accscalar_t, typename layerscalar_t> __global__ void batchnorm_forward_kernel( const scalar_t* __restrict__ input, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, const layerscalar_t* __restrict__ weight, const layerscalar_t* __restrict__ shift, scalar_t* __restrict__ out, const int ss, const int bs) { auto m_c = mean[blockIdx.x]; auto inv_std_c = inv_std[blockIdx.x]; auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[blockIdx.x]); auto s_c = shift == NULL ? accscalar_t(0.0) : static_cast<accscalar_t>(shift[blockIdx.x]); for (int batch_offset = blockIdx.y*blockDim.y + threadIdx.y; batch_offset < bs; batch_offset += gridDim.y*blockDim.y) { int address_base = blockIdx.x*ss + batch_offset*gridDim.x*ss; for (int offset = threadIdx.x + blockIdx.z*blockDim.x; offset < ss ; offset+= gridDim.z*blockDim.x) { out[address_base+offset] = static_cast<scalar_t>(w_c * (static_cast<accscalar_t>(input[address_base+offset]) - m_c ) * inv_std_c + s_c); } } } // Backward BN kernel, calculates grad_bias, grad_weight as well as intermediate // results to calculating grad_input. // Breaking the grad_input to two step to support sync BN, which requires all // reduce of the intermediate results across processes. template <typename scalar_t, typename accscalar_t, typename layerscalar_t> __global__ void reduce_bn_kernel( const scalar_t* __restrict__ input, const scalar_t* __restrict__ grad_output, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, accscalar_t* __restrict__ mean_dy, accscalar_t* __restrict__ mean_dy_xmu, layerscalar_t* __restrict__ grad_weight, layerscalar_t* __restrict__ grad_bias, const int bs, const int fs, const int ss) { static __shared__ int s_mem[64]; int total_item_num = bs * ss; int thread_id = threadIdx.y*blockDim.x + threadIdx.x; auto r_mean = mean[blockIdx.x]; auto factor = inv_std[blockIdx.x]; // Kahan sum accscalar_t sum_dy = 0.0; accscalar_t sum_dy_xmu = 0.0; accscalar_t sum_dy_c = 0.0; accscalar_t sum_dy_xmu_c = 0.0; for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) { int input_base = blockIdx.x*ss + batch_id*ss*fs; for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) { auto e_grad = static_cast<accscalar_t>(grad_output[offset+input_base]); auto e_input = static_cast<accscalar_t>(input[offset+input_base]); // calculating sum_dy auto sum_dy_y = e_grad - sum_dy_c; auto sum_dy_t = sum_dy + sum_dy_y; sum_dy_c = (sum_dy_t - sum_dy) - sum_dy_y; sum_dy = sum_dy_t; // calculating sum_dy_xmu auto sum_dy_xmu_y = e_grad * (e_input - r_mean) - sum_dy_xmu_c; auto sum_dy_xmu_t = sum_dy_xmu + sum_dy_xmu_y; sum_dy_xmu_c = (sum_dy_xmu_t - sum_dy_xmu) - sum_dy_xmu_y; sum_dy_xmu = sum_dy_xmu_t; } } sum_dy = reduce_block((accscalar_t*)s_mem, sum_dy); __syncthreads(); sum_dy_xmu = reduce_block((accscalar_t*)s_mem, sum_dy_xmu); if (thread_id == 0) { if (grad_bias != NULL) { grad_bias[blockIdx.x] = static_cast<layerscalar_t>(sum_dy); } if (grad_weight != NULL) { grad_weight[blockIdx.x] = static_cast<layerscalar_t>(sum_dy_xmu * factor); } mean_dy[blockIdx.x] = sum_dy / total_item_num; mean_dy_xmu[blockIdx.x] = sum_dy_xmu / total_item_num; } } // elementwise backward 
BN kernel template <typename scalar_t, typename accscalar_t, typename layerscalar_t> __global__ void batchnorm_backward_kernel( const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, const layerscalar_t* __restrict__ weight, const accscalar_t* __restrict__ mean_dy, const accscalar_t* __restrict__ mean_dy_xmu, scalar_t* __restrict__ grad_input, const int ss, const int bs) { auto m_c = static_cast<accscalar_t>(mean[blockIdx.x]); auto m_dy_c = static_cast<accscalar_t>(mean_dy[blockIdx.x]); auto factor_1_c = inv_std[blockIdx.x]; auto factor_2_c = (weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[blockIdx.x])) * factor_1_c; factor_1_c = factor_1_c * factor_1_c * mean_dy_xmu[blockIdx.x]; for (int batch_offset = blockIdx.y*blockDim.y+threadIdx.y; batch_offset < bs; batch_offset += gridDim.y*blockDim.y) { int address_base = blockIdx.x*ss + batch_offset*gridDim.x*ss; for (int offset = threadIdx.x + blockIdx.z*blockDim.x; offset < ss ; offset+= gridDim.z*blockDim.x) { grad_input[address_base+offset] = (static_cast<accscalar_t>(grad_output[address_base+offset]) - m_dy_c - (static_cast<accscalar_t>(input[address_base+offset]) - m_c) * factor_1_c) * factor_2_c; } } } // welford kernel for c last tensor calculating mean/biased_variance/unbiased_variance template <typename scalar_t, typename accscalar_t, typename outscalar_t, int PARALLEL_LOADS> __global__ void welford_kernel_c_last( const scalar_t* __restrict__ input, outscalar_t* __restrict__ out_mean, outscalar_t* __restrict__ out_var_biased, volatile accscalar_t* staging_data, int* semaphores, const int reduction_size, const int stride) { // hide latency with concurrency accscalar_t x_mean[PARALLEL_LOADS]; accscalar_t m_2_n[PARALLEL_LOADS]; int count[PARALLEL_LOADS]; #pragma unroll for (int i = 0; i < PARALLEL_LOADS; i++) { x_mean[i] = accscalar_t(0); m_2_n[i] = accscalar_t(0); count[i] = accscalar_t(0); } // tensor dimension (m,c) // loop along m dimension int inner_loop_stride = blockDim.y * gridDim.y; // offset along m dimension int m_offset = blockIdx.y * blockDim.y + threadIdx.y; int c_offset = blockIdx.x * blockDim.x + threadIdx.x; int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); int address_base = m_offset * stride + c_offset; int address_increment = inner_loop_stride * stride; for (int i = 0; i < loop_count; i++) { accscalar_t x_math[PARALLEL_LOADS]; accscalar_t x_count_inv[PARALLEL_LOADS]; accscalar_t is_valid[PARALLEL_LOADS]; // load multiple data in #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { if (c_offset < stride && m_offset < reduction_size) { x_math[j] = input[address_base]; count[j]++; x_count_inv[j] = accscalar_t(1) / count[j]; is_valid[j] = accscalar_t(1); } else { x_math[j] = accscalar_t(0); x_count_inv[j] = accscalar_t(0); is_valid[j] = accscalar_t(0); } m_offset += inner_loop_stride; address_base += address_increment; } // calculate mean/m2n with welford #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { accscalar_t delta0 = x_math[j] - x_mean[j]; x_mean[j] += delta0 * x_count_inv[j]; accscalar_t delta1 = x_math[j] - x_mean[j]; m_2_n[j] += delta0 * delta1 * is_valid[j]; } } // thread reduction to accumulate mean/m_2_n/count between PARALLEL_LOADS #pragma unroll for (int j = 1; j < PARALLEL_LOADS; j++) { welford_merge_element(count[0], x_mean[0], m_2_n[0], count[j], x_mean[j], m_2_n[j]); } // release x_mean / m_2_n auto mean_th = x_mean[0]; auto m2_th = 
m_2_n[0]; auto count_th = count[0]; // block-wise reduction with shared memory (since reduction cannot be done within a warp) static __shared__ accscalar_t shmem_mean[MAX_BLOCK_SIZE]; static __shared__ accscalar_t shmem_m2n[MAX_BLOCK_SIZE]; static __shared__ int shmem_count[MAX_BLOCK_SIZE]; welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n); // grid reduction if needed (coop launch used at the first place) if (gridDim.y > 1) { volatile accscalar_t* staging_mean = staging_data; volatile accscalar_t* staging_m2n = &staging_data[stride*gridDim.y]; volatile int* staging_count = reinterpret_cast<volatile int*>(&staging_m2n[stride*gridDim.y]); address_base = c_offset + blockIdx.y * stride; // write data to staging_data; if (threadIdx.y == 0 && c_offset < stride) { staging_mean[address_base] = mean_th; staging_m2n[address_base] = m2_th; staging_count[address_base] = count_th; } __threadfence(); __syncthreads(); // ensuring writes to staging_ is visible to all blocks __shared__ bool is_last_block_done; // mark block done if (threadIdx.x == 0 && threadIdx.y == 0) { int old = atomicAdd(&semaphores[blockIdx.x], 1); is_last_block_done = (old == (gridDim.y-1)); } __syncthreads(); // check that all data is now available in global memory if (is_last_block_done) { count_th = 0; mean_th = accscalar_t(0.0); m2_th = accscalar_t(0.0); for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) { address_base = c_offset + y * stride; int num_new = c_offset < stride ? staging_count[address_base] : 0; accscalar_t mean_new = c_offset < stride ? staging_mean[address_base] : accscalar_t(0.0); accscalar_t m2n_new = c_offset < stride ? staging_m2n[address_base] : accscalar_t(0.0); welford_merge_element(count_th, mean_th, m2_th, num_new, mean_new, m2n_new); } welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n); if (threadIdx.y == 0 && c_offset < stride) { out_mean[c_offset] = static_cast<outscalar_t>(mean_th); out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th); } } } else { if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) { out_mean[c_offset] = static_cast<outscalar_t>(mean_th); out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th); } } } // parallel welford kernel to further reduce mean / biased_var // into mean / unbiased_var / inv_std across multiple processes. 
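// Layout note (editor addition): `mean` and `var_biased` are laid out
// [world_size, feature_size] in row-major order, so feature i of rank j sits at
// j * feature_size + i; the loop below therefore walks down the rank dimension
// with a stride of feature_size. `numel` is the per-rank element count and the
// same value is merged for every rank, i.e. equal counts per rank are assumed.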
template <typename scalar_t> __global__ void welford_kernel_parallel( const scalar_t* __restrict__ mean, const scalar_t* __restrict__ var_biased, scalar_t* __restrict__ out_mean, scalar_t* __restrict__ out_var, scalar_t* __restrict__ inv_std, const int world_size, const int feature_size, const float eps, const int numel) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < feature_size; i += gridDim.x * blockDim.x) { // load data; int address = i; scalar_t x_mean = 0; scalar_t m_2_n = 0; int count = 0; for (int j = 0; j < world_size; j++) { welford_merge_element(count, x_mean, m_2_n, numel, mean[address], var_biased[address]*numel); address += feature_size; } out_mean[i] = x_mean; out_var[i] = m_2_n/ (count - 1); inv_std[i] = scalar_t(1) / sqrt(m_2_n/count + eps); } } // elementwise BN kernel template < typename scalar_t, typename accscalar_t, typename layerscalar_t, int PARALLEL_LOADS> __global__ void batchnorm_forward_c_last_kernel( const scalar_t* __restrict__ input, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, const layerscalar_t* __restrict__ weight, const layerscalar_t* __restrict__ shift, scalar_t* __restrict__ out, const int reduction_size, const int stride) { // tensor dimension (m,c) // loop along m dimension int inner_loop_stride = blockDim.y * gridDim.y; // offset along m dimension int m_offset = blockIdx.y * blockDim.y + threadIdx.y; int c_offset = blockIdx.x * blockDim.x + threadIdx.x; auto m_c = mean[c_offset]; auto inv_std_c = static_cast<accscalar_t>(inv_std[c_offset]); auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset]); auto s_c = shift == NULL ? accscalar_t(0.0) : static_cast<accscalar_t>(shift[c_offset]); int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); int address_base = m_offset * stride + c_offset; int address_increment = inner_loop_stride * stride; for (int i = 0; i < loop_count; i++) { #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { if (c_offset < stride && m_offset < reduction_size) { out[address_base] = static_cast<scalar_t>( w_c * (static_cast<accscalar_t>(input[address_base]) - m_c ) * inv_std_c + s_c ); } m_offset += inner_loop_stride; address_base += address_increment; } } } // batchnorm backward kernel for c last tensor template <typename scalar_t, typename accscalar_t, typename layerscalar_t, int PARALLEL_LOADS> __global__ void reduce_bn_c_last_kernel( const scalar_t* __restrict__ input, const scalar_t* __restrict__ grad_output, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, accscalar_t* __restrict__ mean_dy, accscalar_t* __restrict__ mean_dy_xmu, layerscalar_t* __restrict__ grad_weight, layerscalar_t* __restrict__ grad_bias, volatile accscalar_t* staging_data, int* semaphores, const int reduction_size, const int stride) { // hide latency with concurrency accscalar_t sum_dy[PARALLEL_LOADS]; accscalar_t sum_dy_xmu[PARALLEL_LOADS]; #pragma unroll for (int i = 0; i < PARALLEL_LOADS; i++) { sum_dy[i] = accscalar_t(0); sum_dy_xmu[i] = accscalar_t(0); } // tensor dimension (m,c) // loop along m dimension int inner_loop_stride = blockDim.y * gridDim.y; // offset along m dimension int m_offset = blockIdx.y * blockDim.y + threadIdx.y; int c_offset = blockIdx.x * blockDim.x + threadIdx.x; int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); int address_base = m_offset * stride + c_offset; int address_increment = inner_loop_stride * stride; auto r_mean = mean[c_offset]; auto factor = 
inv_std[c_offset]; for (int i = 0; i < loop_count; i++) { accscalar_t x_input[PARALLEL_LOADS]; accscalar_t x_grad_output[PARALLEL_LOADS]; // load multiple data in #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { if (c_offset < stride && m_offset < reduction_size) { x_input[j] = input[address_base]; x_grad_output[j] = grad_output[address_base]; } else { x_input[j] = accscalar_t(0); x_grad_output[j] = accscalar_t(0); } m_offset += inner_loop_stride; address_base += address_increment; } // calculate sum_dy / sum_dy_xmu #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { sum_dy[j] += x_grad_output[j]; sum_dy_xmu[j] += x_grad_output[j] * (x_input[j] - r_mean); } } // thread reduction to accumulate sum_dy / sum_dy_xmu between PARALLEL_LOADS #pragma unroll for (int j = 1; j < PARALLEL_LOADS; j++) { sum_dy[0] += sum_dy[j]; sum_dy_xmu[0] += sum_dy_xmu[j]; } // release array of registers auto sum_dy_th = sum_dy[0]; auto sum_dy_xmu_th = sum_dy_xmu[0]; // block-wise reduction with shared memory (since reduction cannot be done within a warp) static __shared__ accscalar_t shmem_sum_dy[MAX_BLOCK_SIZE]; static __shared__ accscalar_t shmem_sum_dy_xmu[MAX_BLOCK_SIZE]; merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu); // grid reduction if needed (coop launch used at the first place) if (gridDim.y > 1) { volatile accscalar_t* staging_sum_dy = staging_data; volatile accscalar_t* staging_sum_dy_xmu = &staging_data[stride*gridDim.y]; address_base = c_offset + blockIdx.y * stride; // write data to staging_data; if (threadIdx.y == 0 && c_offset < stride) { staging_sum_dy[address_base] = sum_dy_th; staging_sum_dy_xmu[address_base] = sum_dy_xmu_th; } __threadfence(); __syncthreads(); // ensuring writes to staging_ is visible to all blocks __shared__ bool is_last_block_done; // mark block done if (threadIdx.x == 0 && threadIdx.y == 0) { int old = atomicAdd(&semaphores[blockIdx.x], 1); is_last_block_done = (old == (gridDim.y-1)); } __syncthreads(); // check that all data is now available in global memory if (is_last_block_done) { sum_dy_th = accscalar_t(0.0); sum_dy_xmu_th = accscalar_t(0.0); for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) { address_base = c_offset + y * stride; sum_dy_th += (c_offset < stride ? staging_sum_dy[address_base] : accscalar_t(0.0)); sum_dy_xmu_th += (c_offset < stride ? 
staging_sum_dy_xmu[address_base] : accscalar_t(0.0)); } merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu); if (threadIdx.y == 0 && c_offset < stride) { if (grad_bias != NULL) { grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th); } if (grad_weight != NULL) { grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor); } mean_dy[c_offset] = sum_dy_th / reduction_size; mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size; } } } else { if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) { if (grad_bias != NULL) { grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th); } if (grad_weight != NULL) { grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor); } mean_dy[c_offset] = sum_dy_th / reduction_size; mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size; } } } // elementwise BN kernel template < typename scalar_t, typename accscalar_t, typename layerscalar_t, int PARALLEL_LOADS> __global__ void batchnorm_backward_c_last_kernel( const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const accscalar_t* __restrict__ mean, const accscalar_t* __restrict__ inv_std, const layerscalar_t* __restrict__ weight, const accscalar_t* __restrict__ mean_dy, const accscalar_t* __restrict__ mean_dy_xmu, scalar_t* __restrict__ grad_input, const int reduction_size, const int stride) { // tensor dimension (m,c) // loop along m dimension int inner_loop_stride = blockDim.y * gridDim.y; // offset along m dimension int m_offset = blockIdx.y * blockDim.y + threadIdx.y; int c_offset = blockIdx.x * blockDim.x + threadIdx.x; auto m_c = mean[c_offset]; auto m_dy_c = mean_dy[c_offset]; auto factor_1_c = inv_std[c_offset]; auto factor_2_c = (weight == NULL? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset])) * factor_1_c; factor_1_c = factor_1_c * factor_1_c * mean_dy_xmu[c_offset]; int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); int address_base = m_offset * stride + c_offset; int address_increment = inner_loop_stride * stride; for (int i = 0; i < loop_count; i++) { #pragma unroll for (int j = 0; j < PARALLEL_LOADS; j++) { if (c_offset < stride && m_offset < reduction_size) { grad_input[address_base] = static_cast<scalar_t>( (static_cast<accscalar_t>(grad_output[address_base]) - m_dy_c - (static_cast<accscalar_t>(input[address_base]) - m_c) * factor_1_c) * factor_2_c); } m_offset += inner_loop_stride; address_base += address_increment; } } } std::vector<at::Tensor> welford_mean_var_CUDA(const at::Tensor input) { const auto batch_size = input.size(0); const auto feature_size = input.size(1); auto space_size = get_tensor_spatial_size(input); auto scalar_type = promote_scalartype(input); at::Tensor out_var_biased = at::empty({feature_size}, input.options().dtype(scalar_type)); at::Tensor out_mean = at::empty({feature_size}, input.options().dtype(scalar_type)); int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE / 32)); int block_x = max(1, min(MAX_BLOCK_SIZE / block_y, h_last_pow2(space_size))); const dim3 block(block_x, block_y); const dim3 grid(feature_size); auto stream = at::cuda::getCurrentCUDAStream(); { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; welford_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>( input.data<scalar_t_0>(), out_mean.data<accscalar_t>(), out_var_biased.data<accscalar_t>(), batch_size, feature_size, 
space_size); ); } return {out_mean, out_var_biased}; } at::Tensor batchnorm_forward_CUDA( const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight, const at::optional<at::Tensor> shift) { const auto batch_size = input.size(0); const auto feature_size = input.size(1); at::Tensor out = at::empty_like(input); auto space_size = get_tensor_spatial_size(input); int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4)); int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4)); const dim3 block(block_x, block_y); int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x)); int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y)); const dim3 grid(feature_size, batch_group_size, grid_z); auto stream = at::cuda::getCurrentCUDAStream(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; batchnorm_forward_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>( input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<accscalar_t>() : NULL, shift.has_value() ? shift.value().data<accscalar_t>() : NULL, out.data<scalar_t_0>(), space_size, batch_size); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; batchnorm_forward_kernel<scalar_t_0, accscalar_t, scalar_t_0><<<grid, block, 0, stream>>>( input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<scalar_t_0>() : NULL, shift.has_value() ? 
shift.value().data<scalar_t_0>() : NULL, out.data<scalar_t_0>(), space_size, batch_size); ); } return out; } std::vector<at::Tensor> reduce_bn_CUDA( const at::Tensor grad_output, const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight) { const auto batch_size = input.size(0); const auto feature_size = input.size(1); auto scalar_type = promote_scalartype(input); at::Tensor mean_dy = at::empty({feature_size}, mean.options()); at::Tensor mean_dy_xmu = at::empty({feature_size}, mean.options()); at::Tensor grad_weight; at::Tensor grad_bias; if (weight.has_value()) { grad_weight = at::empty({feature_size}, weight.value().options()); grad_bias = at::empty({feature_size}, weight.value().options()); } else { grad_weight = at::empty({0}, mean.options()); grad_bias = at::empty({0}, mean.options()); } auto space_size = get_tensor_spatial_size(input); int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE/ 32)); int block_x = max(1, min(MAX_BLOCK_SIZE/ block_y, h_last_pow2(space_size))); const dim3 block(block_x, block_y); const dim3 grid(feature_size); auto stream = at::cuda::getCurrentCUDAStream(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce", using accscalar_t = at::acc_type<scalar_t_0, true>; reduce_bn_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>( input.data<scalar_t_0>(), grad_output.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), weight.has_value() ? grad_weight.data<accscalar_t>() : NULL, weight.has_value() ? grad_bias.data<accscalar_t>() : NULL, batch_size, feature_size, space_size); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce", using accscalar_t = at::acc_type<scalar_t_0, true>; reduce_bn_kernel<scalar_t_0, accscalar_t, scalar_t_0><<<grid, block, 0, stream>>>( input.data<scalar_t_0>(), grad_output.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL, weight.has_value() ? 
grad_bias.data<scalar_t_0>() : NULL, batch_size, feature_size, space_size); ); } return {mean_dy, mean_dy_xmu, grad_weight, grad_bias}; } at::Tensor batchnorm_backward_CUDA( const at::Tensor grad_output, const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight, const at::Tensor mean_dy, const at::Tensor mean_dy_xmu) { const auto batch_size = input.size(0); const auto feature_size = input.size(1); at::Tensor grad_input = at::empty_like(input); auto space_size = get_tensor_spatial_size(input); int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4)); int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4)); const dim3 block(block_x, block_y); int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x)); int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y)); const dim3 grid(feature_size, batch_group_size, grid_z); auto stream = at::cuda::getCurrentCUDAStream(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward", using accscalar_t = at::acc_type<scalar_t_0, true>; batchnorm_backward_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>( grad_output.data<scalar_t_0>(), input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<accscalar_t>() : NULL, mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), grad_input.data<scalar_t_0>(), space_size, batch_size); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward", using accscalar_t = at::acc_type<scalar_t_0, true>; batchnorm_backward_kernel<scalar_t_0, accscalar_t, scalar_t_0><<<grid, block, 0, stream>>>( grad_output.data<scalar_t_0>(), input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<scalar_t_0>() : NULL, mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), grad_input.data<scalar_t_0>(), space_size, batch_size); ); } return grad_input; } std::vector<at::Tensor> welford_parallel_CUDA(const at::Tensor mean_feature_nodes, const at::Tensor var_biased, int numel, const float eps) { const auto world_size = mean_feature_nodes.size(0); const auto feature_size = mean_feature_nodes.size(1); at::Tensor out_var = at::empty({feature_size}, var_biased.options()); at::Tensor inv_std = at::empty_like(out_var); at::Tensor out_mean = at::empty_like(out_var); // TODO(jie): tile this for memory coalescing! 
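// Launch-config note (editor addition): `block` is the largest power of two
// <= feature_size, capped at MAX_BLOCK_SIZE (512), and `grid` is at least 1.
// For example (assumed value) feature_size = 1000 gives block = 512, grid = 1;
// the grid-stride loop inside welford_kernel_parallel then still visits all
// 1000 features, so nothing is skipped when grid * block < feature_size.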
const int block = std::min(h_last_pow2(feature_size), MAX_BLOCK_SIZE); const int grid = std::max<int>(1, feature_size / block); auto stream = at::cuda::getCurrentCUDAStream(); { using namespace at; DISPATCH_FLOAT_AND_HALF(mean_feature_nodes.scalar_type(), 0, "welford_parallel_kernel", welford_kernel_parallel<scalar_t_0><<<grid, block, 0, stream>>>( mean_feature_nodes.data<scalar_t_0>(), var_biased.data<scalar_t_0>(), out_mean.data<scalar_t_0>(), out_var.data<scalar_t_0>(), inv_std.data<scalar_t_0>(), world_size, feature_size, eps, numel); ); } return {out_mean, out_var, inv_std}; } std::vector<at::Tensor> welford_mean_var_c_last_CUDA(const at::Tensor input) { const auto stride = input.size(input.ndimension()-1); const auto reduction_size = input.numel() / stride; auto scalar_type = promote_scalartype(input); auto option = input.options().dtype(scalar_type); at::Tensor out_var_biased = at::empty({stride}, option); at::Tensor out_mean = at::empty({stride}, option); dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid, true); at::Tensor staging_data; at::Tensor semaphores; if (grid.y > 1) { staging_data = at::empty({4*stride*grid.y}, option); semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt)); } auto stream = at::cuda::getCurrentCUDAStream(); { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_c_last", using accscalar_t = at::acc_type<scalar_t_0, true>; accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr; int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr; welford_kernel_c_last<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER> <<<grid, block, 0, stream>>>( input.data<scalar_t_0>(), out_mean.data<accscalar_t>(), out_var_biased.data<accscalar_t>(), staging_data_ptr, semaphores_ptr, reduction_size, stride); ); } return {out_mean, out_var_biased}; } at::Tensor batchnorm_forward_c_last_CUDA( const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight, const at::optional<at::Tensor> shift) { const auto stride = input.size(input.ndimension()-1); const auto reduction_size = input.numel() / stride; at::Tensor out = at::empty_like(input); dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid); auto stream = at::cuda::getCurrentCUDAStream(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER> <<<grid, block, 0, stream>>>( input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<accscalar_t>() : NULL, shift.has_value() ? 
shift.value().data<accscalar_t>(): NULL, out.data<scalar_t_0>(), reduction_size, stride); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER> <<<grid, block, 0, stream>>>( input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<scalar_t_0>() : NULL, shift.has_value() ? shift.value().data<scalar_t_0>(): NULL, out.data<scalar_t_0>(), reduction_size, stride); ); } return out; } std::vector<at::Tensor> reduce_bn_c_last_CUDA( const at::Tensor grad_output, const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight) { const auto stride = input.size(input.ndimension()-1); const auto reduction_size = input.numel() / stride; at::Tensor mean_dy = at::empty({stride}, mean.options()); at::Tensor mean_dy_xmu = at::empty({stride}, mean.options()); at::Tensor grad_weight; at::Tensor grad_bias; if (weight.has_value()) { grad_weight = at::empty({stride}, weight.value().options()); grad_bias = at::empty({stride}, weight.value().options()); } else { // because I cannot return an uninitialized at::Tensor grad_weight = at::empty({0}, mean.options()); grad_bias = at::empty({0}, mean.options()); } dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid, true); at::Tensor staging_data; at::Tensor semaphores; if (grid.y > 1) { staging_data = at::empty({2*stride*grid.y}, mean.options()); semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt)); } auto stream = at::cuda::getCurrentCUDAStream(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce", using accscalar_t = at::acc_type<scalar_t_0, true>; accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr; int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr; reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER> <<<grid, block, 0, stream>>>( input.data<scalar_t_0>(), grad_output.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), weight.has_value() ? grad_weight.data<accscalar_t>() : NULL, weight.has_value() ?grad_bias.data<accscalar_t>() : NULL, staging_data_ptr, semaphores_ptr, reduction_size, stride); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce", using accscalar_t = at::acc_type<scalar_t_0, true>; accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr; int* semaphores_ptr = grid.y > 1 ? 
semaphores.data<int>() : nullptr; reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER> <<<grid, block, 0, stream>>>( input.data<scalar_t_0>(), grad_output.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL, weight.has_value() ?grad_bias.data<scalar_t_0>() : NULL, staging_data_ptr, semaphores_ptr, reduction_size, stride); ); } return {mean_dy, mean_dy_xmu, grad_weight, grad_bias}; } at::Tensor batchnorm_backward_c_last_CUDA( const at::Tensor grad_output, const at::Tensor input, const at::Tensor mean, const at::Tensor inv_std, const at::optional<at::Tensor> weight, const at::Tensor mean_dy, const at::Tensor mean_dy_xmu) { const auto stride = input.size(input.ndimension()-1); const auto reduction_size = input.numel() / stride; at::Tensor grad_input = at::empty_like(input); dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid); auto stream = at::cuda::getCurrentCUDAStream(); if (input.scalar_type() == at::ScalarType::Half && weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) { using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER> <<<grid, block, 0, stream>>>( grad_output.data<scalar_t_0>(), input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<accscalar_t>() : NULL, mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), grad_input.data<scalar_t_0>(), reduction_size, stride); ); } else { if (weight.has_value()) { AT_CHECK(input.scalar_type() == weight.value().scalar_type(), "input.scalar_type() is not supported with weight.scalar_type()"); } using namespace at; DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward", using accscalar_t = at::acc_type<scalar_t_0, true>; batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER> <<<grid, block, 0, stream>>>( grad_output.data<scalar_t_0>(), input.data<scalar_t_0>(), mean.data<accscalar_t>(), inv_std.data<accscalar_t>(), weight.has_value() ? weight.value().data<scalar_t_0>() : NULL, mean_dy.data<accscalar_t>(), mean_dy_xmu.data<accscalar_t>(), grad_input.data<scalar_t_0>(), reduction_size, stride); ); } return grad_input; }
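// --------------------------------------------------------------------------
// Illustrative sketch (editor addition, not part of either file): the Kahan
// (compensated) summation pattern that reduce_bn_kernel uses for sum_dy and
// sum_dy_xmu, shown standalone in float so the lost bits are visible. The
// sample numbers are made up; build without fast-math reassociation.
#include <cstdio>

int main() {
  const int n = 10000000;     // add 1.0f ten million times
  const float tiny = 1.0f;
  float naive = 1.0e8f;       // ulp at 1e8f is 8, so naive += 1.0f never moves
  float kahan = 1.0e8f;
  float c = 0.0f;             // running compensation, like sum_dy_c above
  for (int i = 0; i < n; ++i) {
    naive += tiny;
    float y = tiny - c;       // re-apply the error carried from the last step
    float t = kahan + y;
    c = (t - kahan) - y;      // what was lost in this addition
    kahan = t;
  }
  // Exact answer is 1.0e8 + 1.0e7 = 1.1e8; only the compensated sum gets there.
  std::printf("naive=%.1f kahan=%.1f\n", (double)naive, (double)kahan);
  return 0;
}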
d09ccdf3d32ee59d5707e20073f01f5568b88b6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This file contains all methods related to image post-processing using CUDA */ /// <summary> /// Erodes a given image which stored in linear device memory 'd_imageArray' /// Uses global memory to fetch array element and writes the result to the secondary memory space /// 'r_imageArray' /// </summary> __global__ void erodeImageCUDA(unsigned char *d_imageArray, unsigned char *r_imageArray) { // get current idx in array int idx = (blockIdx.x * blockDim.x) + threadIdx.x; // idx >= width -> ensures that the pixels 0-639 do not get operated on i.e. top row // idx <= (height * width) - width -> ensures that bottom row does not get operated on // width && idx % width != 0 -> ensures that left most column does not get operated on // idx % width != 639 -> ensures that right most column does not get operated on if (d_imageArray[idx] == 1) { // Pixel above // Pixel below // Pixel left // Pixel right // Top left // Top right // Bottom left // Bottom right if( ((idx - 640 < 0) || (d_imageArray[idx-640] == 1) ) && \ ((idx + 640 > 307199) || (d_imageArray[idx+640] == 1) ) && \ ((idx % 640 == 0) || (d_imageArray[idx-1] == 1) ) && \ ((idx % 640 == 639) || (d_imageArray[idx+1] == 1) ) && \ (( ((idx % 640) == 0) || (idx - 640 < 0)) || (d_imageArray[idx - 640 - 1] == 1) ) && \ (( ((idx % 640) == 639) || (idx - 640 < 0)) || (d_imageArray[idx - 640 + 1] == 1) ) && \ (( ((idx % 640) == 0) || (idx + 640 > 307199)) || (d_imageArray[idx + 640 - 1] == 1) ) && \ (( ((idx % 640) == 639) || (idx + 640 > 307199)) || (d_imageArray[idx + 640 + 1] == 1) ) ) r_imageArray[idx] = 1; } } /// <summary> /// Erodes a given image which stored in linear device memory 'd_imageArray' /// Uses texture memory to fetch array element /// Must be used in conjunction with erodeImageCUDA_R in order to prevent using 'hipMemcpy' to place output results back /// into the input stream/global address space 'd_imageArray' which 'd_imageArrayTex' is bound to - this is required to /// utilise the texture cache /// </summary> __global__ void erodeImageCUDA_D(unsigned char *r_imageArray) { // get current idx in array int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (tex1Dfetch(d_imageArrayTex, idx) == 1) { // Pixel above // Pixel below // Pixel left // Pixel right // Top left // Top right // Bottom left // Bottom right if( ((idx - 640 < 0) || (tex1Dfetch(d_imageArrayTex, idx-640) == 1) ) && \ ((idx + 640 > 307199) || (tex1Dfetch(d_imageArrayTex, idx+640) == 1) ) && \ ((idx % 640 == 0) || (tex1Dfetch(d_imageArrayTex, idx-1) == 1) ) && \ ((idx % 640 == 639) || (tex1Dfetch(d_imageArrayTex, idx+1) == 1) ) && \ (( ((idx % 640) == 0) || (idx - 640 < 0)) || (tex1Dfetch(d_imageArrayTex, idx - 640 - 1) == 1) ) && \ (( ((idx % 640) == 639) || (idx - 640 < 0)) || (tex1Dfetch(d_imageArrayTex, idx - 640 + 1) == 1) ) && \ (( ((idx % 640) == 0) || (idx + 640 > 307199)) || (tex1Dfetch(d_imageArrayTex, idx + 640 - 1) == 1) ) && \ (( ((idx % 640) == 639) || (idx + 640 > 307199)) || (tex1Dfetch(d_imageArrayTex, idx + 640 + 1) == 1) ) ) r_imageArray[idx] = 1; } } /// <summary> /// Erodes a given image which stored in linear device memory 'd_imageArray' /// Uses texture memory to fetch array element /// Writes results back to the primary global array 'd_imageArray' /// </summary> __global__ void erodeImageCUDA_R(unsigned char *d_imageArray) { // get current idx in array int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (tex1Dfetch(r_imageArrayTex, idx) == 1) { // Pixel 
above // Pixel below // Pixel left // Pixel right // Top left // Top right // Bottom left // Bottom right if( ((idx - 640 < 0) || (tex1Dfetch(r_imageArrayTex, idx-640) == 1) ) && \ ((idx + 640 > 307199) || (tex1Dfetch(r_imageArrayTex, idx+640) == 1) ) && \ ((idx % 640 == 0) || (tex1Dfetch(r_imageArrayTex, idx-1) == 1) ) && \ ((idx % 640 == 639) || (tex1Dfetch(r_imageArrayTex, idx+1) == 1) ) && \ (( ((idx % 640) == 0) || (idx - 640 < 0)) || (tex1Dfetch(r_imageArrayTex, idx - 640 - 1) == 1) ) && \ (( ((idx % 640) == 639) || (idx - 640 < 0)) || (tex1Dfetch(r_imageArrayTex, idx - 640 + 1) == 1) ) && \ (( ((idx % 640) == 0) || (idx + 640 > 307199)) || (tex1Dfetch(r_imageArrayTex, idx + 640 - 1) == 1) ) && \ (( ((idx % 640) == 639) || (idx + 640 > 307199)) || (tex1Dfetch(r_imageArrayTex, idx + 640 + 1) == 1) ) ) d_imageArray[idx] = 1; } } /// <summary> /// Dilates a given image which stored in linear device memory 'd_imageArray' /// Uses global memory to fetch array element and writes the result to the secondary memory space /// 'r_imageArray' /// </summary> __global__ void dilateImageCUDA(unsigned char *d_imageArray, unsigned char *r_imageArray) { // get current idx in array int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (d_imageArray[idx] == 1) { r_imageArray[idx] = 1; if (idx >= 640 ) r_imageArray[idx-640] = 1; // Pixel above if (idx <= 306559 ) r_imageArray[idx+640] = 1; // Pixel below if (idx % 640 != 0 ) r_imageArray[idx-1] = 1; // Pixel left if (idx % 640 != 639 ) r_imageArray[idx+1] = 1; // Pixel right if ((idx >= 640) && (idx % 640 != 639)) r_imageArray[idx-640+1] = 1; // Top right if ((idx <= 306559) && (idx % 640 != 639)) r_imageArray[idx+640+1] = 1; // Bottom right if ((idx >= 640) && (idx % 640 != 0)) r_imageArray[idx-640-1] = 1; // Top left if ((idx <= 306559) && (idx % 640 != 0)) r_imageArray[idx+640-1] = 1; // Bottom left // Possibly faster?? 
//if (idx >= 640 ) { // r_imageArray[idx-640] = 1; // Pixel above // if (idx % 640 != 639) r_imageArray[idx-640+1] = 1; // Top right // if (idx % 640 != 0) r_imageArray[idx-640-1] = 1; // Top left //} //if (idx <= 306559 ) { // r_imageArray[idx+640] = 1; // Pixel below // if (idx % 640 != 639) r_imageArray[idx+640+1] = 1; // Bottom right // if (idx % 640 != 0) r_imageArray[idx+640-1] = 1; // Bottom left //} //if (idx % 640 != 0 ) r_imageArray[idx-1] = 1; // Pixel left //if (idx % 640 != 639 ) r_imageArray[idx+1] = 1; // Pixel right } } /// <summary> /// Dilates a given image which stored in linear device memory 'd_imageArray' /// Uses texture memory to fetch array element /// Must be used in conjunction with dilateImageCUDA_R in order to prevent using 'hipMemcpy' to place output results back /// into the input stream/global address space 'd_imageArray' which 'd_imageArrayTex' is bound to - this is required to /// utilise the texture cache /// </summary> __global__ void dilateImageCUDA_D(unsigned char *r_imageArray) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (tex1Dfetch(d_imageArrayTex, idx) == 1) { r_imageArray[idx] = 1; if (idx >= 640 ) r_imageArray[idx-640] = 1; // Pixel above if (idx <= 306559 ) r_imageArray[idx+640] = 1; // Pixel below if (idx % 640 != 0 ) r_imageArray[idx-1] = 1; // Pixel left if (idx % 640 != 639 ) r_imageArray[idx+1] = 1; // Pixel right if ((idx >= 640) && (idx % 640 != 639)) r_imageArray[idx-640+1] = 1; // Top right if ((idx <= 306559) && (idx % 640 != 639)) r_imageArray[idx+640+1] = 1; // Bottom right if ((idx >= 640) && (idx % 640 != 0)) r_imageArray[idx-640-1] = 1; // Top left if ((idx <= 306559) && (idx % 640 != 0)) r_imageArray[idx+640-1] = 1; // Bottom left } } /// <summary> /// Dilates a given image which stored in linear device memory 'd_imageArray' /// Uses texture memory to fetch array element /// Writes results back to the primary global array 'd_imageArray' /// </summary> __global__ void dilateImageCUDA_R(unsigned char *d_imageArray) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (tex1Dfetch(r_imageArrayTex, idx) == 1) { d_imageArray[idx] = 1; if (idx >= 640 ) d_imageArray[idx-640] = 1; // Pixel above if (idx <= 306559 ) d_imageArray[idx+640] = 1; // Pixel below if (idx % 640 != 0 ) d_imageArray[idx-1] = 1; // Pixel left if (idx % 640 != 639 ) d_imageArray[idx+1] = 1; // Pixel right if ((idx >= 640) && (idx % 640 != 639)) d_imageArray[idx-640+1] = 1; // Top right if ((idx <= 306559) && (idx % 640 != 639)) d_imageArray[idx+640+1] = 1; // Bottom right if ((idx >= 640) && (idx % 640 != 0)) d_imageArray[idx-640-1] = 1; // Top left if ((idx <= 306559) && (idx % 640 != 0)) d_imageArray[idx+640-1] = 1; // Bottom left } } /// <summary> /// This method is designed to remove remaining noise in the vertial direction after post-processing /// Only removes groups of pixel below the defined mask size /// Uses global memory /// </summary> __global__ void removeUnwantedNoiseHeightCUDA(unsigned char *d_imageArray, unsigned char *r_imageArray, int maskSize) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int unwantedHeight = maskSize; int heightCount; r_imageArray[idx] = d_imageArray[idx]; // greater than two rows from the bottom and less than two rows from the top (if masksize == 3) if ((idx >= WIDTH * (unwantedHeight-1)) && (idx <= (HEIGHT * WIDTH) - ((unwantedHeight-1) * WIDTH))) { heightCount = 0; //check pixels if (d_imageArray[idx] == 1) { heightCount++; //check above two pixels for (int k = 1; k < unwantedHeight; k++) { if 
(d_imageArray[idx + (k * WIDTH)] == 1) heightCount++; } //check below two pixels for (int k = 1; k < unwantedHeight; k++) { if (d_imageArray[idx - (k * WIDTH)] == 1) heightCount++; } if(heightCount < unwantedHeight) r_imageArray[idx] = 0; } } } /// <summary> /// This method is designed to remove remaining noise in the vertial direction after post-processing /// Only removes groups of pixel below the defined mask size /// Uses texture memory /// </summary> __global__ void removeUnwantedNoiseHeightCUDATexture(unsigned char *r_imageArray, int maskSize) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int unwantedHeight = maskSize; int heightCount; r_imageArray[idx] = tex1Dfetch(d_imageArrayTex, idx); if ((idx >= WIDTH * (unwantedHeight-1)) && (idx <= (HEIGHT * WIDTH) - ((unwantedHeight-1) * WIDTH))) { heightCount = 0; if (tex1Dfetch(d_imageArrayTex, idx) == 1) { heightCount++; for (int k = 1; k < unwantedHeight; k++) { if (tex1Dfetch(d_imageArrayTex, idx + (k * WIDTH)) == 1) heightCount++; } for (int k = 1; k < unwantedHeight; k++) { if (tex1Dfetch(d_imageArrayTex, idx - (k * WIDTH)) == 1) heightCount++; } if(heightCount < unwantedHeight) r_imageArray[idx] = 0; } } } /// <summary> /// This method is designed to remove remaining noise in the horizontal direction after post-processing /// Only removes groups of pixel below the defined mask size /// Uses global memory /// </summary> __global__ void removeUnwantedNoiseWidthCUDA(unsigned char *d_imageArray, unsigned char *r_imageArray, int maskSize) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int unwantedWidth = maskSize; int widthCount; r_imageArray[idx] = d_imageArray[idx]; if ((idx % WIDTH >= (unwantedWidth-1)) && (idx % WIDTH <= 639 - (unwantedWidth-1))) { widthCount = 0; //check pixels if (d_imageArray[idx] == 1) { widthCount++; //check right pixels for (int k = 1; k < unwantedWidth; k++) { if (d_imageArray[idx + (k * 1)] == 1) widthCount++; } //check left pixels for (int k = 1; k < unwantedWidth; k++) { if (d_imageArray[idx - (k * 1)] == 1) widthCount++; } //check pixel height and delete if necessary if(widthCount < unwantedWidth) r_imageArray[idx] = 0; } } } /// <summary> /// This method is designed to remove remaining noise in the horizontal direction after post-processing /// Only removes groups of pixel below the defined mask size /// Uses texture memory /// </summary> __global__ void removeUnwantedNoiseWidthCUDATexture(unsigned char *d_imageArray, int maskSize) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int unwantedWidth = maskSize; int widthCount; d_imageArray[idx] = tex1Dfetch(r_imageArrayTex, idx); if ((idx % WIDTH >= (unwantedWidth-1)) && (idx % WIDTH <= 639 - (unwantedWidth-1))) { widthCount = 0; //check pixels if (tex1Dfetch(r_imageArrayTex, idx) == 1) { widthCount++; //check right pixels for (int k = 1; k < unwantedWidth; k++) { if (tex1Dfetch(r_imageArrayTex, idx + (k * 1)) == 1) widthCount++; } //check left pixels for (int k = 1; k < unwantedWidth; k++) { if (tex1Dfetch(r_imageArrayTex, idx - (k * 1)) == 1) widthCount++; } //check pixel height and delete if necessary if(widthCount < unwantedWidth) d_imageArray[idx] = 0; } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /* Sequential Methods */ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> /// Erodes a given image 
which is represented by the linear array 'anArray' /// </summary> void erodeImage(int height, int width, unsigned char *anArray) { unsigned char *tempArray = (unsigned char*)malloc(sizeof(unsigned char) * height * width); int i, j; for (i = 0; i < width * height; i++) { tempArray[i] = anArray[i]; } // Perform logical and operation to erode image for (i = 1; i < height - 1; i++) { for (j = 1; j < width - 1; j++) { if (anArray[(i * width) + j] == 1) { if (anArray[((i - 1) * width) + (j - 1)] != 1 || anArray[((i - 1)* width) + j] != 1 || anArray[((i - 1) * width) + (j + 1)] != 1 || anArray[(i * width) + (j - 1)] != 1 || anArray[(i * width) + (j + 1)] != 1 || anArray[((i + 1) * width) + (j - 1)] != 1 || anArray[((i + 1) * width) + j] != 1 || anArray[((i + 1) * width) + (j + 1)] != 1) { tempArray[(i * width) + j] = 0; } } } } for (i = 0; i < width * height; i++) { anArray[i] = tempArray[i]; } free(tempArray); }
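A minimal HIP host-side sketch of how the erode/dilate kernels above might be driven, assuming WIDTH = 640 and HEIGHT = 480 (consistent with the hard-coded 307199 bound); the helper name, block size, and single erode-then-dilate pass are illustrative assumptions, not the project's actual driver code:

// Sketch only: the erode/dilate kernels above never clear their output buffer
// (they only write 1s), so the host must zero the destination before each pass.
#include <hip/hip_runtime.h>

#define IMG_WIDTH  640
#define IMG_HEIGHT 480   // 640 * 480 = 307200 pixels -> last index 307199

// Forward declarations of the kernels defined earlier in this file.
__global__ void erodeImageCUDA(unsigned char *d_imageArray, unsigned char *r_imageArray);
__global__ void dilateImageCUDA(unsigned char *d_imageArray, unsigned char *r_imageArray);

void openBinaryImageHIP(unsigned char *h_image)   // hypothetical helper name
{
    const size_t numPixels = IMG_WIDTH * IMG_HEIGHT;
    const size_t numBytes  = numPixels * sizeof(unsigned char);
    unsigned char *d_imageArray = nullptr, *r_imageArray = nullptr;

    hipMalloc(&d_imageArray, numBytes);
    hipMalloc(&r_imageArray, numBytes);
    hipMemcpy(d_imageArray, h_image, numBytes, hipMemcpyHostToDevice);

    const int threadsPerBlock = 256;   // 307200 / 256 = 1200 blocks exactly
    const int blocks = (int)((numPixels + threadsPerBlock - 1) / threadsPerBlock);

    // Morphological opening: erode d_imageArray into r_imageArray, then dilate back.
    hipMemset(r_imageArray, 0, numBytes);
    hipLaunchKernelGGL(erodeImageCUDA, dim3(blocks), dim3(threadsPerBlock), 0, 0,
                       d_imageArray, r_imageArray);
    hipMemset(d_imageArray, 0, numBytes);
    hipLaunchKernelGGL(dilateImageCUDA, dim3(blocks), dim3(threadsPerBlock), 0, 0,
                       r_imageArray, d_imageArray);

    hipMemcpy(h_image, d_imageArray, numBytes, hipMemcpyDeviceToHost);
    hipFree(d_imageArray);
    hipFree(r_imageArray);
}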
d09ccdf3d32ee59d5707e20073f01f5568b88b6f.cu
/* * This file contains all methods related to image post-processing using CUDA */ /// <summary> /// Erodes a given image which stored in linear device memory 'd_imageArray' /// Uses global memory to fetch array element and writes the result to the secondary memory space /// 'r_imageArray' /// </summary> __global__ void erodeImageCUDA(unsigned char *d_imageArray, unsigned char *r_imageArray) { // get current idx in array int idx = (blockIdx.x * blockDim.x) + threadIdx.x; // idx >= width -> ensures that the pixels 0-639 do not get operated on i.e. top row // idx <= (height * width) - width -> ensures that bottom row does not get operated on // width && idx % width != 0 -> ensures that left most column does not get operated on // idx % width != 639 -> ensures that right most column does not get operated on if (d_imageArray[idx] == 1) { // Pixel above // Pixel below // Pixel left // Pixel right // Top left // Top right // Bottom left // Bottom right if( ((idx - 640 < 0) || (d_imageArray[idx-640] == 1) ) && \ ((idx + 640 > 307199) || (d_imageArray[idx+640] == 1) ) && \ ((idx % 640 == 0) || (d_imageArray[idx-1] == 1) ) && \ ((idx % 640 == 639) || (d_imageArray[idx+1] == 1) ) && \ (( ((idx % 640) == 0) || (idx - 640 < 0)) || (d_imageArray[idx - 640 - 1] == 1) ) && \ (( ((idx % 640) == 639) || (idx - 640 < 0)) || (d_imageArray[idx - 640 + 1] == 1) ) && \ (( ((idx % 640) == 0) || (idx + 640 > 307199)) || (d_imageArray[idx + 640 - 1] == 1) ) && \ (( ((idx % 640) == 639) || (idx + 640 > 307199)) || (d_imageArray[idx + 640 + 1] == 1) ) ) r_imageArray[idx] = 1; } } /// <summary> /// Erodes a given image which stored in linear device memory 'd_imageArray' /// Uses texture memory to fetch array element /// Must be used in conjunction with erodeImageCUDA_R in order to prevent using 'cudaMemcpy' to place output results back /// into the input stream/global address space 'd_imageArray' which 'd_imageArrayTex' is bound to - this is required to /// utilise the texture cache /// </summary> __global__ void erodeImageCUDA_D(unsigned char *r_imageArray) { // get current idx in array int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (tex1Dfetch(d_imageArrayTex, idx) == 1) { // Pixel above // Pixel below // Pixel left // Pixel right // Top left // Top right // Bottom left // Bottom right if( ((idx - 640 < 0) || (tex1Dfetch(d_imageArrayTex, idx-640) == 1) ) && \ ((idx + 640 > 307199) || (tex1Dfetch(d_imageArrayTex, idx+640) == 1) ) && \ ((idx % 640 == 0) || (tex1Dfetch(d_imageArrayTex, idx-1) == 1) ) && \ ((idx % 640 == 639) || (tex1Dfetch(d_imageArrayTex, idx+1) == 1) ) && \ (( ((idx % 640) == 0) || (idx - 640 < 0)) || (tex1Dfetch(d_imageArrayTex, idx - 640 - 1) == 1) ) && \ (( ((idx % 640) == 639) || (idx - 640 < 0)) || (tex1Dfetch(d_imageArrayTex, idx - 640 + 1) == 1) ) && \ (( ((idx % 640) == 0) || (idx + 640 > 307199)) || (tex1Dfetch(d_imageArrayTex, idx + 640 - 1) == 1) ) && \ (( ((idx % 640) == 639) || (idx + 640 > 307199)) || (tex1Dfetch(d_imageArrayTex, idx + 640 + 1) == 1) ) ) r_imageArray[idx] = 1; } } /// <summary> /// Erodes a given image which stored in linear device memory 'd_imageArray' /// Uses texture memory to fetch array element /// Writes results back to the primary global array 'd_imageArray' /// </summary> __global__ void erodeImageCUDA_R(unsigned char *d_imageArray) { // get current idx in array int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (tex1Dfetch(r_imageArrayTex, idx) == 1) { // Pixel above // Pixel below // Pixel left // Pixel right // Top left // Top right // Bottom left 
// Bottom right if( ((idx - 640 < 0) || (tex1Dfetch(r_imageArrayTex, idx-640) == 1) ) && \ ((idx + 640 > 307199) || (tex1Dfetch(r_imageArrayTex, idx+640) == 1) ) && \ ((idx % 640 == 0) || (tex1Dfetch(r_imageArrayTex, idx-1) == 1) ) && \ ((idx % 640 == 639) || (tex1Dfetch(r_imageArrayTex, idx+1) == 1) ) && \ (( ((idx % 640) == 0) || (idx - 640 < 0)) || (tex1Dfetch(r_imageArrayTex, idx - 640 - 1) == 1) ) && \ (( ((idx % 640) == 639) || (idx - 640 < 0)) || (tex1Dfetch(r_imageArrayTex, idx - 640 + 1) == 1) ) && \ (( ((idx % 640) == 0) || (idx + 640 > 307199)) || (tex1Dfetch(r_imageArrayTex, idx + 640 - 1) == 1) ) && \ (( ((idx % 640) == 639) || (idx + 640 > 307199)) || (tex1Dfetch(r_imageArrayTex, idx + 640 + 1) == 1) ) ) d_imageArray[idx] = 1; } } /// <summary> /// Dilates a given image which stored in linear device memory 'd_imageArray' /// Uses global memory to fetch array element and writes the result to the secondary memory space /// 'r_imageArray' /// </summary> __global__ void dilateImageCUDA(unsigned char *d_imageArray, unsigned char *r_imageArray) { // get current idx in array int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (d_imageArray[idx] == 1) { r_imageArray[idx] = 1; if (idx >= 640 ) r_imageArray[idx-640] = 1; // Pixel above if (idx <= 306559 ) r_imageArray[idx+640] = 1; // Pixel below if (idx % 640 != 0 ) r_imageArray[idx-1] = 1; // Pixel left if (idx % 640 != 639 ) r_imageArray[idx+1] = 1; // Pixel right if ((idx >= 640) && (idx % 640 != 639)) r_imageArray[idx-640+1] = 1; // Top right if ((idx <= 306559) && (idx % 640 != 639)) r_imageArray[idx+640+1] = 1; // Bottom right if ((idx >= 640) && (idx % 640 != 0)) r_imageArray[idx-640-1] = 1; // Top left if ((idx <= 306559) && (idx % 640 != 0)) r_imageArray[idx+640-1] = 1; // Bottom left // Possibly faster?? 
//if (idx >= 640 ) { // r_imageArray[idx-640] = 1; // Pixel above // if (idx % 640 != 639) r_imageArray[idx-640+1] = 1; // Top right // if (idx % 640 != 0) r_imageArray[idx-640-1] = 1; // Top left //} //if (idx <= 306559 ) { // r_imageArray[idx+640] = 1; // Pixel below // if (idx % 640 != 639) r_imageArray[idx+640+1] = 1; // Bottom right // if (idx % 640 != 0) r_imageArray[idx+640-1] = 1; // Bottom left //} //if (idx % 640 != 0 ) r_imageArray[idx-1] = 1; // Pixel left //if (idx % 640 != 639 ) r_imageArray[idx+1] = 1; // Pixel right } } /// <summary> /// Dilates a given image which stored in linear device memory 'd_imageArray' /// Uses texture memory to fetch array element /// Must be used in conjunction with dilateImageCUDA_R in order to prevent using 'cudaMemcpy' to place output results back /// into the input stream/global address space 'd_imageArray' which 'd_imageArrayTex' is bound to - this is required to /// utilise the texture cache /// </summary> __global__ void dilateImageCUDA_D(unsigned char *r_imageArray) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (tex1Dfetch(d_imageArrayTex, idx) == 1) { r_imageArray[idx] = 1; if (idx >= 640 ) r_imageArray[idx-640] = 1; // Pixel above if (idx <= 306559 ) r_imageArray[idx+640] = 1; // Pixel below if (idx % 640 != 0 ) r_imageArray[idx-1] = 1; // Pixel left if (idx % 640 != 639 ) r_imageArray[idx+1] = 1; // Pixel right if ((idx >= 640) && (idx % 640 != 639)) r_imageArray[idx-640+1] = 1; // Top right if ((idx <= 306559) && (idx % 640 != 639)) r_imageArray[idx+640+1] = 1; // Bottom right if ((idx >= 640) && (idx % 640 != 0)) r_imageArray[idx-640-1] = 1; // Top left if ((idx <= 306559) && (idx % 640 != 0)) r_imageArray[idx+640-1] = 1; // Bottom left } } /// <summary> /// Dilates a given image which stored in linear device memory 'd_imageArray' /// Uses texture memory to fetch array element /// Writes results back to the primary global array 'd_imageArray' /// </summary> __global__ void dilateImageCUDA_R(unsigned char *d_imageArray) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (tex1Dfetch(r_imageArrayTex, idx) == 1) { d_imageArray[idx] = 1; if (idx >= 640 ) d_imageArray[idx-640] = 1; // Pixel above if (idx <= 306559 ) d_imageArray[idx+640] = 1; // Pixel below if (idx % 640 != 0 ) d_imageArray[idx-1] = 1; // Pixel left if (idx % 640 != 639 ) d_imageArray[idx+1] = 1; // Pixel right if ((idx >= 640) && (idx % 640 != 639)) d_imageArray[idx-640+1] = 1; // Top right if ((idx <= 306559) && (idx % 640 != 639)) d_imageArray[idx+640+1] = 1; // Bottom right if ((idx >= 640) && (idx % 640 != 0)) d_imageArray[idx-640-1] = 1; // Top left if ((idx <= 306559) && (idx % 640 != 0)) d_imageArray[idx+640-1] = 1; // Bottom left } } /// <summary> /// This method is designed to remove remaining noise in the vertial direction after post-processing /// Only removes groups of pixel below the defined mask size /// Uses global memory /// </summary> __global__ void removeUnwantedNoiseHeightCUDA(unsigned char *d_imageArray, unsigned char *r_imageArray, int maskSize) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int unwantedHeight = maskSize; int heightCount; r_imageArray[idx] = d_imageArray[idx]; // greater than two rows from the bottom and less than two rows from the top (if masksize == 3) if ((idx >= WIDTH * (unwantedHeight-1)) && (idx <= (HEIGHT * WIDTH) - ((unwantedHeight-1) * WIDTH))) { heightCount = 0; //check pixels if (d_imageArray[idx] == 1) { heightCount++; //check above two pixels for (int k = 1; k < unwantedHeight; k++) { if 
(d_imageArray[idx + (k * WIDTH)] == 1) heightCount++; } //check below two pixels for (int k = 1; k < unwantedHeight; k++) { if (d_imageArray[idx - (k * WIDTH)] == 1) heightCount++; } if(heightCount < unwantedHeight) r_imageArray[idx] = 0; } } } /// <summary> /// This method is designed to remove remaining noise in the vertial direction after post-processing /// Only removes groups of pixel below the defined mask size /// Uses texture memory /// </summary> __global__ void removeUnwantedNoiseHeightCUDATexture(unsigned char *r_imageArray, int maskSize) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int unwantedHeight = maskSize; int heightCount; r_imageArray[idx] = tex1Dfetch(d_imageArrayTex, idx); if ((idx >= WIDTH * (unwantedHeight-1)) && (idx <= (HEIGHT * WIDTH) - ((unwantedHeight-1) * WIDTH))) { heightCount = 0; if (tex1Dfetch(d_imageArrayTex, idx) == 1) { heightCount++; for (int k = 1; k < unwantedHeight; k++) { if (tex1Dfetch(d_imageArrayTex, idx + (k * WIDTH)) == 1) heightCount++; } for (int k = 1; k < unwantedHeight; k++) { if (tex1Dfetch(d_imageArrayTex, idx - (k * WIDTH)) == 1) heightCount++; } if(heightCount < unwantedHeight) r_imageArray[idx] = 0; } } } /// <summary> /// This method is designed to remove remaining noise in the horizontal direction after post-processing /// Only removes groups of pixel below the defined mask size /// Uses global memory /// </summary> __global__ void removeUnwantedNoiseWidthCUDA(unsigned char *d_imageArray, unsigned char *r_imageArray, int maskSize) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int unwantedWidth = maskSize; int widthCount; r_imageArray[idx] = d_imageArray[idx]; if ((idx % WIDTH >= (unwantedWidth-1)) && (idx % WIDTH <= 639 - (unwantedWidth-1))) { widthCount = 0; //check pixels if (d_imageArray[idx] == 1) { widthCount++; //check right pixels for (int k = 1; k < unwantedWidth; k++) { if (d_imageArray[idx + (k * 1)] == 1) widthCount++; } //check left pixels for (int k = 1; k < unwantedWidth; k++) { if (d_imageArray[idx - (k * 1)] == 1) widthCount++; } //check pixel height and delete if necessary if(widthCount < unwantedWidth) r_imageArray[idx] = 0; } } } /// <summary> /// This method is designed to remove remaining noise in the horizontal direction after post-processing /// Only removes groups of pixel below the defined mask size /// Uses texture memory /// </summary> __global__ void removeUnwantedNoiseWidthCUDATexture(unsigned char *d_imageArray, int maskSize) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int unwantedWidth = maskSize; int widthCount; d_imageArray[idx] = tex1Dfetch(r_imageArrayTex, idx); if ((idx % WIDTH >= (unwantedWidth-1)) && (idx % WIDTH <= 639 - (unwantedWidth-1))) { widthCount = 0; //check pixels if (tex1Dfetch(r_imageArrayTex, idx) == 1) { widthCount++; //check right pixels for (int k = 1; k < unwantedWidth; k++) { if (tex1Dfetch(r_imageArrayTex, idx + (k * 1)) == 1) widthCount++; } //check left pixels for (int k = 1; k < unwantedWidth; k++) { if (tex1Dfetch(r_imageArrayTex, idx - (k * 1)) == 1) widthCount++; } //check pixel height and delete if necessary if(widthCount < unwantedWidth) d_imageArray[idx] = 0; } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /* Sequential Methods */ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> /// Erodes a given image 
which is represented by the linear array 'anArray' /// </summary> void erodeImage(int height, int width, unsigned char *anArray) { unsigned char *tempArray = (unsigned char*)malloc(sizeof(unsigned char) * height * width); int i, j; for (i = 0; i < width * height; i++) { tempArray[i] = anArray[i]; } // Perform logical and operation to erode image for (i = 1; i < height - 1; i++) { for (j = 1; j < width - 1; j++) { if (anArray[(i * width) + j] == 1) { if (anArray[((i - 1) * width) + (j - 1)] != 1 || anArray[((i - 1)* width) + j] != 1 || anArray[((i - 1) * width) + (j + 1)] != 1 || anArray[(i * width) + (j - 1)] != 1 || anArray[(i * width) + (j + 1)] != 1 || anArray[((i + 1) * width) + (j - 1)] != 1 || anArray[((i + 1) * width) + j] != 1 || anArray[((i + 1) * width) + (j + 1)] != 1) { tempArray[(i * width) + j] = 0; } } } } for (i = 0; i < width * height; i++) { anArray[i] = tempArray[i]; } free(tempArray); }
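The *_D/*_R kernel pairs above read through the texture references d_imageArrayTex and r_imageArrayTex, which must be declared at file scope earlier in the full source. A hedged sketch of the host-side binding and ping-pong sequence using the legacy texture-reference API (illustrative function name, not the project's actual code):

// Assumed file-scope declarations (they must exist in the full translation unit
// for tex1Dfetch(d_imageArrayTex, ...) in the kernels above to compile):
texture<unsigned char, cudaTextureType1D, cudaReadModeElementType> d_imageArrayTex;
texture<unsigned char, cudaTextureType1D, cudaReadModeElementType> r_imageArrayTex;

void erodeTwiceWithTextures(unsigned char *d_imageArray, unsigned char *r_imageArray,
                            size_t numPixels, int blocks, int threadsPerBlock)
{
    const size_t numBytes = numPixels * sizeof(unsigned char);

    // Bind both linear buffers once; tex1Dfetch then reads through the texture cache.
    cudaBindTexture(0, d_imageArrayTex, d_imageArray, numBytes);
    cudaBindTexture(0, r_imageArrayTex, r_imageArray, numBytes);

    // Pass 1: read d_imageArray via its texture, write the eroded result into r_imageArray.
    cudaMemset(r_imageArray, 0, numBytes);
    erodeImageCUDA_D<<<blocks, threadsPerBlock>>>(r_imageArray);

    // Pass 2: read r_imageArray via its texture, write back into d_imageArray,
    // avoiding a device-to-device cudaMemcpy between the two passes.
    cudaMemset(d_imageArray, 0, numBytes);
    erodeImageCUDA_R<<<blocks, threadsPerBlock>>>(d_imageArray);

    cudaUnbindTexture(d_imageArrayTex);
    cudaUnbindTexture(r_imageArrayTex);
}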
1919d507caf3e0e5505f15536e900b8408c60ed3.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // pow_scalar.cu #include <nbla/cuda/function/pow_scalar.hpp> #include <nbla/cuda/function/utils/base_transform_unary.cuh> namespace nbla { NBLA_DEFINE_TRANSFORM_UNARY_CUDA_1(PowScalar, ::pow(x, (T)a0), dy *(T)a0 *::pow(x, (T)a0 - (T)1), double, false); }
1919d507caf3e0e5505f15536e900b8408c60ed3.cu
// Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // pow_scalar.cu #include <nbla/cuda/function/pow_scalar.hpp> #include <nbla/cuda/function/utils/base_transform_unary.cuh> namespace nbla { NBLA_DEFINE_TRANSFORM_UNARY_CUDA_1(PowScalar, std::pow(x, (T)a0), dy *(T)a0 *std::pow(x, (T)a0 - (T)1), double, false); }
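The macro call above generates the forward pass y = x^a0 and the backward contribution dy * a0 * x^(a0-1). The standalone kernels below restate that elementwise rule outside the nnabla macro machinery; they are illustrative only (float-typed, hypothetical names), not what the macro actually expands to, and whether the gradient accumulates into dx or overwrites it is controlled by the macro's accum handling:

// Forward: y[i] = x[i]^a0
__global__ void pow_scalar_forward_sketch(int n, const float *x, float *y, float a0) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = powf(x[i], a0);
}

// Backward: d/dx x^a = a * x^(a-1), chained with the upstream gradient dy.
__global__ void pow_scalar_backward_sketch(int n, const float *x, const float *dy,
                                           float *dx, float a0) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dx[i] += dy[i] * a0 * powf(x[i], a0 - 1.0f);  // accumulating variant
}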
6a770a89cba086bf33f59dba83c301461dd4960a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zlat2c.cu, mixed zc -> ds, Thu Oct 8 23:05:32 2020 @author Mark Gates */ #include "magma_internal.h" // mixed precision generation has issues with SINGLE PRECISION, so use PRECISION_z #define PRECISION_d #define BLK_X 64 #define BLK_Y 32 // TODO get rid of global variable! static __device__ int flag = 0; /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlag2s and zlaset. */ __global__ void dlat2s_lower( int n, const double *A, int lda, float *SA, int ldsa, double rmax ) { double tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { tmp = A[j*lda]; if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) ); } } } } /* Similar to dlat2s_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlag2s and zlaset. 
*/ __global__ void dlat2s_upper( int n, const double *A, int lda, float *SA, int ldsa, double rmax ) { double tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { tmp = A[j*lda]; if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) ); } } } } } /***************************************************************************//** Purpose ------- DLAT2S converts a double-real matrix, A, to a single-real matrix, SA. RMAX is the overflow for the single-real arithmetic. DLAT2S checks that all the entries of A are between -RMAX and RMAX. If not, the conversion is aborted and a flag is raised. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A DOUBLE PRECISION array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA SINGLE PRECISION array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value - = 1: an entry of the matrix A is greater than the SINGLE PRECISION overflow threshold, in this case, the content of SA on exit is unspecified. @param[in] queue magma_queue_t Queue to execute in. 
    @ingroup magma_lat2
*******************************************************************************/
extern "C" void
magmablas_dlat2s(
    magma_uplo_t uplo, magma_int_t n,
    magmaDouble_const_ptr A, magma_int_t lda,
    magmaFloat_ptr SA,       magma_int_t ldsa,
    magma_queue_t queue,
    magma_int_t *info )
{
    *info = 0;
    if ( uplo != MagmaLower && uplo != MagmaUpper )
        *info = -1;
    else if ( n < 0 )
        *info = -2;
    else if ( lda < max(1,n) )
        *info = -4;
    else if ( ldsa < max(1,n) )
        *info = -6;
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return; //*info;
    }

    /* quick return */
    if ( n == 0 ) {
        return;
    }

    double rmax = (double)lapackf77_slamch("O");

    dim3 threads( BLK_X, 1 );
    dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) );
    hipMemcpyToSymbol( HIP_SYMBOL(flag), info, sizeof(flag) );    // flag = 0

    if (uplo == MagmaLower) {
        hipLaunchKernelGGL(( dlat2s_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,  n, A, lda, SA, ldsa, rmax);
    }
    else if (uplo == MagmaUpper) {
        hipLaunchKernelGGL(( dlat2s_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,  n, A, lda, SA, ldsa, rmax);
    }

    hipMemcpyFromSymbol( info, HIP_SYMBOL(flag), sizeof(flag) );  // info = flag
}
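A hedged usage sketch for the wrapper above: the caller supplies both device matrices, invokes the conversion on a MAGMA queue, and inspects info for the overflow flag. Only entry points visible in this file are used; the surrounding helper is hypothetical.

void convert_lower_to_single(magma_int_t n,
                             magmaDouble_const_ptr dA, magma_int_t ldda,
                             magmaFloat_ptr dSA,       magma_int_t lddsa,
                             magma_queue_t queue)
{
    magma_int_t info = 0;
    magmablas_dlat2s( MagmaLower, n, dA, ldda, dSA, lddsa, queue, &info );

    if (info < 0) {
        // -info is the index of the illegal argument (reported via magma_xerbla)
    } else if (info == 1) {
        // some |A(i,j)| exceeded the single-precision overflow threshold;
        // dSA is unspecified, so fall back to a double-precision code path
    }
}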
6a770a89cba086bf33f59dba83c301461dd4960a.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zlat2c.cu, mixed zc -> ds, Thu Oct 8 23:05:32 2020 @author Mark Gates */ #include "magma_internal.h" // mixed precision generation has issues with SINGLE PRECISION, so use PRECISION_z #define PRECISION_d #define BLK_X 64 #define BLK_Y 32 // TODO get rid of global variable! static __device__ int flag = 0; /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlag2s and zlaset. */ __global__ void dlat2s_lower( int n, const double *A, int lda, float *SA, int ldsa, double rmax ) { double tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { tmp = A[j*lda]; if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) ); } } } } /* Similar to dlat2s_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlag2s and zlaset. 
*/ __global__ void dlat2s_upper( int n, const double *A, int lda, float *SA, int ldsa, double rmax ) { double tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { tmp = A[j*lda]; if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) ); } } } } } /***************************************************************************//** Purpose ------- DLAT2S converts a double-real matrix, A, to a single-real matrix, SA. RMAX is the overflow for the single-real arithmetic. DLAT2S checks that all the entries of A are between -RMAX and RMAX. If not, the conversion is aborted and a flag is raised. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A DOUBLE PRECISION array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA SINGLE PRECISION array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value - = 1: an entry of the matrix A is greater than the SINGLE PRECISION overflow threshold, in this case, the content of SA on exit is unspecified. @param[in] queue magma_queue_t Queue to execute in. 
    @ingroup magma_lat2
*******************************************************************************/
extern "C" void
magmablas_dlat2s(
    magma_uplo_t uplo, magma_int_t n,
    magmaDouble_const_ptr A, magma_int_t lda,
    magmaFloat_ptr SA,       magma_int_t ldsa,
    magma_queue_t queue,
    magma_int_t *info )
{
    *info = 0;
    if ( uplo != MagmaLower && uplo != MagmaUpper )
        *info = -1;
    else if ( n < 0 )
        *info = -2;
    else if ( lda < max(1,n) )
        *info = -4;
    else if ( ldsa < max(1,n) )
        *info = -6;
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return; //*info;
    }

    /* quick return */
    if ( n == 0 ) {
        return;
    }

    double rmax = (double)lapackf77_slamch("O");

    dim3 threads( BLK_X, 1 );
    dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) );
    cudaMemcpyToSymbol( flag, info, sizeof(flag) );    // flag = 0

    if (uplo == MagmaLower) {
        dlat2s_lower<<< grid, threads, 0, queue->cuda_stream() >>> (n, A, lda, SA, ldsa, rmax);
    }
    else if (uplo == MagmaUpper) {
        dlat2s_upper<<< grid, threads, 0, queue->cuda_stream() >>> (n, A, lda, SA, ldsa, rmax);
    }

    cudaMemcpyFromSymbol( info, flag, sizeof(flag) );  // info = flag
}
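The overflow test relies on a file-scope __device__ int that the host clears before the launch and reads back afterwards through cudaMemcpyToSymbol / cudaMemcpyFromSymbol. A minimal standalone version of that pattern, independent of MAGMA (illustrative names only):

#include <cstdio>
#include <cuda_runtime.h>

static __device__ int d_overflow_flag = 0;

__global__ void range_check(const double *x, int n, double rmax)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && (x[i] > rmax || x[i] < -rmax))
        d_overflow_flag = 1;   // benign race: every writer stores the same value
}

void run_range_check(const double *d_x, int n, double rmax)
{
    int zero = 0, flag = 0;
    cudaMemcpyToSymbol(d_overflow_flag, &zero, sizeof(zero));    // reset before launch
    range_check<<<(n + 255) / 256, 256>>>(d_x, n, rmax);
    cudaMemcpyFromSymbol(&flag, d_overflow_flag, sizeof(flag));  // syncs with the default stream
    if (flag) std::printf("overflow detected\n");
}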
5e1b61351c6b242e5a843d73e0b561976374902e.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include <hip/hip_runtime.h> #include <thrust/iterator/constant_iterator.h> #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/graph.h" #include "cupoch/geometry/lineset.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/geometry/distancetransform.h" #include "cupoch/geometry/geometry_functor.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/utility/platform.h" #include "cupoch/utility/range.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/shader/simple_shader.h" #include "cupoch/visualization/utility/color_map.h" #include "cupoch/visualization/visualizer/render_option.h" using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { // Vertex indices of 12 lines in a cuboid __constant__ int cuboid_lines_vertex_indices[12][2] = { {0, 1}, {0, 2}, {0, 4}, {3, 1}, {3, 2}, {3, 7}, {5, 1}, {5, 4}, {5, 7}, {6, 2}, {6, 4}, {6, 7}, }; struct copy_pointcloud_functor { copy_pointcloud_functor(bool has_colors, RenderOption::PointColorOption color_option, const ViewControl &view) : has_colors_(has_colors), color_option_(color_option), view_(view){}; const bool has_colors_; const RenderOption::PointColorOption color_option_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()( const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> &pt_cl) { const Eigen::Vector3f &point = thrust::get<0>(pt_cl); const Eigen::Vector3f &color = thrust::get<1>(pt_cl); Eigen::Vector4f color_tmp; color_tmp[3] = 1.0; switch (color_option_) { case RenderOption::PointColorOption::XCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetXPercentage(point(0)), colormap_option_); break; case RenderOption::PointColorOption::YCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetYPercentage(point(1)), colormap_option_); break; case RenderOption::PointColorOption::ZCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetZPercentage(point(2)), colormap_option_); break; case RenderOption::PointColorOption::Color: case RenderOption::PointColorOption::Default: default: if (has_colors_) { color_tmp.head<3>() = color; } else { color_tmp.head<3>() = 
GetColorMapColor( view_.GetBoundingBox().GetZPercentage(point(2)), colormap_option_); } break; } return thrust::make_tuple(point, color_tmp); } }; struct copy_lineset_functor { copy_lineset_functor( const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords, const Eigen::Vector3f *line_colors, bool has_colors) : line_coords_(line_coords), line_colors_(line_colors), has_colors_(has_colors){}; const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords_; const Eigen::Vector3f *line_colors_; const bool has_colors_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()( size_t k) const { int i = k / 2; int j = k % 2; Eigen::Vector4f color_tmp; color_tmp[3] = 1.0; color_tmp.head<3>() = (has_colors_) ? line_colors_[i] : Eigen::Vector3f::Ones(); if (j == 0) { return thrust::make_tuple(line_coords_[i].first, color_tmp); } else { return thrust::make_tuple(line_coords_[i].second, color_tmp); } } }; struct line_coordinates_functor { line_coordinates_functor(const Eigen::Vector3f *points) : points_(points){}; const Eigen::Vector3f *points_; __device__ thrust::pair<Eigen::Vector3f, Eigen::Vector3f> operator()( const Eigen::Vector2i &idxs) const { return thrust::make_pair(points_[idxs[0]], points_[idxs[1]]); } }; struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f *vertices, const int *triangles, const Eigen::Vector3f *vertex_colors, bool has_vertex_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f &default_mesh_color, const ViewControl &view) : vertices_(vertices), triangles_(triangles), vertex_colors_(vertex_colors), has_vertex_colors_(has_vertex_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view){}; const Eigen::Vector3f *vertices_; const int *triangles_; const Eigen::Vector3f *vertex_colors_; const bool has_vertex_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()( size_t k) const { size_t vi = triangles_[k]; const auto &vertex = vertices_[vi]; Eigen::Vector4f color_tmp; color_tmp[3] = 1.0; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetXPercentage(vertex(0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetYPercentage(vertex(1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetZPercentage(vertex(2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_vertex_colors_) { color_tmp.head<3>() = vertex_colors_[vi]; break; } case RenderOption::MeshColorOption::Default: default: color_tmp.head<3>() = default_mesh_color_; break; } return thrust::make_tuple(vertex, color_tmp); } }; struct copy_voxelgrid_line_functor { copy_voxelgrid_line_functor(const Eigen::Vector3f *vertices, const geometry::Voxel *voxels, bool has_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f &default_mesh_color, const ViewControl &view) : vertices_(vertices), voxels_(voxels), has_colors_(has_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view){}; const Eigen::Vector3f *vertices_; const geometry::Voxel *voxels_; const bool 
has_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()( size_t idx) const { int i = idx / (12 * 2); int jk = idx % (12 * 2); int j = jk / 2; int k = jk % 2; // Voxel color (applied to all points) Eigen::Vector4f voxel_color; voxel_color[3] = 1.0; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: voxel_color.head<3>() = GetColorMapColor(view_.GetBoundingBox().GetXPercentage( vertices_[i * 8](0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: voxel_color.head<3>() = GetColorMapColor(view_.GetBoundingBox().GetYPercentage( vertices_[i * 8](1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: voxel_color.head<3>() = GetColorMapColor(view_.GetBoundingBox().GetZPercentage( vertices_[i * 8](2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_colors_) { voxel_color.head<3>() = voxels_[i].color_; break; } case RenderOption::MeshColorOption::Default: default: voxel_color.head<3>() = default_mesh_color_; break; } return thrust::make_tuple( vertices_[i * 8 + cuboid_lines_vertex_indices[j][k]], voxel_color); } }; struct copy_distance_voxel_functor { copy_distance_voxel_functor(float voxel_size, int resolution, const Eigen::Vector3f& origin, float distance_max) : voxel_size_(voxel_size), resolution_(resolution), origin_(origin), distance_max_(distance_max){}; const float voxel_size_; const int resolution_; const Eigen::Vector3f origin_; const float distance_max_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(const thrust::tuple<size_t, geometry::DistanceVoxel>& kv) const { int idx = thrust::get<0>(kv); geometry::DistanceVoxel v = thrust::get<1>(kv); int res2 = resolution_ * resolution_; int x = idx / res2; int yz = idx % res2; int y = yz / resolution_; int z = yz % resolution_; // Voxel color (applied to all points) Eigen::Vector4f voxel_color = Eigen::Vector4f::Ones(); int h_res = resolution_ / 2; Eigen::Vector3f pt = (Eigen::Vector3i(x - h_res, y - h_res, z - h_res).cast<float>() + Eigen::Vector3f::Constant(0.5)) * voxel_size_ - origin_; voxel_color[3] = 1.0 - min(v.distance_, distance_max_) / distance_max_; return thrust::make_tuple(pt, voxel_color); } }; struct alpha_greater_functor { __device__ bool operator() (const thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>& lhs, const thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>& rhs) const { return thrust::get<1>(lhs)[3] > thrust::get<1>(rhs)[3]; } }; } // namespace bool SimpleShader::Compile() { if (CompileShaders(simple_vertex_shader, NULL, simple_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_color_ = glGetAttribLocation(program_, "vertex_color"); MVP_ = glGetUniformLocation(program_, "MVP"); return true; } void SimpleShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool SimpleShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. 
If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace InvalidateGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_color_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector4f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_color_buffer_, hipGraphicsMapFlagsNone)); Eigen::Vector3f *raw_points_ptr; Eigen::Vector4f *raw_colors_ptr; size_t n_bytes; cudaSafeCall(hipGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(hipGraphicsResourceGetMappedPointer( (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsResourceGetMappedPointer( (void **)&raw_colors_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector4f> dev_colors_ptr = thrust::device_pointer_cast(raw_colors_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_colors_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; } bool SimpleShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_color_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glVertexAttribPointer(vertex_color_, 4, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_color_); return true; } void SimpleShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(hipGraphicsUnregisterResource( cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsUnregisterResource( cuda_graphics_resources_[1])); } glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_color_buffer_); bound_ = false; } } bool SimpleShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glPointSize(GLfloat(option.point_size_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl 
&view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } copy_pointcloud_functor func(pointcloud.HasColors(), option.point_color_option_, view); if (pointcloud.HasColors()) { thrust::transform( make_tuple_begin(pointcloud.points_, pointcloud.colors_), make_tuple_end(pointcloud.points_, pointcloud.colors_), make_tuple_iterator(points, colors), func); } else { thrust::transform( make_tuple_iterator(pointcloud.points_.begin(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Zero())), make_tuple_iterator(pointcloud.points_.end(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Zero())), make_tuple_iterator(points, colors), func); } draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t SimpleShaderForPointCloud::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool SimpleShaderForLineSet::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::LineSet) { PrintShaderWarning("Rendering type is not geometry::LineSet."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForLineSet::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::LineSet) { PrintShaderWarning("Rendering type is not geometry::LineSet."); return false; } const geometry::LineSet<3> &lineset = (const geometry::LineSet<3> &)geometry; if (lineset.HasLines() == false) { PrintShaderWarning("Binding failed with empty geometry::LineSet."); return false; } utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(lineset.lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(lineset.points_.data())); thrust::transform(lineset.lines_.begin(), lineset.lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp( thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(lineset.colors_.data()), lineset.HasColors()); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(lineset.lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(lineset.lines_.size() * 2); return true; } size_t SimpleShaderForLineSet::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::LineSet<3> &)geometry).lines_.size() * 2; } bool SimpleShaderForGraphNode::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } glPointSize(GLfloat(option.point_size_)); 
glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForGraphNode::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry; if (graph.HasPoints() == false) { PrintShaderWarning("Binding failed with empty graph."); return false; } copy_pointcloud_functor func(graph.HasColors(), option.point_color_option_, view); if (graph.HasNodeColors()) { thrust::transform(make_tuple_begin(graph.points_, graph.node_colors_), make_tuple_end(graph.points_, graph.node_colors_), make_tuple_iterator(points, colors), func); } else { thrust::transform( make_tuple_iterator(graph.points_.begin(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Ones())), make_tuple_iterator(graph.points_.end(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Ones())), make_tuple_iterator(points, colors), func); } draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(graph.points_.size()); return true; } size_t SimpleShaderForGraphNode::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::Graph<3> &)geometry).points_.size(); } bool SimpleShaderForGraphEdge::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForGraphEdge::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry; if (graph.HasLines() == false) { PrintShaderWarning("Binding failed with empty geometry::Graph."); return false; } utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(graph.lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(graph.points_.data())); thrust::transform(graph.lines_.begin(), graph.lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp(thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(graph.colors_.data()), graph.HasColors()); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(graph.lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(graph.lines_.size() * 2); return true; } size_t SimpleShaderForGraphEdge::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::Graph<3> &)geometry).lines_.size() * 2; } bool SimpleShaderForAxisAlignedBoundingBox::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != 
geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForAxisAlignedBoundingBox::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(lineset->lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(lineset->points_.data())); thrust::transform(lineset->lines_.begin(), lineset->lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp( thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(lineset->colors_.data()), lineset->HasColors()); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(lineset->lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(lineset->lines_.size() * 2); return true; } size_t SimpleShaderForAxisAlignedBoundingBox::GetDataSize( const geometry::Geometry &geometry) const { auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); return lineset->lines_.size() * 2; } bool SimpleShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool SimpleShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } copy_trianglemesh_functor func( thrust::raw_pointer_cast(mesh.vertices_.data()), (int *)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.vertex_colors_.data()), mesh.HasVertexColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, colors), func); 
draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t SimpleShaderForTriangleMesh::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } bool SimpleShaderForVoxelGridLine::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } glDisable(GL_CULL_FACE); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForVoxelGridLine::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } const geometry::VoxelGrid &voxel_grid = (const geometry::VoxelGrid &)geometry; if (voxel_grid.HasVoxels() == false) { PrintShaderWarning("Binding failed with empty voxel grid."); return false; } utility::device_vector<Eigen::Vector3f> vertices( voxel_grid.voxels_values_.size() * 8); thrust::tiled_range< thrust::counting_iterator<size_t>> irange(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(8), voxel_grid.voxels_values_.size()); auto gfunc = geometry::get_grid_index_functor<geometry::Voxel, Eigen::Vector3i>(); auto begin = thrust::make_transform_iterator(voxel_grid.voxels_values_.begin(), gfunc); thrust::repeated_range<decltype(begin)> vrange(begin, thrust::make_transform_iterator(voxel_grid.voxels_values_.end(), gfunc), 8); geometry::compute_voxel_vertices_functor<Eigen::Vector3i> func1(voxel_grid.origin_, voxel_grid.voxel_size_); thrust::transform(make_tuple_begin(irange, vrange), make_tuple_end(irange, vrange), vertices.begin(), func1); size_t n_out = voxel_grid.voxels_values_.size() * 12 * 2; copy_voxelgrid_line_functor func2( thrust::raw_pointer_cast(vertices.data()), thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.HasColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_out), make_tuple_iterator(points, colors), func2); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(n_out); return true; } size_t SimpleShaderForVoxelGridLine::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::VoxelGrid &)geometry).voxels_values_.size() * 12 * 2; } bool SimpleShaderForDistanceTransform::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::DistanceTransform) { PrintShaderWarning("Rendering type is not geometry::DistanceTransform."); return false; } glPointSize(GLfloat(option.point_size_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); return true; } bool SimpleShaderForDistanceTransform::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != 
geometry::Geometry::GeometryType::DistanceTransform) { PrintShaderWarning("Rendering type is not geometry::DistanceTransform."); return false; } const geometry::DistanceTransform &dist_trans = (const geometry::DistanceTransform &)geometry; if (dist_trans.IsEmpty()) { PrintShaderWarning("Binding failed with empty distance transform."); return false; } size_t n_out = dist_trans.voxels_.size(); copy_distance_voxel_functor func(dist_trans.voxel_size_, dist_trans.resolution_, dist_trans.origin_, dist_trans.voxel_size_ * dist_trans.resolution_ * 0.1); thrust::transform(make_tuple_iterator(thrust::make_counting_iterator<size_t>(0), dist_trans.voxels_.begin()), make_tuple_iterator(thrust::make_counting_iterator(n_out), dist_trans.voxels_.end()), make_tuple_iterator(points, colors), func); auto tp_begin = make_tuple_iterator(points, colors); thrust::sort(tp_begin, tp_begin + n_out, alpha_greater_functor()); draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(n_out); return true; } size_t SimpleShaderForDistanceTransform::GetDataSize( const geometry::Geometry &geometry) const { int res = ((const geometry::DistanceTransform &)geometry).resolution_; return res * res * res; }
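BindGeometry above follows the standard CUDA-OpenGL interop recipe: register the GL buffer, map it, fetch the device pointer, wrap it in a thrust::device_ptr, and let thrust write vertex data straight into the VBO before unmapping. Below is a minimal standalone sketch of that recipe, written with the CUDA spellings of the hip* interop calls used above (hypothetical function name, fill value chosen arbitrarily):

#include <cuda_gl_interop.h>
#include <cuda_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>

void fill_gl_buffer_with_thrust(GLuint vbo, size_t num_vertices)
{
    cudaGraphicsResource_t resource = nullptr;
    cudaGraphicsGLRegisterBuffer(&resource, vbo, cudaGraphicsMapFlagsNone);

    float3 *raw_ptr = nullptr;
    size_t n_bytes = 0;
    cudaGraphicsMapResources(1, &resource);
    cudaGraphicsResourceGetMappedPointer((void **)&raw_ptr, &n_bytes, resource);

    // Wrap the mapped pointer so thrust algorithms can target the VBO directly.
    thrust::device_ptr<float3> dev_ptr = thrust::device_pointer_cast(raw_ptr);
    thrust::fill(dev_ptr, dev_ptr + num_vertices, make_float3(0.f, 0.f, 0.f));

    cudaGraphicsUnmapResources(1, &resource);   // hand the buffer back to OpenGL
    cudaGraphicsUnregisterResource(resource);
}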
5e1b61351c6b242e5a843d73e0b561976374902e.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include <cuda_runtime.h> #include <thrust/iterator/constant_iterator.h> #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/graph.h" #include "cupoch/geometry/lineset.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/geometry/distancetransform.h" #include "cupoch/geometry/geometry_functor.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/utility/platform.h" #include "cupoch/utility/range.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/shader/simple_shader.h" #include "cupoch/visualization/utility/color_map.h" #include "cupoch/visualization/visualizer/render_option.h" using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { // Vertex indices of 12 lines in a cuboid __constant__ int cuboid_lines_vertex_indices[12][2] = { {0, 1}, {0, 2}, {0, 4}, {3, 1}, {3, 2}, {3, 7}, {5, 1}, {5, 4}, {5, 7}, {6, 2}, {6, 4}, {6, 7}, }; struct copy_pointcloud_functor { copy_pointcloud_functor(bool has_colors, RenderOption::PointColorOption color_option, const ViewControl &view) : has_colors_(has_colors), color_option_(color_option), view_(view){}; const bool has_colors_; const RenderOption::PointColorOption color_option_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()( const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> &pt_cl) { const Eigen::Vector3f &point = thrust::get<0>(pt_cl); const Eigen::Vector3f &color = thrust::get<1>(pt_cl); Eigen::Vector4f color_tmp; color_tmp[3] = 1.0; switch (color_option_) { case RenderOption::PointColorOption::XCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetXPercentage(point(0)), colormap_option_); break; case RenderOption::PointColorOption::YCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetYPercentage(point(1)), colormap_option_); break; case RenderOption::PointColorOption::ZCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetZPercentage(point(2)), colormap_option_); break; case RenderOption::PointColorOption::Color: case RenderOption::PointColorOption::Default: default: if (has_colors_) { color_tmp.head<3>() = color; } else { color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetZPercentage(point(2)), 
colormap_option_); } break; } return thrust::make_tuple(point, color_tmp); } }; struct copy_lineset_functor { copy_lineset_functor( const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords, const Eigen::Vector3f *line_colors, bool has_colors) : line_coords_(line_coords), line_colors_(line_colors), has_colors_(has_colors){}; const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords_; const Eigen::Vector3f *line_colors_; const bool has_colors_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()( size_t k) const { int i = k / 2; int j = k % 2; Eigen::Vector4f color_tmp; color_tmp[3] = 1.0; color_tmp.head<3>() = (has_colors_) ? line_colors_[i] : Eigen::Vector3f::Ones(); if (j == 0) { return thrust::make_tuple(line_coords_[i].first, color_tmp); } else { return thrust::make_tuple(line_coords_[i].second, color_tmp); } } }; struct line_coordinates_functor { line_coordinates_functor(const Eigen::Vector3f *points) : points_(points){}; const Eigen::Vector3f *points_; __device__ thrust::pair<Eigen::Vector3f, Eigen::Vector3f> operator()( const Eigen::Vector2i &idxs) const { return thrust::make_pair(points_[idxs[0]], points_[idxs[1]]); } }; struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f *vertices, const int *triangles, const Eigen::Vector3f *vertex_colors, bool has_vertex_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f &default_mesh_color, const ViewControl &view) : vertices_(vertices), triangles_(triangles), vertex_colors_(vertex_colors), has_vertex_colors_(has_vertex_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view){}; const Eigen::Vector3f *vertices_; const int *triangles_; const Eigen::Vector3f *vertex_colors_; const bool has_vertex_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()( size_t k) const { size_t vi = triangles_[k]; const auto &vertex = vertices_[vi]; Eigen::Vector4f color_tmp; color_tmp[3] = 1.0; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetXPercentage(vertex(0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetYPercentage(vertex(1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: color_tmp.head<3>() = GetColorMapColor( view_.GetBoundingBox().GetZPercentage(vertex(2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_vertex_colors_) { color_tmp.head<3>() = vertex_colors_[vi]; break; } case RenderOption::MeshColorOption::Default: default: color_tmp.head<3>() = default_mesh_color_; break; } return thrust::make_tuple(vertex, color_tmp); } }; struct copy_voxelgrid_line_functor { copy_voxelgrid_line_functor(const Eigen::Vector3f *vertices, const geometry::Voxel *voxels, bool has_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f &default_mesh_color, const ViewControl &view) : vertices_(vertices), voxels_(voxels), has_colors_(has_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view){}; const Eigen::Vector3f *vertices_; const geometry::Voxel *voxels_; const bool has_colors_; const RenderOption::MeshColorOption color_option_; const 
Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()( size_t idx) const { int i = idx / (12 * 2); int jk = idx % (12 * 2); int j = jk / 2; int k = jk % 2; // Voxel color (applied to all points) Eigen::Vector4f voxel_color; voxel_color[3] = 1.0; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: voxel_color.head<3>() = GetColorMapColor(view_.GetBoundingBox().GetXPercentage( vertices_[i * 8](0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: voxel_color.head<3>() = GetColorMapColor(view_.GetBoundingBox().GetYPercentage( vertices_[i * 8](1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: voxel_color.head<3>() = GetColorMapColor(view_.GetBoundingBox().GetZPercentage( vertices_[i * 8](2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_colors_) { voxel_color.head<3>() = voxels_[i].color_; break; } case RenderOption::MeshColorOption::Default: default: voxel_color.head<3>() = default_mesh_color_; break; } return thrust::make_tuple( vertices_[i * 8 + cuboid_lines_vertex_indices[j][k]], voxel_color); } }; struct copy_distance_voxel_functor { copy_distance_voxel_functor(float voxel_size, int resolution, const Eigen::Vector3f& origin, float distance_max) : voxel_size_(voxel_size), resolution_(resolution), origin_(origin), distance_max_(distance_max){}; const float voxel_size_; const int resolution_; const Eigen::Vector3f origin_; const float distance_max_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(const thrust::tuple<size_t, geometry::DistanceVoxel>& kv) const { int idx = thrust::get<0>(kv); geometry::DistanceVoxel v = thrust::get<1>(kv); int res2 = resolution_ * resolution_; int x = idx / res2; int yz = idx % res2; int y = yz / resolution_; int z = yz % resolution_; // Voxel color (applied to all points) Eigen::Vector4f voxel_color = Eigen::Vector4f::Ones(); int h_res = resolution_ / 2; Eigen::Vector3f pt = (Eigen::Vector3i(x - h_res, y - h_res, z - h_res).cast<float>() + Eigen::Vector3f::Constant(0.5)) * voxel_size_ - origin_; voxel_color[3] = 1.0 - min(v.distance_, distance_max_) / distance_max_; return thrust::make_tuple(pt, voxel_color); } }; struct alpha_greater_functor { __device__ bool operator() (const thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>& lhs, const thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>& rhs) const { return thrust::get<1>(lhs)[3] > thrust::get<1>(rhs)[3]; } }; } // namespace bool SimpleShader::Compile() { if (CompileShaders(simple_vertex_shader, NULL, simple_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_color_ = glGetAttribLocation(program_, "vertex_color"); MVP_ = glGetUniformLocation(program_, "MVP"); return true; } void SimpleShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool SimpleShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. 
If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace InvalidateGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_color_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector4f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_color_buffer_, cudaGraphicsMapFlagsNone)); Eigen::Vector3f *raw_points_ptr; Eigen::Vector4f *raw_colors_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_colors_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector4f> dev_colors_ptr = thrust::device_pointer_cast(raw_colors_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_colors_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; } bool SimpleShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_color_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glVertexAttribPointer(vertex_color_, 4, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_color_); return true; } void SimpleShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[1])); } glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_color_buffer_); bound_ = false; } } bool SimpleShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glPointSize(GLfloat(option.point_size_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const 
ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } copy_pointcloud_functor func(pointcloud.HasColors(), option.point_color_option_, view); if (pointcloud.HasColors()) { thrust::transform( make_tuple_begin(pointcloud.points_, pointcloud.colors_), make_tuple_end(pointcloud.points_, pointcloud.colors_), make_tuple_iterator(points, colors), func); } else { thrust::transform( make_tuple_iterator(pointcloud.points_.begin(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Zero())), make_tuple_iterator(pointcloud.points_.end(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Zero())), make_tuple_iterator(points, colors), func); } draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t SimpleShaderForPointCloud::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool SimpleShaderForLineSet::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::LineSet) { PrintShaderWarning("Rendering type is not geometry::LineSet."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForLineSet::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::LineSet) { PrintShaderWarning("Rendering type is not geometry::LineSet."); return false; } const geometry::LineSet<3> &lineset = (const geometry::LineSet<3> &)geometry; if (lineset.HasLines() == false) { PrintShaderWarning("Binding failed with empty geometry::LineSet."); return false; } utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(lineset.lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(lineset.points_.data())); thrust::transform(lineset.lines_.begin(), lineset.lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp( thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(lineset.colors_.data()), lineset.HasColors()); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(lineset.lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(lineset.lines_.size() * 2); return true; } size_t SimpleShaderForLineSet::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::LineSet<3> &)geometry).lines_.size() * 2; } bool SimpleShaderForGraphNode::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } glPointSize(GLfloat(option.point_size_)); 
glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForGraphNode::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry; if (graph.HasPoints() == false) { PrintShaderWarning("Binding failed with empty graph."); return false; } copy_pointcloud_functor func(graph.HasColors(), option.point_color_option_, view); if (graph.HasNodeColors()) { thrust::transform(make_tuple_begin(graph.points_, graph.node_colors_), make_tuple_end(graph.points_, graph.node_colors_), make_tuple_iterator(points, colors), func); } else { thrust::transform( make_tuple_iterator(graph.points_.begin(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Ones())), make_tuple_iterator(graph.points_.end(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Ones())), make_tuple_iterator(points, colors), func); } draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(graph.points_.size()); return true; } size_t SimpleShaderForGraphNode::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::Graph<3> &)geometry).points_.size(); } bool SimpleShaderForGraphEdge::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForGraphEdge::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry; if (graph.HasLines() == false) { PrintShaderWarning("Binding failed with empty geometry::Graph."); return false; } utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(graph.lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(graph.points_.data())); thrust::transform(graph.lines_.begin(), graph.lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp(thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(graph.colors_.data()), graph.HasColors()); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(graph.lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(graph.lines_.size() * 2); return true; } size_t SimpleShaderForGraphEdge::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::Graph<3> &)geometry).lines_.size() * 2; } bool SimpleShaderForAxisAlignedBoundingBox::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != 
geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForAxisAlignedBoundingBox::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(lineset->lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(lineset->points_.data())); thrust::transform(lineset->lines_.begin(), lineset->lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp( thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(lineset->colors_.data()), lineset->HasColors()); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(lineset->lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(lineset->lines_.size() * 2); return true; } size_t SimpleShaderForAxisAlignedBoundingBox::GetDataSize( const geometry::Geometry &geometry) const { auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); return lineset->lines_.size() * 2; } bool SimpleShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool SimpleShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } copy_trianglemesh_functor func( thrust::raw_pointer_cast(mesh.vertices_.data()), (int *)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.vertex_colors_.data()), mesh.HasVertexColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, colors), func); 
draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t SimpleShaderForTriangleMesh::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } bool SimpleShaderForVoxelGridLine::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } glDisable(GL_CULL_FACE); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForVoxelGridLine::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } const geometry::VoxelGrid &voxel_grid = (const geometry::VoxelGrid &)geometry; if (voxel_grid.HasVoxels() == false) { PrintShaderWarning("Binding failed with empty voxel grid."); return false; } utility::device_vector<Eigen::Vector3f> vertices( voxel_grid.voxels_values_.size() * 8); thrust::tiled_range< thrust::counting_iterator<size_t>> irange(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(8), voxel_grid.voxels_values_.size()); auto gfunc = geometry::get_grid_index_functor<geometry::Voxel, Eigen::Vector3i>(); auto begin = thrust::make_transform_iterator(voxel_grid.voxels_values_.begin(), gfunc); thrust::repeated_range<decltype(begin)> vrange(begin, thrust::make_transform_iterator(voxel_grid.voxels_values_.end(), gfunc), 8); geometry::compute_voxel_vertices_functor<Eigen::Vector3i> func1(voxel_grid.origin_, voxel_grid.voxel_size_); thrust::transform(make_tuple_begin(irange, vrange), make_tuple_end(irange, vrange), vertices.begin(), func1); size_t n_out = voxel_grid.voxels_values_.size() * 12 * 2; copy_voxelgrid_line_functor func2( thrust::raw_pointer_cast(vertices.data()), thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.HasColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_out), make_tuple_iterator(points, colors), func2); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(n_out); return true; } size_t SimpleShaderForVoxelGridLine::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::VoxelGrid &)geometry).voxels_values_.size() * 12 * 2; } bool SimpleShaderForDistanceTransform::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::DistanceTransform) { PrintShaderWarning("Rendering type is not geometry::DistanceTransform."); return false; } glPointSize(GLfloat(option.point_size_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); return true; } bool SimpleShaderForDistanceTransform::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector4f> &colors) { if (geometry.GetGeometryType() != 
geometry::Geometry::GeometryType::DistanceTransform) { PrintShaderWarning("Rendering type is not geometry::DistanceTransform."); return false; } const geometry::DistanceTransform &dist_trans = (const geometry::DistanceTransform &)geometry; if (dist_trans.IsEmpty()) { PrintShaderWarning("Binding failed with empty distance transform."); return false; } size_t n_out = dist_trans.voxels_.size(); copy_distance_voxel_functor func(dist_trans.voxel_size_, dist_trans.resolution_, dist_trans.origin_, dist_trans.voxel_size_ * dist_trans.resolution_ * 0.1); thrust::transform(make_tuple_iterator(thrust::make_counting_iterator<size_t>(0), dist_trans.voxels_.begin()), make_tuple_iterator(thrust::make_counting_iterator(n_out), dist_trans.voxels_.end()), make_tuple_iterator(points, colors), func); auto tp_begin = make_tuple_iterator(points, colors); thrust::sort(tp_begin, tp_begin + n_out, alpha_greater_functor()); draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(n_out); return true; } size_t SimpleShaderForDistanceTransform::GetDataSize( const geometry::Geometry &geometry) const { int res = ((const geometry::DistanceTransform &)geometry).resolution_; return res * res * res; }
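// ---------------------------------------------------------------------------
// Minimal sketch of the CUDA-OpenGL interop sequence that BindGeometry above
// relies on: register the GL buffer object, map it, fetch a raw device
// pointer, fill it from the device (thrust::transform in the shader code),
// then unmap before GL draws from it.  The fill_points kernel, the buffer id
// vbo and the element count n are hypothetical placeholders, not cupoch API,
// and error handling is reduced to a printf.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <cstdio>

#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err__ = (call);                                           \
        if (err__ != cudaSuccess)                                             \
            std::printf("CUDA error %s at %s:%d\n",                           \
                        cudaGetErrorString(err__), __FILE__, __LINE__);       \
    } while (0)

// Hypothetical kernel standing in for the thrust::transform fills above.
__global__ void fill_points(float3 *pts, size_t n) {
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) pts[i] = make_float3(float(i), 0.0f, 0.0f);
}

// vbo is an already created GL_ARRAY_BUFFER large enough for n float3 vertices.
void fill_gl_buffer_from_cuda(GLuint vbo, size_t n) {
    cudaGraphicsResource_t res = nullptr;

    // 1. Register the GL buffer with CUDA (done once per buffer in BindGeometry).
    CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&res, vbo, cudaGraphicsMapFlagsNone));

    // 2. Map it and obtain a raw device pointer.
    float3 *d_pts = nullptr;
    size_t n_bytes = 0;
    CUDA_CHECK(cudaGraphicsMapResources(1, &res));
    CUDA_CHECK(cudaGraphicsResourceGetMappedPointer((void **)&d_pts, &n_bytes, res));

    // 3. Write into the buffer from the device.
    unsigned blocks = (unsigned)((n + 255) / 256);
    fill_points<<<blocks, 256>>>(d_pts, n);

    // 4. Unmap so GL can use the buffer, and unregister when it is no longer needed.
    CUDA_CHECK(cudaGraphicsUnmapResources(1, &res));
    CUDA_CHECK(cudaGraphicsUnregisterResource(res));
}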
063b895bdfaa615746f8a4777ae3ae62255fe20c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;

    for (unsigned int i = idx; i < width; i += numThreads) {
        tgtMat[width*i + i] = mat[width*i + i] * vec[i];
    }
}
063b895bdfaa615746f8a4777ae3ae62255fe20c.cu
#include "includes.h"

__global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;

    for (unsigned int i = idx; i < width; i += numThreads) {
        tgtMat[width*i + i] = mat[width*i + i] * vec[i];
    }
}
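// ---------------------------------------------------------------------------
// kMultDiagonal walks the main diagonal with a grid-stride loop, so any launch
// shape covers the whole matrix.  A minimal host driver is sketched below; the
// matrix size, fill values and launch shape are arbitrary assumptions, while
// the kernel body is copied verbatim from the file above so the sketch is
// self-contained.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

__global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;
    for (unsigned int i = idx; i < width; i += numThreads) {
        tgtMat[width*i + i] = mat[width*i + i] * vec[i];
    }
}

int main() {
    const unsigned int width = 1024;
    std::vector<float> h_mat(width * width, 1.0f), h_vec(width, 2.0f), h_tgt(width * width, 0.0f);

    float *d_mat, *d_vec, *d_tgt;
    cudaMalloc(&d_mat, width * width * sizeof(float));
    cudaMalloc(&d_vec, width * sizeof(float));
    cudaMalloc(&d_tgt, width * width * sizeof(float));
    cudaMemcpy(d_mat, h_mat.data(), width * width * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_vec, h_vec.data(), width * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_tgt, h_tgt.data(), width * width * sizeof(float), cudaMemcpyHostToDevice);

    // The grid-stride loop makes 128 blocks of 256 threads sufficient for any width.
    kMultDiagonal<<<128, 256>>>(d_mat, d_vec, d_tgt, width);
    cudaDeviceSynchronize();

    cudaMemcpy(h_tgt.data(), d_tgt, width * width * sizeof(float), cudaMemcpyDeviceToHost);
    std::printf("tgt[0][0] = %f (expect 2.0)\n", h_tgt[0]);

    cudaFree(d_mat); cudaFree(d_vec); cudaFree(d_tgt);
    return 0;
}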
70d85ac0ad9578f6004c20923f245ca2298ccdb8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <iostream> #include <stdio.h> #include "bloom.h" #include "murmuda3.h" __global__ void cuda_add(uint32_t* cuda_bit_vector, int num_bits, uint32_t* cuda_seeds, int num_seeds, const void* cuda_key, int len) { // For now lets pretend this is just called for one key int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; // Allocate memory on device from kernel for output of hash. This is init // by the decorator extern __shared__ uint32_t out[]; uint32_t bit_index; // Hash them in parallel for (int k = index; k < num_seeds; k+= stride) { _Murmur3_helper(cuda_key, len, cuda_seeds[k], &(out[k])); // Use cuda atomic functions to guarentee it is flipped bit_index = out[k] % num_bits; atomicOr(&(cuda_bit_vector[bit_index / 32]), (uint32_t) 1 << (bit_index % 32)); } } __global__ void cuda_test(uint32_t* cuda_bit_vector, int num_bits, uint32_t* cuda_seeds, int num_seeds, const void* cuda_key, int len, bool * bool_out) { // For now lets pretend this is just called for one key int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; // Allocate memory on device from kernel for output of hash. This is init // by the decorator extern __shared__ bool test_vals[]; test_vals[index] = true; uint32_t out, bit_index; // Hash them in parallel for (int k = index; k < num_seeds; k+= stride) { _Murmur3_helper(cuda_key, len, cuda_seeds[k], &out); bit_index = out % num_bits; test_vals[k] = (cuda_bit_vector[bit_index / 32] & (1 << (bit_index % 32))); } __syncthreads(); for (uint16_t t = blockDim.x / 2; t > 0; t >>= 1) { // I think this requires // a power of two for the grid dim if (index < t) { test_vals[index] = test_vals[index] && test_vals[index + t]; } __syncthreads(); } if (threadIdx.x == 0) { *bool_out = test_vals[index]; } } BloomFilter::BloomFilter(int n_bits, int n_seeds) { num_bits = n_bits; num_int = (num_bits + (sizeof(uint32_t) - 1)) / sizeof(uint32_t); bit_vector = new uint32_t[num_int]; std::fill(bit_vector, bit_vector+num_int, 0); // Allocate the bit vector on the device hipMalloc(&cuda_bit_vector, num_int * sizeof(bit_vector[0])); hipMemcpy(cuda_bit_vector, bit_vector, num_int * sizeof(bit_vector[0]), hipMemcpyHostToDevice); num_seeds = n_seeds; seeds = new uint32_t[num_seeds]; for (int i = 0; i < num_seeds; i++) { seeds[i] = i; } hipMalloc(&cuda_seeds, num_seeds * sizeof(uint32_t)); hipMemcpy(cuda_seeds, seeds, num_seeds * sizeof(uint32_t), hipMemcpyHostToDevice); } void BloomFilter::add(const void * key, int len) { void * cuda_key; hipMalloc(&cuda_key, len); hipMemcpy(cuda_key, key, len, hipMemcpyHostToDevice); int blockSize = num_seeds; int numBlocks = 1; hipLaunchKernelGGL(( cuda_add), dim3(numBlocks), dim3(blockSize), num_seeds * sizeof(uint32_t), 0, cuda_bit_vector, num_bits, cuda_seeds, num_seeds, cuda_key, len); hipDeviceSynchronize(); hipFree(cuda_key); } void BloomFilter::sync() { hipMemcpy(bit_vector, cuda_bit_vector, num_int * sizeof(bit_vector[0]), hipMemcpyDeviceToHost); } bool BloomFilter::test(const void * key, int len) { bool result; bool * cuda_result; hipMalloc(&cuda_result, sizeof(bool)); void * cuda_key; hipMalloc(&cuda_key, len); hipMemcpy(cuda_key, key, len, hipMemcpyHostToDevice); uint32_t blockSize = num_seeds; // Round up to the nearest power of 2 blockSize--; blockSize |= blockSize >> 1; blockSize |= blockSize >> 2; blockSize |= blockSize >> 4; blockSize |= blockSize >> 8; blockSize |= 
blockSize >> 16; blockSize++; int numBlocks = 1; hipLaunchKernelGGL(( cuda_test), dim3(numBlocks), dim3(blockSize), blockSize * sizeof(bool), 0, cuda_bit_vector, num_bits, cuda_seeds, num_seeds, cuda_key, len, cuda_result); hipDeviceSynchronize(); hipMemcpy(&result, cuda_result, sizeof(bool), hipMemcpyDeviceToHost); hipFree(cuda_key); hipFree(cuda_result); return result; }
70d85ac0ad9578f6004c20923f245ca2298ccdb8.cu
#include <algorithm> #include <iostream> #include <stdio.h> #include "bloom.h" #include "murmuda3.h" __global__ void cuda_add(uint32_t* cuda_bit_vector, int num_bits, uint32_t* cuda_seeds, int num_seeds, const void* cuda_key, int len) { // For now lets pretend this is just called for one key int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; // Allocate memory on device from kernel for output of hash. This is init // by the decorator extern __shared__ uint32_t out[]; uint32_t bit_index; // Hash them in parallel for (int k = index; k < num_seeds; k+= stride) { _Murmur3_helper(cuda_key, len, cuda_seeds[k], &(out[k])); // Use cuda atomic functions to guarentee it is flipped bit_index = out[k] % num_bits; atomicOr(&(cuda_bit_vector[bit_index / 32]), (uint32_t) 1 << (bit_index % 32)); } } __global__ void cuda_test(uint32_t* cuda_bit_vector, int num_bits, uint32_t* cuda_seeds, int num_seeds, const void* cuda_key, int len, bool * bool_out) { // For now lets pretend this is just called for one key int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; // Allocate memory on device from kernel for output of hash. This is init // by the decorator extern __shared__ bool test_vals[]; test_vals[index] = true; uint32_t out, bit_index; // Hash them in parallel for (int k = index; k < num_seeds; k+= stride) { _Murmur3_helper(cuda_key, len, cuda_seeds[k], &out); bit_index = out % num_bits; test_vals[k] = (cuda_bit_vector[bit_index / 32] & (1 << (bit_index % 32))); } __syncthreads(); for (uint16_t t = blockDim.x / 2; t > 0; t >>= 1) { // I think this requires // a power of two for the grid dim if (index < t) { test_vals[index] = test_vals[index] && test_vals[index + t]; } __syncthreads(); } if (threadIdx.x == 0) { *bool_out = test_vals[index]; } } BloomFilter::BloomFilter(int n_bits, int n_seeds) { num_bits = n_bits; num_int = (num_bits + (sizeof(uint32_t) - 1)) / sizeof(uint32_t); bit_vector = new uint32_t[num_int]; std::fill(bit_vector, bit_vector+num_int, 0); // Allocate the bit vector on the device cudaMalloc(&cuda_bit_vector, num_int * sizeof(bit_vector[0])); cudaMemcpy(cuda_bit_vector, bit_vector, num_int * sizeof(bit_vector[0]), cudaMemcpyHostToDevice); num_seeds = n_seeds; seeds = new uint32_t[num_seeds]; for (int i = 0; i < num_seeds; i++) { seeds[i] = i; } cudaMalloc(&cuda_seeds, num_seeds * sizeof(uint32_t)); cudaMemcpy(cuda_seeds, seeds, num_seeds * sizeof(uint32_t), cudaMemcpyHostToDevice); } void BloomFilter::add(const void * key, int len) { void * cuda_key; cudaMalloc(&cuda_key, len); cudaMemcpy(cuda_key, key, len, cudaMemcpyHostToDevice); int blockSize = num_seeds; int numBlocks = 1; cuda_add<<<numBlocks, blockSize, num_seeds * sizeof(uint32_t)>>>(cuda_bit_vector, num_bits, cuda_seeds, num_seeds, cuda_key, len); cudaDeviceSynchronize(); cudaFree(cuda_key); } void BloomFilter::sync() { cudaMemcpy(bit_vector, cuda_bit_vector, num_int * sizeof(bit_vector[0]), cudaMemcpyDeviceToHost); } bool BloomFilter::test(const void * key, int len) { bool result; bool * cuda_result; cudaMalloc(&cuda_result, sizeof(bool)); void * cuda_key; cudaMalloc(&cuda_key, len); cudaMemcpy(cuda_key, key, len, cudaMemcpyHostToDevice); uint32_t blockSize = num_seeds; // Round up to the nearest power of 2 blockSize--; blockSize |= blockSize >> 1; blockSize |= blockSize >> 2; blockSize |= blockSize >> 4; blockSize |= blockSize >> 8; blockSize |= blockSize >> 16; blockSize++; int numBlocks = 1; cuda_test<<<numBlocks, blockSize, blockSize * 
sizeof(bool)>>>(cuda_bit_vector, num_bits, cuda_seeds, num_seeds, cuda_key, len, cuda_result); cudaDeviceSynchronize(); cudaMemcpy(&result, cuda_result, sizeof(bool), cudaMemcpyDeviceToHost); cudaFree(cuda_key); cudaFree(cuda_result); return result; }
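// ---------------------------------------------------------------------------
// Plausible usage of the BloomFilter class defined above, assuming bloom.h
// declares exactly the members shown (BloomFilter(int n_bits, int n_seeds),
// add, sync, test).  The bit count, seed count and string keys are arbitrary
// example values.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cstring>
#include "bloom.h"   // assumed header for the BloomFilter class defined above

int main() {
    // 1 << 20 bits and 8 hash seeds; both numbers are arbitrary for the example.
    BloomFilter filter(1 << 20, 8);

    const char *present = "inserted-key";
    const char *absent  = "never-inserted";

    filter.add(present, (int)std::strlen(present));
    filter.sync();   // copy the device bit vector back into the host mirror

    std::printf("test(present) = %d (expect 1)\n",
                (int)filter.test(present, (int)std::strlen(present)));
    std::printf("test(absent)  = %d (0, or rarely 1: a false positive)\n",
                (int)filter.test(absent, (int)std::strlen(absent)));
    return 0;
}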
e59b81bdbcc2562bee9cd4ddd005e29eb580e361.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrix_t.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int *data = NULL;
            hipMalloc(&data, XSIZE*YSIZE);
            int *out = NULL;
            hipMalloc(&out, XSIZE*YSIZE);
            int *rows = NULL;
            hipMalloc(&rows, XSIZE*YSIZE);
            int *cols = NULL;
            hipMalloc(&cols, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( matrix_t), dim3(gridBlock), dim3(threadBlock), 0, 0, data, out, rows, cols);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( matrix_t), dim3(gridBlock), dim3(threadBlock), 0, 0, data, out, rows, cols);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( matrix_t), dim3(gridBlock), dim3(threadBlock), 0, 0, data, out, rows, cols);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
e59b81bdbcc2562bee9cd4ddd005e29eb580e361.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrix_t.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int *data = NULL;
            cudaMalloc(&data, XSIZE*YSIZE);
            int *out = NULL;
            cudaMalloc(&out, XSIZE*YSIZE);
            int *rows = NULL;
            cudaMalloc(&rows, XSIZE*YSIZE);
            int *cols = NULL;
            cudaMalloc(&cols, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            matrix_t<<<gridBlock,threadBlock>>>(data, out, rows, cols);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                matrix_t<<<gridBlock,threadBlock>>>(data, out, rows, cols);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                matrix_t<<<gridBlock,threadBlock>>>(data, out, rows, cols);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
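// ---------------------------------------------------------------------------
// Kernel launches are asynchronous and the benchmark above stops steady_clock
// without synchronizing after the 1000-launch loop, so the reported time is
// dominated by launch overhead rather than kernel execution.  The sketch below
// measures the same loop with CUDA events, which only stop the clock once all
// launches have finished.  It assumes matrix_t is visible in the translation
// unit (the benchmark gets it via #include "matrix_t.cu"); the helper name
// time_kernel_1000_us is made up.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

float time_kernel_1000_us(dim3 gridBlock, dim3 threadBlock,
                          int *data, int *out, int *rows, int *cols) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i)
        matrix_t<<<gridBlock, threadBlock>>>(data, out, rows, cols);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // wait until all 1000 launches have completed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms * 1000.0f;          // microseconds, matching the original output unit
}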
12e0a037ac5a3de095b01fb38a415e4db02a862f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> // CUDA runtime #include <hip/hip_runtime.h> #include <rocblas.h> #include "dense_help_func.hpp" // cal offset from row col and ld , in row-major matrix, ld is the width of the matrix #define OFFSET(row, col, ld) ((row) * (ld) + (col)) // transfer float4 #define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0]) template < const int BLOCK_SIZE_M, // width of block of C that each thread block calculate const int BLOCK_SIZE_K, // height of block of A that each thread block load into shared memory const int BLOCK_SIZE_N, // height of block of C that each thread block calculate const int THREAD_SIZE_Y, // height of block of C that each thread calculate const int THREAD_SIZE_X, // width of block of C that each thread calculate const bool ENABLE_DOUBLE_BUFFER // whether enable double buffering or not > __global__ void MatrixMulCUDAEncoding( float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, const int K, const int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // size of thread block const int bszx = BLOCK_SIZE_N / THREAD_SIZE_X; const int bszy = BLOCK_SIZE_M / THREAD_SIZE_Y; const int THREAD_NUM_PER_BLOCK = bszy * bszx; // thread id const int tid = ty * bszx + tx; // shared memory __shared__ float As[BLOCK_SIZE_M][BLOCK_SIZE_K]; // avoid bank conflict __shared__ float Bs[BLOCK_SIZE_K][BLOCK_SIZE_N]; int ROW_PTR[7] = {1,4,3,5,7,0,2}; // add encoding // registers for C float accum[THREAD_SIZE_Y][THREAD_SIZE_X] = {0}; // registers for A and B float frag_a[THREAD_SIZE_Y]; float frag_b[THREAD_SIZE_X]; // threads needed to load one row of tile // / 4 is because float4 is used const int A_TILE_THREAD_PER_ROW = BLOCK_SIZE_K / 4; const int B_TILE_THREAD_PER_ROW = BLOCK_SIZE_N / 4; // row number and col number that needs to be loaded by this thread const int A_TILE_ROW_START = tid / A_TILE_THREAD_PER_ROW; const int B_TILE_ROW_START = tid / B_TILE_THREAD_PER_ROW; const int A_TILE_COL = tid % A_TILE_THREAD_PER_ROW * 4; const int B_TILE_COL = tid % B_TILE_THREAD_PER_ROW * 4; // row stride that thread uses to load multiple rows of a tile const int A_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / A_TILE_THREAD_PER_ROW; const int B_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / B_TILE_THREAD_PER_ROW; // can not unroll since K can not be determined at this point for (int tile_idx = 0 ; tile_idx < K ; tile_idx += BLOCK_SIZE_K) { // load A from global memory to shared memory #pragma unroll for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) { FETCH_FLOAT4(As[A_TILE_ROW_START + i][A_TILE_COL]) = FETCH_FLOAT4(A[OFFSET( BLOCK_SIZE_M * by + A_TILE_ROW_START + i, // row A_TILE_COL + tile_idx, // col K )]); } // load B from global memory to shared memory #pragma unroll for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) { FETCH_FLOAT4(Bs[B_TILE_ROW_START + i][B_TILE_COL]) = FETCH_FLOAT4(B[OFFSET( tile_idx + B_TILE_ROW_START + i, // row B_TILE_COL + BLOCK_SIZE_N * bx, // col N )]); } __syncthreads(); // compute c #pragma unroll for (int k = 0; k < BLOCK_SIZE_K; ++ k) { // load A from shared memory to register //#pragma unroll //for(int i=0; i<4; i++){ // ROW_PTR[i] = i*BLOCK_SIZE_K+k; //} #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { // add one decoding overhead frag_a[thread_y] = reinterpret_cast<float*>(As)[ROW_PTR[((ty * THREAD_SIZE_Y + thread_y) * BLOCK_SIZE_K + k)%7]]; } // 
load B from shared memory to register #pragma unroll for (int thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) { FETCH_FLOAT4(frag_b[thread_x]) = FETCH_FLOAT4(Bs[k][THREAD_SIZE_X * tx + thread_x]); } #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { #pragma unroll for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) { accum[thread_y][thread_x] += frag_a[thread_y] * frag_b[thread_x]; } } } __syncthreads(); } // store back to C #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { #pragma unroll for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) { C[OFFSET( BLOCK_SIZE_M * by + ty * THREAD_SIZE_Y + thread_y, BLOCK_SIZE_N * bx + tx * THREAD_SIZE_X + thread_x, N)] = accum[thread_y][thread_x]; } } } // TODO add shuffle to enable GPU write back col
12e0a037ac5a3de095b01fb38a415e4db02a862f.cu
#include <stdio.h> #include <stdlib.h> // CUDA runtime #include <cuda_runtime.h> #include <cublas_v2.h> #include "dense_help_func.hpp" // cal offset from row col and ld , in row-major matrix, ld is the width of the matrix #define OFFSET(row, col, ld) ((row) * (ld) + (col)) // transfer float4 #define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0]) template < const int BLOCK_SIZE_M, // width of block of C that each thread block calculate const int BLOCK_SIZE_K, // height of block of A that each thread block load into shared memory const int BLOCK_SIZE_N, // height of block of C that each thread block calculate const int THREAD_SIZE_Y, // height of block of C that each thread calculate const int THREAD_SIZE_X, // width of block of C that each thread calculate const bool ENABLE_DOUBLE_BUFFER // whether enable double buffering or not > __global__ void MatrixMulCUDAEncoding( float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, const int K, const int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // size of thread block const int bszx = BLOCK_SIZE_N / THREAD_SIZE_X; const int bszy = BLOCK_SIZE_M / THREAD_SIZE_Y; const int THREAD_NUM_PER_BLOCK = bszy * bszx; // thread id const int tid = ty * bszx + tx; // shared memory __shared__ float As[BLOCK_SIZE_M][BLOCK_SIZE_K]; // avoid bank conflict __shared__ float Bs[BLOCK_SIZE_K][BLOCK_SIZE_N]; int ROW_PTR[7] = {1,4,3,5,7,0,2}; // add encoding // registers for C float accum[THREAD_SIZE_Y][THREAD_SIZE_X] = {0}; // registers for A and B float frag_a[THREAD_SIZE_Y]; float frag_b[THREAD_SIZE_X]; // threads needed to load one row of tile // / 4 is because float4 is used const int A_TILE_THREAD_PER_ROW = BLOCK_SIZE_K / 4; const int B_TILE_THREAD_PER_ROW = BLOCK_SIZE_N / 4; // row number and col number that needs to be loaded by this thread const int A_TILE_ROW_START = tid / A_TILE_THREAD_PER_ROW; const int B_TILE_ROW_START = tid / B_TILE_THREAD_PER_ROW; const int A_TILE_COL = tid % A_TILE_THREAD_PER_ROW * 4; const int B_TILE_COL = tid % B_TILE_THREAD_PER_ROW * 4; // row stride that thread uses to load multiple rows of a tile const int A_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / A_TILE_THREAD_PER_ROW; const int B_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / B_TILE_THREAD_PER_ROW; // can not unroll since K can not be determined at this point for (int tile_idx = 0 ; tile_idx < K ; tile_idx += BLOCK_SIZE_K) { // load A from global memory to shared memory #pragma unroll for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) { FETCH_FLOAT4(As[A_TILE_ROW_START + i][A_TILE_COL]) = FETCH_FLOAT4(A[OFFSET( BLOCK_SIZE_M * by + A_TILE_ROW_START + i, // row A_TILE_COL + tile_idx, // col K )]); } // load B from global memory to shared memory #pragma unroll for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) { FETCH_FLOAT4(Bs[B_TILE_ROW_START + i][B_TILE_COL]) = FETCH_FLOAT4(B[OFFSET( tile_idx + B_TILE_ROW_START + i, // row B_TILE_COL + BLOCK_SIZE_N * bx, // col N )]); } __syncthreads(); // compute c #pragma unroll for (int k = 0; k < BLOCK_SIZE_K; ++ k) { // load A from shared memory to register //#pragma unroll //for(int i=0; i<4; i++){ // ROW_PTR[i] = i*BLOCK_SIZE_K+k; //} #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { // add one decoding overhead frag_a[thread_y] = reinterpret_cast<float*>(As)[ROW_PTR[((ty * THREAD_SIZE_Y + thread_y) * BLOCK_SIZE_K + k)%7]]; } // load B from shared memory to register #pragma unroll for (int 
thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) { FETCH_FLOAT4(frag_b[thread_x]) = FETCH_FLOAT4(Bs[k][THREAD_SIZE_X * tx + thread_x]); } #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { #pragma unroll for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) { accum[thread_y][thread_x] += frag_a[thread_y] * frag_b[thread_x]; } } } __syncthreads(); } // store back to C #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { #pragma unroll for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) { C[OFFSET( BLOCK_SIZE_M * by + ty * THREAD_SIZE_Y + thread_y, BLOCK_SIZE_N * bx + tx * THREAD_SIZE_X + thread_x, N)] = accum[thread_y][thread_x]; } } } // TODO add shuffle to enable GPU write back col
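// ---------------------------------------------------------------------------
// The kernel above derives its block shape from the template parameters
// (bszx = BLOCK_SIZE_N / THREAD_SIZE_X threads in x, bszy = BLOCK_SIZE_M /
// THREAD_SIZE_Y in y), so a host launch has to match.  The sketch below
// instantiates one common tiling (128x8x128 tiles, 8x8 per-thread sub-tiles);
// the tile values and the assumption that M, N and K are exact multiples of
// the tile sizes are illustrative, not taken from the original file, and the
// function is meant to sit in the same translation unit as the template.
// ---------------------------------------------------------------------------
void launch_matmul_encoding(float *dA, float *dB, float *dC, int M, int N, int K) {
    constexpr int BM = 128, BK = 8, BN = 128, TY = 8, TX = 8;

    // Each block computes a BM x BN tile of C using (BN/TX) x (BM/TY) threads.
    dim3 block(BN / TX, BM / TY);   // (16, 16) = 256 threads
    dim3 grid(N / BN, M / BM);      // bx walks the N dimension, by walks M

    MatrixMulCUDAEncoding<BM, BK, BN, TY, TX, false>
        <<<grid, block>>>(dA, dB, dC, K, N);
    cudaDeviceSynchronize();
}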
6559388ac87965b4ff282cf9bfe6ad697a5e99d5.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "minimap.h" #include "mmpriv.h" #include "kalloc.h" __global__ void mm_chain_dp_fill_cuda_score(int64_t i, uint64_t ri, int32_t qi, int32_t q_span, int32_t sidi, int64_t *mem_max_j, int32_t *mem_max_f, int max_dist_x, int max_dist_y, int bw, int max_iter, int n_segs, int is_cdna, mm128_t *a, float avg_qspan, int32_t *f, int32_t *t, int32_t *p) { int tid = blockIdx.x*blockDim.x+threadIdx.x; if (tid>=max_iter) return; int j = i-1-tid; mem_max_j[tid] = -1; mem_max_f[tid] = q_span; if (j<0) return; if (ri > a[j].x + max_dist_x) return; int32_t min_d; int64_t dr = ri - a[j].x; int32_t dq = qi - (int32_t)a[j].y, dd, sc, log_dd; int32_t sidj = (a[j].y & MM_SEED_SEG_MASK) >> MM_SEED_SEG_SHIFT; if ((sidi == sidj && dr == 0) || dq <= 0) return; // don't skip if an anchor is used by multiple segments; see below if ((sidi == sidj && dq > max_dist_y) || dq > max_dist_x) return; dd = dr > dq? dr - dq : dq - dr; if (sidi == sidj && dd > bw) return; if (n_segs > 1 && !is_cdna && sidi == sidj && dr > max_dist_y) return; min_d = dq < dr? dq : dr; sc = min_d > q_span? q_span : dq < dr? dq : dr; log_dd = dd? 31-__clz(dd) : 0; if (is_cdna || sidi != sidj) { int c_log, c_lin; c_lin = (int)(dd * .01 * avg_qspan); c_log = log_dd; if (sidi != sidj && dr == 0) ++sc; // possibly due to overlapping paired ends; give a minor bonus else if (dr > dq || sidi != sidj) sc -= c_lin < c_log? c_lin : c_log; else sc -= c_lin + (c_log>>1); } else sc -= (int)(dd * .01 * avg_qspan) + (log_dd>>1); sc += f[j]; if (sc > mem_max_f[tid]) { mem_max_f[tid] = sc, mem_max_j[tid] = j; } if (p[j] >= 0) t[p[j]] = i; } __global__ void mm_chain_dp_fill_cuda_max(int i, int64_t *mem_max_j, int32_t *mem_max_f, int max_iter, int32_t *f, int32_t *p, int32_t *v) { __shared__ int64_t max_j[1024]; __shared__ int32_t max_f[1024]; int tid = threadIdx.x; int stride = blockDim.x; if (tid<max_iter) { max_j[tid] = mem_max_j[tid]; max_f[tid] = mem_max_f[tid]; } else { max_j[tid] = -1; max_f[tid] = 0; } for (int k=stride+tid;k<max_iter;k+=stride) if (mem_max_f[k]>max_f[tid]) { max_j[tid] = mem_max_j[k]; max_f[tid] = mem_max_f[k]; } for (unsigned int offset=stride>>1;offset>0;offset>>=1) { __syncthreads(); if (tid<offset) if (max_f[tid+offset]>max_f[tid]||(max_f[tid+offset]==max_f[tid]&&max_j[tid+offset]>max_j[tid])) { max_j[tid] = max_j[tid+offset]; max_f[tid] = max_f[tid+offset]; } } __syncthreads(); if (tid==0) { f[i] = max_f[0], p[i] = max_j[0]; v[i] = max_j[0] >= 0 && v[max_j[0]] > max_f[0]? v[max_j[0]] : max_f[0]; // v[] keeps the peak score up to i; f[] is the score ending at i, not always the peak } } __global__ void mm_chain_dp_fill_cuda_optimized(int64_t j, int64_t n, int max_dist_x, int max_dist_y, int bw, int max_iter, int n_segs, int is_cdna, mm128_t *a, float avg_qspan, int32_t *f, int32_t *t, int32_t *p, int32_t *v) { int tid = blockIdx.x*blockDim.x+threadIdx.x; if (tid>=max_iter) return; int64_t i = j+1+tid; if (i>=n) return; uint64_t ri = a[i].x; int32_t qi = (int32_t)a[i].y, q_span = a[i].y>>32&0xff; // NB: only 8 bits of span is used!!! 
int32_t sidi = (a[i].y & MM_SEED_SEG_MASK) >> MM_SEED_SEG_SHIFT; int64_t max_j = -1; int32_t max_f = q_span; if (ri > a[j].x + max_dist_x) return; int32_t min_d; int64_t dr = ri - a[j].x; int32_t dq = qi - (int32_t)a[j].y, dd, sc, log_dd; int32_t sidj = (a[j].y & MM_SEED_SEG_MASK) >> MM_SEED_SEG_SHIFT; if ((sidi == sidj && dr == 0) || dq <= 0) return; // don't skip if an anchor is used by multiple segments; see below if ((sidi == sidj && dq > max_dist_y) || dq > max_dist_x) return; dd = dr > dq? dr - dq : dq - dr; if (sidi == sidj && dd > bw) return; if (n_segs > 1 && !is_cdna && sidi == sidj && dr > max_dist_y) return; min_d = dq < dr? dq : dr; sc = min_d > q_span? q_span : dq < dr? dq : dr; log_dd = dd? 31-__clz(dd) : 0; if (is_cdna || sidi != sidj) { int c_log, c_lin; c_lin = (int)(dd * .01 * avg_qspan); c_log = log_dd; if (sidi != sidj && dr == 0) ++sc; // possibly due to overlapping paired ends; give a minor bonus else if (dr > dq || sidi != sidj) sc -= c_lin < c_log? c_lin : c_log; else sc -= c_lin + (c_log>>1); } else sc -= (int)(dd * .01 * avg_qspan) + (log_dd>>1); sc += f[j]; if (sc > max_f) { max_f = sc, max_j = j; } if (p[j] >= 0) t[p[j]] = i; if (max_f>f[i]||(max_f==f[i]&&max_j!=-1)) { f[i] = max_f, p[i] = max_j; v[i] = max_j >= 0 && v[max_j] > max_f? v[max_j] : max_f; // v[] keeps the peak score up to i; f[] is the score ending at i, not always the peak } } __global__ void mm_chain_dp_fill_cuda_optimized_batch(int64_t j, int64_t n, int64_t start, int n_process, int max_dist_x, int max_dist_y, int bw, int max_iter, int n_segs, int is_cdna, mm128_t *a, float avg_qspan, int32_t *f, int32_t *t, int32_t *p, int32_t *v) { int tid = blockIdx.x*blockDim.x+threadIdx.x; if (tid>=max_iter) return; int64_t i = j+1+tid; if (i>=n) return; i -= start; j -= start; if (j>=n_process) return; uint64_t ri = a[i].x; int32_t qi = (int32_t)a[i].y, q_span = a[i].y>>32&0xff; // NB: only 8 bits of span is used!!! int32_t sidi = (a[i].y & MM_SEED_SEG_MASK) >> MM_SEED_SEG_SHIFT; int64_t max_j = -1; int32_t max_f = q_span; if (ri > a[j].x + max_dist_x) return; int32_t min_d; int64_t dr = ri - a[j].x; int32_t dq = qi - (int32_t)a[j].y, dd, sc, log_dd; int32_t sidj = (a[j].y & MM_SEED_SEG_MASK) >> MM_SEED_SEG_SHIFT; if ((sidi == sidj && dr == 0) || dq <= 0) return; // don't skip if an anchor is used by multiple segments; see below if ((sidi == sidj && dq > max_dist_y) || dq > max_dist_x) return; dd = dr > dq? dr - dq : dq - dr; if (sidi == sidj && dd > bw) return; if (n_segs > 1 && !is_cdna && sidi == sidj && dr > max_dist_y) return; min_d = dq < dr? dq : dr; sc = min_d > q_span? q_span : dq < dr? dq : dr; log_dd = dd? 31-__clz(dd) : 0; if (is_cdna || sidi != sidj) { int c_log, c_lin; c_lin = (int)(dd * .01 * avg_qspan); c_log = log_dd; if (sidi != sidj && dr == 0) ++sc; // possibly due to overlapping paired ends; give a minor bonus else if (dr > dq || sidi != sidj) sc -= c_lin < c_log? c_lin : c_log; else sc -= c_lin + (c_log>>1); } else sc -= (int)(dd * .01 * avg_qspan) + (log_dd>>1); sc += f[j]; if (sc > max_f) { max_f = sc, max_j = j; } if (p[j] >= 0) t[p[j]-start] = i+start; if (max_f>f[i]||(max_f==f[i]&&max_j!=-1)) { f[i] = max_f, p[i] = max_j; if (max_j!=-1) p[i] = max_j+start; v[i] = max_j >= 0 && v[max_j] > max_f? v[max_j] : max_f; // v[] keeps the peak score up to i; f[] is the score ending at i, not always the peak } }
6559388ac87965b4ff282cf9bfe6ad697a5e99d5.cu
#include <cuda.h> #include "minimap.h" #include "mmpriv.h" #include "kalloc.h" __global__ void mm_chain_dp_fill_cuda_score(int64_t i, uint64_t ri, int32_t qi, int32_t q_span, int32_t sidi, int64_t *mem_max_j, int32_t *mem_max_f, int max_dist_x, int max_dist_y, int bw, int max_iter, int n_segs, int is_cdna, mm128_t *a, float avg_qspan, int32_t *f, int32_t *t, int32_t *p) { int tid = blockIdx.x*blockDim.x+threadIdx.x; if (tid>=max_iter) return; int j = i-1-tid; mem_max_j[tid] = -1; mem_max_f[tid] = q_span; if (j<0) return; if (ri > a[j].x + max_dist_x) return; int32_t min_d; int64_t dr = ri - a[j].x; int32_t dq = qi - (int32_t)a[j].y, dd, sc, log_dd; int32_t sidj = (a[j].y & MM_SEED_SEG_MASK) >> MM_SEED_SEG_SHIFT; if ((sidi == sidj && dr == 0) || dq <= 0) return; // don't skip if an anchor is used by multiple segments; see below if ((sidi == sidj && dq > max_dist_y) || dq > max_dist_x) return; dd = dr > dq? dr - dq : dq - dr; if (sidi == sidj && dd > bw) return; if (n_segs > 1 && !is_cdna && sidi == sidj && dr > max_dist_y) return; min_d = dq < dr? dq : dr; sc = min_d > q_span? q_span : dq < dr? dq : dr; log_dd = dd? 31-__clz(dd) : 0; if (is_cdna || sidi != sidj) { int c_log, c_lin; c_lin = (int)(dd * .01 * avg_qspan); c_log = log_dd; if (sidi != sidj && dr == 0) ++sc; // possibly due to overlapping paired ends; give a minor bonus else if (dr > dq || sidi != sidj) sc -= c_lin < c_log? c_lin : c_log; else sc -= c_lin + (c_log>>1); } else sc -= (int)(dd * .01 * avg_qspan) + (log_dd>>1); sc += f[j]; if (sc > mem_max_f[tid]) { mem_max_f[tid] = sc, mem_max_j[tid] = j; } if (p[j] >= 0) t[p[j]] = i; } __global__ void mm_chain_dp_fill_cuda_max(int i, int64_t *mem_max_j, int32_t *mem_max_f, int max_iter, int32_t *f, int32_t *p, int32_t *v) { __shared__ int64_t max_j[1024]; __shared__ int32_t max_f[1024]; int tid = threadIdx.x; int stride = blockDim.x; if (tid<max_iter) { max_j[tid] = mem_max_j[tid]; max_f[tid] = mem_max_f[tid]; } else { max_j[tid] = -1; max_f[tid] = 0; } for (int k=stride+tid;k<max_iter;k+=stride) if (mem_max_f[k]>max_f[tid]) { max_j[tid] = mem_max_j[k]; max_f[tid] = mem_max_f[k]; } for (unsigned int offset=stride>>1;offset>0;offset>>=1) { __syncthreads(); if (tid<offset) if (max_f[tid+offset]>max_f[tid]||(max_f[tid+offset]==max_f[tid]&&max_j[tid+offset]>max_j[tid])) { max_j[tid] = max_j[tid+offset]; max_f[tid] = max_f[tid+offset]; } } __syncthreads(); if (tid==0) { f[i] = max_f[0], p[i] = max_j[0]; v[i] = max_j[0] >= 0 && v[max_j[0]] > max_f[0]? v[max_j[0]] : max_f[0]; // v[] keeps the peak score up to i; f[] is the score ending at i, not always the peak } } __global__ void mm_chain_dp_fill_cuda_optimized(int64_t j, int64_t n, int max_dist_x, int max_dist_y, int bw, int max_iter, int n_segs, int is_cdna, mm128_t *a, float avg_qspan, int32_t *f, int32_t *t, int32_t *p, int32_t *v) { int tid = blockIdx.x*blockDim.x+threadIdx.x; if (tid>=max_iter) return; int64_t i = j+1+tid; if (i>=n) return; uint64_t ri = a[i].x; int32_t qi = (int32_t)a[i].y, q_span = a[i].y>>32&0xff; // NB: only 8 bits of span is used!!! 
int32_t sidi = (a[i].y & MM_SEED_SEG_MASK) >> MM_SEED_SEG_SHIFT; int64_t max_j = -1; int32_t max_f = q_span; if (ri > a[j].x + max_dist_x) return; int32_t min_d; int64_t dr = ri - a[j].x; int32_t dq = qi - (int32_t)a[j].y, dd, sc, log_dd; int32_t sidj = (a[j].y & MM_SEED_SEG_MASK) >> MM_SEED_SEG_SHIFT; if ((sidi == sidj && dr == 0) || dq <= 0) return; // don't skip if an anchor is used by multiple segments; see below if ((sidi == sidj && dq > max_dist_y) || dq > max_dist_x) return; dd = dr > dq? dr - dq : dq - dr; if (sidi == sidj && dd > bw) return; if (n_segs > 1 && !is_cdna && sidi == sidj && dr > max_dist_y) return; min_d = dq < dr? dq : dr; sc = min_d > q_span? q_span : dq < dr? dq : dr; log_dd = dd? 31-__clz(dd) : 0; if (is_cdna || sidi != sidj) { int c_log, c_lin; c_lin = (int)(dd * .01 * avg_qspan); c_log = log_dd; if (sidi != sidj && dr == 0) ++sc; // possibly due to overlapping paired ends; give a minor bonus else if (dr > dq || sidi != sidj) sc -= c_lin < c_log? c_lin : c_log; else sc -= c_lin + (c_log>>1); } else sc -= (int)(dd * .01 * avg_qspan) + (log_dd>>1); sc += f[j]; if (sc > max_f) { max_f = sc, max_j = j; } if (p[j] >= 0) t[p[j]] = i; if (max_f>f[i]||(max_f==f[i]&&max_j!=-1)) { f[i] = max_f, p[i] = max_j; v[i] = max_j >= 0 && v[max_j] > max_f? v[max_j] : max_f; // v[] keeps the peak score up to i; f[] is the score ending at i, not always the peak } } __global__ void mm_chain_dp_fill_cuda_optimized_batch(int64_t j, int64_t n, int64_t start, int n_process, int max_dist_x, int max_dist_y, int bw, int max_iter, int n_segs, int is_cdna, mm128_t *a, float avg_qspan, int32_t *f, int32_t *t, int32_t *p, int32_t *v) { int tid = blockIdx.x*blockDim.x+threadIdx.x; if (tid>=max_iter) return; int64_t i = j+1+tid; if (i>=n) return; i -= start; j -= start; if (j>=n_process) return; uint64_t ri = a[i].x; int32_t qi = (int32_t)a[i].y, q_span = a[i].y>>32&0xff; // NB: only 8 bits of span is used!!! int32_t sidi = (a[i].y & MM_SEED_SEG_MASK) >> MM_SEED_SEG_SHIFT; int64_t max_j = -1; int32_t max_f = q_span; if (ri > a[j].x + max_dist_x) return; int32_t min_d; int64_t dr = ri - a[j].x; int32_t dq = qi - (int32_t)a[j].y, dd, sc, log_dd; int32_t sidj = (a[j].y & MM_SEED_SEG_MASK) >> MM_SEED_SEG_SHIFT; if ((sidi == sidj && dr == 0) || dq <= 0) return; // don't skip if an anchor is used by multiple segments; see below if ((sidi == sidj && dq > max_dist_y) || dq > max_dist_x) return; dd = dr > dq? dr - dq : dq - dr; if (sidi == sidj && dd > bw) return; if (n_segs > 1 && !is_cdna && sidi == sidj && dr > max_dist_y) return; min_d = dq < dr? dq : dr; sc = min_d > q_span? q_span : dq < dr? dq : dr; log_dd = dd? 31-__clz(dd) : 0; if (is_cdna || sidi != sidj) { int c_log, c_lin; c_lin = (int)(dd * .01 * avg_qspan); c_log = log_dd; if (sidi != sidj && dr == 0) ++sc; // possibly due to overlapping paired ends; give a minor bonus else if (dr > dq || sidi != sidj) sc -= c_lin < c_log? c_lin : c_log; else sc -= c_lin + (c_log>>1); } else sc -= (int)(dd * .01 * avg_qspan) + (log_dd>>1); sc += f[j]; if (sc > max_f) { max_f = sc, max_j = j; } if (p[j] >= 0) t[p[j]-start] = i+start; if (max_f>f[i]||(max_f==f[i]&&max_j!=-1)) { f[i] = max_f, p[i] = max_j; if (max_j!=-1) p[i] = max_j+start; v[i] = max_j >= 0 && v[max_j] > max_f? v[max_j] : max_f; // v[] keeps the peak score up to i; f[] is the score ending at i, not always the peak } }
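One plausible way to drive mm_chain_dp_fill_cuda_optimized above is a host loop over predecessors j that launches enough threads to cover the next max_iter successors (the kernel derives i = j + 1 + tid, and same-stream launches keep the j order, so f[j] is final before it is read). The sketch below is only a launch-pattern illustration, not the project's actual driver: it assumes it sits in the same translation unit as the kernels above and that the caller has already copied the anchors to d_a and initialized d_f, d_t, d_p, d_v; the function name chain_fill_scores_gpu is made up here.

#include <cuda_runtime.h>
#include "minimap.h"

// hedged launch-pattern sketch; buffer setup and result read-back are the caller's job
void chain_fill_scores_gpu(int64_t n, int max_dist_x, int max_dist_y, int bw,
                           int max_iter, int n_segs, int is_cdna, float avg_qspan,
                           mm128_t *d_a, int32_t *d_f, int32_t *d_t,
                           int32_t *d_p, int32_t *d_v)
{
	const int nthreads = 256;
	const int nblocks = (max_iter + nthreads - 1) / nthreads;
	for (int64_t j = 0; j < n - 1; ++j) {
		// one launch per predecessor j; thread tid scores successor i = j + 1 + tid
		mm_chain_dp_fill_cuda_optimized<<<nblocks, nthreads>>>(
			j, n, max_dist_x, max_dist_y, bw, max_iter, n_segs, is_cdna,
			d_a, avg_qspan, d_f, d_t, d_p, d_v);
	}
	cudaDeviceSynchronize();
}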
01ec47ce78774c0593a58c6770fa1d3af8a3c927.hip
// !!! This is a file automatically generated by hipify!!! #include "util.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <algorithm> // std::min std::max using namespace std; #define TILE_WIDTH_16 16 #define TILE_HEIGHT_16 16 #define TILE_WIDTH_32 32 #define TILE_HEIGHT_32 32 // Ej 1a) Kernel __global__ void transpose_global_kernel(float* d_input, int width, int height, float* d_output) { unsigned int imgx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int imgy = (blockIdx.y * blockDim.y) + threadIdx.y; if (imgx < width && imgy < height) { d_output[(imgx*height) + imgy] = d_input[(imgy*width) + imgx]; } } // Ej 1b) Kernel __global__ void transpose_shared_kernel_16(float* d_input, int width, int height, float* d_output) { __shared__ float tile[TILE_WIDTH_16][TILE_HEIGHT_16]; // Indices (x,y) en imagen de entrada unsigned int imgx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int imgy = (blockIdx.y * blockDim.y) + threadIdx.y; // Lectura por fila (global) / Escritura por fila (compartida) if (imgx < width && imgy < height) { tile[threadIdx.y][threadIdx.x] = d_input[(imgy*width) + imgx]; __syncthreads(); } // Indices (x,y) en imagen de salida ((y,x) en imagen de entrada) imgx = (blockIdx.y * blockDim.x) + threadIdx.x; imgy = (blockIdx.x * blockDim.y) + threadIdx.y; // Lectura por columna (compartida) / Escritura por fila (global) if (imgx < height && imgy < width) { d_output[(imgy*height) + imgx] = tile[threadIdx.x][threadIdx.y] ; } } __global__ void transpose_shared_kernel_32(float* d_input, int width, int height, float* d_output) { __shared__ float tile[TILE_WIDTH_32][TILE_HEIGHT_32]; // Indices (x,y) en imagen de entrada unsigned int imgx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int imgy = (blockIdx.y * blockDim.y) + threadIdx.y; // Lectura por fila (global) / Escritura por fila (compartida) if (imgx < width && imgy < height ) { tile[threadIdx.y][threadIdx.x] = d_input[(imgy*width) + imgx]; __syncthreads(); } // Indices (x,y) en imagen de salida ((y,x) en imagen de entrada) imgx = (blockIdx.y * blockDim.x) + threadIdx.x; imgy = (blockIdx.x * blockDim.y) + threadIdx.y; // Lectura por columna (compartida) / Escritura por fila (global) if (imgx < height && imgy < width) { d_output[(imgy*height) + imgx] = tile[threadIdx.x][threadIdx.y] ; } } // Ej 1c) Kernel __global__ void transpose_shared_extra_kernel_16(float* d_input, int width, int height, float* d_output) { __shared__ float tile[TILE_WIDTH_16][TILE_HEIGHT_16 + 1]; // Indices (x,y) en imagen de entrada unsigned int imgx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int imgy = (blockIdx.y * blockDim.y) + threadIdx.y; // Lectura por fila (global) / Escritura por fila (compartida) if (imgx < width && imgy < height ) { tile[threadIdx.y][threadIdx.x] = d_input[(imgy*width) + imgx]; __syncthreads(); } // Indices (x,y) en imagen de salida ((y,x) en imagen de entrada) imgx = (blockIdx.y * blockDim.x) + threadIdx.x; imgy = (blockIdx.x * blockDim.y) + threadIdx.y; // Lectura por columna (compartida) / Escritura por fila (global) if (imgx < height && imgy < width) { d_output[(imgy*height) + imgx] = tile[threadIdx.x][threadIdx.y] ; } } __global__ void transpose_shared_extra_kernel_32(float* d_input, int width, int height, float* d_output) { __shared__ float tile[TILE_WIDTH_32][TILE_HEIGHT_32 + 1]; // Indices (x,y) en imagen de entrada unsigned int imgx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int imgy = (blockIdx.y * blockDim.y) + threadIdx.y; // 
Lectura por fila (global) / Escritura por fila (compartida) if (imgx < width && imgy < height ) { tile[threadIdx.y][threadIdx.x] = d_input[(imgy*width) + imgx]; __syncthreads(); } // Indices (x,y) en imagen de salida ((y,x) en imagen de entrada) imgx = (blockIdx.y * blockDim.x) + threadIdx.x; imgy = (blockIdx.x * blockDim.y) + threadIdx.y; // Lectura por columna (compartida) / Escritura por fila (global) if (imgx < height && imgy < width) { d_output[(imgy*height) + imgx] = tile[threadIdx.x][threadIdx.y] ; } } void transpose_gpu(float * img_in, int width, int height, float * img_out, int block_size, int algorithm){ switch(algorithm) { case 1: printf("\n"); if (block_size == 16) printf("-> Kernel con memoria global, tamaño de bloque = 16\n"); else printf("-> Kernel con memoria global, tamaño de bloque = 32\n"); break; case 2: printf("\n"); if (block_size == 16) printf("-> Kernel con memoria compartida, tamaño de bloque = 16\n"); else printf("-> Kernel con memoria compartida, tamaño de bloque = 32\n"); break; case 3: printf("\n"); if (block_size == 16) printf("-> Kernel con memoria compartida y columna extra, tamaño de bloque = 16\n"); else printf("-> Kernel con memoria compartida y columna extra, tamaño de bloque = 32\n");; printf("\n"); break; default: printf("Invocar como: './ej1.x nombre_archivo, algoritmo, tamaño_bloque'\n"); printf("-> Algoritmo:\n"); printf("\t 1 - Kernel con memoria global\n"); printf("\t 2 - Kernel con memoria compartida\n"); printf("\t 3 - Kernel con memoria compartida y columna extra\n"); printf("\t 0 - Todos los algoritmos\n"); printf("-> Tamaño de bloque:\n"); printf("\t 16\n"); printf("\t 32\n"); } // Auxiliar para contar tiempo total // float t_total = 0; // Etapa 1: Reserva de Memoria // CLK_CUEVTS_INIT; // CLK_CUEVTS_START; // Reserva en CPU unsigned int size = width * height * sizeof(float); float * device_img_in = (float *)malloc(size); float * device_img_out = (float *)malloc(size); // Reserva en GPU CUDA_CHK(hipMalloc((void**)& device_img_in, size)); CUDA_CHK(hipMalloc((void**)& device_img_out, size)); // CLK_CUEVTS_STOP; // CLK_CUEVTS_ELAPSED; // printf("Tiempo transposicion GPU (Reserva de memoria): %f ms\n", t_elap); // t_total = t_total + t_elap; // Etapa 2: Transferencia de datos (Host -> Device) // CLK_CUEVTS_START; CUDA_CHK(hipMemcpy(device_img_in, img_in, size, hipMemcpyHostToDevice)); // puntero destino, puntero origen, numero de bytes a copiar, tipo de transferencia // CLK_CUEVTS_STOP; // CLK_CUEVTS_ELAPSED; // printf("Tiempo transposicion GPU (Transferencia de datos (Host -> Device)): %f ms\n", t_elap); // t_total = t_total + t_elap; // Etapa 3: Definir grilla int block_amount_x = width / block_size + (width % block_size != 0); // Division with ceiling int block_amount_y = height / block_size + (height % block_size != 0); // Division with ceiling dim3 tamGrid(block_amount_x, block_amount_y); // Grid dimension dim3 tamBlock(block_size, block_size); // Block dimension // Etapa 4 : Lanzar Kernel // CLK_CUEVTS_START; switch(algorithm) { case 1: hipLaunchKernelGGL(( transpose_global_kernel), dim3(tamGrid), dim3(tamBlock), 0, 0, device_img_in, width, height, device_img_out); break; case 2: if (block_size == 16) hipLaunchKernelGGL(( transpose_shared_kernel_16), dim3(tamGrid), dim3(tamBlock), 0, 0, device_img_in, width, height, device_img_out); else hipLaunchKernelGGL(( transpose_shared_kernel_32), dim3(tamGrid), dim3(tamBlock), 0, 0, device_img_in, width, height, device_img_out); break; case 3: if (block_size == 16) hipLaunchKernelGGL(( 
transpose_shared_extra_kernel_16), dim3(tamGrid), dim3(tamBlock), 0, 0, device_img_in, width, height, device_img_out); else hipLaunchKernelGGL(( transpose_shared_extra_kernel_32), dim3(tamGrid), dim3(tamBlock), 0, 0, device_img_in, width, height, device_img_out); break; default: hipLaunchKernelGGL(( transpose_global_kernel), dim3(tamGrid), dim3(tamBlock), 0, 0, device_img_in, width, height, device_img_out); } // Sincronizar threads antes de parar timers hipDeviceSynchronize(); // CLK_CUEVTS_STOP; // CLK_CUEVTS_ELAPSED; // printf("Tiempo transposicion GPU (Kernel): %f ms\n", t_elap); // t_total = t_total + t_elap; // Etapa 5: Transferencia de Datos (Device -> Host) // CLK_CUEVTS_START; CUDA_CHK(hipMemcpy(img_out, device_img_out, size, hipMemcpyDeviceToHost)); // puntero destino, puntero origen, numero de bytes a copiar, tipo de transferencia // CLK_CUEVTS_STOP; // CLK_CUEVTS_ELAPSED; // printf("Tiempo transposicion GPU (Transferencia de datos (Host <- Device)): %f ms\n", t_elap); // t_total = t_total + t_elap; // printf("Tiempo transposicion GPU: %f ms\n", t_total); // printf("\n"); // Etapa 6: Liberación de Memoria CUDA_CHK(hipFree(device_img_in)); CUDA_CHK(hipFree(device_img_out)); }
01ec47ce78774c0593a58c6770fa1d3af8a3c927.cu
#include "util.h" #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <algorithm> // std::min std::max using namespace std; #define TILE_WIDTH_16 16 #define TILE_HEIGHT_16 16 #define TILE_WIDTH_32 32 #define TILE_HEIGHT_32 32 // Ej 1a) Kernel __global__ void transpose_global_kernel(float* d_input, int width, int height, float* d_output) { unsigned int imgx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int imgy = (blockIdx.y * blockDim.y) + threadIdx.y; if (imgx < width && imgy < height) { d_output[(imgx*height) + imgy] = d_input[(imgy*width) + imgx]; } } // Ej 1b) Kernel __global__ void transpose_shared_kernel_16(float* d_input, int width, int height, float* d_output) { __shared__ float tile[TILE_WIDTH_16][TILE_HEIGHT_16]; // Indices (x,y) en imagen de entrada unsigned int imgx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int imgy = (blockIdx.y * blockDim.y) + threadIdx.y; // Lectura por fila (global) / Escritura por fila (compartida) if (imgx < width && imgy < height) { tile[threadIdx.y][threadIdx.x] = d_input[(imgy*width) + imgx]; __syncthreads(); } // Indices (x,y) en imagen de salida ((y,x) en imagen de entrada) imgx = (blockIdx.y * blockDim.x) + threadIdx.x; imgy = (blockIdx.x * blockDim.y) + threadIdx.y; // Lectura por columna (compartida) / Escritura por fila (global) if (imgx < height && imgy < width) { d_output[(imgy*height) + imgx] = tile[threadIdx.x][threadIdx.y] ; } } __global__ void transpose_shared_kernel_32(float* d_input, int width, int height, float* d_output) { __shared__ float tile[TILE_WIDTH_32][TILE_HEIGHT_32]; // Indices (x,y) en imagen de entrada unsigned int imgx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int imgy = (blockIdx.y * blockDim.y) + threadIdx.y; // Lectura por fila (global) / Escritura por fila (compartida) if (imgx < width && imgy < height ) { tile[threadIdx.y][threadIdx.x] = d_input[(imgy*width) + imgx]; __syncthreads(); } // Indices (x,y) en imagen de salida ((y,x) en imagen de entrada) imgx = (blockIdx.y * blockDim.x) + threadIdx.x; imgy = (blockIdx.x * blockDim.y) + threadIdx.y; // Lectura por columna (compartida) / Escritura por fila (global) if (imgx < height && imgy < width) { d_output[(imgy*height) + imgx] = tile[threadIdx.x][threadIdx.y] ; } } // Ej 1c) Kernel __global__ void transpose_shared_extra_kernel_16(float* d_input, int width, int height, float* d_output) { __shared__ float tile[TILE_WIDTH_16][TILE_HEIGHT_16 + 1]; // Indices (x,y) en imagen de entrada unsigned int imgx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int imgy = (blockIdx.y * blockDim.y) + threadIdx.y; // Lectura por fila (global) / Escritura por fila (compartida) if (imgx < width && imgy < height ) { tile[threadIdx.y][threadIdx.x] = d_input[(imgy*width) + imgx]; __syncthreads(); } // Indices (x,y) en imagen de salida ((y,x) en imagen de entrada) imgx = (blockIdx.y * blockDim.x) + threadIdx.x; imgy = (blockIdx.x * blockDim.y) + threadIdx.y; // Lectura por columna (compartida) / Escritura por fila (global) if (imgx < height && imgy < width) { d_output[(imgy*height) + imgx] = tile[threadIdx.x][threadIdx.y] ; } } __global__ void transpose_shared_extra_kernel_32(float* d_input, int width, int height, float* d_output) { __shared__ float tile[TILE_WIDTH_32][TILE_HEIGHT_32 + 1]; // Indices (x,y) en imagen de entrada unsigned int imgx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int imgy = (blockIdx.y * blockDim.y) + threadIdx.y; // Lectura por fila (global) / Escritura por fila (compartida) if (imgx < width 
&& imgy < height ) { tile[threadIdx.y][threadIdx.x] = d_input[(imgy*width) + imgx]; __syncthreads(); } // Indices (x,y) en imagen de salida ((y,x) en imagen de entrada) imgx = (blockIdx.y * blockDim.x) + threadIdx.x; imgy = (blockIdx.x * blockDim.y) + threadIdx.y; // Lectura por columna (compartida) / Escritura por fila (global) if (imgx < height && imgy < width) { d_output[(imgy*height) + imgx] = tile[threadIdx.x][threadIdx.y] ; } } void transpose_gpu(float * img_in, int width, int height, float * img_out, int block_size, int algorithm){ switch(algorithm) { case 1: printf("\n"); if (block_size == 16) printf("-> Kernel con memoria global, tamaño de bloque = 16\n"); else printf("-> Kernel con memoria global, tamaño de bloque = 32\n"); break; case 2: printf("\n"); if (block_size == 16) printf("-> Kernel con memoria compartida, tamaño de bloque = 16\n"); else printf("-> Kernel con memoria compartida, tamaño de bloque = 32\n"); break; case 3: printf("\n"); if (block_size == 16) printf("-> Kernel con memoria compartida y columna extra, tamaño de bloque = 16\n"); else printf("-> Kernel con memoria compartida y columna extra, tamaño de bloque = 32\n");; printf("\n"); break; default: printf("Invocar como: './ej1.x nombre_archivo, algoritmo, tamaño_bloque'\n"); printf("-> Algoritmo:\n"); printf("\t 1 - Kernel con memoria global\n"); printf("\t 2 - Kernel con memoria compartida\n"); printf("\t 3 - Kernel con memoria compartida y columna extra\n"); printf("\t 0 - Todos los algoritmos\n"); printf("-> Tamaño de bloque:\n"); printf("\t 16\n"); printf("\t 32\n"); } // Auxiliar para contar tiempo total // float t_total = 0; // Etapa 1: Reserva de Memoria // CLK_CUEVTS_INIT; // CLK_CUEVTS_START; // Reserva en CPU unsigned int size = width * height * sizeof(float); float * device_img_in = (float *)malloc(size); float * device_img_out = (float *)malloc(size); // Reserva en GPU CUDA_CHK(cudaMalloc((void**)& device_img_in, size)); CUDA_CHK(cudaMalloc((void**)& device_img_out, size)); // CLK_CUEVTS_STOP; // CLK_CUEVTS_ELAPSED; // printf("Tiempo transposicion GPU (Reserva de memoria): %f ms\n", t_elap); // t_total = t_total + t_elap; // Etapa 2: Transferencia de datos (Host -> Device) // CLK_CUEVTS_START; CUDA_CHK(cudaMemcpy(device_img_in, img_in, size, cudaMemcpyHostToDevice)); // puntero destino, puntero origen, numero de bytes a copiar, tipo de transferencia // CLK_CUEVTS_STOP; // CLK_CUEVTS_ELAPSED; // printf("Tiempo transposicion GPU (Transferencia de datos (Host -> Device)): %f ms\n", t_elap); // t_total = t_total + t_elap; // Etapa 3: Definir grilla int block_amount_x = width / block_size + (width % block_size != 0); // Division with ceiling int block_amount_y = height / block_size + (height % block_size != 0); // Division with ceiling dim3 tamGrid(block_amount_x, block_amount_y); // Grid dimension dim3 tamBlock(block_size, block_size); // Block dimension // Etapa 4 : Lanzar Kernel // CLK_CUEVTS_START; switch(algorithm) { case 1: transpose_global_kernel<<<tamGrid, tamBlock>>>(device_img_in, width, height, device_img_out); break; case 2: if (block_size == 16) transpose_shared_kernel_16<<<tamGrid, tamBlock>>>(device_img_in, width, height, device_img_out); else transpose_shared_kernel_32<<<tamGrid, tamBlock>>>(device_img_in, width, height, device_img_out); break; case 3: if (block_size == 16) transpose_shared_extra_kernel_16<<<tamGrid, tamBlock>>>(device_img_in, width, height, device_img_out); else transpose_shared_extra_kernel_32<<<tamGrid, tamBlock>>>(device_img_in, width, height, device_img_out); break; 
default: transpose_global_kernel<<<tamGrid, tamBlock>>>(device_img_in, width, height, device_img_out); } // Sincronizar threads antes de parar timers cudaDeviceSynchronize(); // CLK_CUEVTS_STOP; // CLK_CUEVTS_ELAPSED; // printf("Tiempo transposicion GPU (Kernel): %f ms\n", t_elap); // t_total = t_total + t_elap; // Etapa 5: Transferencia de Datos (Device -> Host) // CLK_CUEVTS_START; CUDA_CHK(cudaMemcpy(img_out, device_img_out, size, cudaMemcpyDeviceToHost)); // puntero destino, puntero origen, numero de bytes a copiar, tipo de transferencia // CLK_CUEVTS_STOP; // CLK_CUEVTS_ELAPSED; // printf("Tiempo transposicion GPU (Transferencia de datos (Host <- Device)): %f ms\n", t_elap); // t_total = t_total + t_elap; // printf("Tiempo transposicion GPU: %f ms\n", t_total); // printf("\n"); // Etapa 6: Liberación de Memoria CUDA_CHK(cudaFree(device_img_in)); CUDA_CHK(cudaFree(device_img_out)); }
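A hedged usage sketch for transpose_gpu above, assuming this small driver is linked against the file shown (which provides transpose_gpu and its CUDA_CHK error checking) and that the prototype below matches it; the image dimensions, block size, and algorithm choice are arbitrary test values. Algorithm 3 with block size 32 exercises the padded-tile kernel, whose extra column exists to avoid shared-memory bank conflicts on the column-wise reads.

#include <cstdio>
#include <cstdlib>

// assumed to match the definition in the file above
void transpose_gpu(float *img_in, int width, int height, float *img_out,
                   int block_size, int algorithm);

int main()
{
	const int width = 1024, height = 768;
	float *in  = (float *)malloc(width * height * sizeof(float));
	float *out = (float *)malloc(width * height * sizeof(float));
	for (int i = 0; i < width * height; ++i) in[i] = (float)i;

	transpose_gpu(in, width, height, out, 32, 3); // algorithm 3 = shared memory + extra column

	// spot check: out[x*height + y] should equal in[y*width + x]
	printf("in(x=10,y=20)=%.1f out=%.1f\n", in[20 * width + 10], out[10 * height + 20]);
	free(in); free(out);
	return 0;
}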
9b097d8a934fbe5bafbb400118340aa5808eef67.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "noise_manager.h" #include <chrono> #include <cmath> #include <iostream> #include <memory> #include <random> #include "cuda_math_util.h" #include "cuda_util.h" #include <hipcub/hipcub.hpp> #include "io_iterator.h" namespace RPU { template <typename T> __global__ void kernelAbsMaxNPSum( float *scale_values, const int m_batch, const float *amax_values, const T *psum_values, const T *nsum_values, const T out_bound, const T assumed_wmax, const T bm_max // io.max_bm_res/io.inp_res ) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < m_batch) { // w_max*MAX(psum,nsum)/out_bound < scale T w_max = assumed_wmax; T amax = amax_values[tid]; T psum = psum_values[tid]; T nsum = -nsum_values[tid]; T sum = MAX(psum, nsum); scale_values[tid] = MAX(amax, MIN(sum * w_max / out_bound, amax * bm_max)); ; } } template <typename InputIteratorT, typename T> __global__ void kernelNPSumBatchTrans( InputIteratorT input, const int total_size_in, const int m_batch_in, T *psum_values, T *nsum_values, T *psum_values0, T *nsum_values0) { // -- only use this version if m_batch < blockDim.x !!! // -- probably: strided version would be faster... int tid = blockDim.x * blockIdx.x + threadIdx.x; // assumes that shared is of size 2*nthreads*sizeof(T) !!!!!!!!!! 
extern __shared__ __align__(sizeof(double)) unsigned char rpu_smem_nm[]; T *block_sum_values = reinterpret_cast<T *>(rpu_smem_nm); T *block_psum_values = &block_sum_values[0]; T *block_nsum_values = &block_sum_values[blockDim.x]; const int size = total_size_in; const int m_batch = m_batch_in; block_psum_values[threadIdx.x] = (T)0.0; block_nsum_values[threadIdx.x] = (T)0.0; __syncthreads(); if (tid < m_batch) { psum_values0[tid] = (T)0.0; nsum_values0[tid] = (T)0.0; } if (tid < size) { T value = input[tid]; int midx = tid % m_batch; if (value >= 0) { atomicAdd(&(block_psum_values[midx]), value); } else { atomicAdd(&(block_nsum_values[midx]), value); } } __syncthreads(); int bidx = threadIdx.x; if (bidx < m_batch) { atomicAdd(&(psum_values[bidx]), block_psum_values[bidx]); atomicAdd(&(nsum_values[bidx]), block_nsum_values[bidx]); } } template <typename InputIteratorT, typename T> __global__ void kernelNPSumBatchTrans_LargeBatch( InputIteratorT input, const int total_size_in, const int m_batch_in, T *psum_values, T *nsum_values, T *psum_values0, T *nsum_values0) { // -- use this version if m_batch >= blockDim.x // -- just uses atomic on global memory int tid = blockDim.x * blockIdx.x + threadIdx.x; const int size = total_size_in; const int m_batch = m_batch_in; if (tid < m_batch) { psum_values0[tid] = (T)0.0; nsum_values0[tid] = (T)0.0; } if (tid < size) { T value = input[tid]; int midx = tid % m_batch; if (value >= 0) { atomicAdd(&psum_values[midx], value); } else { atomicAdd(&nsum_values[midx], value); } } } template <typename T> __global__ void kernelAverageAbsMaxSetScales( float *scales, float *ravg, const float *sum, const int m_batch_in, T decay_rate_in) { int tid = blockDim.x * blockIdx.x + threadIdx.x; const int m_batch = m_batch_in; T decay_rate = decay_rate_in; T max_avg = (*sum) / m_batch; T run_avg = *ravg; if (tid < m_batch) { scales[tid] = (float)(run_avg * (1.0 - decay_rate) + decay_rate * max_avg); } if (tid == m_batch) { *ravg = (float)(run_avg * (1.0 - decay_rate) + decay_rate * max_avg); } } template <typename T> __global__ void kernelAverageAbsMaxSingleMomentum(float *ravg, const float *sum, const int m_batch, T decay_rate) { // just single block! 
int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { T max_avg = (*sum) / m_batch; *ravg = (float)(*ravg * (1.0 - decay_rate) + decay_rate * max_avg); } } /****************************************************************************************************************/ /* NoiseManager */ /******************************************************************************************************************/ #define LAUNCH_NPSUM_KERNEL(KNAME, SHARED_MEM, ARGS) \ hipLaunchKernelGGL(( KNAME<InputIteratorT, T>), dim3(nblocks), dim3(nthreads), SHARED_MEM, s, ARGS; template <typename T> NoiseManager<T>::NoiseManager(CudaContext *c, int size) : size_(size), context_(c), buffer_m_batch_(0), const_set_if_(false) { // initialize for m_batch=1 dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, 1); dev_psum_values_ = RPU::make_unique<CudaArray<T>>(context_, 1); dev_nsum_values_ = RPU::make_unique<CudaArray<T>>(context_, 1); dev_ravg_scale_value_ = RPU::make_unique<CudaArray<float>>(context_, 1); dev_ravg_scale_value_->setConst(1.0); amaximizer_ = RPU::make_unique<Maximizer<T>>(context_, size, true); maximizer_ = RPU::make_unique<Maximizer<T>>(context_, size, false); size_t temp_storage_bytes = 0; RPU::hipcub::DeviceReduce::Reduce( nullptr, temp_storage_bytes, dev_psum_values_->getData(), dev_psum_values_->getData(), size_, nsum_op_, 0, context_->getStream()); dev_v_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); context_->synchronize(); } template <typename T> void NoiseManager<T>::initializeBatchBuffer(int m_batch) { // this inits all the buffers needed for PMSum only !! if ((m_batch > 1) && (buffer_m_batch_ < m_batch)) { buffer_m_batch_ = m_batch; dev_psum_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_psum_values0_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_psum_values0_->setConst((T)0.0); dev_nsum_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_nsum_values0_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_nsum_values0_->setConst((T)0.0); int *offsets = new int[m_batch + 1]; // not trans for (int i = 0; i <= m_batch; i++) { offsets[i] = i * size_; } dev_offsets_ = RPU::make_unique<CudaArray<int>>(context_, m_batch + 1, offsets); size_t temp_storage_bytes = 0; RPU::hipcub::DeviceSegmentedReduce::Reduce( nullptr, temp_storage_bytes, dev_psum_values_->getData(), dev_psum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, psum_op_, 0, context_->getStream()); dev_m_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); const_set_if_ = false; context_->synchronize(); delete[] offsets; } } template <typename T> template <typename InputIteratorT> void NoiseManager<T>::computeNPSum(InputIteratorT dev_input, int m_batch, bool trans) { hipStream_t s = context_->getStream(); if (m_batch == 1) { size_t ssz = dev_v_temp_storage_->getSize(); RPU::hipcub::DeviceReduce::Reduce( (void *)dev_v_temp_storage_->getData(), ssz, dev_input, dev_psum_values_->getData(), size_, psum_op_, (T)0, s); RPU::hipcub::DeviceReduce::Reduce( (void *)dev_v_temp_storage_->getData(), ssz, dev_input, dev_nsum_values_->getData(), size_, nsum_op_, (T)0, s); } else { if (buffer_m_batch_ < m_batch) { this->initializeBatchBuffer(m_batch); } if (trans) { std::swap(dev_psum_values_, dev_psum_values0_); std::swap(dev_nsum_values_, dev_nsum_values0_); int nthreads = context_->getNThreads(); int n = size_ * m_batch; int nblocks = context_->getNBlocks(n, nthreads); if (m_batch <=
nthreads) { int shared_mem = 2 * nthreads * sizeof(T); LAUNCH_NPSUM_KERNEL( kernelNPSumBatchTrans, shared_mem, (dev_input, n, m_batch, dev_psum_values_->getData(), dev_nsum_values_->getData(), dev_psum_values0_->getData(), dev_nsum_values0_->getData())); } else { // simple atomic global memory version LAUNCH_NPSUM_KERNEL( kernelNPSumBatchTrans_LargeBatch, 0, (dev_input, n, m_batch, dev_psum_values_->getData(), dev_nsum_values_->getData(), dev_psum_values0_->getData(), dev_nsum_values0_->getData())); } } else { // Fast Segmented reduction size_t ssz = dev_m_temp_storage_->getSize(); RPU::hipcub::DeviceSegmentedReduce::Reduce( (void *)dev_m_temp_storage_->getData(), ssz, dev_input, dev_psum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, psum_op_, (T)0.0, s); RPU::hipcub::DeviceSegmentedReduce::Reduce( (void *)dev_m_temp_storage_->getData(), ssz, dev_input, dev_nsum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, nsum_op_, (T)0.0, s); } } } template <typename T> void NoiseManager<T>::setAverageAbsMax(float value) { dev_ravg_scale_value_->setConst(value); dev_scale_values_->setConst(value); ravg_initialized_ = true; context_->synchronize(); } template <typename T> float NoiseManager<T>::getAverageAbsMax() const { float tmp; dev_ravg_scale_value_->copyTo(&tmp); return tmp; }; template <typename T> template <typename InputIteratorT> void NoiseManager<T>::compute( InputIteratorT dev_input, const NoiseManagementType &nm_type, const IOMetaParameter<T> &io, int m_batch, bool trans, bool is_test) { // does not check for positive m_batch! nm_type_ = nm_type; switch (nm_type_) { case NoiseManagementType::None: { return; } case NoiseManagementType::Constant: { if (m_batch > dev_scale_values_->getSize()) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); const_set_if_ = false; } if (!const_set_if_) { dev_scale_values_->setConst(io.nm_thres > 0 ? (float)io.nm_thres : (float)1.0); const_set_if_ = true; } return; } case NoiseManagementType::Max: { this->maximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->maximizer_->saturateAbove(io.nm_thres); } return; } case NoiseManagementType::AbsMax: { this->amaximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->amaximizer_->saturateAbove(io.nm_thres); } return; } case NoiseManagementType::AbsMaxNPSum: { if (m_batch > dev_scale_values_->getSize()) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); } // get amax and npsum this->amaximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->amaximizer_->saturateAbove(io.nm_thres); } this->computeNPSum(dev_input, m_batch, trans); // combine int nthreads = context_->getNThreads(); int nblocks = context_->getNBlocks(m_batch, nthreads); hipStream_t s = context_->getStream(); hipLaunchKernelGGL(( kernelAbsMaxNPSum<T>), dim3(nblocks), dim3(nthreads), 0, s, dev_scale_values_->getData(), m_batch, this->amaximizer_->getMaxValues(), dev_psum_values_->getDataConst(), dev_nsum_values_->getDataConst(), io.out_bound, io.nm_assumed_wmax, io.inp_res > 0 ? io.max_bm_res / io.inp_res : 1.0); return; } case NoiseManagementType::AverageAbsMax: case NoiseManagementType::AverageAbsMaxSingleValue: { // CAUTION: the running average will not be saved for checkpointing... so there might be a // glitch when continueing training from checkpoint... 
// ALSO: average max is computed across // mbatch whereas for CPU it is based running average of single mat-vecs if ((nm_type_ == NoiseManagementType::AverageAbsMax) && (m_batch > dev_scale_values_->getSize())) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); hipStream_t s = context_->getStream(); int nthreads = context_->getNThreads(); // set scales to ravg [first time, could be set from outside] hipLaunchKernelGGL(( kernelAverageAbsMaxSetScales<T>) , dim3(context_->getNBlocks(m_batch + 1, nthreads)), dim3(nthreads), 0, s, dev_scale_values_->getData(), dev_ravg_scale_value_->getData(), dev_ravg_scale_value_->getDataConst(), m_batch, (T)0.0); } if (!is_test) { this->amaximizer_->compute(dev_input, m_batch, trans); hipStream_t s = context_->getStream(); int nthreads = context_->getNThreads(); int nblocks = context_->getNBlocks(m_batch, nthreads); if (m_batch > 1) { // first compute the average of the max over batch if (!dev_a_temp_storage_) { dev_avgmax_value_ = RPU::make_unique<CudaArray<float>>(context_, 1); size_t temp_storage_bytes = 0; RPU::hipcub::DeviceReduce::Sum( nullptr, temp_storage_bytes, amaximizer_->getMaxValues(), dev_avgmax_value_->getData(), m_batch, s); dev_a_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); } size_t ssz = dev_v_temp_storage_->getSize(); RPU::hipcub::DeviceReduce::Sum( (void *)dev_v_temp_storage_->getData(), ssz, amaximizer_->getMaxValues(), dev_avgmax_value_->getData(), m_batch, s); } if (nm_type_ == NoiseManagementType::AverageAbsMax) { // now update the running scale and set the current scales constant for all m_batch hipLaunchKernelGGL(( kernelAverageAbsMaxSetScales<T>) , dim3(context_->getNBlocks(m_batch + 1, nthreads)), dim3(nthreads), 0, s, dev_scale_values_->getData(), dev_ravg_scale_value_->getData(), m_batch > 1 ? dev_avgmax_value_->getData() : amaximizer_->getMaxValues(), dev_scale_values_->getSize(), ravg_initialized_ ? MIN(io.nm_decay * m_batch, 1.) : 1.0); } else { // just update the running avg value as only single output requested hipLaunchKernelGGL(( kernelAverageAbsMaxSingleMomentum<T>), dim3(1), dim3(1), 0, s, dev_ravg_scale_value_->getData(), m_batch > 1 ? dev_avgmax_value_->getData() : amaximizer_->getMaxValues(), m_batch, ravg_initialized_ ? 
MIN(io.nm_decay * m_batch, 1.0) : 1.0); } ravg_initialized_ = true; } return; } default: RPU_FATAL("Noise management type not implemented."); } } template <typename T> float *NoiseManager<T>::getScaleValues() const { switch (nm_type_) { case NoiseManagementType::None: return nullptr; case NoiseManagementType::AbsMaxNPSum: case NoiseManagementType::Constant: case NoiseManagementType::AverageAbsMax: return dev_scale_values_->getData(); case NoiseManagementType::AbsMax: return amaximizer_->getMaxValues(); case NoiseManagementType::Max: return maximizer_->getMaxValues(); case NoiseManagementType::AverageAbsMaxSingleValue: return dev_ravg_scale_value_->getData(); default: RPU_FATAL("Noise management type not implemented."); } }; #define ARGS1(NUM_T) , const NoiseManagementType &, const IOMetaParameter<NUM_T> &, int, bool, bool #define ARGS2 , int, bool template class NoiseManager<float>; RPU_GEN_IITER_TEMPLATES(float, void, NoiseManager<float>::compute, ARGS1(float)); RPU_GEN_IITER_TEMPLATES(float, void, NoiseManager<float>::computeNPSum, ARGS2); #ifdef RPU_USE_DOUBLE template class NoiseManager<double>; RPU_GEN_IITER_TEMPLATES(double, void, NoiseManager<double>::compute, ARGS1(double)); RPU_GEN_IITER_TEMPLATES(double, void, NoiseManager<double>::computeNPSum, ARGS2); #endif #undef ARGS1 #undef ARGS2 #undef LAUNCH_NPSUM_KERNEL } // namespace RPU
9b097d8a934fbe5bafbb400118340aa5808eef67.cu
/** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "noise_manager.h" #include <chrono> #include <cmath> #include <iostream> #include <memory> #include <random> #include "cuda_math_util.h" #include "cuda_util.h" #include <cub/cub.cuh> #include "io_iterator.h" namespace RPU { template <typename T> __global__ void kernelAbsMaxNPSum( float *scale_values, const int m_batch, const float *amax_values, const T *psum_values, const T *nsum_values, const T out_bound, const T assumed_wmax, const T bm_max // io.max_bm_res/io.inp_res ) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < m_batch) { // w_max*MAX(psum,nsum)/out_bound < scale T w_max = assumed_wmax; T amax = amax_values[tid]; T psum = psum_values[tid]; T nsum = -nsum_values[tid]; T sum = MAX(psum, nsum); scale_values[tid] = MAX(amax, MIN(sum * w_max / out_bound, amax * bm_max)); ; } } template <typename InputIteratorT, typename T> __global__ void kernelNPSumBatchTrans( InputIteratorT input, const int total_size_in, const int m_batch_in, T *psum_values, T *nsum_values, T *psum_values0, T *nsum_values0) { // -- only use this version if m_batch < blockDim.x !!! // -- probably: strided version would be faster... int tid = blockDim.x * blockIdx.x + threadIdx.x; // assumes that shared is of size 2*nthreads*sizeof(T) !!!!!!!!!! extern __shared__ __align__(sizeof(double)) unsigned char rpu_smem_nm[]; T *block_sum_values = reinterpret_cast<T *>(rpu_smem_nm); T *block_psum_values = &block_sum_values[0]; T *block_nsum_values = &block_sum_values[blockDim.x]; const int size = total_size_in; const int m_batch = m_batch_in; block_psum_values[threadIdx.x] = (T)0.0; block_nsum_values[threadIdx.x] = (T)0.0; __syncthreads(); if (tid < m_batch) { psum_values0[tid] = (T)0.0; nsum_values0[tid] = (T)0.0; } if (tid < size) { T value = input[tid]; int midx = tid % m_batch; if (value >= 0) { atomicAdd(&(block_psum_values[midx]), value); } else { atomicAdd(&(block_nsum_values[midx]), value); } } __syncthreads(); int bidx = threadIdx.x; if (bidx < m_batch) { atomicAdd(&(psum_values[bidx]), block_psum_values[bidx]); atomicAdd(&(nsum_values[bidx]), block_nsum_values[bidx]); } } template <typename InputIteratorT, typename T> __global__ void kernelNPSumBatchTrans_LargeBatch( InputIteratorT input, const int total_size_in, const int m_batch_in, T *psum_values, T *nsum_values, T *psum_values0, T *nsum_values0) { // -- use this version if m_batch >= blockDim.x // -- just uses atomic on global memory int tid = blockDim.x * blockIdx.x + threadIdx.x; const int size = total_size_in; const int m_batch = m_batch_in; if (tid < m_batch) { psum_values0[tid] = (T)0.0; nsum_values0[tid] = (T)0.0; } if (tid < size) { T value = input[tid]; int midx = tid % m_batch; if (value >= 0) { atomicAdd(&psum_values[midx], value); } else { atomicAdd(&nsum_values[midx], value); } } } template <typename T> __global__ void kernelAverageAbsMaxSetScales( float *scales, float *ravg, const float *sum, const int m_batch_in, T decay_rate_in) { int tid = blockDim.x * blockIdx.x + threadIdx.x; const int m_batch = m_batch_in; T decay_rate = decay_rate_in; T max_avg = (*sum) 
/ m_batch; T run_avg = *ravg; if (tid < m_batch) { scales[tid] = (float)(run_avg * (1.0 - decay_rate) + decay_rate * max_avg); } if (tid == m_batch) { *ravg = (float)(run_avg * (1.0 - decay_rate) + decay_rate * max_avg); } } template <typename T> __global__ void kernelAverageAbsMaxSingleMomentum(float *ravg, const float *sum, const int m_batch, T decay_rate) { // just single block! int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { T max_avg = (*sum) / m_batch; *ravg = (float)(*ravg * (1.0 - decay_rate) + decay_rate * max_avg); } } /****************************************************************************************************************/ /* NoiseManager */ /******************************************************************************************************************/ #define LAUNCH_NPSUM_KERNEL(KNAME, SHARED_MEM, ARGS) \ KNAME<InputIteratorT, T><<<nblocks, nthreads, SHARED_MEM, s>>> ARGS; template <typename T> NoiseManager<T>::NoiseManager(CudaContext *c, int size) : size_(size), context_(c), buffer_m_batch_(0), const_set_if_(false) { // initialize for m_batch=1 dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, 1); dev_psum_values_ = RPU::make_unique<CudaArray<T>>(context_, 1); dev_nsum_values_ = RPU::make_unique<CudaArray<T>>(context_, 1); dev_ravg_scale_value_ = RPU::make_unique<CudaArray<float>>(context_, 1); dev_ravg_scale_value_->setConst(1.0); amaximizer_ = RPU::make_unique<Maximizer<T>>(context_, size, true); maximizer_ = RPU::make_unique<Maximizer<T>>(context_, size, false); size_t temp_storage_bytes = 0; RPU::cub::DeviceReduce::Reduce( nullptr, temp_storage_bytes, dev_psum_values_->getData(), dev_psum_values_->getData(), size_, nsum_op_, 0, context_->getStream()); dev_v_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); context_->synchronize(); } template <typename T> void NoiseManager<T>::initializeBatchBuffer(int m_batch) { // this inits all the buffers needed for PMSum only !! 
if ((m_batch > 1) && (buffer_m_batch_ < m_batch)) { buffer_m_batch_ = m_batch; dev_psum_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_psum_values0_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_psum_values0_->setConst((T)0.0); dev_nsum_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_nsum_values0_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_nsum_values0_->setConst((T)0.0); int *offsets = new int[m_batch + 1]; // not trans for (int i = 0; i <= m_batch; i++) { offsets[i] = i * size_; } dev_offsets_ = RPU::make_unique<CudaArray<int>>(context_, m_batch + 1, offsets); size_t temp_storage_bytes = 0; RPU::cub::DeviceSegmentedReduce::Reduce( nullptr, temp_storage_bytes, dev_psum_values_->getData(), dev_psum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, psum_op_, 0, context_->getStream()); dev_m_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); const_set_if_ = false; context_->synchronize(); delete[] offsets; } } template <typename T> template <typename InputIteratorT> void NoiseManager<T>::computeNPSum(InputIteratorT dev_input, int m_batch, bool trans) { cudaStream_t s = context_->getStream(); if (m_batch == 1) { size_t ssz = dev_v_temp_storage_->getSize(); RPU::cub::DeviceReduce::Reduce( (void *)dev_v_temp_storage_->getData(), ssz, dev_input, dev_psum_values_->getData(), size_, psum_op_, (T)0, s); RPU::cub::DeviceReduce::Reduce( (void *)dev_v_temp_storage_->getData(), ssz, dev_input, dev_nsum_values_->getData(), size_, nsum_op_, (T)0, s); } else { if (buffer_m_batch_ < m_batch) { this->initializeBatchBuffer(m_batch); } if (trans) { std::swap(dev_psum_values_, dev_psum_values0_); std::swap(dev_nsum_values_, dev_nsum_values0_); int nthreads = context_->getNThreads(); int n = size_ * m_batch; int nblocks = context_->getNBlocks(n, nthreads); if (m_batch <= nthreads) { int shared_mem = 2 * nthreads * sizeof(T); LAUNCH_NPSUM_KERNEL( kernelNPSumBatchTrans, shared_mem, (dev_input, n, m_batch, dev_psum_values_->getData(), dev_nsum_values_->getData(), dev_psum_values0_->getData(), dev_nsum_values0_->getData())); } else { // simple atomic global memory version LAUNCH_NPSUM_KERNEL( kernelNPSumBatchTrans_LargeBatch, 0, (dev_input, n, m_batch, dev_psum_values_->getData(), dev_nsum_values_->getData(), dev_psum_values0_->getData(), dev_nsum_values0_->getData())); } } else { // Fast Segmented reduction size_t ssz = dev_m_temp_storage_->getSize(); RPU::cub::DeviceSegmentedReduce::Reduce( (void *)dev_m_temp_storage_->getData(), ssz, dev_input, dev_psum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, psum_op_, (T)0.0, s); RPU::cub::DeviceSegmentedReduce::Reduce( (void *)dev_m_temp_storage_->getData(), ssz, dev_input, dev_nsum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, nsum_op_, (T)0.0, s); } } } template <typename T> void NoiseManager<T>::setAverageAbsMax(float value) { dev_ravg_scale_value_->setConst(value); dev_scale_values_->setConst(value); ravg_initialized_ = true; context_->synchronize(); } template <typename T> float NoiseManager<T>::getAverageAbsMax() const { float tmp; dev_ravg_scale_value_->copyTo(&tmp); return tmp; }; template <typename T> template <typename InputIteratorT> void NoiseManager<T>::compute( InputIteratorT dev_input, const NoiseManagementType &nm_type, const IOMetaParameter<T> &io, int m_batch, bool trans, bool is_test) { // does not check for positive m_batch! 
nm_type_ = nm_type; switch (nm_type_) { case NoiseManagementType::None: { return; } case NoiseManagementType::Constant: { if (m_batch > dev_scale_values_->getSize()) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); const_set_if_ = false; } if (!const_set_if_) { dev_scale_values_->setConst(io.nm_thres > 0 ? (float)io.nm_thres : (float)1.0); const_set_if_ = true; } return; } case NoiseManagementType::Max: { this->maximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->maximizer_->saturateAbove(io.nm_thres); } return; } case NoiseManagementType::AbsMax: { this->amaximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->amaximizer_->saturateAbove(io.nm_thres); } return; } case NoiseManagementType::AbsMaxNPSum: { if (m_batch > dev_scale_values_->getSize()) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); } // get amax and npsum this->amaximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->amaximizer_->saturateAbove(io.nm_thres); } this->computeNPSum(dev_input, m_batch, trans); // combine int nthreads = context_->getNThreads(); int nblocks = context_->getNBlocks(m_batch, nthreads); cudaStream_t s = context_->getStream(); kernelAbsMaxNPSum<T><<<nblocks, nthreads, 0, s>>>( dev_scale_values_->getData(), m_batch, this->amaximizer_->getMaxValues(), dev_psum_values_->getDataConst(), dev_nsum_values_->getDataConst(), io.out_bound, io.nm_assumed_wmax, io.inp_res > 0 ? io.max_bm_res / io.inp_res : 1.0); return; } case NoiseManagementType::AverageAbsMax: case NoiseManagementType::AverageAbsMaxSingleValue: { // CAUTION: the running average will not be saved for checkpointing... so there might be a // glitch when continueing training from checkpoint... 
// ALSO: average max is computed across // mbatch whereas for CPU it is based running average of single mat-vecs if ((nm_type_ == NoiseManagementType::AverageAbsMax) && (m_batch > dev_scale_values_->getSize())) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); cudaStream_t s = context_->getStream(); int nthreads = context_->getNThreads(); // set scales to ravg [first time, could be set from outside] kernelAverageAbsMaxSetScales<T> <<<context_->getNBlocks(m_batch + 1, nthreads), nthreads, 0, s>>>( dev_scale_values_->getData(), dev_ravg_scale_value_->getData(), dev_ravg_scale_value_->getDataConst(), m_batch, (T)0.0); } if (!is_test) { this->amaximizer_->compute(dev_input, m_batch, trans); cudaStream_t s = context_->getStream(); int nthreads = context_->getNThreads(); int nblocks = context_->getNBlocks(m_batch, nthreads); if (m_batch > 1) { // first compute the average of the max over batch if (!dev_a_temp_storage_) { dev_avgmax_value_ = RPU::make_unique<CudaArray<float>>(context_, 1); size_t temp_storage_bytes = 0; RPU::cub::DeviceReduce::Sum( nullptr, temp_storage_bytes, amaximizer_->getMaxValues(), dev_avgmax_value_->getData(), m_batch, s); dev_a_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); } size_t ssz = dev_v_temp_storage_->getSize(); RPU::cub::DeviceReduce::Sum( (void *)dev_v_temp_storage_->getData(), ssz, amaximizer_->getMaxValues(), dev_avgmax_value_->getData(), m_batch, s); } if (nm_type_ == NoiseManagementType::AverageAbsMax) { // now update the running scale and set the current scales constant for all m_batch kernelAverageAbsMaxSetScales<T> <<<context_->getNBlocks(m_batch + 1, nthreads), nthreads, 0, s>>>( dev_scale_values_->getData(), dev_ravg_scale_value_->getData(), m_batch > 1 ? dev_avgmax_value_->getData() : amaximizer_->getMaxValues(), dev_scale_values_->getSize(), ravg_initialized_ ? MIN(io.nm_decay * m_batch, 1.) : 1.0); } else { // just update the running avg value as only single output requested kernelAverageAbsMaxSingleMomentum<T><<<1, 1, 0, s>>>( dev_ravg_scale_value_->getData(), m_batch > 1 ? dev_avgmax_value_->getData() : amaximizer_->getMaxValues(), m_batch, ravg_initialized_ ? 
MIN(io.nm_decay * m_batch, 1.0) : 1.0); } ravg_initialized_ = true; } return; } default: RPU_FATAL("Noise management type not implemented."); } } template <typename T> float *NoiseManager<T>::getScaleValues() const { switch (nm_type_) { case NoiseManagementType::None: return nullptr; case NoiseManagementType::AbsMaxNPSum: case NoiseManagementType::Constant: case NoiseManagementType::AverageAbsMax: return dev_scale_values_->getData(); case NoiseManagementType::AbsMax: return amaximizer_->getMaxValues(); case NoiseManagementType::Max: return maximizer_->getMaxValues(); case NoiseManagementType::AverageAbsMaxSingleValue: return dev_ravg_scale_value_->getData(); default: RPU_FATAL("Noise management type not implemented."); } }; #define ARGS1(NUM_T) , const NoiseManagementType &, const IOMetaParameter<NUM_T> &, int, bool, bool #define ARGS2 , int, bool template class NoiseManager<float>; RPU_GEN_IITER_TEMPLATES(float, void, NoiseManager<float>::compute, ARGS1(float)); RPU_GEN_IITER_TEMPLATES(float, void, NoiseManager<float>::computeNPSum, ARGS2); #ifdef RPU_USE_DOUBLE template class NoiseManager<double>; RPU_GEN_IITER_TEMPLATES(double, void, NoiseManager<double>::compute, ARGS1(double)); RPU_GEN_IITER_TEMPLATES(double, void, NoiseManager<double>::computeNPSum, ARGS2); #endif #undef ARGS1 #undef ARGS2 #undef LAUNCH_NPSUM_KERNEL } // namespace RPU
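The AbsMaxNPSum path above needs, per batch column, the sum of the positive inputs and the sum of the negative inputs; kernelNPSumBatchTrans_LargeBatch computes exactly that with global atomics for the transposed layout. Below is a hedged, self-contained CUDA sketch of just that accumulation, leaving out the per-SM shared-memory variant, the cub segmented reductions, and the final scale formula; the names np_sum, SIZE, and M_BATCH are illustrative only.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define SIZE    512  // input vector length (rows)
#define M_BATCH 64   // batch size (columns of the transposed layout)

__global__ void np_sum(const float *x, int size, int m_batch, float *psum, float *nsum)
{
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= size * m_batch) return;
	float v = x[tid];
	int b = tid % m_batch;             // transposed layout: batch index varies fastest
	if (v >= 0.0f) atomicAdd(&psum[b], v);
	else           atomicAdd(&nsum[b], v);
}

int main()
{
	const int n = SIZE * M_BATCH;
	float *h_x = (float *)malloc(n * sizeof(float));
	for (int i = 0; i < n; ++i) h_x[i] = (i % 3 == 0) ? -1.0f : 0.5f;
	float *d_x, *d_psum, *d_nsum;
	cudaMalloc(&d_x, n * sizeof(float));
	cudaMalloc(&d_psum, M_BATCH * sizeof(float));
	cudaMalloc(&d_nsum, M_BATCH * sizeof(float));
	cudaMemcpy(d_x, h_x, n * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemset(d_psum, 0, M_BATCH * sizeof(float));
	cudaMemset(d_nsum, 0, M_BATCH * sizeof(float));
	np_sum<<<(n + 255) / 256, 256>>>(d_x, SIZE, M_BATCH, d_psum, d_nsum);
	float h_psum[M_BATCH], h_nsum[M_BATCH];
	cudaMemcpy(h_psum, d_psum, sizeof(h_psum), cudaMemcpyDeviceToHost);
	cudaMemcpy(h_nsum, d_nsum, sizeof(h_nsum), cudaMemcpyDeviceToHost);
	printf("batch column 0: psum=%f nsum=%f\n", h_psum[0], h_nsum[0]);
	free(h_x);
	return 0;
}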
3c5122e68b2d3a943d289259b28ba2c7c1237525.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef __CUDASEMAPHOREEBO_CU__ #define __CUDASEMAPHOREEBO_CU__ inline __host__ hipError_t cudaSemaphoreCreateEBO(cudaSemaphore_t * const handle, const int semaphoreNumber, const unsigned int count, const int NUM_SM) { /* Here we set the initial value to be count+1, this allows us to do an atomicExch(sem, 0) and basically use the semaphore value as both a lock and a semaphore. */ unsigned int initialValue = (count + 1); *handle = semaphoreNumber; for (int id = 0; id < NUM_SM; ++id) { // need to set these values for all SMs cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4))] = initialValue; cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 1] = 0; cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 2] = 0; cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 3] = initialValue; } return hipSuccess; } inline __device__ bool cudaSemaphoreEBOTryWait(const cudaSemaphore_t sem, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); /* Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each SM uses 4 of them (current count, head, tail, max count). For the global semaphore all SMs use semaphoreBuffers[sem * 4 * NUM_SM]. */ unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM); unsigned int * const lock = currCount + 1; /* Reuse the tail for the "writers are waiting" flag since tail is unused. For now just use to indicate that at least 1 writer is waiting instead of a count to make sure that readers aren't totally starved out until all the writers are done. */ unsigned int * const writerWaiting = currCount + 2; __shared__ bool acq1, acq2; __syncthreads(); if (isMasterThread) { acq1 = false; // try to acquire the sem head "lock" if (atomicCAS(lock, 0, 1) == 0) { acq1 = true; } } __syncthreads(); if (!acq1) { return false; } // return if we couldn't acquire the lock if (isMasterThread) { acq2 = false; /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. */ unsigned int currSemCount = currCount[0]; if (isWriter) { // writer needs the count to be == maxSemCount to enter the critical // section (otherwise there are readers in the critical section) if (currSemCount == maxSemCount) { acq2 = true; } } else { // if there is a writer waiting, readers aren't allowed to enter the // critical section if (writerWaiting[0] == 0) { // readers need count > 1 to enter critical section (otherwise semaphore // is full) if (currSemCount > 1) { acq2 = true; } } } } __syncthreads(); if (!acq2) // release the sem head "lock" since the semaphore was full { // writers set a flag to note that they are waiting so more readers don't // join after the writer started waiting if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ } if (isMasterThread) { atomicExch(lock, 0); } __syncthreads(); return false; } __syncthreads(); if (isMasterThread) { /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. 
*/ if (isWriter) { /* writer decrements the current count of the semaphore by the max to ensure that no one else can enter the critical section while it's writing. */ currCount[0] -= maxSemCount; // writers also need to unset the "writer is waiting" flag writerWaiting[0] = 0; } else { /* readers decrement the current count of the semaphore by 1 so other readers can also read the data (but not the writers since they needs the entire CS). */ --currCount[0]; //atomicSub(currCount, 1); } // now that we've updated the semaphore count can release the lock atomicExch(lock, 0); } __syncthreads(); return true; } inline __device__ void cudaSemaphoreEBOWait(const cudaSemaphore_t sem, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { __shared__ int iter, backoff; const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); volatile __shared__ int dummySum; if (isMasterThread) { iter = 0; backoff = 10; dummySum = 0; } __syncthreads(); while (!cudaSemaphoreEBOTryWait(sem, isWriter, maxSemCount, semaphoreBuffers, NUM_SM)) { __syncthreads(); if (isMasterThread) { // if we failed to enter the semaphore, wait for a little while before // trying again for (int j = 0; j < backoff; ++j) { dummySum += j; } /* for writers increse backoff a lot because failing means readers are in the CS currently -- most important for non-unique because all TBs on all SMs are going for the same semaphore. */ if (isWriter) { backoff += 250; } else { backoff += 5; /* small, linear backoff increase for readers */ } ++iter; // track how long we've been trying if (iter > 25) { iter = 0; backoff = 1; } } __syncthreads(); } __syncthreads(); } inline __device__ void cudaSemaphoreEBOPost(const cudaSemaphore_t sem, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); /* Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each SM uses 4 of them (current count, head, tail, max count). For the global semaphore use semaphoreBuffers[sem * 4 * NUM_SM]. */ unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM); unsigned int * const lock = currCount + 1; __shared__ bool acquired; if (isMasterThread) { acquired = false; } __syncthreads(); while (!acquired) { __syncthreads(); if (isMasterThread) { // try to acquire sem head "lock" if (atomicCAS(lock, 0, 1) == 1) { acquired = false; } else { acquired = true; } } __syncthreads(); } if (isMasterThread) { /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. */ if (isWriter) { // writers add the max value to the semaphore to allow the readers to // start accessing the critical section. currCount[0] += maxSemCount; } else { ++currCount[0]; // readers add 1 to the semaphore } // now that we've updated the semaphore count can release the lock atomicExch(lock, 0); } __syncthreads(); } // same wait algorithm but with local scope and per-SM synchronization inline __device__ bool cudaSemaphoreEBOTryWaitLocal(const cudaSemaphore_t sem, const unsigned int smID, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); /* Each sem has NUM_SM * 4 locations in the buffer. 
Of these locations, each SM gets 4 of them (current count, head, tail, max count). So SM 0 starts at semaphoreBuffers[sem * 4 * NUM_SM]. */ unsigned int * const currCount = semaphoreBuffers + ((sem * 4 * NUM_SM) + (smID * 4)); unsigned int * const lock = currCount + 1; /* Reuse the tail for the "writers are waiting" flag since tail is unused. For now just use to indicate that at least 1 writer is waiting instead of a count to make sure that readers aren't totally starved out until all the writers are done. */ unsigned int * const writerWaiting = currCount + 2; __shared__ bool acq1, acq2; __syncthreads(); if (isMasterThread) { acq1 = false; // try to acquire the sem head "lock" if (atomicCAS(lock, 0, 1) == 0) { acq1 = true; } } __syncthreads(); if (!acq1) { return false; } // return if we couldn't acquire the lock if (isMasterThread) { acq2 = false; /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. */ unsigned int currSemCount = currCount[0]; if (isWriter) { // writer needs the count to be == maxSemCount to enter the critical // section (otherwise there are readers in the critical section) if (currSemCount == maxSemCount) { acq2 = true; } } else { // if there is a writer waiting, readers aren't allowed to enter the // critical section if (writerWaiting[0] == 0) { // readers need count > 1 to enter critical section (otherwise semaphore // is full) if (currSemCount > 1) { acq2 = true; } } } } __syncthreads(); if (!acq2) // release the sem head "lock" since the semaphore was full { // writers set a flag to note that they are waiting so more readers don't // join after the writer started waiting if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ } if (isMasterThread) { atomicExch(lock, 0); } __syncthreads(); return false; } __syncthreads(); if (isMasterThread) { /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. */ if (isWriter) { /* writer decrements the current count of the semaphore by the max to ensure that no one else can enter the critical section while it's writing. */ currCount[0] -= maxSemCount; // writers also need to unset the "writer is waiting" flag writerWaiting[0] = 0; } else { /* readers decrement the current count of the semaphore by 1 so other readers can also read the data (but not the writers since they needs the entire CS). 
*/ --currCount[0]; } // now that we've updated the semaphore count can release the lock atomicExch(lock, 0); } __syncthreads(); return true; } // same algorithm but with local scope inline __device__ void cudaSemaphoreEBOWaitLocal(const cudaSemaphore_t sem, const unsigned int smID, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { __shared__ int iter, backoff; const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); volatile __shared__ int dummySum; if (isMasterThread) { iter = 0; backoff = 10; dummySum = 0; } __syncthreads(); while (!cudaSemaphoreEBOTryWaitLocal(sem, smID, isWriter, maxSemCount, semaphoreBuffers, NUM_SM)) { __syncthreads(); if (isMasterThread) { // if we failed to enter the semaphore, wait for a little while before // trying again for (int j = 0; j < backoff; ++j) { dummySum += j; } backoff += 5; /* small, linear backoff increase */ ++iter; // track how long we've been trying if (iter > 25) { iter = 0; backoff = 1; } } __syncthreads(); } __syncthreads(); } inline __device__ void cudaSemaphoreEBOPostLocal(const cudaSemaphore_t sem, const unsigned int smID, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); // Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each // SM gets 4 of them. So SM 0 starts at semaphoreBuffers[sem * 4 * NUM_SM]. unsigned int * const currCount = semaphoreBuffers + ((sem * 4 * NUM_SM) + (smID * 4)); unsigned int * const lock = currCount + 1; __shared__ bool acquired; if (isMasterThread) { acquired = false; } __syncthreads(); while (!acquired) { __syncthreads(); if (isMasterThread) { // try to acquire sem head "lock" if (atomicCAS(lock, 0, 1) == 1) { acquired = false; } else { acquired = true; } } __syncthreads(); } if (isMasterThread) { /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. */ if (isWriter) { // writers add the max value to the semaphore to allow the readers to // start accessing the critical section. currCount[0] += maxSemCount; } else { ++currCount[0]; // readers add 1 to the semaphore } // now that we've updated the semaphore count can release the lock atomicExch(lock, 0); } __syncthreads(); } #endif // #ifndef __CUDASEMAPHOREEBO_CU__
3c5122e68b2d3a943d289259b28ba2c7c1237525.cu
#ifndef __CUDASEMAPHOREEBO_CU__ #define __CUDASEMAPHOREEBO_CU__ inline __host__ cudaError_t cudaSemaphoreCreateEBO(cudaSemaphore_t * const handle, const int semaphoreNumber, const unsigned int count, const int NUM_SM) { /* Here we set the initial value to be count+1, this allows us to do an atomicExch(sem, 0) and basically use the semaphore value as both a lock and a semaphore. */ unsigned int initialValue = (count + 1); *handle = semaphoreNumber; for (int id = 0; id < NUM_SM; ++id) { // need to set these values for all SMs cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4))] = initialValue; cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 1] = 0; cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 2] = 0; cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 3] = initialValue; } return cudaSuccess; } inline __device__ bool cudaSemaphoreEBOTryWait(const cudaSemaphore_t sem, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); /* Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each SM uses 4 of them (current count, head, tail, max count). For the global semaphore all SMs use semaphoreBuffers[sem * 4 * NUM_SM]. */ unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM); unsigned int * const lock = currCount + 1; /* Reuse the tail for the "writers are waiting" flag since tail is unused. For now just use to indicate that at least 1 writer is waiting instead of a count to make sure that readers aren't totally starved out until all the writers are done. */ unsigned int * const writerWaiting = currCount + 2; __shared__ bool acq1, acq2; __syncthreads(); if (isMasterThread) { acq1 = false; // try to acquire the sem head "lock" if (atomicCAS(lock, 0, 1) == 0) { acq1 = true; } } __syncthreads(); if (!acq1) { return false; } // return if we couldn't acquire the lock if (isMasterThread) { acq2 = false; /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. */ unsigned int currSemCount = currCount[0]; if (isWriter) { // writer needs the count to be == maxSemCount to enter the critical // section (otherwise there are readers in the critical section) if (currSemCount == maxSemCount) { acq2 = true; } } else { // if there is a writer waiting, readers aren't allowed to enter the // critical section if (writerWaiting[0] == 0) { // readers need count > 1 to enter critical section (otherwise semaphore // is full) if (currSemCount > 1) { acq2 = true; } } } } __syncthreads(); if (!acq2) // release the sem head "lock" since the semaphore was full { // writers set a flag to note that they are waiting so more readers don't // join after the writer started waiting if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ } if (isMasterThread) { atomicExch(lock, 0); } __syncthreads(); return false; } __syncthreads(); if (isMasterThread) { /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. */ if (isWriter) { /* writer decrements the current count of the semaphore by the max to ensure that no one else can enter the critical section while it's writing. 
*/ currCount[0] -= maxSemCount; // writers also need to unset the "writer is waiting" flag writerWaiting[0] = 0; } else { /* readers decrement the current count of the semaphore by 1 so other readers can also read the data (but not the writers since they needs the entire CS). */ --currCount[0]; //atomicSub(currCount, 1); } // now that we've updated the semaphore count can release the lock atomicExch(lock, 0); } __syncthreads(); return true; } inline __device__ void cudaSemaphoreEBOWait(const cudaSemaphore_t sem, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { __shared__ int iter, backoff; const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); volatile __shared__ int dummySum; if (isMasterThread) { iter = 0; backoff = 10; dummySum = 0; } __syncthreads(); while (!cudaSemaphoreEBOTryWait(sem, isWriter, maxSemCount, semaphoreBuffers, NUM_SM)) { __syncthreads(); if (isMasterThread) { // if we failed to enter the semaphore, wait for a little while before // trying again for (int j = 0; j < backoff; ++j) { dummySum += j; } /* for writers increse backoff a lot because failing means readers are in the CS currently -- most important for non-unique because all TBs on all SMs are going for the same semaphore. */ if (isWriter) { backoff += 250; } else { backoff += 5; /* small, linear backoff increase for readers */ } ++iter; // track how long we've been trying if (iter > 25) { iter = 0; backoff = 1; } } __syncthreads(); } __syncthreads(); } inline __device__ void cudaSemaphoreEBOPost(const cudaSemaphore_t sem, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); /* Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each SM uses 4 of them (current count, head, tail, max count). For the global semaphore use semaphoreBuffers[sem * 4 * NUM_SM]. */ unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM); unsigned int * const lock = currCount + 1; __shared__ bool acquired; if (isMasterThread) { acquired = false; } __syncthreads(); while (!acquired) { __syncthreads(); if (isMasterThread) { // try to acquire sem head "lock" if (atomicCAS(lock, 0, 1) == 1) { acquired = false; } else { acquired = true; } } __syncthreads(); } if (isMasterThread) { /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. */ if (isWriter) { // writers add the max value to the semaphore to allow the readers to // start accessing the critical section. currCount[0] += maxSemCount; } else { ++currCount[0]; // readers add 1 to the semaphore } // now that we've updated the semaphore count can release the lock atomicExch(lock, 0); } __syncthreads(); } // same wait algorithm but with local scope and per-SM synchronization inline __device__ bool cudaSemaphoreEBOTryWaitLocal(const cudaSemaphore_t sem, const unsigned int smID, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); /* Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each SM gets 4 of them (current count, head, tail, max count). So SM 0 starts at semaphoreBuffers[sem * 4 * NUM_SM]. 
*/ unsigned int * const currCount = semaphoreBuffers + ((sem * 4 * NUM_SM) + (smID * 4)); unsigned int * const lock = currCount + 1; /* Reuse the tail for the "writers are waiting" flag since tail is unused. For now just use to indicate that at least 1 writer is waiting instead of a count to make sure that readers aren't totally starved out until all the writers are done. */ unsigned int * const writerWaiting = currCount + 2; __shared__ bool acq1, acq2; __syncthreads(); if (isMasterThread) { acq1 = false; // try to acquire the sem head "lock" if (atomicCAS(lock, 0, 1) == 0) { acq1 = true; } } __syncthreads(); if (!acq1) { return false; } // return if we couldn't acquire the lock if (isMasterThread) { acq2 = false; /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. */ unsigned int currSemCount = currCount[0]; if (isWriter) { // writer needs the count to be == maxSemCount to enter the critical // section (otherwise there are readers in the critical section) if (currSemCount == maxSemCount) { acq2 = true; } } else { // if there is a writer waiting, readers aren't allowed to enter the // critical section if (writerWaiting[0] == 0) { // readers need count > 1 to enter critical section (otherwise semaphore // is full) if (currSemCount > 1) { acq2 = true; } } } } __syncthreads(); if (!acq2) // release the sem head "lock" since the semaphore was full { // writers set a flag to note that they are waiting so more readers don't // join after the writer started waiting if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ } if (isMasterThread) { atomicExch(lock, 0); } __syncthreads(); return false; } __syncthreads(); if (isMasterThread) { /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. */ if (isWriter) { /* writer decrements the current count of the semaphore by the max to ensure that no one else can enter the critical section while it's writing. */ currCount[0] -= maxSemCount; // writers also need to unset the "writer is waiting" flag writerWaiting[0] = 0; } else { /* readers decrement the current count of the semaphore by 1 so other readers can also read the data (but not the writers since they needs the entire CS). 
*/ --currCount[0]; } // now that we've updated the semaphore count can release the lock atomicExch(lock, 0); } __syncthreads(); return true; } // same algorithm but with local scope inline __device__ void cudaSemaphoreEBOWaitLocal(const cudaSemaphore_t sem, const unsigned int smID, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { __shared__ int iter, backoff; const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); volatile __shared__ int dummySum; if (isMasterThread) { iter = 0; backoff = 10; dummySum = 0; } __syncthreads(); while (!cudaSemaphoreEBOTryWaitLocal(sem, smID, isWriter, maxSemCount, semaphoreBuffers, NUM_SM)) { __syncthreads(); if (isMasterThread) { // if we failed to enter the semaphore, wait for a little while before // trying again for (int j = 0; j < backoff; ++j) { dummySum += j; } backoff += 5; /* small, linear backoff increase */ ++iter; // track how long we've been trying if (iter > 25) { iter = 0; backoff = 1; } } __syncthreads(); } __syncthreads(); } inline __device__ void cudaSemaphoreEBOPostLocal(const cudaSemaphore_t sem, const unsigned int smID, const bool isWriter, const unsigned int maxSemCount, unsigned int * semaphoreBuffers, const int NUM_SM) { const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); // Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each // SM gets 4 of them. So SM 0 starts at semaphoreBuffers[sem * 4 * NUM_SM]. unsigned int * const currCount = semaphoreBuffers + ((sem * 4 * NUM_SM) + (smID * 4)); unsigned int * const lock = currCount + 1; __shared__ bool acquired; if (isMasterThread) { acquired = false; } __syncthreads(); while (!acquired) { __syncthreads(); if (isMasterThread) { // try to acquire sem head "lock" if (atomicCAS(lock, 0, 1) == 1) { acquired = false; } else { acquired = true; } } __syncthreads(); } if (isMasterThread) { /* NOTE: currCount is only accessed by 1 TB at a time and has a lock around it, so we can safely access it as a regular data access instead of with atomics. */ if (isWriter) { // writers add the max value to the semaphore to allow the readers to // start accessing the critical section. currCount[0] += maxSemCount; } else { ++currCount[0]; // readers add 1 to the semaphore } // now that we've updated the semaphore count can release the lock atomicExch(lock, 0); } __syncthreads(); } #endif // #ifndef __CUDASEMAPHOREEBO_CU__
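The semaphore pair above builds everything on one core pattern: the block's master thread performs an atomicCAS on a "head lock" word, a __shared__ flag plus __syncthreads() broadcasts the outcome to the rest of the block, and a software backoff loop spaces out retries. The standalone CUDA sketch below shows just that core, one global lock word acquired per thread block and protecting a trivial critical section; it is an illustration of the pattern, not part of the library above, and every name in it is made up.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void lockedIncrement(unsigned int *lock, unsigned int *counter)
{
    const bool isMasterThread = (threadIdx.x == 0);
    __shared__ bool acquired;
    __shared__ int backoff;

    if (isMasterThread) { acquired = false; backoff = 10; }
    __syncthreads();

    while (!acquired) {
        if (isMasterThread) {
            // only the master thread of each block touches the lock word
            acquired = (atomicCAS(lock, 0u, 1u) == 0u);
            if (!acquired) {
                // spin for a while before retrying, growing the wait linearly
                for (volatile int j = 0; j < backoff; ++j) { }
                backoff += 5;
            }
        }
        __syncthreads();   // broadcast 'acquired' to the whole block
    }

    if (isMasterThread) {
        ++(*counter);             // critical section: one block at a time
        __threadfence();          // make the update visible before releasing
        atomicExch(lock, 0u);     // release the lock
    }
    __syncthreads();
}

int main()
{
    unsigned int *lock, *counter;
    cudaMalloc(&lock, sizeof(unsigned int));
    cudaMalloc(&counter, sizeof(unsigned int));
    cudaMemset(lock, 0, sizeof(unsigned int));
    cudaMemset(counter, 0, sizeof(unsigned int));

    lockedIncrement<<<64, 128>>>(lock, counter);
    cudaDeviceSynchronize();

    unsigned int h = 0;
    cudaMemcpy(&h, counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("counter = %u (expected 64, one increment per block)\n", h);

    cudaFree(lock);
    cudaFree(counter);
    return 0;
}

The reader/writer accounting and the per-SM "local" variants in the files above layer extra counters on top of this same acquire/release skeleton.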
66ad0533010d8c98357b451e174ee1b8dafae6c1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "rgbUtoGreyF_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int width = XSIZE; int height = YSIZE; unsigned int *rgbU = NULL; hipMalloc(&rgbU, XSIZE*YSIZE); float *grey = NULL; hipMalloc(&grey, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( rgbUtoGreyF_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,rgbU,grey); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( rgbUtoGreyF_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,rgbU,grey); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( rgbUtoGreyF_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,rgbU,grey); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
66ad0533010d8c98357b451e174ee1b8dafae6c1.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "rgbUtoGreyF_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int width = XSIZE; int height = YSIZE; unsigned int *rgbU = NULL; cudaMalloc(&rgbU, XSIZE*YSIZE); float *grey = NULL; cudaMalloc(&grey, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); rgbUtoGreyF_kernel<<<gridBlock,threadBlock>>>(width,height,rgbU,grey); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { rgbUtoGreyF_kernel<<<gridBlock,threadBlock>>>(width,height,rgbU,grey); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { rgbUtoGreyF_kernel<<<gridBlock,threadBlock>>>(width,height,rgbU,grey); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
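The generated harness above wraps 1000 asynchronous launches in std::chrono timestamps without synchronizing before the end timestamp, so what it reports is dominated by launch enqueue cost plus whatever stalls the launch queue happens to introduce. Below is a sketch of the event-based alternative; the kernel, sizes and launch shape are placeholders, not the generated rgbUtoGreyF_kernel.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(float *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = out[i] * 2.0f + 1.0f;
}

int main()
{
    const int n = 1 << 20;
    float *d = nullptr;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));

    dim3 block(256), grid((n + 255) / 256);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    dummyKernel<<<grid, block>>>(d, n);   // warm-up launch
    cudaEventRecord(start);
    for (int it = 0; it < 1000; ++it)
        dummyKernel<<<grid, block>>>(d, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);           // wait until the last kernel finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("1000 launches took %.3f ms on the device\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d);
    return 0;
}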
aee995b18f81f5409b7eeecc8c1815f34761ef1b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "setAllValuesKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *houghSpace = NULL; hipMalloc(&houghSpace, XSIZE*YSIZE); int height = YSIZE; int width = XSIZE; float value = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( setAllValuesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, houghSpace,height,width,value); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( setAllValuesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, houghSpace,height,width,value); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( setAllValuesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, houghSpace,height,width,value); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
aee995b18f81f5409b7eeecc8c1815f34761ef1b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "setAllValuesKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *houghSpace = NULL; cudaMalloc(&houghSpace, XSIZE*YSIZE); int height = YSIZE; int width = XSIZE; float value = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); setAllValuesKernel<<<gridBlock,threadBlock>>>(houghSpace,height,width,value); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { setAllValuesKernel<<<gridBlock,threadBlock>>>(houghSpace,height,width,value); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { setAllValuesKernel<<<gridBlock,threadBlock>>>(houghSpace,height,width,value); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
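Both benchmark harnesses above pass XSIZE*YSIZE as the byte count to hipMalloc/cudaMalloc even though the buffers hold unsigned int, int or float elements, so the allocations appear to be a factor of sizeof(T) smaller than what an XSIZE-by-YSIZE kernel would index. A small typed-allocation helper avoids that class of slip; the helper name and sizes below are illustrative only.

#include <cstdio>
#include <cuda_runtime.h>

template <typename T>
T *deviceAlloc(size_t count)
{
    T *ptr = nullptr;
    // size the allocation in elements, then convert to bytes exactly once
    cudaError_t err = cudaMalloc(&ptr, count * sizeof(T));
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return nullptr;
    }
    return ptr;
}

int main()
{
    const size_t width = 1016, height = 1016;
    int   *houghSpace = deviceAlloc<int>(width * height);    // one int per cell
    float *grey       = deviceAlloc<float>(width * height);  // one float per pixel
    printf("houghSpace=%p grey=%p\n", (void *)houghSpace, (void *)grey);
    cudaFree(houghSpace);
    cudaFree(grey);
    return 0;
}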
2f558bf4b896a2598622fe755dc92e38b1ee50d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> #include "lodepng.h" #define MAX_THREAD 1024 __global__ void max_pooling(unsigned char* original_img, unsigned char* new_img, unsigned int width, unsigned int num_thread, unsigned int size) { unsigned int position; unsigned char max; for (int i = threadIdx.x; i < size/4; i = i + num_thread) { position = i + (4 * (i / 4)) + (width * 4 * (i / (width * 2))); max = original_img[position]; if (original_img[position + 4] > max) max = original_img[position + 4]; if (original_img[position + width] > max) max = original_img[position + width]; if (original_img[position + width + 4] > max) max = original_img[position + width + 1]; new_img[i] = max; } } int main(int argc, char* argv[]) { if (argc != 4) { printf("Invalid number of arguments\n"); return -1; } clock_t start = clock(); unsigned char* original_img, * new_img; unsigned char* original_cudaImg, * new_cudaImg; unsigned int num_thread = atoi(argv[3]); unsigned width, height; unsigned int imagesize; unsigned error; error = lodepng_decode32_file(&original_img, &width, &height, argv[1]); if (error) { printf("%d: %s\n", error, lodepng_error_text(error)); return -1; } printf("%d %d\n",width, height); imagesize = width * height * 4 * sizeof(unsigned char); new_img = (unsigned char*)malloc(imagesize/4); hipMalloc((void**)&original_cudaImg, imagesize); hipMalloc((void**)&new_cudaImg, imagesize/4); hipMemcpy(original_cudaImg, original_img, imagesize, hipMemcpyHostToDevice); max_pooling<< <1, num_thread >> > (original_cudaImg, new_cudaImg, width, num_thread, imagesize); hipDeviceSynchronize(); hipMemcpy(new_img, new_cudaImg, imagesize/4, hipMemcpyDeviceToHost); error = lodepng_encode32_file(argv[2], new_img, width/2, height/2); if (error) { printf("%d: %s\n", error, lodepng_error_text(error)); return -1; } printf("%ul msec", clock() - start); free(original_img); free(new_img); hipFree(original_cudaImg); hipFree(new_cudaImg); return 0; }
2f558bf4b896a2598622fe755dc92e38b1ee50d8.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> #include "lodepng.h" #define MAX_THREAD 1024 __global__ void max_pooling(unsigned char* original_img, unsigned char* new_img, unsigned int width, unsigned int num_thread, unsigned int size) { unsigned int position; unsigned char max; for (int i = threadIdx.x; i < size/4; i = i + num_thread) { position = i + (4 * (i / 4)) + (width * 4 * (i / (width * 2))); max = original_img[position]; if (original_img[position + 4] > max) max = original_img[position + 4]; if (original_img[position + width] > max) max = original_img[position + width]; if (original_img[position + width + 4] > max) max = original_img[position + width + 1]; new_img[i] = max; } } int main(int argc, char* argv[]) { if (argc != 4) { printf("Invalid number of arguments\n"); return -1; } clock_t start = clock(); unsigned char* original_img, * new_img; unsigned char* original_cudaImg, * new_cudaImg; unsigned int num_thread = atoi(argv[3]); unsigned width, height; unsigned int imagesize; unsigned error; error = lodepng_decode32_file(&original_img, &width, &height, argv[1]); if (error) { printf("%d: %s\n", error, lodepng_error_text(error)); return -1; } printf("%d %d\n",width, height); imagesize = width * height * 4 * sizeof(unsigned char); new_img = (unsigned char*)malloc(imagesize/4); cudaMalloc((void**)&original_cudaImg, imagesize); cudaMalloc((void**)&new_cudaImg, imagesize/4); cudaMemcpy(original_cudaImg, original_img, imagesize, cudaMemcpyHostToDevice); max_pooling<< <1, num_thread >> > (original_cudaImg, new_cudaImg, width, num_thread, imagesize); cudaDeviceSynchronize(); cudaMemcpy(new_img, new_cudaImg, imagesize/4, cudaMemcpyDeviceToHost); error = lodepng_encode32_file(argv[2], new_img, width/2, height/2); if (error) { printf("%d: %s\n", error, lodepng_error_text(error)); return -1; } printf("%ul msec", clock() - start); free(original_img); free(new_img); cudaFree(original_cudaImg); cudaFree(new_cudaImg); return 0; }
7119510a5b8e40e853509227284614a5fe68dcd8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void forward_zero_nonmax_kernel(int n, float *input, float *output)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= n) return;
    if (input[id] != output[id]) output[id] = 0;
}
7119510a5b8e40e853509227284614a5fe68dcd8.cu
#include "includes.h" __global__ void forward_zero_nonmax_kernel(int n, float *input, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= n) return; if (input[id] != output[id]) output[id] = 0; }
f44873c055c1e3463db2b09a2e4dbece00a08fc5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: joaander #include "ActiveForceComputeGPU.cuh" #include "hoomd/extern/saruprngCUDA.h" #include "EvaluatorConstraintEllipsoid.h" #include <assert.h> /*! \file ActiveForceComputeGPU.cu \brief Declares GPU kernel code for calculating active forces forces on the GPU. Used by ActiveForceComputeGPU. */ //! Kernel for setting active force vectors on the GPU /*! \param group_size number of particles \param d_rtag convert global tag to global index \param d_groupTags stores list to convert group index to global tag \param d_force particle force on device \param d_torque particle torque on device \param d_orientation particle orientation on device \param d_f_actVec particle active force unit vector \param d_f_actMag particle active force vector magnitude \param d_t_actVec particle active torque unit vector \param d_t_actMag particle active torque vector magnitude \param P position of the ellipsoid constraint \param rx radius of the ellipsoid in x direction \param ry radius of the ellipsoid in y direction \param rz radius of the ellipsoid in z direction \param orientationLink check if particle orientation is linked to active force vector */ __global__ void gpu_compute_active_force_set_forces_kernel(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, Scalar4 *d_force, Scalar4 *d_torque, Scalar4 *d_orientation, Scalar3 *d_f_actVec, Scalar *d_f_actMag, Scalar3 *d_t_actVec, Scalar *d_t_actMag, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz, bool orientationLink, bool orientationReverseLink, const unsigned int N) { unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx >= group_size) return; unsigned int tag = d_groupTags[group_idx]; unsigned int idx = d_rtag[tag]; Scalar3 f; Scalar3 t; // rotate force according to particle orientation only if orientation is linked to active force vector if (orientationLink == true) { vec3<Scalar> fi; f = make_scalar3(d_f_actMag[tag] * d_f_actVec[tag].x, d_f_actMag[tag] * d_f_actVec[tag].y, d_f_actMag[tag] * d_f_actVec[tag].z); quat<Scalar> quati(d_orientation[idx]); fi = rotate(quati, vec3<Scalar>(f)); d_force[idx].x = fi.x; d_force[idx].y = fi.y; d_force[idx].z = fi.z; vec3<Scalar> ti; t = make_scalar3(d_t_actMag[tag] * d_t_actVec[tag].x, d_t_actMag[tag] * d_t_actVec[tag].y, d_t_actMag[tag] * d_t_actVec[tag].z); ti = rotate(quati, vec3<Scalar>(t)); d_torque[idx].x = ti.x; d_torque[idx].y = ti.y; d_torque[idx].z = ti.z; } else // no orientation link { f = make_scalar3(d_f_actMag[tag] * d_f_actVec[tag].x, d_f_actMag[tag] * d_f_actVec[tag].y, d_f_actMag[tag] * d_f_actVec[tag].z); d_force[idx].x = f.x; d_force[idx].y = f.y; d_force[idx].z = f.z; t = make_scalar3(d_t_actMag[tag] * d_t_actVec[tag].x, d_t_actMag[tag] * d_t_actVec[tag].y, d_t_actMag[tag] * d_t_actVec[tag].z); d_torque[idx].x = t.x; d_torque[idx].y = t.y; d_torque[idx].z = t.z; } // rotate particle orientation only if orientation is reverse linked to active force vector. 
Ignore torque here if (orientationReverseLink == true) { vec3<Scalar> f(d_f_actMag[tag] * d_f_actVec[tag].x, d_f_actMag[tag] * d_f_actVec[tag].y, d_f_actMag[tag] * d_f_actVec[tag].z); vec3<Scalar> vecZ(0.0, 0.0, 1.0); vec3<Scalar> quatVec = cross(vecZ, f); Scalar quatScal = slow::sqrt(d_f_actMag[tag]*d_f_actMag[tag]) + dot(f, vecZ); quat<Scalar> quati(quatScal, quatVec); quati = quati * (Scalar(1.0) / slow::sqrt(norm2(quati))); d_orientation[idx] = quat_to_scalar4(quati); } } //! Kernel for adjusting active force vectors to align parallel to an ellipsoid surface constraint on the GPU /*! \param group_size number of particles \param d_rtag convert global tag to global index \param d_groupTags stores list to convert group index to global tag \param d_pos particle positions on device \param d_f_actVec particle active force unit vector \param d_t_actVec particle active force unit vector \param P position of the ellipsoid constraint \param rx radius of the ellipsoid in x direction \param ry radius of the ellipsoid in y direction \param rz radius of the ellipsoid in z direction */ __global__ void gpu_compute_active_force_set_constraints_kernel(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, const Scalar4 *d_pos, Scalar3 *d_f_actVec, Scalar3 *d_t_actVec, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz) { unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx >= group_size) return; unsigned int tag = d_groupTags[group_idx]; unsigned int idx = d_rtag[tag]; EvaluatorConstraintEllipsoid Ellipsoid(P, rx, ry, rz); Scalar3 current_pos = make_scalar3(d_pos[idx].x, d_pos[idx].y, d_pos[idx].z); Scalar3 norm_scalar3 = Ellipsoid.evalNormal(current_pos); // the normal vector to which the particles are confined. vec3<Scalar> norm; norm = vec3<Scalar>(norm_scalar3); Scalar f_dot_prod = d_f_actVec[tag].x * norm.x + d_f_actVec[tag].y * norm.y + d_f_actVec[tag].z * norm.z; Scalar t_dot_prod = d_t_actVec[tag].x * norm.x + d_t_actVec[tag].y * norm.y + d_t_actVec[tag].z * norm.z; d_f_actVec[tag].x -= norm.x * f_dot_prod; d_f_actVec[tag].y -= norm.y * f_dot_prod; d_f_actVec[tag].z -= norm.z * f_dot_prod; d_t_actVec[tag].x -= norm.x * t_dot_prod; d_t_actVec[tag].y -= norm.y * t_dot_prod; d_t_actVec[tag].z -= norm.z * t_dot_prod; Scalar new_f_norm = slow::sqrt(d_f_actVec[tag].x * d_f_actVec[tag].x + d_f_actVec[tag].y * d_f_actVec[tag].y + d_f_actVec[tag].z * d_f_actVec[tag].z); Scalar new_t_norm = slow::sqrt(d_t_actVec[tag].x * d_t_actVec[tag].x + d_t_actVec[tag].y * d_t_actVec[tag].y + d_t_actVec[tag].z * d_t_actVec[tag].z); d_f_actVec[tag].x /= new_f_norm; d_f_actVec[tag].y /= new_f_norm; d_f_actVec[tag].z /= new_f_norm; d_t_actVec[tag].x /= new_t_norm; d_t_actVec[tag].y /= new_t_norm; d_t_actVec[tag].z /= new_t_norm; } //! Kernel for applying rotational diffusion to active force vectors on the GPU /*! 
\param group_size number of particles \param d_rtag convert global tag to global index \param d_groupTags stores list to convert group index to global tag \param d_pos particle positions on device \param d_f_actVec particle active force unit vector \param d_t_actVec particle active torque unit vector \param P position of the ellipsoid constraint \param rx radius of the ellipsoid in x direction \param ry radius of the ellipsoid in y direction \param rz radius of the ellipsoid in z direction \param is2D check if simulation is 2D or 3D \param rotationDiff particle rotational diffusion constant \param seed seed for random number generator */ __global__ void gpu_compute_active_force_rotational_diffusion_kernel(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, const Scalar4 *d_pos, Scalar3 *d_f_actVec, Scalar3 *d_t_actVec, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz, bool is2D, const Scalar rotationDiff, const unsigned int timestep, const int seed) { unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx >= group_size) return; unsigned int tag = d_groupTags[group_idx]; unsigned int idx = d_rtag[tag]; if (is2D) // 2D { SaruGPU saru(idx, timestep, seed); Scalar delta_theta; // rotational diffusion angle delta_theta = rotationDiff * gaussian_rng(saru, 1.0); Scalar theta; // angle on plane defining orientation of active force vector theta = atan2(d_f_actVec[tag].y, d_f_actVec[tag].x); theta += delta_theta; d_f_actVec[tag].x = cos(theta); d_f_actVec[tag].y = sin(theta); // in 2D there is only one meaningful direction for torque } else // 3D: Following Stenhammar, Soft Matter, 2014 { if (rx == 0) // if no constraint { SaruGPU saru(idx, timestep, seed); Scalar u = saru.d(0, 1.0); // generates an even distribution of random unit vectors in 3D Scalar v = saru.d(0, 1.0); Scalar theta = 2.0 * M_PI * u; Scalar phi = acos(2.0 * v - 1.0); vec3<Scalar> rand_vec; rand_vec.x = sin(phi) * cos(theta); rand_vec.y = sin(phi) * sin(theta); rand_vec.z = cos(phi); vec3<Scalar> aux_vec; aux_vec.x = d_f_actVec[tag].y * rand_vec.z - d_f_actVec[tag].z * rand_vec.y; aux_vec.y = d_f_actVec[tag].z * rand_vec.x - d_f_actVec[tag].x * rand_vec.z; aux_vec.z = d_f_actVec[tag].x * rand_vec.y - d_f_actVec[tag].y * rand_vec.x; Scalar aux_vec_mag = sqrt(aux_vec.x*aux_vec.x + aux_vec.y*aux_vec.y + aux_vec.z*aux_vec.z); aux_vec.x /= aux_vec_mag; aux_vec.y /= aux_vec_mag; aux_vec.z /= aux_vec_mag; vec3<Scalar> current_vec; current_vec.x = d_f_actVec[tag].x; current_vec.y = d_f_actVec[tag].y; current_vec.z = d_f_actVec[tag].z; Scalar delta_theta = rotationDiff * gaussian_rng(saru, 1.0); d_f_actVec[tag].x = cos(delta_theta)*current_vec.x + sin(delta_theta)*aux_vec.x; d_f_actVec[tag].y = cos(delta_theta)*current_vec.y + sin(delta_theta)*aux_vec.y; d_f_actVec[tag].z = cos(delta_theta)*current_vec.z + sin(delta_theta)*aux_vec.z; // torque vector rotates rigidly along with force vector d_t_actVec[tag].x = cos(delta_theta)*current_vec.x + sin(delta_theta)*aux_vec.x; d_t_actVec[tag].y = cos(delta_theta)*current_vec.y + sin(delta_theta)*aux_vec.y; d_t_actVec[tag].z = cos(delta_theta)*current_vec.z + sin(delta_theta)*aux_vec.z; } else // if constraint { EvaluatorConstraintEllipsoid Ellipsoid(P, rx, ry, rz); SaruGPU saru(idx, timestep, seed); Scalar3 current_pos = make_scalar3(d_pos[idx].x, d_pos[idx].y, d_pos[idx].z); Scalar3 norm_scalar3 = Ellipsoid.evalNormal(current_pos); // the normal vector to which the particles are confined. 
vec3<Scalar> norm; norm = vec3<Scalar> (norm_scalar3); vec3<Scalar> current_vec; current_vec.x = d_f_actVec[tag].x; current_vec.y = d_f_actVec[tag].y; current_vec.z = d_f_actVec[tag].z; vec3<Scalar> aux_vec = cross(current_vec, norm); // aux vec for defining direction that active force vector rotates towards. Scalar delta_theta; // rotational diffusion angle delta_theta = rotationDiff * gaussian_rng(saru, 1.0); d_f_actVec[tag].x = cos(delta_theta) * current_vec.x + sin(delta_theta) * aux_vec.x; d_f_actVec[tag].y = cos(delta_theta) * current_vec.y + sin(delta_theta) * aux_vec.y; d_f_actVec[tag].z = cos(delta_theta) * current_vec.z + sin(delta_theta) * aux_vec.z; // torque vector rotates rigidly along with force vector d_t_actVec[tag].x = cos(delta_theta) * current_vec.x + sin(delta_theta) * aux_vec.x; d_t_actVec[tag].y = cos(delta_theta) * current_vec.y + sin(delta_theta) * aux_vec.y; d_t_actVec[tag].z = cos(delta_theta) * current_vec.z + sin(delta_theta) * aux_vec.z; } } } hipError_t gpu_compute_active_force_set_forces(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, Scalar4 *d_force, Scalar4 *d_torque, Scalar4 *d_orientation, Scalar3 *d_f_actVec, Scalar *d_f_actMag, Scalar3 *d_t_actVec, Scalar *d_t_actMag, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz, bool orientationLink, bool orientationReverseLink, const unsigned int N, unsigned int block_size) { // setup the grid to run the kernel dim3 grid( group_size / block_size + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipMemset(d_force, 0, sizeof(Scalar4)*N); hipLaunchKernelGGL(( gpu_compute_active_force_set_forces_kernel), dim3(grid), dim3(threads), 0, 0, group_size, d_rtag, d_groupTags, d_force, d_torque, d_orientation, d_f_actVec, d_f_actMag, d_t_actVec, d_t_actMag, P, rx, ry, rz, orientationLink, orientationReverseLink, N); return hipSuccess; } hipError_t gpu_compute_active_force_set_constraints(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, const Scalar4 *d_pos, Scalar4 *d_force, Scalar4 *d_torque, Scalar3 *d_f_actVec, Scalar3 *d_t_actVec, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz, unsigned int block_size) { // setup the grid to run the kernel dim3 grid( group_size / block_size + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_compute_active_force_set_constraints_kernel), dim3(grid), dim3(threads), 0, 0, group_size, d_rtag, d_groupTags, d_pos, d_f_actVec, d_t_actVec, P, rx, ry, rz); return hipSuccess; } hipError_t gpu_compute_active_force_rotational_diffusion(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, const Scalar4 *d_pos, Scalar4 *d_force, Scalar4 *d_torque, Scalar3 *d_f_actVec, Scalar3 *d_t_actVec, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz, bool is2D, const Scalar rotationDiff, const unsigned int timestep, const int seed, unsigned int block_size) { // setup the grid to run the kernel dim3 grid( group_size / block_size + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_compute_active_force_rotational_diffusion_kernel), dim3(grid), dim3(threads), 0, 0, group_size, d_rtag, d_groupTags, d_pos, d_f_actVec, d_t_actVec, P, rx, ry, rz, is2D, rotationDiff, timestep, seed); return hipSuccess; }
f44873c055c1e3463db2b09a2e4dbece00a08fc5.cu
// Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: joaander #include "ActiveForceComputeGPU.cuh" #include "hoomd/extern/saruprngCUDA.h" #include "EvaluatorConstraintEllipsoid.h" #include <assert.h> /*! \file ActiveForceComputeGPU.cu \brief Declares GPU kernel code for calculating active forces forces on the GPU. Used by ActiveForceComputeGPU. */ //! Kernel for setting active force vectors on the GPU /*! \param group_size number of particles \param d_rtag convert global tag to global index \param d_groupTags stores list to convert group index to global tag \param d_force particle force on device \param d_torque particle torque on device \param d_orientation particle orientation on device \param d_f_actVec particle active force unit vector \param d_f_actMag particle active force vector magnitude \param d_t_actVec particle active torque unit vector \param d_t_actMag particle active torque vector magnitude \param P position of the ellipsoid constraint \param rx radius of the ellipsoid in x direction \param ry radius of the ellipsoid in y direction \param rz radius of the ellipsoid in z direction \param orientationLink check if particle orientation is linked to active force vector */ __global__ void gpu_compute_active_force_set_forces_kernel(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, Scalar4 *d_force, Scalar4 *d_torque, Scalar4 *d_orientation, Scalar3 *d_f_actVec, Scalar *d_f_actMag, Scalar3 *d_t_actVec, Scalar *d_t_actMag, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz, bool orientationLink, bool orientationReverseLink, const unsigned int N) { unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx >= group_size) return; unsigned int tag = d_groupTags[group_idx]; unsigned int idx = d_rtag[tag]; Scalar3 f; Scalar3 t; // rotate force according to particle orientation only if orientation is linked to active force vector if (orientationLink == true) { vec3<Scalar> fi; f = make_scalar3(d_f_actMag[tag] * d_f_actVec[tag].x, d_f_actMag[tag] * d_f_actVec[tag].y, d_f_actMag[tag] * d_f_actVec[tag].z); quat<Scalar> quati(d_orientation[idx]); fi = rotate(quati, vec3<Scalar>(f)); d_force[idx].x = fi.x; d_force[idx].y = fi.y; d_force[idx].z = fi.z; vec3<Scalar> ti; t = make_scalar3(d_t_actMag[tag] * d_t_actVec[tag].x, d_t_actMag[tag] * d_t_actVec[tag].y, d_t_actMag[tag] * d_t_actVec[tag].z); ti = rotate(quati, vec3<Scalar>(t)); d_torque[idx].x = ti.x; d_torque[idx].y = ti.y; d_torque[idx].z = ti.z; } else // no orientation link { f = make_scalar3(d_f_actMag[tag] * d_f_actVec[tag].x, d_f_actMag[tag] * d_f_actVec[tag].y, d_f_actMag[tag] * d_f_actVec[tag].z); d_force[idx].x = f.x; d_force[idx].y = f.y; d_force[idx].z = f.z; t = make_scalar3(d_t_actMag[tag] * d_t_actVec[tag].x, d_t_actMag[tag] * d_t_actVec[tag].y, d_t_actMag[tag] * d_t_actVec[tag].z); d_torque[idx].x = t.x; d_torque[idx].y = t.y; d_torque[idx].z = t.z; } // rotate particle orientation only if orientation is reverse linked to active force vector. 
Ignore torque here if (orientationReverseLink == true) { vec3<Scalar> f(d_f_actMag[tag] * d_f_actVec[tag].x, d_f_actMag[tag] * d_f_actVec[tag].y, d_f_actMag[tag] * d_f_actVec[tag].z); vec3<Scalar> vecZ(0.0, 0.0, 1.0); vec3<Scalar> quatVec = cross(vecZ, f); Scalar quatScal = slow::sqrt(d_f_actMag[tag]*d_f_actMag[tag]) + dot(f, vecZ); quat<Scalar> quati(quatScal, quatVec); quati = quati * (Scalar(1.0) / slow::sqrt(norm2(quati))); d_orientation[idx] = quat_to_scalar4(quati); } } //! Kernel for adjusting active force vectors to align parallel to an ellipsoid surface constraint on the GPU /*! \param group_size number of particles \param d_rtag convert global tag to global index \param d_groupTags stores list to convert group index to global tag \param d_pos particle positions on device \param d_f_actVec particle active force unit vector \param d_t_actVec particle active force unit vector \param P position of the ellipsoid constraint \param rx radius of the ellipsoid in x direction \param ry radius of the ellipsoid in y direction \param rz radius of the ellipsoid in z direction */ __global__ void gpu_compute_active_force_set_constraints_kernel(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, const Scalar4 *d_pos, Scalar3 *d_f_actVec, Scalar3 *d_t_actVec, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz) { unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx >= group_size) return; unsigned int tag = d_groupTags[group_idx]; unsigned int idx = d_rtag[tag]; EvaluatorConstraintEllipsoid Ellipsoid(P, rx, ry, rz); Scalar3 current_pos = make_scalar3(d_pos[idx].x, d_pos[idx].y, d_pos[idx].z); Scalar3 norm_scalar3 = Ellipsoid.evalNormal(current_pos); // the normal vector to which the particles are confined. vec3<Scalar> norm; norm = vec3<Scalar>(norm_scalar3); Scalar f_dot_prod = d_f_actVec[tag].x * norm.x + d_f_actVec[tag].y * norm.y + d_f_actVec[tag].z * norm.z; Scalar t_dot_prod = d_t_actVec[tag].x * norm.x + d_t_actVec[tag].y * norm.y + d_t_actVec[tag].z * norm.z; d_f_actVec[tag].x -= norm.x * f_dot_prod; d_f_actVec[tag].y -= norm.y * f_dot_prod; d_f_actVec[tag].z -= norm.z * f_dot_prod; d_t_actVec[tag].x -= norm.x * t_dot_prod; d_t_actVec[tag].y -= norm.y * t_dot_prod; d_t_actVec[tag].z -= norm.z * t_dot_prod; Scalar new_f_norm = slow::sqrt(d_f_actVec[tag].x * d_f_actVec[tag].x + d_f_actVec[tag].y * d_f_actVec[tag].y + d_f_actVec[tag].z * d_f_actVec[tag].z); Scalar new_t_norm = slow::sqrt(d_t_actVec[tag].x * d_t_actVec[tag].x + d_t_actVec[tag].y * d_t_actVec[tag].y + d_t_actVec[tag].z * d_t_actVec[tag].z); d_f_actVec[tag].x /= new_f_norm; d_f_actVec[tag].y /= new_f_norm; d_f_actVec[tag].z /= new_f_norm; d_t_actVec[tag].x /= new_t_norm; d_t_actVec[tag].y /= new_t_norm; d_t_actVec[tag].z /= new_t_norm; } //! Kernel for applying rotational diffusion to active force vectors on the GPU /*! 
\param group_size number of particles \param d_rtag convert global tag to global index \param d_groupTags stores list to convert group index to global tag \param d_pos particle positions on device \param d_f_actVec particle active force unit vector \param d_t_actVec particle active torque unit vector \param P position of the ellipsoid constraint \param rx radius of the ellipsoid in x direction \param ry radius of the ellipsoid in y direction \param rz radius of the ellipsoid in z direction \param is2D check if simulation is 2D or 3D \param rotationDiff particle rotational diffusion constant \param seed seed for random number generator */ __global__ void gpu_compute_active_force_rotational_diffusion_kernel(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, const Scalar4 *d_pos, Scalar3 *d_f_actVec, Scalar3 *d_t_actVec, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz, bool is2D, const Scalar rotationDiff, const unsigned int timestep, const int seed) { unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx >= group_size) return; unsigned int tag = d_groupTags[group_idx]; unsigned int idx = d_rtag[tag]; if (is2D) // 2D { SaruGPU saru(idx, timestep, seed); Scalar delta_theta; // rotational diffusion angle delta_theta = rotationDiff * gaussian_rng(saru, 1.0); Scalar theta; // angle on plane defining orientation of active force vector theta = atan2(d_f_actVec[tag].y, d_f_actVec[tag].x); theta += delta_theta; d_f_actVec[tag].x = cos(theta); d_f_actVec[tag].y = sin(theta); // in 2D there is only one meaningful direction for torque } else // 3D: Following Stenhammar, Soft Matter, 2014 { if (rx == 0) // if no constraint { SaruGPU saru(idx, timestep, seed); Scalar u = saru.d(0, 1.0); // generates an even distribution of random unit vectors in 3D Scalar v = saru.d(0, 1.0); Scalar theta = 2.0 * M_PI * u; Scalar phi = acos(2.0 * v - 1.0); vec3<Scalar> rand_vec; rand_vec.x = sin(phi) * cos(theta); rand_vec.y = sin(phi) * sin(theta); rand_vec.z = cos(phi); vec3<Scalar> aux_vec; aux_vec.x = d_f_actVec[tag].y * rand_vec.z - d_f_actVec[tag].z * rand_vec.y; aux_vec.y = d_f_actVec[tag].z * rand_vec.x - d_f_actVec[tag].x * rand_vec.z; aux_vec.z = d_f_actVec[tag].x * rand_vec.y - d_f_actVec[tag].y * rand_vec.x; Scalar aux_vec_mag = sqrt(aux_vec.x*aux_vec.x + aux_vec.y*aux_vec.y + aux_vec.z*aux_vec.z); aux_vec.x /= aux_vec_mag; aux_vec.y /= aux_vec_mag; aux_vec.z /= aux_vec_mag; vec3<Scalar> current_vec; current_vec.x = d_f_actVec[tag].x; current_vec.y = d_f_actVec[tag].y; current_vec.z = d_f_actVec[tag].z; Scalar delta_theta = rotationDiff * gaussian_rng(saru, 1.0); d_f_actVec[tag].x = cos(delta_theta)*current_vec.x + sin(delta_theta)*aux_vec.x; d_f_actVec[tag].y = cos(delta_theta)*current_vec.y + sin(delta_theta)*aux_vec.y; d_f_actVec[tag].z = cos(delta_theta)*current_vec.z + sin(delta_theta)*aux_vec.z; // torque vector rotates rigidly along with force vector d_t_actVec[tag].x = cos(delta_theta)*current_vec.x + sin(delta_theta)*aux_vec.x; d_t_actVec[tag].y = cos(delta_theta)*current_vec.y + sin(delta_theta)*aux_vec.y; d_t_actVec[tag].z = cos(delta_theta)*current_vec.z + sin(delta_theta)*aux_vec.z; } else // if constraint { EvaluatorConstraintEllipsoid Ellipsoid(P, rx, ry, rz); SaruGPU saru(idx, timestep, seed); Scalar3 current_pos = make_scalar3(d_pos[idx].x, d_pos[idx].y, d_pos[idx].z); Scalar3 norm_scalar3 = Ellipsoid.evalNormal(current_pos); // the normal vector to which the particles are confined. 
vec3<Scalar> norm; norm = vec3<Scalar> (norm_scalar3); vec3<Scalar> current_vec; current_vec.x = d_f_actVec[tag].x; current_vec.y = d_f_actVec[tag].y; current_vec.z = d_f_actVec[tag].z; vec3<Scalar> aux_vec = cross(current_vec, norm); // aux vec for defining direction that active force vector rotates towards. Scalar delta_theta; // rotational diffusion angle delta_theta = rotationDiff * gaussian_rng(saru, 1.0); d_f_actVec[tag].x = cos(delta_theta) * current_vec.x + sin(delta_theta) * aux_vec.x; d_f_actVec[tag].y = cos(delta_theta) * current_vec.y + sin(delta_theta) * aux_vec.y; d_f_actVec[tag].z = cos(delta_theta) * current_vec.z + sin(delta_theta) * aux_vec.z; // torque vector rotates rigidly along with force vector d_t_actVec[tag].x = cos(delta_theta) * current_vec.x + sin(delta_theta) * aux_vec.x; d_t_actVec[tag].y = cos(delta_theta) * current_vec.y + sin(delta_theta) * aux_vec.y; d_t_actVec[tag].z = cos(delta_theta) * current_vec.z + sin(delta_theta) * aux_vec.z; } } } cudaError_t gpu_compute_active_force_set_forces(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, Scalar4 *d_force, Scalar4 *d_torque, Scalar4 *d_orientation, Scalar3 *d_f_actVec, Scalar *d_f_actMag, Scalar3 *d_t_actVec, Scalar *d_t_actMag, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz, bool orientationLink, bool orientationReverseLink, const unsigned int N, unsigned int block_size) { // setup the grid to run the kernel dim3 grid( group_size / block_size + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel cudaMemset(d_force, 0, sizeof(Scalar4)*N); gpu_compute_active_force_set_forces_kernel<<< grid, threads>>>( group_size, d_rtag, d_groupTags, d_force, d_torque, d_orientation, d_f_actVec, d_f_actMag, d_t_actVec, d_t_actMag, P, rx, ry, rz, orientationLink, orientationReverseLink, N); return cudaSuccess; } cudaError_t gpu_compute_active_force_set_constraints(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, const Scalar4 *d_pos, Scalar4 *d_force, Scalar4 *d_torque, Scalar3 *d_f_actVec, Scalar3 *d_t_actVec, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz, unsigned int block_size) { // setup the grid to run the kernel dim3 grid( group_size / block_size + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_compute_active_force_set_constraints_kernel<<< grid, threads>>>(group_size, d_rtag, d_groupTags, d_pos, d_f_actVec, d_t_actVec, P, rx, ry, rz); return cudaSuccess; } cudaError_t gpu_compute_active_force_rotational_diffusion(const unsigned int group_size, unsigned int *d_rtag, unsigned int *d_groupTags, const Scalar4 *d_pos, Scalar4 *d_force, Scalar4 *d_torque, Scalar3 *d_f_actVec, Scalar3 *d_t_actVec, const Scalar3& P, Scalar rx, Scalar ry, Scalar rz, bool is2D, const Scalar rotationDiff, const unsigned int timestep, const int seed, unsigned int block_size) { // setup the grid to run the kernel dim3 grid( group_size / block_size + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_compute_active_force_rotational_diffusion_kernel<<< grid, threads>>>(group_size, d_rtag, d_groupTags, d_pos, d_f_actVec, d_t_actVec, P, rx, ry, rz, is2D, rotationDiff, timestep, seed); return cudaSuccess; }
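The constraint kernel in this pair keeps the active force and torque directions tangent to the ellipsoid by subtracting their component along the surface normal and then renormalising. The sketch below isolates that projection step with plain float3 instead of HOOMD's Scalar3/vec3 types; it is a reduced illustration, not HOOMD code.

#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

// remove the component of v along unitNormal, then rescale to unit length
__host__ __device__ inline float3 projectToTangentPlane(float3 v, float3 unitNormal)
{
    float d = v.x * unitNormal.x + v.y * unitNormal.y + v.z * unitNormal.z;
    float3 t = make_float3(v.x - d * unitNormal.x,
                           v.y - d * unitNormal.y,
                           v.z - d * unitNormal.z);
    float len = sqrtf(t.x * t.x + t.y * t.y + t.z * t.z);
    // the caller must keep v away from (anti)parallel to the normal, or len becomes 0
    return make_float3(t.x / len, t.y / len, t.z / len);
}

int main()
{
    float3 f = make_float3(1.0f, 1.0f, 1.0f);
    float3 n = make_float3(0.0f, 0.0f, 1.0f);   // tangent plane is the xy plane
    float3 p = projectToTangentPlane(f, n);
    printf("projected direction: (%f, %f, %f)\n", p.x, p.y, p.z);   // about (0.707, 0.707, 0)
    return 0;
}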
b1f418204e737d95aa2a27a0152e436d8973b2da.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <ctime>
#include <sys/time.h>

#define NUM_ITERATIONS 10000
#define EPSILON 0.005

int NUM_PARTICLES = 10000;
int TPB = 32;

unsigned long get_time();

unsigned long get_time() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    unsigned long ret = tv.tv_usec;
    ret /= 1000;
    ret += (tv.tv_sec * 1000);
    return ret;
}

struct Particle {
    float3 position;
    float3 velocity;
};

float3 randomFloat3() {
    float3 f;
    f.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    f.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    f.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    return f;
}

// Initialize the array of Particles with random position and velocity
void inicializeParticles(Particle* particles) {
    for (int i = 0; i < NUM_PARTICLES; i++) {
        particles[i].position = randomFloat3();
        particles[i].velocity = randomFloat3();
    }
}

// Initialize all the necessary random variables at once in a matrix randNumbers
void inicializeRandNumbes(float3* randNumbers) {
    for (int i = 0; i < NUM_PARTICLES; i++) {
        randNumbers[i] = randomFloat3();
    }
}

__global__ void performStepGPU(Particle* particles, float3* rand_vel_update, int NUM_PARTICLES, float dt = 1.0) {
    const int p_id = blockIdx.x * blockDim.x + threadIdx.x;
    // only calculate if particle is inside the bounds
    if (p_id < NUM_PARTICLES) {
        particles[p_id].position.x += dt * particles[p_id].velocity.x;
        particles[p_id].position.y += dt * particles[p_id].velocity.y;
        particles[p_id].position.z += dt * particles[p_id].velocity.z;
        particles[p_id].velocity.x += rand_vel_update[p_id].x;
        particles[p_id].velocity.y += rand_vel_update[p_id].y;
        particles[p_id].velocity.z += rand_vel_update[p_id].z;
    }
}

void performStepCPU(Particle* particles, float3* rand_vel_update, float dt = 1.0) {
    for (int p_id = 0; p_id < NUM_PARTICLES; p_id++) {
        particles[p_id].position.x += dt * particles[p_id].velocity.x;
        particles[p_id].position.y += dt * particles[p_id].velocity.y;
        particles[p_id].position.z += dt * particles[p_id].velocity.z;
        particles[p_id].velocity.x += rand_vel_update[p_id].x;
        particles[p_id].velocity.y += rand_vel_update[p_id].y;
        particles[p_id].velocity.z += rand_vel_update[p_id].z;
    }
}

bool equalFinalState(Particle* p1, Particle* p2) {
    for (int i = 0; i < NUM_PARTICLES; i++) {
        if (std::abs(p1[i].position.x - p2[i].position.x) > EPSILON ||
            std::abs(p1[i].position.y - p2[i].position.y) > EPSILON ||
            std::abs(p1[i].position.z - p2[i].position.z) > EPSILON) {
            return false;
        }
    }
    return true;
}

int main(int argc, char** argv) {
    NUM_PARTICLES = (argc >= 2) ? atoi(argv[1]) : 2000000;
    TPB = (argc >= 3) ? atoi(argv[2]) : 128;

    // seed for random numbers
    srand(static_cast<unsigned>(time(0)));

    // Array of particles
    Particle* particles = (Particle*) malloc(sizeof(Particle) * NUM_PARTICLES);
    inicializeParticles(particles);

    // Array of random numbers
    float3* randNumbers = (float3*) malloc(sizeof(float3) * NUM_PARTICLES);
    inicializeRandNumbes(randNumbers);

    // CPU execution
    long start_time_cpu = get_time();
    Particle* particles_cpu = (Particle*) malloc(sizeof(Particle) * NUM_PARTICLES);
    // copy vector to use in the CPU
    std::memcpy(particles_cpu, particles, NUM_PARTICLES * sizeof(Particle));
    printf("Computing particles system on the CPU…");
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        //performStepCPU(particles_cpu, randNumbers);
    }
    printf("Done\n");
    long end_time_cpu = get_time();

    // GPU execution
    long start_time_gpu = get_time();
    Particle* particles_gpu = 0;
    float3* randNumbers_gpu = 0;

    // Allocate host memory
    // Particle* particles_gpu_res = (Particle*) malloc(sizeof(Particle) * NUM_PARTICLES); // paged memory
    Particle* particles_gpu_res = NULL;
    hipHostMalloc(&particles_gpu_res, NUM_PARTICLES * sizeof(Particle)); // pinned memory

    // Allocate device memory
    hipMalloc(&particles_gpu, NUM_PARTICLES * sizeof(Particle));
    hipMalloc(&randNumbers_gpu, NUM_PARTICLES * sizeof(float3));

    // Copy arrays to device
    hipMemcpy(randNumbers_gpu, randNumbers, NUM_PARTICLES * sizeof(float3), hipMemcpyHostToDevice);
    hipMemcpy(particles_gpu, particles, NUM_PARTICLES * sizeof(Particle), hipMemcpyHostToDevice);

    // Launch kernel to compute the final state of particles
    printf("Computing particles system on the GPU...");
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        // Copy from host to device
        if (i > 0)
            hipMemcpy(particles_gpu, particles_gpu_res, NUM_PARTICLES * sizeof(Particle), hipMemcpyHostToDevice);
        hipLaunchKernelGGL((performStepGPU), dim3((NUM_PARTICLES + TPB - 1) / TPB), dim3(TPB), 0, 0,
                           particles_gpu, randNumbers_gpu, NUM_PARTICLES);
        hipDeviceSynchronize();
        // Copy back from device to host
        hipMemcpy(particles_gpu_res, particles_gpu, NUM_PARTICLES * sizeof(Particle), hipMemcpyDeviceToHost);
    }
    printf("Done\n");
    long end_time_gpu = get_time();

    // Compare results
    printf("Comparing the output for each implementation…");
    equalFinalState(particles_gpu_res, particles_cpu) ? printf("Correct\n") : printf("Incorrect\n");

    printf("-----------------------------------------------\n");
    printf("block size: %d ; NUM_PARTICLES: %d\n", TPB, NUM_PARTICLES);
    printf("CPU time: %ld ms\n", end_time_cpu - start_time_cpu);
    printf("GPU time: %ld ms\n", end_time_gpu - start_time_gpu);
    printf("-----------------------------------------------\n");
    // printf("%d %d %ld %ld\n", TPB, NUM_PARTICLES, end_time_cpu - start_time_cpu, end_time_gpu - start_time_gpu);

    // Free the memory
    hipFree(particles_gpu);
    hipFree(randNumbers_gpu);
    hipHostFree(particles_gpu_res); // pinned memory
    // free(particles_gpu_res);     // paged memory
    free(particles_cpu);
    free(randNumbers);
    free(particles);

    return 0;
}
b1f418204e737d95aa2a27a0152e436d8973b2da.cu
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <ctime>
#include <sys/time.h>

#define NUM_ITERATIONS 10000
#define EPSILON 0.005

int NUM_PARTICLES = 10000;
int TPB = 32;

unsigned long get_time();

unsigned long get_time() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    unsigned long ret = tv.tv_usec;
    ret /= 1000;
    ret += (tv.tv_sec * 1000);
    return ret;
}

struct Particle {
    float3 position;
    float3 velocity;
};

float3 randomFloat3() {
    float3 f;
    f.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    f.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    f.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    return f;
}

// Initialize the array of Particles with random position and velocity
void inicializeParticles(Particle* particles) {
    for (int i = 0; i < NUM_PARTICLES; i++) {
        particles[i].position = randomFloat3();
        particles[i].velocity = randomFloat3();
    }
}

// Initialize all the necessary random variables at once in a matrix randNumbers
void inicializeRandNumbes(float3* randNumbers) {
    for (int i = 0; i < NUM_PARTICLES; i++) {
        randNumbers[i] = randomFloat3();
    }
}

__global__ void performStepGPU(Particle* particles, float3* rand_vel_update, int NUM_PARTICLES, float dt = 1.0) {
    const int p_id = blockIdx.x * blockDim.x + threadIdx.x;
    // only calculate if particle is inside the bounds
    if (p_id < NUM_PARTICLES) {
        particles[p_id].position.x += dt * particles[p_id].velocity.x;
        particles[p_id].position.y += dt * particles[p_id].velocity.y;
        particles[p_id].position.z += dt * particles[p_id].velocity.z;
        particles[p_id].velocity.x += rand_vel_update[p_id].x;
        particles[p_id].velocity.y += rand_vel_update[p_id].y;
        particles[p_id].velocity.z += rand_vel_update[p_id].z;
    }
}

void performStepCPU(Particle* particles, float3* rand_vel_update, float dt = 1.0) {
    for (int p_id = 0; p_id < NUM_PARTICLES; p_id++) {
        particles[p_id].position.x += dt * particles[p_id].velocity.x;
        particles[p_id].position.y += dt * particles[p_id].velocity.y;
        particles[p_id].position.z += dt * particles[p_id].velocity.z;
        particles[p_id].velocity.x += rand_vel_update[p_id].x;
        particles[p_id].velocity.y += rand_vel_update[p_id].y;
        particles[p_id].velocity.z += rand_vel_update[p_id].z;
    }
}

bool equalFinalState(Particle* p1, Particle* p2) {
    for (int i = 0; i < NUM_PARTICLES; i++) {
        if (std::abs(p1[i].position.x - p2[i].position.x) > EPSILON ||
            std::abs(p1[i].position.y - p2[i].position.y) > EPSILON ||
            std::abs(p1[i].position.z - p2[i].position.z) > EPSILON) {
            return false;
        }
    }
    return true;
}

int main(int argc, char** argv) {
    NUM_PARTICLES = (argc >= 2) ? atoi(argv[1]) : 2000000;
    TPB = (argc >= 3) ? atoi(argv[2]) : 128;

    // seed for random numbers
    srand(static_cast<unsigned>(time(0)));

    // Array of particles
    Particle* particles = (Particle*) malloc(sizeof(Particle) * NUM_PARTICLES);
    inicializeParticles(particles);

    // Array of random numbers
    float3* randNumbers = (float3*) malloc(sizeof(float3) * NUM_PARTICLES);
    inicializeRandNumbes(randNumbers);

    // CPU execution
    long start_time_cpu = get_time();
    Particle* particles_cpu = (Particle*) malloc(sizeof(Particle) * NUM_PARTICLES);
    // copy vector to use in the CPU
    std::memcpy(particles_cpu, particles, NUM_PARTICLES * sizeof(Particle));
    printf("Computing particles system on the CPU…");
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        //performStepCPU(particles_cpu, randNumbers);
    }
    printf("Done\n");
    long end_time_cpu = get_time();

    // GPU execution
    long start_time_gpu = get_time();
    Particle* particles_gpu = 0;
    float3* randNumbers_gpu = 0;

    // Allocate host memory
    // Particle* particles_gpu_res = (Particle*) malloc(sizeof(Particle) * NUM_PARTICLES); // paged memory
    Particle* particles_gpu_res = NULL;
    cudaMallocHost(&particles_gpu_res, NUM_PARTICLES * sizeof(Particle)); // pinned memory

    // Allocate device memory
    cudaMalloc(&particles_gpu, NUM_PARTICLES * sizeof(Particle));
    cudaMalloc(&randNumbers_gpu, NUM_PARTICLES * sizeof(float3));

    // Copy arrays to device
    cudaMemcpy(randNumbers_gpu, randNumbers, NUM_PARTICLES * sizeof(float3), cudaMemcpyHostToDevice);
    cudaMemcpy(particles_gpu, particles, NUM_PARTICLES * sizeof(Particle), cudaMemcpyHostToDevice);

    // Launch kernel to compute the final state of particles
    printf("Computing particles system on the GPU...");
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        // Copy from host to device
        if (i > 0)
            cudaMemcpy(particles_gpu, particles_gpu_res, NUM_PARTICLES * sizeof(Particle), cudaMemcpyHostToDevice);
        performStepGPU<<<(NUM_PARTICLES + TPB - 1) / TPB, TPB>>>(particles_gpu, randNumbers_gpu, NUM_PARTICLES);
        cudaDeviceSynchronize();
        // Copy back from device to host
        cudaMemcpy(particles_gpu_res, particles_gpu, NUM_PARTICLES * sizeof(Particle), cudaMemcpyDeviceToHost);
    }
    printf("Done\n");
    long end_time_gpu = get_time();

    // Compare results
    printf("Comparing the output for each implementation…");
    equalFinalState(particles_gpu_res, particles_cpu) ? printf("Correct\n") : printf("Incorrect\n");

    printf("-----------------------------------------------\n");
    printf("block size: %d ; NUM_PARTICLES: %d\n", TPB, NUM_PARTICLES);
    printf("CPU time: %ld ms\n", end_time_cpu - start_time_cpu);
    printf("GPU time: %ld ms\n", end_time_gpu - start_time_gpu);
    printf("-----------------------------------------------\n");
    // printf("%d %d %ld %ld\n", TPB, NUM_PARTICLES, end_time_cpu - start_time_cpu, end_time_gpu - start_time_gpu);

    // Free the memory
    cudaFree(particles_gpu);
    cudaFree(randNumbers_gpu);
    cudaFreeHost(particles_gpu_res); // pinned memory
    // free(particles_gpu_res);      // paged memory
    free(particles_cpu);
    free(randNumbers);
    free(particles);

    return 0;
}