Dataset schema (each row pairs a hipify-generated HIP source file with the CUDA source it was translated from):
hip_filename: string, length 5 to 84
hip_content: string, length 79 to 9.69M
cuda_filename: string, length 4 to 83
cuda_content: string, length 19 to 9.69M
cce6e1bd474994d0d08f0961afd24faebffe5340.hip
// !!! This is a file automatically generated by hipify!!! //**************************************************************************\ //* This file is property of and copyright by the ALICE Project *\ //* ALICE Experiment at CERN, All rights reserved. *\ //* *\ //* Primary Authors: Matthias Richter <[email protected]> *\ //* for The ALICE HLT Project. *\ //* *\ //* Permission to use, copy, modify and distribute this software and its *\ //* documentation strictly for non-commercial purposes is hereby granted *\ //* without fee, provided that the above copyright notice appears in all *\ //* copies and that both the copyright notice and this permission notice *\ //* appear in the supporting documentation. The authors make no claims *\ //* about the suitability of this software for any purpose. It is *\ //* provided "as is" without express or implied warranty. *\ //************************************************************************** /// \file GPUReconstructionCUDAGenRTC.cu /// \author David Rohr #define GPUCA_GPUCODE_HOSTONLY #include <omp.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "GPUReconstructionCUDADef.h" #include "GPUReconstructionCUDA.h" #include "GPUReconstructionCUDAInternals.h" #include "GPUParamRTC.h" #include "GPUDefMacros.h" #include <unistd.h> #ifdef GPUCA_HAVE_O2HEADERS #include "Framework/SHA1.h" #endif using namespace GPUCA_NAMESPACE::gpu; #ifndef GPUCA_ALIROOT_LIB #include "utils/qGetLdBinarySymbols.h" QGET_LD_BINARY_SYMBOLS(GPUReconstructionCUDArtc_src); QGET_LD_BINARY_SYMBOLS(GPUReconstructionCUDArtc_command); #endif int GPUReconstructionCUDA::genRTC() { #ifndef GPUCA_ALIROOT_LIB std::string rtcparam = GPUParamRTC::generateRTCCode(param(), mProcessingSettings.rtc.optConstexpr); std::string filename = "/tmp/o2cagpu_rtc_"; filename += std::to_string(getpid()); filename += "_"; filename += std::to_string(rand()); std::vector<std::string> kernels; std::string kernelsall; #undef GPUCA_KRNL_REG #define GPUCA_KRNL_REG(args) __launch_bounds__(GPUCA_M_MAX2_3(GPUCA_M_STRIP(args))) #define GPUCA_KRNL(x_class, x_attributes, x_arguments, x_forward) GPUCA_KRNL_WRAP(GPUCA_KRNL_LOAD_, x_class, x_attributes, x_arguments, x_forward) #define GPUCA_KRNL_LOAD_single(x_class, x_attributes, x_arguments, x_forward) kernels.emplace_back(GPUCA_M_STR(GPUCA_KRNLGPU_SINGLE(x_class, x_attributes, x_arguments, x_forward))); #define GPUCA_KRNL_LOAD_multi(x_class, x_attributes, x_arguments, x_forward) kernels.emplace_back(GPUCA_M_STR(GPUCA_KRNLGPU_MULTI(x_class, x_attributes, x_arguments, x_forward))); #include "GPUReconstructionKernels.h" #undef GPUCA_KRNL #undef GPUCA_KRNL_LOAD_single #undef GPUCA_KRNL_LOAD_multi for (unsigned int i = 0; i < kernels.size(); i++) { kernelsall += kernels[i]; } #ifdef GPUCA_HAVE_O2HEADERS char shasource[21], shaparam[21], shacmd[21], shakernels[21]; if (mProcessingSettings.rtc.cacheOutput) { o2::framework::internal::SHA1(shasource, _binary_GPUReconstructionCUDArtc_src_start, _binary_GPUReconstructionCUDArtc_src_len); o2::framework::internal::SHA1(shaparam, rtcparam.c_str(), rtcparam.size()); o2::framework::internal::SHA1(shacmd, _binary_GPUReconstructionCUDArtc_command_start, _binary_GPUReconstructionCUDArtc_command_len); o2::framework::internal::SHA1(shakernels, kernelsall.c_str(), kernelsall.size()); } #endif unsigned int nCompile = mProcessingSettings.rtc.compilePerKernel ? 
kernels.size() : 1; bool cacheLoaded = false; if (mProcessingSettings.rtc.cacheOutput) { #ifndef GPUCA_HAVE_O2HEADERS throw std::runtime_error("Cannot use RTC cache without O2 headers"); #else FILE* fp = fopen("rtc.cuda.cache", "rb"); char sharead[20]; if (fp) { size_t len; while (true) { if (fread(sharead, 1, 20, fp) != 20) { throw std::runtime_error("Cache file corrupt"); } if (memcmp(sharead, shasource, 20)) { GPUInfo("Cache file content outdated (source)"); break; } if (fread(sharead, 1, 20, fp) != 20) { throw std::runtime_error("Cache file corrupt"); } if (memcmp(sharead, shaparam, 20)) { GPUInfo("Cache file content outdated (param)"); break; } if (fread(sharead, 1, 20, fp) != 20) { throw std::runtime_error("Cache file corrupt"); } if (memcmp(sharead, shacmd, 20)) { GPUInfo("Cache file content outdated (commandline)"); break; } if (fread(sharead, 1, 20, fp) != 20) { throw std::runtime_error("Cache file corrupt"); } if (memcmp(sharead, shakernels, 20)) { GPUInfo("Cache file content outdated (kernel definitions)"); break; } GPUSettingsProcessingRTC cachedSettings; if (fread(&cachedSettings, sizeof(cachedSettings), 1, fp) != 1) { throw std::runtime_error("Cache file corrupt"); } if (memcmp(&cachedSettings, &mProcessingSettings.rtc, sizeof(cachedSettings))) { GPUInfo("Cache file content outdated (rtc parameters)"); break; } std::vector<char> buffer; for (unsigned int i = 0; i < nCompile; i++) { if (fread(&len, sizeof(len), 1, fp) != 1) { throw std::runtime_error("Cache file corrupt"); } buffer.resize(len); if (fread(buffer.data(), 1, len, fp) != len) { throw std::runtime_error("Cache file corrupt"); } FILE* fp2 = fopen((filename + "_" + std::to_string(i) + ".o").c_str(), "w+b"); if (fp2 == nullptr) { throw std::runtime_error("Cannot open tmp file"); } if (fwrite(buffer.data(), 1, len, fp2) != len) { throw std::runtime_error("Error writing file"); } fclose(fp2); } GPUInfo("Using RTC cache file"); cacheLoaded = true; break; }; fclose(fp); } #endif } if (!cacheLoaded) { if (mProcessingSettings.debugLevel >= 0) { GPUInfo("Starting CUDA RTC Compilation"); } HighResTimer rtcTimer; rtcTimer.ResetStart(); #pragma omp parallel for for (unsigned int i = 0; i < nCompile; i++) { if (mProcessingSettings.debugLevel >= 3) { printf("Compiling %s\n", (filename + "_" + std::to_string(i) + ".cu").c_str()); } FILE* fp = fopen((filename + "_" + std::to_string(i) + ".cu").c_str(), "w+b"); if (fp == nullptr) { throw std::runtime_error("Error opening file"); } std::string kernel = "extern \"C\" {"; kernel += mProcessingSettings.rtc.compilePerKernel ? 
kernels[i] : kernelsall; kernel += "}"; if (fwrite(rtcparam.c_str(), 1, rtcparam.size(), fp) != rtcparam.size() || fwrite(_binary_GPUReconstructionCUDArtc_src_start, 1, _binary_GPUReconstructionCUDArtc_src_len, fp) != _binary_GPUReconstructionCUDArtc_src_len || fwrite(kernel.c_str(), 1, kernel.size(), fp) != kernel.size()) { throw std::runtime_error("Error writing file"); } fclose(fp); std::string command = std::string(_binary_GPUReconstructionCUDArtc_command_start, _binary_GPUReconstructionCUDArtc_command_len); command += " -cubin -c " + filename + "_" + std::to_string(i) + ".cu -o " + filename + "_" + std::to_string(i) + ".o"; if (mProcessingSettings.debugLevel >= 3) { printf("Running command %s\n", command.c_str()); } if (system(command.c_str())) { throw std::runtime_error("Error during CUDA compilation"); } } if (mProcessingSettings.debugLevel >= 0) { GPUInfo("RTC Compilation finished (%f seconds)", rtcTimer.GetCurrentElapsedTime()); } if (mProcessingSettings.rtc.cacheOutput) { FILE* fp = fopen("rtc.cuda.cache", "w+b"); if (fp == nullptr) { throw std::runtime_error("Cannot open cache file for writing"); } GPUInfo("Storing RTC compilation result in cache file"); if (fwrite(shasource, 1, 20, fp) != 20 || fwrite(shaparam, 1, 20, fp) != 20 || fwrite(shacmd, 1, 20, fp) != 20 || fwrite(shakernels, 1, 20, fp) != 20 || fwrite(&mProcessingSettings.rtc, sizeof(mProcessingSettings.rtc), 1, fp) != 1) { throw std::runtime_error("Error writing cache file"); } std::vector<char> buffer; for (unsigned int i = 0; i < nCompile; i++) { FILE* fp2 = fopen((filename + "_" + std::to_string(i) + ".o").c_str(), "rb"); if (fp2 == nullptr) { throw std::runtime_error("Cannot open cuda module file"); } fseek(fp2, 0, SEEK_END); size_t size = ftell(fp2); buffer.resize(size); fseek(fp2, 0, SEEK_SET); if (fread(buffer.data(), 1, size, fp2) != size) { throw std::runtime_error("Error reading cuda module file"); } fclose(fp2); if (fwrite(&size, sizeof(size), 1, fp) != 1 || fwrite(buffer.data(), 1, size, fp) != size) { throw std::runtime_error("Error writing cache file"); } } fclose(fp); } } for (unsigned int i = 0; i < nCompile; i++) { mInternals->rtcModules.emplace_back(std::make_unique<hipModule_t>()); GPUFailedMsg(hipModuleLoad(mInternals->rtcModules.back().get(), (filename + "_" + std::to_string(i) + ".o").c_str())); remove((filename + "_" + std::to_string(i) + ".cu").c_str()); remove((filename + "_" + std::to_string(i) + ".o").c_str()); } int j = 0; #define GPUCA_KRNL(x_class, x_attributes, x_arguments, x_forward) GPUCA_KRNL_WRAP(GPUCA_KRNL_LOAD_, x_class, x_attributes, x_arguments, x_forward) #define GPUCA_KRNL_LOAD_single(x_class, x_attributes, x_arguments, x_forward) \ mInternals->getRTCkernelNum<false, GPUCA_M_KRNL_TEMPLATE(x_class)>(mInternals->rtcFunctions.size()); \ mInternals->rtcFunctions.emplace_back(new hipFunction_t); \ GPUFailedMsg(hipModuleGetFunction(mInternals->rtcFunctions.back().get(), *mInternals->rtcModules[mProcessingSettings.rtc.compilePerKernel ? j++ : 0], GPUCA_M_STR(GPUCA_M_CAT(krnl_, GPUCA_M_KRNL_NAME(x_class))))); #define GPUCA_KRNL_LOAD_multi(x_class, x_attributes, x_arguments, x_forward) \ mInternals->getRTCkernelNum<true, GPUCA_M_KRNL_TEMPLATE(x_class)>(mInternals->rtcFunctions.size()); \ mInternals->rtcFunctions.emplace_back(new hipFunction_t); \ GPUFailedMsg(hipModuleGetFunction(mInternals->rtcFunctions.back().get(), *mInternals->rtcModules[mProcessingSettings.rtc.compilePerKernel ? 
j++ : 0], GPUCA_M_STR(GPUCA_M_CAT3(krnl_, GPUCA_M_KRNL_NAME(x_class), _multi)))); #include "GPUReconstructionKernels.h" #undef GPUCA_KRNL #undef GPUCA_KRNL_LOAD_single #undef GPUCA_KRNL_LOAD_multi #endif return 0; }
cce6e1bd474994d0d08f0961afd24faebffe5340.cu
//**************************************************************************\ //* This file is property of and copyright by the ALICE Project *\ //* ALICE Experiment at CERN, All rights reserved. *\ //* *\ //* Primary Authors: Matthias Richter <[email protected]> *\ //* for The ALICE HLT Project. *\ //* *\ //* Permission to use, copy, modify and distribute this software and its *\ //* documentation strictly for non-commercial purposes is hereby granted *\ //* without fee, provided that the above copyright notice appears in all *\ //* copies and that both the copyright notice and this permission notice *\ //* appear in the supporting documentation. The authors make no claims *\ //* about the suitability of this software for any purpose. It is *\ //* provided "as is" without express or implied warranty. *\ //************************************************************************** /// \file GPUReconstructionCUDAGenRTC.cu /// \author David Rohr #define GPUCA_GPUCODE_HOSTONLY #include <omp.h> #include <cuda.h> #include <cuda_fp16.h> #include "GPUReconstructionCUDADef.h" #include "GPUReconstructionCUDA.h" #include "GPUReconstructionCUDAInternals.h" #include "GPUParamRTC.h" #include "GPUDefMacros.h" #include <unistd.h> #ifdef GPUCA_HAVE_O2HEADERS #include "Framework/SHA1.h" #endif using namespace GPUCA_NAMESPACE::gpu; #ifndef GPUCA_ALIROOT_LIB #include "utils/qGetLdBinarySymbols.h" QGET_LD_BINARY_SYMBOLS(GPUReconstructionCUDArtc_src); QGET_LD_BINARY_SYMBOLS(GPUReconstructionCUDArtc_command); #endif int GPUReconstructionCUDA::genRTC() { #ifndef GPUCA_ALIROOT_LIB std::string rtcparam = GPUParamRTC::generateRTCCode(param(), mProcessingSettings.rtc.optConstexpr); std::string filename = "/tmp/o2cagpu_rtc_"; filename += std::to_string(getpid()); filename += "_"; filename += std::to_string(rand()); std::vector<std::string> kernels; std::string kernelsall; #undef GPUCA_KRNL_REG #define GPUCA_KRNL_REG(args) __launch_bounds__(GPUCA_M_MAX2_3(GPUCA_M_STRIP(args))) #define GPUCA_KRNL(x_class, x_attributes, x_arguments, x_forward) GPUCA_KRNL_WRAP(GPUCA_KRNL_LOAD_, x_class, x_attributes, x_arguments, x_forward) #define GPUCA_KRNL_LOAD_single(x_class, x_attributes, x_arguments, x_forward) kernels.emplace_back(GPUCA_M_STR(GPUCA_KRNLGPU_SINGLE(x_class, x_attributes, x_arguments, x_forward))); #define GPUCA_KRNL_LOAD_multi(x_class, x_attributes, x_arguments, x_forward) kernels.emplace_back(GPUCA_M_STR(GPUCA_KRNLGPU_MULTI(x_class, x_attributes, x_arguments, x_forward))); #include "GPUReconstructionKernels.h" #undef GPUCA_KRNL #undef GPUCA_KRNL_LOAD_single #undef GPUCA_KRNL_LOAD_multi for (unsigned int i = 0; i < kernels.size(); i++) { kernelsall += kernels[i]; } #ifdef GPUCA_HAVE_O2HEADERS char shasource[21], shaparam[21], shacmd[21], shakernels[21]; if (mProcessingSettings.rtc.cacheOutput) { o2::framework::internal::SHA1(shasource, _binary_GPUReconstructionCUDArtc_src_start, _binary_GPUReconstructionCUDArtc_src_len); o2::framework::internal::SHA1(shaparam, rtcparam.c_str(), rtcparam.size()); o2::framework::internal::SHA1(shacmd, _binary_GPUReconstructionCUDArtc_command_start, _binary_GPUReconstructionCUDArtc_command_len); o2::framework::internal::SHA1(shakernels, kernelsall.c_str(), kernelsall.size()); } #endif unsigned int nCompile = mProcessingSettings.rtc.compilePerKernel ? 
kernels.size() : 1; bool cacheLoaded = false; if (mProcessingSettings.rtc.cacheOutput) { #ifndef GPUCA_HAVE_O2HEADERS throw std::runtime_error("Cannot use RTC cache without O2 headers"); #else FILE* fp = fopen("rtc.cuda.cache", "rb"); char sharead[20]; if (fp) { size_t len; while (true) { if (fread(sharead, 1, 20, fp) != 20) { throw std::runtime_error("Cache file corrupt"); } if (memcmp(sharead, shasource, 20)) { GPUInfo("Cache file content outdated (source)"); break; } if (fread(sharead, 1, 20, fp) != 20) { throw std::runtime_error("Cache file corrupt"); } if (memcmp(sharead, shaparam, 20)) { GPUInfo("Cache file content outdated (param)"); break; } if (fread(sharead, 1, 20, fp) != 20) { throw std::runtime_error("Cache file corrupt"); } if (memcmp(sharead, shacmd, 20)) { GPUInfo("Cache file content outdated (commandline)"); break; } if (fread(sharead, 1, 20, fp) != 20) { throw std::runtime_error("Cache file corrupt"); } if (memcmp(sharead, shakernels, 20)) { GPUInfo("Cache file content outdated (kernel definitions)"); break; } GPUSettingsProcessingRTC cachedSettings; if (fread(&cachedSettings, sizeof(cachedSettings), 1, fp) != 1) { throw std::runtime_error("Cache file corrupt"); } if (memcmp(&cachedSettings, &mProcessingSettings.rtc, sizeof(cachedSettings))) { GPUInfo("Cache file content outdated (rtc parameters)"); break; } std::vector<char> buffer; for (unsigned int i = 0; i < nCompile; i++) { if (fread(&len, sizeof(len), 1, fp) != 1) { throw std::runtime_error("Cache file corrupt"); } buffer.resize(len); if (fread(buffer.data(), 1, len, fp) != len) { throw std::runtime_error("Cache file corrupt"); } FILE* fp2 = fopen((filename + "_" + std::to_string(i) + ".o").c_str(), "w+b"); if (fp2 == nullptr) { throw std::runtime_error("Cannot open tmp file"); } if (fwrite(buffer.data(), 1, len, fp2) != len) { throw std::runtime_error("Error writing file"); } fclose(fp2); } GPUInfo("Using RTC cache file"); cacheLoaded = true; break; }; fclose(fp); } #endif } if (!cacheLoaded) { if (mProcessingSettings.debugLevel >= 0) { GPUInfo("Starting CUDA RTC Compilation"); } HighResTimer rtcTimer; rtcTimer.ResetStart(); #pragma omp parallel for for (unsigned int i = 0; i < nCompile; i++) { if (mProcessingSettings.debugLevel >= 3) { printf("Compiling %s\n", (filename + "_" + std::to_string(i) + ".cu").c_str()); } FILE* fp = fopen((filename + "_" + std::to_string(i) + ".cu").c_str(), "w+b"); if (fp == nullptr) { throw std::runtime_error("Error opening file"); } std::string kernel = "extern \"C\" {"; kernel += mProcessingSettings.rtc.compilePerKernel ? 
kernels[i] : kernelsall; kernel += "}"; if (fwrite(rtcparam.c_str(), 1, rtcparam.size(), fp) != rtcparam.size() || fwrite(_binary_GPUReconstructionCUDArtc_src_start, 1, _binary_GPUReconstructionCUDArtc_src_len, fp) != _binary_GPUReconstructionCUDArtc_src_len || fwrite(kernel.c_str(), 1, kernel.size(), fp) != kernel.size()) { throw std::runtime_error("Error writing file"); } fclose(fp); std::string command = std::string(_binary_GPUReconstructionCUDArtc_command_start, _binary_GPUReconstructionCUDArtc_command_len); command += " -cubin -c " + filename + "_" + std::to_string(i) + ".cu -o " + filename + "_" + std::to_string(i) + ".o"; if (mProcessingSettings.debugLevel >= 3) { printf("Running command %s\n", command.c_str()); } if (system(command.c_str())) { throw std::runtime_error("Error during CUDA compilation"); } } if (mProcessingSettings.debugLevel >= 0) { GPUInfo("RTC Compilation finished (%f seconds)", rtcTimer.GetCurrentElapsedTime()); } if (mProcessingSettings.rtc.cacheOutput) { FILE* fp = fopen("rtc.cuda.cache", "w+b"); if (fp == nullptr) { throw std::runtime_error("Cannot open cache file for writing"); } GPUInfo("Storing RTC compilation result in cache file"); if (fwrite(shasource, 1, 20, fp) != 20 || fwrite(shaparam, 1, 20, fp) != 20 || fwrite(shacmd, 1, 20, fp) != 20 || fwrite(shakernels, 1, 20, fp) != 20 || fwrite(&mProcessingSettings.rtc, sizeof(mProcessingSettings.rtc), 1, fp) != 1) { throw std::runtime_error("Error writing cache file"); } std::vector<char> buffer; for (unsigned int i = 0; i < nCompile; i++) { FILE* fp2 = fopen((filename + "_" + std::to_string(i) + ".o").c_str(), "rb"); if (fp2 == nullptr) { throw std::runtime_error("Cannot open cuda module file"); } fseek(fp2, 0, SEEK_END); size_t size = ftell(fp2); buffer.resize(size); fseek(fp2, 0, SEEK_SET); if (fread(buffer.data(), 1, size, fp2) != size) { throw std::runtime_error("Error reading cuda module file"); } fclose(fp2); if (fwrite(&size, sizeof(size), 1, fp) != 1 || fwrite(buffer.data(), 1, size, fp) != size) { throw std::runtime_error("Error writing cache file"); } } fclose(fp); } } for (unsigned int i = 0; i < nCompile; i++) { mInternals->rtcModules.emplace_back(std::make_unique<CUmodule>()); GPUFailedMsg(cuModuleLoad(mInternals->rtcModules.back().get(), (filename + "_" + std::to_string(i) + ".o").c_str())); remove((filename + "_" + std::to_string(i) + ".cu").c_str()); remove((filename + "_" + std::to_string(i) + ".o").c_str()); } int j = 0; #define GPUCA_KRNL(x_class, x_attributes, x_arguments, x_forward) GPUCA_KRNL_WRAP(GPUCA_KRNL_LOAD_, x_class, x_attributes, x_arguments, x_forward) #define GPUCA_KRNL_LOAD_single(x_class, x_attributes, x_arguments, x_forward) \ mInternals->getRTCkernelNum<false, GPUCA_M_KRNL_TEMPLATE(x_class)>(mInternals->rtcFunctions.size()); \ mInternals->rtcFunctions.emplace_back(new CUfunction); \ GPUFailedMsg(cuModuleGetFunction(mInternals->rtcFunctions.back().get(), *mInternals->rtcModules[mProcessingSettings.rtc.compilePerKernel ? j++ : 0], GPUCA_M_STR(GPUCA_M_CAT(krnl_, GPUCA_M_KRNL_NAME(x_class))))); #define GPUCA_KRNL_LOAD_multi(x_class, x_attributes, x_arguments, x_forward) \ mInternals->getRTCkernelNum<true, GPUCA_M_KRNL_TEMPLATE(x_class)>(mInternals->rtcFunctions.size()); \ mInternals->rtcFunctions.emplace_back(new CUfunction); \ GPUFailedMsg(cuModuleGetFunction(mInternals->rtcFunctions.back().get(), *mInternals->rtcModules[mProcessingSettings.rtc.compilePerKernel ? 
j++ : 0], GPUCA_M_STR(GPUCA_M_CAT3(krnl_, GPUCA_M_KRNL_NAME(x_class), _multi)))); #include "GPUReconstructionKernels.h" #undef GPUCA_KRNL #undef GPUCA_KRNL_LOAD_single #undef GPUCA_KRNL_LOAD_multi #endif return 0; }
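The pair above (GPUReconstructionCUDAGenRTC) mainly exercises hipify's driver-API substitutions: cuda.h and cuda_fp16.h become hip/hip_runtime.h and hip/hip_fp16.h, CUmodule and CUfunction become hipModule_t and hipFunction_t, and cuModuleLoad and cuModuleGetFunction become hipModuleLoad and hipModuleGetFunction. The following standalone sketch (not part of the dataset) shows that module-load pattern in the HIP dialect; the file name "module.o" and kernel name "krnl_example" are placeholders.

#include <hip/hip_runtime.h>
#include <cstdio>

// Loads a precompiled code object and looks up one kernel, mirroring the
// rtcModules/rtcFunctions pattern in the pair above.
// CUDA original:   CUmodule / CUfunction / cuModuleLoad / cuModuleGetFunction
// HIP translation: hipModule_t / hipFunction_t / hipModuleLoad / hipModuleGetFunction
int main() {
  hipModule_t mod;
  hipFunction_t func;
  if (hipModuleLoad(&mod, "module.o") != hipSuccess) {  // placeholder file name
    std::printf("module load failed\n");
    return 1;
  }
  if (hipModuleGetFunction(&func, mod, "krnl_example") != hipSuccess) {  // placeholder kernel name
    std::printf("kernel lookup failed\n");
    return 1;
  }
  std::printf("kernel loaded\n");
  hipModuleUnload(mod);
  return 0;
}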
428bf91bfa69b9291cf133bf857023555d4a901b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "./rotate.cuh" #include "megdnn/dtype.h" #include "src/cuda/utils.cuh" namespace megdnn { namespace cuda { static const int BX = 8; static const int BY = 8; namespace { #define rep(i, n) for (size_t i = 0; i < (n); ++i) template <typename T, bool clockwise, size_t IC> __global__ void rotate_kern( const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t istride0, size_t istride1, size_t istride2, size_t OH, size_t OW, size_t ostride0, size_t ostride1, size_t ostride2) { int iw = blockIdx.x * blockDim.x + threadIdx.x; int ih = blockIdx.y * blockDim.y + threadIdx.y; if (iw < IW && ih < IH) { int ow = clockwise ? IH - ih - 1 : ih; int oh = clockwise ? iw : IW - iw - 1; #pragma unroll rep(c, IC) { dst[blockIdx.z * ostride0 + oh * ostride1 + ow * ostride2 + c] = src[blockIdx.z * istride0 + ih * istride1 + iw * istride2 + c]; } } } #undef rep } // anonymous namespace namespace rotate { template <typename T, bool clockwise> void rotate( const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH, size_t istride0, size_t istride1, size_t istride2, size_t OH, size_t OW, size_t ostride0, size_t ostride1, size_t ostride2, hipStream_t stream) { dim3 threads(BX, BY); dim3 blocks(DIVUP(IW, BX), DIVUP(IH, BY), N); megdnn_assert(CH == 1 || CH == 3); if (CH == 1) hipLaunchKernelGGL(( rotate_kern<T, clockwise, 1>), dim3(blocks), dim3(threads), 0, stream, src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW, ostride0, ostride1, ostride2); else hipLaunchKernelGGL(( rotate_kern<T, clockwise, 3>), dim3(blocks), dim3(threads), 0, stream, src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW, ostride0, ostride1, ostride2); after_kernel_launch(); } #define INST(T, clockwise) \ template void rotate<T, clockwise>( \ const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH, \ size_t istride0, size_t istride1, size_t istride2, size_t OH, size_t OW, \ size_t ostride0, size_t ostride1, size_t ostride2, hipStream_t stream); #define cb(DType) \ INST(typename DTypeTrait<DType>::ctype, true) \ INST(typename DTypeTrait<DType>::ctype, false) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) #undef cb #undef INST } // namespace rotate } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
428bf91bfa69b9291cf133bf857023555d4a901b.cu
#include "./rotate.cuh" #include "megdnn/dtype.h" #include "src/cuda/utils.cuh" namespace megdnn { namespace cuda { static const int BX = 8; static const int BY = 8; namespace { #define rep(i, n) for (size_t i = 0; i < (n); ++i) template <typename T, bool clockwise, size_t IC> __global__ void rotate_kern( const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t istride0, size_t istride1, size_t istride2, size_t OH, size_t OW, size_t ostride0, size_t ostride1, size_t ostride2) { int iw = blockIdx.x * blockDim.x + threadIdx.x; int ih = blockIdx.y * blockDim.y + threadIdx.y; if (iw < IW && ih < IH) { int ow = clockwise ? IH - ih - 1 : ih; int oh = clockwise ? iw : IW - iw - 1; #pragma unroll rep(c, IC) { dst[blockIdx.z * ostride0 + oh * ostride1 + ow * ostride2 + c] = src[blockIdx.z * istride0 + ih * istride1 + iw * istride2 + c]; } } } #undef rep } // anonymous namespace namespace rotate { template <typename T, bool clockwise> void rotate( const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH, size_t istride0, size_t istride1, size_t istride2, size_t OH, size_t OW, size_t ostride0, size_t ostride1, size_t ostride2, cudaStream_t stream) { dim3 threads(BX, BY); dim3 blocks(DIVUP(IW, BX), DIVUP(IH, BY), N); megdnn_assert(CH == 1 || CH == 3); if (CH == 1) rotate_kern<T, clockwise, 1><<<blocks, threads, 0, stream>>>( src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW, ostride0, ostride1, ostride2); else rotate_kern<T, clockwise, 3><<<blocks, threads, 0, stream>>>( src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW, ostride0, ostride1, ostride2); after_kernel_launch(); } #define INST(T, clockwise) \ template void rotate<T, clockwise>( \ const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH, \ size_t istride0, size_t istride1, size_t istride2, size_t OH, size_t OW, \ size_t ostride0, size_t ostride1, size_t ostride2, cudaStream_t stream); #define cb(DType) \ INST(typename DTypeTrait<DType>::ctype, true) \ INST(typename DTypeTrait<DType>::ctype, false) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) #undef cb #undef INST } // namespace rotate } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
fd70ef8deffb6a17f1fe547a8df468d07d0987a2.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <linalg/eig.cuh> #include <random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename T> struct EigInputs { T tolerance; int len; int n_row; int n_col; unsigned long long int seed; int n; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const EigInputs<T> &dims) { return os; } template <typename T> class EigTest : public ::testing::TestWithParam<EigInputs<T>> { protected: void SetUp() override { CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH)); CUDA_CHECK(hipStreamCreate(&stream)); std::shared_ptr<deviceAllocator> allocator( new raft::mr::device::default_allocator); params = ::testing::TestWithParam<EigInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.len; raft::allocate(cov_matrix, len); T cov_matrix_h[] = {1.0, 0.9, 0.81, 0.729, 0.9, 1.0, 0.9, 0.81, 0.81, 0.9, 1.0, 0.9, 0.729, 0.81, 0.9, 1.0}; ASSERT(len == 16, "This test only works with 4x4 matrices!"); raft::update_device(cov_matrix, cov_matrix_h, len, stream); raft::allocate(eig_vectors, len); raft::allocate(eig_vals, params.n_col); raft::allocate(eig_vectors_jacobi, len); raft::allocate(eig_vals_jacobi, params.n_col); T eig_vectors_ref_h[] = {0.2790, -0.6498, 0.6498, -0.2789, -0.5123, 0.4874, 0.4874, -0.5123, 0.6498, 0.2789, -0.2789, -0.6498, 0.4874, 0.5123, 0.5123, 0.4874}; T eig_vals_ref_h[] = {0.0614, 0.1024, 0.3096, 3.5266}; raft::allocate(eig_vectors_ref, len); raft::allocate(eig_vals_ref, params.n_col); raft::update_device(eig_vectors_ref, eig_vectors_ref_h, len, stream); raft::update_device(eig_vals_ref, eig_vals_ref_h, params.n_col, stream); eigDC(cov_matrix, params.n_row, params.n_col, eig_vectors, eig_vals, cusolverH, stream, allocator); T tol = 1.e-7; int sweeps = 15; eigJacobi(cov_matrix, params.n_row, params.n_col, eig_vectors_jacobi, eig_vals_jacobi, cusolverH, stream, allocator, tol, sweeps); // test code for comparing two methods len = params.n * params.n; raft::allocate(cov_matrix_large, len); raft::allocate(eig_vectors_large, len); raft::allocate(eig_vectors_jacobi_large, len); raft::allocate(eig_vals_large, params.n); raft::allocate(eig_vals_jacobi_large, params.n); r.uniform(cov_matrix_large, len, T(-1.0), T(1.0), stream); eigDC(cov_matrix_large, params.n, params.n, eig_vectors_large, eig_vals_large, cusolverH, stream, allocator); eigJacobi(cov_matrix_large, params.n, params.n, eig_vectors_jacobi_large, eig_vals_jacobi_large, cusolverH, stream, allocator, tol, sweeps); } void TearDown() override { CUDA_CHECK(hipFree(cov_matrix)); CUDA_CHECK(hipFree(eig_vectors)); CUDA_CHECK(hipFree(eig_vectors_jacobi)); CUDA_CHECK(hipFree(eig_vals)); CUDA_CHECK(hipFree(eig_vals_jacobi)); CUDA_CHECK(hipFree(eig_vectors_ref)); CUDA_CHECK(hipFree(eig_vals_ref)); CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH)); 
CUDA_CHECK(hipStreamDestroy(stream)); } protected: EigInputs<T> params; T *cov_matrix, *eig_vectors, *eig_vectors_jacobi, *eig_vectors_ref, *eig_vals, *eig_vals_jacobi, *eig_vals_ref; T *cov_matrix_large, *eig_vectors_large, *eig_vectors_jacobi_large, *eig_vals_large, *eig_vals_jacobi_large; hipsolverDnHandle_t cusolverH = NULL; hipStream_t stream; }; const std::vector<EigInputs<float>> inputsf2 = { {0.001f, 4 * 4, 4, 4, 1234ULL, 256}}; const std::vector<EigInputs<double>> inputsd2 = { {0.001, 4 * 4, 4, 4, 1234ULL, 256}}; typedef EigTest<float> EigTestValF; TEST_P(EigTestValF, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vals_ref, eig_vals, params.n_col, raft::CompareApproxAbs<float>(params.tolerance))); } typedef EigTest<double> EigTestValD; TEST_P(EigTestValD, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vals_ref, eig_vals, params.n_col, raft::CompareApproxAbs<double>(params.tolerance))); } typedef EigTest<float> EigTestVecF; TEST_P(EigTestVecF, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vectors_ref, eig_vectors, params.len, raft::CompareApproxAbs<float>(params.tolerance))); } typedef EigTest<double> EigTestVecD; TEST_P(EigTestVecD, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vectors_ref, eig_vectors, params.len, raft::CompareApproxAbs<double>(params.tolerance))); } typedef EigTest<float> EigTestValJacobiF; TEST_P(EigTestValJacobiF, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vals_ref, eig_vals_jacobi, params.n_col, raft::CompareApproxAbs<float>(params.tolerance))); } typedef EigTest<double> EigTestValJacobiD; TEST_P(EigTestValJacobiD, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vals_ref, eig_vals_jacobi, params.n_col, raft::CompareApproxAbs<double>(params.tolerance))); } typedef EigTest<float> EigTestVecJacobiF; TEST_P(EigTestVecJacobiF, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vectors_ref, eig_vectors_jacobi, params.len, raft::CompareApproxAbs<float>(params.tolerance))); } typedef EigTest<double> EigTestVecJacobiD; TEST_P(EigTestVecJacobiD, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vectors_ref, eig_vectors_jacobi, params.len, raft::CompareApproxAbs<double>(params.tolerance))); } typedef EigTest<float> EigTestVecCompareF; TEST_P(EigTestVecCompareF, Result) { ASSERT_TRUE(raft::devArrMatch( eig_vectors_large, eig_vectors_jacobi_large, (params.n * params.n), raft::CompareApproxAbs<float>(params.tolerance))); } typedef EigTest<double> EigTestVecCompareD; TEST_P(EigTestVecCompareD, Result) { ASSERT_TRUE(raft::devArrMatch( eig_vectors_large, eig_vectors_jacobi_large, (params.n * params.n), raft::CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(EigTests, EigTestValF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestValD, ::testing::ValuesIn(inputsd2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestVecF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestVecD, ::testing::ValuesIn(inputsd2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestValJacobiF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestValJacobiD, ::testing::ValuesIn(inputsd2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestVecJacobiF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestVecJacobiD, ::testing::ValuesIn(inputsd2)); } // end namespace LinAlg } // end namespace MLCommon
fd70ef8deffb6a17f1fe547a8df468d07d0987a2.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <linalg/eig.cuh> #include <random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename T> struct EigInputs { T tolerance; int len; int n_row; int n_col; unsigned long long int seed; int n; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const EigInputs<T> &dims) { return os; } template <typename T> class EigTest : public ::testing::TestWithParam<EigInputs<T>> { protected: void SetUp() override { CUSOLVER_CHECK(cusolverDnCreate(&cusolverH)); CUDA_CHECK(cudaStreamCreate(&stream)); std::shared_ptr<deviceAllocator> allocator( new raft::mr::device::default_allocator); params = ::testing::TestWithParam<EigInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.len; raft::allocate(cov_matrix, len); T cov_matrix_h[] = {1.0, 0.9, 0.81, 0.729, 0.9, 1.0, 0.9, 0.81, 0.81, 0.9, 1.0, 0.9, 0.729, 0.81, 0.9, 1.0}; ASSERT(len == 16, "This test only works with 4x4 matrices!"); raft::update_device(cov_matrix, cov_matrix_h, len, stream); raft::allocate(eig_vectors, len); raft::allocate(eig_vals, params.n_col); raft::allocate(eig_vectors_jacobi, len); raft::allocate(eig_vals_jacobi, params.n_col); T eig_vectors_ref_h[] = {0.2790, -0.6498, 0.6498, -0.2789, -0.5123, 0.4874, 0.4874, -0.5123, 0.6498, 0.2789, -0.2789, -0.6498, 0.4874, 0.5123, 0.5123, 0.4874}; T eig_vals_ref_h[] = {0.0614, 0.1024, 0.3096, 3.5266}; raft::allocate(eig_vectors_ref, len); raft::allocate(eig_vals_ref, params.n_col); raft::update_device(eig_vectors_ref, eig_vectors_ref_h, len, stream); raft::update_device(eig_vals_ref, eig_vals_ref_h, params.n_col, stream); eigDC(cov_matrix, params.n_row, params.n_col, eig_vectors, eig_vals, cusolverH, stream, allocator); T tol = 1.e-7; int sweeps = 15; eigJacobi(cov_matrix, params.n_row, params.n_col, eig_vectors_jacobi, eig_vals_jacobi, cusolverH, stream, allocator, tol, sweeps); // test code for comparing two methods len = params.n * params.n; raft::allocate(cov_matrix_large, len); raft::allocate(eig_vectors_large, len); raft::allocate(eig_vectors_jacobi_large, len); raft::allocate(eig_vals_large, params.n); raft::allocate(eig_vals_jacobi_large, params.n); r.uniform(cov_matrix_large, len, T(-1.0), T(1.0), stream); eigDC(cov_matrix_large, params.n, params.n, eig_vectors_large, eig_vals_large, cusolverH, stream, allocator); eigJacobi(cov_matrix_large, params.n, params.n, eig_vectors_jacobi_large, eig_vals_jacobi_large, cusolverH, stream, allocator, tol, sweeps); } void TearDown() override { CUDA_CHECK(cudaFree(cov_matrix)); CUDA_CHECK(cudaFree(eig_vectors)); CUDA_CHECK(cudaFree(eig_vectors_jacobi)); CUDA_CHECK(cudaFree(eig_vals)); CUDA_CHECK(cudaFree(eig_vals_jacobi)); CUDA_CHECK(cudaFree(eig_vectors_ref)); CUDA_CHECK(cudaFree(eig_vals_ref)); CUSOLVER_CHECK(cusolverDnDestroy(cusolverH)); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: 
EigInputs<T> params; T *cov_matrix, *eig_vectors, *eig_vectors_jacobi, *eig_vectors_ref, *eig_vals, *eig_vals_jacobi, *eig_vals_ref; T *cov_matrix_large, *eig_vectors_large, *eig_vectors_jacobi_large, *eig_vals_large, *eig_vals_jacobi_large; cusolverDnHandle_t cusolverH = NULL; cudaStream_t stream; }; const std::vector<EigInputs<float>> inputsf2 = { {0.001f, 4 * 4, 4, 4, 1234ULL, 256}}; const std::vector<EigInputs<double>> inputsd2 = { {0.001, 4 * 4, 4, 4, 1234ULL, 256}}; typedef EigTest<float> EigTestValF; TEST_P(EigTestValF, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vals_ref, eig_vals, params.n_col, raft::CompareApproxAbs<float>(params.tolerance))); } typedef EigTest<double> EigTestValD; TEST_P(EigTestValD, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vals_ref, eig_vals, params.n_col, raft::CompareApproxAbs<double>(params.tolerance))); } typedef EigTest<float> EigTestVecF; TEST_P(EigTestVecF, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vectors_ref, eig_vectors, params.len, raft::CompareApproxAbs<float>(params.tolerance))); } typedef EigTest<double> EigTestVecD; TEST_P(EigTestVecD, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vectors_ref, eig_vectors, params.len, raft::CompareApproxAbs<double>(params.tolerance))); } typedef EigTest<float> EigTestValJacobiF; TEST_P(EigTestValJacobiF, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vals_ref, eig_vals_jacobi, params.n_col, raft::CompareApproxAbs<float>(params.tolerance))); } typedef EigTest<double> EigTestValJacobiD; TEST_P(EigTestValJacobiD, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vals_ref, eig_vals_jacobi, params.n_col, raft::CompareApproxAbs<double>(params.tolerance))); } typedef EigTest<float> EigTestVecJacobiF; TEST_P(EigTestVecJacobiF, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vectors_ref, eig_vectors_jacobi, params.len, raft::CompareApproxAbs<float>(params.tolerance))); } typedef EigTest<double> EigTestVecJacobiD; TEST_P(EigTestVecJacobiD, Result) { ASSERT_TRUE( raft::devArrMatch(eig_vectors_ref, eig_vectors_jacobi, params.len, raft::CompareApproxAbs<double>(params.tolerance))); } typedef EigTest<float> EigTestVecCompareF; TEST_P(EigTestVecCompareF, Result) { ASSERT_TRUE(raft::devArrMatch( eig_vectors_large, eig_vectors_jacobi_large, (params.n * params.n), raft::CompareApproxAbs<float>(params.tolerance))); } typedef EigTest<double> EigTestVecCompareD; TEST_P(EigTestVecCompareD, Result) { ASSERT_TRUE(raft::devArrMatch( eig_vectors_large, eig_vectors_jacobi_large, (params.n * params.n), raft::CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(EigTests, EigTestValF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestValD, ::testing::ValuesIn(inputsd2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestVecF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestVecD, ::testing::ValuesIn(inputsd2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestValJacobiF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestValJacobiD, ::testing::ValuesIn(inputsd2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestVecJacobiF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(EigTests, EigTestVecJacobiD, ::testing::ValuesIn(inputsd2)); } // end namespace LinAlg } // end namespace MLCommon
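In the eigen-decomposition test pair the notable substitutions are in handle and stream management: cusolverDnHandle_t, cusolverDnCreate and cusolverDnDestroy map to hipsolverDnHandle_t, hipsolverDnCreate and hipsolverDnDestroy, while cudaStreamCreate, cudaStreamDestroy and cudaFree map to hipStreamCreate, hipStreamDestroy and hipFree. The sketch below covers only the stream and memory half of that setup/teardown so it needs nothing beyond the HIP runtime; the hipsolver handle calls from the pair are deliberately left out.

#include <hip/hip_runtime.h>

// Mirrors the SetUp/TearDown stream handling in the test above:
// cudaStreamCreate -> hipStreamCreate, cudaStreamDestroy -> hipStreamDestroy,
// cudaFree -> hipFree.
int main() {
  hipStream_t stream;
  if (hipStreamCreate(&stream) != hipSuccess) return 1;

  double* d_buf = nullptr;
  if (hipMalloc((void**)&d_buf, 16 * sizeof(double)) != hipSuccess) return 1;

  // ... enqueue work on `stream` here ...

  hipFree(d_buf);
  hipStreamDestroy(stream);
  return 0;
}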
00153e0c1b63284e9e2f54b2a92ee304d54f10b4.hip
// !!! This is a file automatically generated by hipify!!! #include "../include/imageMatrix.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> //ImMatG function definition ImMatG::ImMatG(){ rows = 0; cols = 0; } ImMatG::ImMatG(size_t rows, size_t cols, double * data, bool onDeviceMemory){ this->rows = rows; this->cols = cols; if (onDeviceMemory){ this->data_d = data; } else{ hipMalloc(&(this->data_d), rows*cols*sizeof(double)); hipError_t cuerror = hipMemcpy(this->data_d, data, rows*cols*sizeof(double), hipMemcpyHostToDevice); } } ImMatG::ImMatG(size_t rows, size_t cols){ this->rows = rows; this->cols = cols; hipMalloc(&(this->data_d), rows*cols*sizeof(double)); } ImMatG::~ImMatG(){ hipFree((this->data_d)); } size_t ImMatG::getLength(void){ return rows*cols; } // GPU KERNELS __global__ void transposeKernel(const double *input, double *output, int height, int width){ extern __shared__ double temp[]; int xIndex = blockIdx.x*blockDim.x + threadIdx.x; int yIndex = blockIdx.y*blockDim.y + threadIdx.y; if ((xIndex < width) && (yIndex < height)){ int id_in = yIndex*width + xIndex; temp[threadIdx.x+threadIdx.y*(blockDim.x)] = input[id_in]; } __syncthreads(); int tempXIndex = xIndex; xIndex = yIndex; yIndex = tempXIndex; if ((xIndex < height) && (yIndex < width)){ int id_out = xIndex+yIndex*height; output[id_out] = temp[threadIdx.x+threadIdx.y*(blockDim.x)]; } } ImMatG* ImMatG::transpose(){ ImMatG *result= new ImMatG(cols, rows); int numThreads = 16; int blocksX = ceil(((float)cols) / numThreads); int blocksY = ceil(((float)rows) / numThreads); hipLaunchKernelGGL(( transposeKernel), dim3(dim3(blocksX, blocksY, 1)), dim3(dim3(numThreads, numThreads, 1)), (numThreads)*(numThreads)*sizeof(double), 0, data_d, result->data_d, rows, cols); return result; } __global__ void fillRowKernel(double *data, size_t cols, size_t row, double value){ int Xidx = threadIdx.x + blockIdx.x*blockDim.x; if (Xidx < cols){ data[Xidx + row*cols] = value; } } void ImMatG::fillRow(int row, double value){ if ((row >= this->rows) || (row < 0)){ std::cout << "Index doesn't agree with image size" << std::endl; return; } int threadNum = 128; fillRowKernel << <dim3(ceil((double)cols / threadNum), 1, 1), dim3(threadNum, 1, 1) >> >(data_d, cols, row, value); } // creates im mat object from csv file // parameter: // filename - filename of the files // returns: image matrix allocated on gpu ImMatG* readCSV(std::string fileName){ std::ifstream fileStream(fileName.c_str()); std::string line; double val; std::vector<double> values; int rows = 0, cols = 0; while (getline(fileStream, line)){ std::stringstream ss(line); cols = 0; while (ss >> val){ values.push_back(val); cols++; if (ss.peek() == ','){ ss.ignore(); } } rows++; } ImMatG * result = new ImMatG(rows, cols, values.data(), false); return result; } double *ImMatG::getData(){ double * data = new double[getLength()]; hipMemcpy(data, data_d, sizeof(double)*getLength(), hipMemcpyDeviceToHost); hipDeviceSynchronize(); return data; } __global__ void getColumnKernel(double *image, size_t rows, size_t cols, double *column){ int xIdx = threadIdx.x + blockIdx.x*blockDim.x; int yIdx = threadIdx.y + blockIdx.y*blockDim.y; if ((xIdx > cols) && (yIdx < cols)){ } }
00153e0c1b63284e9e2f54b2a92ee304d54f10b4.cu
#include "../include/imageMatrix.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> //ImMatG function definition ImMatG::ImMatG(){ rows = 0; cols = 0; } ImMatG::ImMatG(size_t rows, size_t cols, double * data, bool onDeviceMemory){ this->rows = rows; this->cols = cols; if (onDeviceMemory){ this->data_d = data; } else{ cudaMalloc(&(this->data_d), rows*cols*sizeof(double)); cudaError_t cuerror = cudaMemcpy(this->data_d, data, rows*cols*sizeof(double), cudaMemcpyHostToDevice); } } ImMatG::ImMatG(size_t rows, size_t cols){ this->rows = rows; this->cols = cols; cudaMalloc(&(this->data_d), rows*cols*sizeof(double)); } ImMatG::~ImMatG(){ cudaFree((this->data_d)); } size_t ImMatG::getLength(void){ return rows*cols; } // GPU KERNELS __global__ void transposeKernel(const double *input, double *output, int height, int width){ extern __shared__ double temp[]; int xIndex = blockIdx.x*blockDim.x + threadIdx.x; int yIndex = blockIdx.y*blockDim.y + threadIdx.y; if ((xIndex < width) && (yIndex < height)){ int id_in = yIndex*width + xIndex; temp[threadIdx.x+threadIdx.y*(blockDim.x)] = input[id_in]; } __syncthreads(); int tempXIndex = xIndex; xIndex = yIndex; yIndex = tempXIndex; if ((xIndex < height) && (yIndex < width)){ int id_out = xIndex+yIndex*height; output[id_out] = temp[threadIdx.x+threadIdx.y*(blockDim.x)]; } } ImMatG* ImMatG::transpose(){ ImMatG *result= new ImMatG(cols, rows); int numThreads = 16; int blocksX = ceil(((float)cols) / numThreads); int blocksY = ceil(((float)rows) / numThreads); transposeKernel<<<dim3(blocksX, blocksY, 1), dim3(numThreads, numThreads, 1), (numThreads)*(numThreads)*sizeof(double)>>>(data_d, result->data_d, rows, cols); return result; } __global__ void fillRowKernel(double *data, size_t cols, size_t row, double value){ int Xidx = threadIdx.x + blockIdx.x*blockDim.x; if (Xidx < cols){ data[Xidx + row*cols] = value; } } void ImMatG::fillRow(int row, double value){ if ((row >= this->rows) || (row < 0)){ std::cout << "Index doesn't agree with image size" << std::endl; return; } int threadNum = 128; fillRowKernel << <dim3(ceil((double)cols / threadNum), 1, 1), dim3(threadNum, 1, 1) >> >(data_d, cols, row, value); } // creates im mat object from csv file // parameter: // filename - filename of the files // returns: image matrix allocated on gpu ImMatG* readCSV(std::string fileName){ std::ifstream fileStream(fileName.c_str()); std::string line; double val; std::vector<double> values; int rows = 0, cols = 0; while (getline(fileStream, line)){ std::stringstream ss(line); cols = 0; while (ss >> val){ values.push_back(val); cols++; if (ss.peek() == ','){ ss.ignore(); } } rows++; } ImMatG * result = new ImMatG(rows, cols, values.data(), false); return result; } double *ImMatG::getData(){ double * data = new double[getLength()]; cudaMemcpy(data, data_d, sizeof(double)*getLength(), cudaMemcpyDeviceToHost); cudaThreadSynchronize(); return data; } __global__ void getColumnKernel(double *image, size_t rows, size_t cols, double *column){ int xIdx = threadIdx.x + blockIdx.x*blockDim.x; int yIdx = threadIdx.y + blockIdx.y*blockDim.y; if ((xIdx > cols) && (yIdx < cols)){ } }
0a33270b5f4acae763cecfbe0e74d0e01503492f.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <iostream> #include <math.h> #include <ctime> #include <cmath> #include <unistd.h> #include <stdio.h> /* we need these includes for CUDA's random number stuff */ #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define PI 3.14159265358979323846 double* three_dim_index(double* matrix, int i, int j, int k, double m, int b, int num_assets); __device__ double* two_dim_indexGPU(double* vector, int i, int j, double m, int b){ double* p; //specify index layout here p=&vector[b*(i)+(j)]; return p; } __device__ double* three_dim_indexGPU(double* matrix, int i, int j, int k, double m, int b, int num_assets){ double* p; //specify index layout here p=&matrix[i*b*num_assets+j*num_assets+k]; return p; } //this function returns transition densities between nodes __device__ double densityGPU(double Xold, double Xnew, double sigma, double r, double delta, double delta_t){ double f=0, x=0; x=(1/(sigma*sqrt(delta_t)))*(Xnew-Xold-(r-delta-0.5*sigma*sigma)*delta_t); f= (1/(sigma*sqrt(delta_t)))*(1/(sqrt(2*PI)))*exp(-0.5*x*x); return f; } //this is the payoff function for a geometric call option __device__ double GeometricPayOffCallV(double* X, double m, int b, int num_assets, double Strike){ double h; h=1; for(int l=0; l<num_assets; l++){ h*=exp(X[l]); } h=pow(h,1.0/(num_assets)); if(h-Strike>0){ h=h-Strike; } else{ h=0; } return h; } //this is the payoff function for a geometric put option __device__ double GeometricPayOffPutV(double* X, double m, int b, int num_assets, double Strike){ double h; h=1; for(int l=0; l<num_assets; l++){ h*=exp(X[l]); } h=pow(h,1.0/(num_assets)); if(Strike-h>0){ h=Strike-h; } else{ h=0; } return h; } //this function updates the weights for the suboptimal stopping routine __device__ void S_weights(double* S_Weights, double* X_device, double* S_new, int m, int b, double* sigma_device, double* delta_device, double delta_t, int num_assets, double r , int i, double* weight_denominator_device ){ double sum, w_s; for(int h=0; h<b; h++){ sum=0; w_s=1; for(int kk=0; kk<num_assets; kk++){ w_s*=densityGPU(S_new[kk], *three_dim_indexGPU(X_device, (i+1), h, kk, m, b, num_assets), sigma_device[kk], r, delta_device[kk], delta_t); } sum = *two_dim_indexGPU(weight_denominator_device, i, h, m-1, b); if(sum==0){printf("division by zero in weights function of path estimator\n");} w_s = (((double)b)*w_s)/sum; S_Weights[h]=w_s; } } //this kernel function performs the sub optimal stopping rule for the low bias estimate __global__ void PathEstimatorKernel(double* X_device, double* weight_denominator_device, double* V_device, double* delta_device, double* sigma_device, double* X0_device, int N, double strike, double r, double delta_t, int b, int m, int num_assets, hiprandState_t* states, double* results_dev, double* asset_amount_device){ int idx =blockDim.x*blockIdx.x + threadIdx.x; if(idx<N){ double v_0, S_i, Z, C, H, sum, weight; //, w_s, sum_Z; const int S_N= num_assets; const int S_W_N= b; double* S_new; S_new= new double[S_N]; double* S_Weights; S_Weights=new double[S_W_N]; int i=0; do { if(i==0){ for(int ll=0; ll<num_assets; ll++){ Z=hiprand_normal_double(&states[idx]); S_i=X0_device[ll] + (r-delta_device[ll]-0.5*pow(sigma_device[ll], 2))*delta_t + sigma_device[ll]*sqrt(delta_t)*Z; S_new[ll]=S_i; } } else{ for(int jj=0; jj<num_assets; jj++){ Z=hiprand_normal_double(&states[idx]); S_i=S_new[jj] + (r-delta_device[jj]-0.5*pow(sigma_device[jj], 2))*delta_t + sigma_device[jj]*sqrt(delta_t)*Z; 
S_new[jj]=S_i; } } if(i<m-1){ S_weights(S_Weights, X_device, S_new, m, b, sigma_device, delta_device, delta_t, num_assets, r, i, weight_denominator_device); } double con_val=0; //continuation value variable sum=0; if(i==m-1){ C=0;//continuation value at the last time step } else{ for(int k=0; k<b; k++){ weight= S_Weights[k]; con_val= *two_dim_indexGPU(V_device, (m-1-i-1), k, m, b); sum+=(weight) * (con_val); } C=(1/(double)b)*sum; //continuation value } H= GeometricPayOffCallV(S_new, m, num_assets, num_assets, strike)*exp(-r*delta_t*((i+1))); i=i+1; }while(H<C);//this will stop once H is less then the continuation value. at m-1, c=0 therefore m-1 is the max amount of loops. v_0=H; results_dev[idx]=v_0; delete[] S_new; delete[] S_Weights; } } //this function returns the low bias estimate to the main function. it also allocates memory on the device and initialises the low bias kernel. double PathEstimator(double strike, double r, double delta_t, int b, double m, double sigma[], double delta[], double X0[], double* X, double* weight_denominator, double* V, double asset_amount[], int num_assets, int Path_estimator_iterations, int iterator, int Final_iteration, hiprandState_t* States, hiprandState_t* states, int threads ){ hipError_t error = hipGetLastError(); if( error != hipSuccess ) { std::cout << hipGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } int N= Path_estimator_iterations; double* sigma_host; sigma_host =sigma; double* delta_host; delta_host =delta; double* X0_host; X0_host =X0; double* asset_amount_host; asset_amount_host =asset_amount; int m_int=(int)m; int X_N=(m_int) * b * (num_assets); int W_N=(m_int-1) * b; int V_N=(m_int) * b; int delta_N= num_assets; int sigma_N=num_assets; int X0_N=num_assets; int asset_amount_N = num_assets; double* X_device; double* V_device; double* weight_denominator_device; double* sigma_device; double* delta_device; double* X0_device; double* asset_amount_device; error = hipGetLastError(); if( error != hipSuccess ) { std::cout << hipGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } hipMalloc((void**) &X_device, X_N*sizeof(double) ); hipMemcpy(X_device, X, X_N*sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &V_device, V_N*sizeof(double) ); hipMemcpy(V_device, V, V_N*sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &weight_denominator_device, W_N*sizeof(double) ); hipMemcpy(weight_denominator_device, weight_denominator, W_N*sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &X0_device, X0_N*sizeof(double) ); hipMemcpy(X0_device, X0_host, X0_N*sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &sigma_device, sigma_N*sizeof(double) ); hipMemcpy(sigma_device, sigma_host, sigma_N*sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &delta_device, delta_N*sizeof(double) ); hipMemcpy(delta_device, delta_host, delta_N*sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &asset_amount_device, asset_amount_N*sizeof(double) ); hipMemcpy(asset_amount_device, asset_amount_host, asset_amount_N*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(states, States, threads*sizeof(hiprandState_t*), hipMemcpyHostToDevice); dim3 gridDim((int)ceil(N/512.0)); dim3 blockDim(512.0); error = hipGetLastError(); if( error != hipSuccess ) { std::cout << hipGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } double* results; results = new double[N]; double* results_dev; hipMalloc((void**) &results_dev, N*sizeof(double) ); error 
= hipGetLastError(); if( error != hipSuccess ) { std::cout << hipGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } error = hipGetLastError(); if( error != hipSuccess ) { std::cout << hipGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } hipLaunchKernelGGL(( PathEstimatorKernel), dim3(gridDim), dim3(blockDim), 0, 0, X_device, weight_denominator_device, V_device, delta_device, sigma_device, X0_device, N, strike, r, delta_t, b, m_int, num_assets, states, results_dev, asset_amount_device); hipDeviceSynchronize(); error = hipGetLastError(); if( error != hipSuccess ) { std::cout << hipGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } hipMemcpy(results, results_dev, sizeof(double)*N, hipMemcpyDeviceToHost); error = hipGetLastError(); if( error != hipSuccess ) { std::cout << hipGetErrorString(error) << std::endl; printf("Found at line %d\n", __LINE__); exit(1); } hipMemcpy(States, states, sizeof(hiprandState_t)*threads, hipMemcpyDeviceToHost); error = hipGetLastError(); if( error != hipSuccess ) { std::cout << hipGetErrorString(error) << std::endl; printf("Found at line %d\n", __LINE__); exit(1); } double result=0; for(int f=0; f<Path_estimator_iterations; f++){ result+=results[f]; } result=(1/double(N))*result; delete[] results; error = hipGetLastError(); if( error != hipSuccess ) { std::cout << hipGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } hipFree(X_device); hipFree(V_device); hipFree(weight_denominator_device); hipFree(sigma_device); hipFree(delta_device); hipFree(X0_device); hipFree(results_dev); hipFree(asset_amount_device); if(iterator==Final_iteration-1){ hipFree(states); } return result; }
0a33270b5f4acae763cecfbe0e74d0e01503492f.cu
#include <cuda.h> #include <iostream> #include <math.h> #include <ctime> #include <cmath> #include <unistd.h> #include <stdio.h> /* we need these includes for CUDA's random number stuff */ #include <curand.h> #include <curand_kernel.h> #define PI 3.14159265358979323846 double* three_dim_index(double* matrix, int i, int j, int k, double m, int b, int num_assets); __device__ double* two_dim_indexGPU(double* vector, int i, int j, double m, int b){ double* p; //specify index layout here p=&vector[b*(i)+(j)]; return p; } __device__ double* three_dim_indexGPU(double* matrix, int i, int j, int k, double m, int b, int num_assets){ double* p; //specify index layout here p=&matrix[i*b*num_assets+j*num_assets+k]; return p; } //this function returns transition densities between nodes __device__ double densityGPU(double Xold, double Xnew, double sigma, double r, double delta, double delta_t){ double f=0, x=0; x=(1/(sigma*sqrt(delta_t)))*(Xnew-Xold-(r-delta-0.5*sigma*sigma)*delta_t); f= (1/(sigma*sqrt(delta_t)))*(1/(sqrt(2*PI)))*exp(-0.5*x*x); return f; } //this is the payoff function for a geometric call option __device__ double GeometricPayOffCallV(double* X, double m, int b, int num_assets, double Strike){ double h; h=1; for(int l=0; l<num_assets; l++){ h*=exp(X[l]); } h=pow(h,1.0/(num_assets)); if(h-Strike>0){ h=h-Strike; } else{ h=0; } return h; } //this is the payoff function for a geometric put option __device__ double GeometricPayOffPutV(double* X, double m, int b, int num_assets, double Strike){ double h; h=1; for(int l=0; l<num_assets; l++){ h*=exp(X[l]); } h=pow(h,1.0/(num_assets)); if(Strike-h>0){ h=Strike-h; } else{ h=0; } return h; } //this function updates the weights for the suboptimal stopping routine __device__ void S_weights(double* S_Weights, double* X_device, double* S_new, int m, int b, double* sigma_device, double* delta_device, double delta_t, int num_assets, double r , int i, double* weight_denominator_device ){ double sum, w_s; for(int h=0; h<b; h++){ sum=0; w_s=1; for(int kk=0; kk<num_assets; kk++){ w_s*=densityGPU(S_new[kk], *three_dim_indexGPU(X_device, (i+1), h, kk, m, b, num_assets), sigma_device[kk], r, delta_device[kk], delta_t); } sum = *two_dim_indexGPU(weight_denominator_device, i, h, m-1, b); if(sum==0){printf("division by zero in weights function of path estimator\n");} w_s = (((double)b)*w_s)/sum; S_Weights[h]=w_s; } } //this kernel function performs the sub optimal stopping rule for the low bias estimate __global__ void PathEstimatorKernel(double* X_device, double* weight_denominator_device, double* V_device, double* delta_device, double* sigma_device, double* X0_device, int N, double strike, double r, double delta_t, int b, int m, int num_assets, curandState_t* states, double* results_dev, double* asset_amount_device){ int idx =blockDim.x*blockIdx.x + threadIdx.x; if(idx<N){ double v_0, S_i, Z, C, H, sum, weight; //, w_s, sum_Z; const int S_N= num_assets; const int S_W_N= b; double* S_new; S_new= new double[S_N]; double* S_Weights; S_Weights=new double[S_W_N]; int i=0; do { if(i==0){ for(int ll=0; ll<num_assets; ll++){ Z=curand_normal_double(&states[idx]); S_i=X0_device[ll] + (r-delta_device[ll]-0.5*pow(sigma_device[ll], 2))*delta_t + sigma_device[ll]*sqrt(delta_t)*Z; S_new[ll]=S_i; } } else{ for(int jj=0; jj<num_assets; jj++){ Z=curand_normal_double(&states[idx]); S_i=S_new[jj] + (r-delta_device[jj]-0.5*pow(sigma_device[jj], 2))*delta_t + sigma_device[jj]*sqrt(delta_t)*Z; S_new[jj]=S_i; } } if(i<m-1){ S_weights(S_Weights, X_device, S_new, m, b, sigma_device, 
delta_device, delta_t, num_assets, r, i, weight_denominator_device); } double con_val=0; //continuation value variable sum=0; if(i==m-1){ C=0;//continuation value at the last time step } else{ for(int k=0; k<b; k++){ weight= S_Weights[k]; con_val= *two_dim_indexGPU(V_device, (m-1-i-1), k, m, b); sum+=(weight) * (con_val); } C=(1/(double)b)*sum; //continuation value } H= GeometricPayOffCallV(S_new, m, num_assets, num_assets, strike)*exp(-r*delta_t*((i+1))); i=i+1; }while(H<C);//this will stop once H is less then the continuation value. at m-1, c=0 therefore m-1 is the max amount of loops. v_0=H; results_dev[idx]=v_0; delete[] S_new; delete[] S_Weights; } } //this function returns the low bias estimate to the main function. it also allocates memory on the device and initialises the low bias kernel. double PathEstimator(double strike, double r, double delta_t, int b, double m, double sigma[], double delta[], double X0[], double* X, double* weight_denominator, double* V, double asset_amount[], int num_assets, int Path_estimator_iterations, int iterator, int Final_iteration, curandState_t* States, curandState_t* states, int threads ){ cudaError_t error = cudaGetLastError(); if( error != cudaSuccess ) { std::cout << cudaGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } int N= Path_estimator_iterations; double* sigma_host; sigma_host =sigma; double* delta_host; delta_host =delta; double* X0_host; X0_host =X0; double* asset_amount_host; asset_amount_host =asset_amount; int m_int=(int)m; int X_N=(m_int) * b * (num_assets); int W_N=(m_int-1) * b; int V_N=(m_int) * b; int delta_N= num_assets; int sigma_N=num_assets; int X0_N=num_assets; int asset_amount_N = num_assets; double* X_device; double* V_device; double* weight_denominator_device; double* sigma_device; double* delta_device; double* X0_device; double* asset_amount_device; error = cudaGetLastError(); if( error != cudaSuccess ) { std::cout << cudaGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } cudaMalloc((void**) &X_device, X_N*sizeof(double) ); cudaMemcpy(X_device, X, X_N*sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &V_device, V_N*sizeof(double) ); cudaMemcpy(V_device, V, V_N*sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &weight_denominator_device, W_N*sizeof(double) ); cudaMemcpy(weight_denominator_device, weight_denominator, W_N*sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &X0_device, X0_N*sizeof(double) ); cudaMemcpy(X0_device, X0_host, X0_N*sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &sigma_device, sigma_N*sizeof(double) ); cudaMemcpy(sigma_device, sigma_host, sigma_N*sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &delta_device, delta_N*sizeof(double) ); cudaMemcpy(delta_device, delta_host, delta_N*sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &asset_amount_device, asset_amount_N*sizeof(double) ); cudaMemcpy(asset_amount_device, asset_amount_host, asset_amount_N*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(states, States, threads*sizeof(curandState_t*), cudaMemcpyHostToDevice); dim3 gridDim((int)ceil(N/512.0)); dim3 blockDim(512.0); error = cudaGetLastError(); if( error != cudaSuccess ) { std::cout << cudaGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } double* results; results = new double[N]; double* results_dev; cudaMalloc((void**) &results_dev, N*sizeof(double) ); error = cudaGetLastError(); if( error != cudaSuccess ) { 
std::cout << cudaGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } error = cudaGetLastError(); if( error != cudaSuccess ) { std::cout << cudaGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } PathEstimatorKernel<<<gridDim, blockDim>>>(X_device, weight_denominator_device, V_device, delta_device, sigma_device, X0_device, N, strike, r, delta_t, b, m_int, num_assets, states, results_dev, asset_amount_device); cudaDeviceSynchronize(); error = cudaGetLastError(); if( error != cudaSuccess ) { std::cout << cudaGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } cudaMemcpy(results, results_dev, sizeof(double)*N, cudaMemcpyDeviceToHost); error = cudaGetLastError(); if( error != cudaSuccess ) { std::cout << cudaGetErrorString(error) << std::endl; printf("Found at line %d\n", __LINE__); exit(1); } cudaMemcpy(States, states, sizeof(curandState_t)*threads, cudaMemcpyDeviceToHost); error = cudaGetLastError(); if( error != cudaSuccess ) { std::cout << cudaGetErrorString(error) << std::endl; printf("Found at line %d\n", __LINE__); exit(1); } double result=0; for(int f=0; f<Path_estimator_iterations; f++){ result+=results[f]; } result=(1/double(N))*result; delete[] results; error = cudaGetLastError(); if( error != cudaSuccess ) { std::cout << cudaGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } cudaFree(X_device); cudaFree(V_device); cudaFree(weight_denominator_device); cudaFree(sigma_device); cudaFree(delta_device); cudaFree(X0_device); cudaFree(results_dev); cudaFree(asset_amount_device); if(iterator==Final_iteration-1){ cudaFree(states); } return result; }
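// A minimal sketch, not part of the original source: the PathEstimator
// function above repeats the same cudaGetLastError()/print/exit block after
// almost every call. That pattern can be factored into a helper macro;
// CUDA_CHECK_LAST is a name introduced here purely for illustration.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK_LAST(msg)                                    \
  do {                                                          \
    cudaError_t e_ = cudaGetLastError();                        \
    if (e_ != cudaSuccess) {                                    \
      std::fprintf(stderr, "%s: %s (line %d)\n", (msg),         \
                   cudaGetErrorString(e_), __LINE__);           \
      std::exit(1);                                             \
    }                                                           \
  } while (0)

// Example usage, mirroring the launch in PathEstimator above:
//   PathEstimatorKernel<<<gridDim, blockDim>>>(/* ... */);
//   cudaDeviceSynchronize();
//   CUDA_CHECK_LAST("PathEstimatorKernel");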
cc9e8d4b7458f5b17c03507faa12ab2f40880a4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <primitiv/config.h> #include <primitiv/devices/cuda/device.h> #include <primitiv/devices/cuda/ops/common.h> #include <primitiv/internal/cuda/utils.h> namespace { CUDADEV_KERNEL_FW_X_CONST(multiply_const, px[i] * k); CUDADEV_KERNEL_BW_X_CONST(multiply_const, k * pgy[i]); CUDADEV_KERNEL_FW_X_SCALAR_R(multiply_scalar, ::__fmul_rn); CUDADEV_KERNEL_FW_AB(multiply, ::__fmul_rn); __global__ void multiply_bw_dev( const float *pa, const float *pb, const float *, const float *pgy, std::uint32_t size, std::uint32_t mba, std::uint32_t mbb, float *pga, float *pgb) { const std::uint32_t i = IDX; const std::uint32_t shift = blockIdx.y * size; if (i < size) { const float gy = pgy[i + shift]; const std::uint32_t a_ofs = i + mba * shift; const std::uint32_t b_ofs = i + mbb * shift; ::atomicAdd(pga + a_ofs, gy * pb[b_ofs]); ::atomicAdd(pgb + b_ofs, gy * pa[a_ofs]); } } } // namespace namespace primitiv { namespace devices { CUDADEV_FW_X_CONST(multiply_const); CUDADEV_BW_X_CONST(multiply_const); CUDADEV_FW_X_SCALAR(multiply_scalar); CUDADEV_FW_AB(multiply); CUDADEV_BW_AB(multiply); } // namespace devices } // namespace primitiv
cc9e8d4b7458f5b17c03507faa12ab2f40880a4c.cu
#include <primitiv/config.h> #include <primitiv/devices/cuda/device.h> #include <primitiv/devices/cuda/ops/common.h> #include <primitiv/internal/cuda/utils.h> namespace { CUDADEV_KERNEL_FW_X_CONST(multiply_const, px[i] * k); CUDADEV_KERNEL_BW_X_CONST(multiply_const, k * pgy[i]); CUDADEV_KERNEL_FW_X_SCALAR_R(multiply_scalar, ::__fmul_rn); CUDADEV_KERNEL_FW_AB(multiply, ::__fmul_rn); __global__ void multiply_bw_dev( const float *pa, const float *pb, const float *, const float *pgy, std::uint32_t size, std::uint32_t mba, std::uint32_t mbb, float *pga, float *pgb) { const std::uint32_t i = IDX; const std::uint32_t shift = blockIdx.y * size; if (i < size) { const float gy = pgy[i + shift]; const std::uint32_t a_ofs = i + mba * shift; const std::uint32_t b_ofs = i + mbb * shift; ::atomicAdd(pga + a_ofs, gy * pb[b_ofs]); ::atomicAdd(pgb + b_ofs, gy * pa[a_ofs]); } } } // namespace namespace primitiv { namespace devices { CUDADEV_FW_X_CONST(multiply_const); CUDADEV_BW_X_CONST(multiply_const); CUDADEV_FW_X_SCALAR(multiply_scalar); CUDADEV_FW_AB(multiply); CUDADEV_BW_AB(multiply); } // namespace devices } // namespace primitiv
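// A host-side reference sketch, not part of primitiv: multiply_bw_dev above
// applies the product rule for y = a * b, accumulating ga += gy * b and
// gb += gy * a. atomicAdd is required on the device because mba/mbb are 0 for
// a broadcast input, so every minibatch slice folds its gradient into the same
// element. multiply_bw_reference is a hypothetical name used for illustration.
#include <cstdint>
#include <vector>

void multiply_bw_reference(
    const std::vector<float> &a, const std::vector<float> &b,
    const std::vector<float> &gy, std::uint32_t size, std::uint32_t batch,
    std::uint32_t mba, std::uint32_t mbb,  // 1 = batched input, 0 = broadcast
    std::vector<float> &ga, std::vector<float> &gb) {
  for (std::uint32_t bi = 0; bi < batch; ++bi) {
    const std::uint32_t shift = bi * size;  // same role as blockIdx.y * size in the kernel
    for (std::uint32_t i = 0; i < size; ++i) {
      const float g = gy[i + shift];
      ga[i + mba * shift] += g * b[i + mbb * shift];
      gb[i + mbb * shift] += g * a[i + mba * shift];
    }
  }
}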
d7aa4cd6ebea65955fa16085b4075d71ac31f371.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/highgui/highgui_c.h> #include <opencv2/imgproc/imgproc.hpp> #include "caffe/layers/base_data_layer.hpp" namespace caffe { template <typename Dtype> void BasePrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } if (this->sec_data_) { top[2]->ReshapeLike(batch->data2_); caffe_copy(batch->data2_.count(), batch->data2_.gpu_data(), top[2]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(hipStreamSynchronize(hipStreamDefault)); prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); } // namespace caffe
d7aa4cd6ebea65955fa16085b4075d71ac31f371.cu
#include <vector> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/highgui/highgui_c.h> #include <opencv2/imgproc/imgproc.hpp> #include "caffe/layers/base_data_layer.hpp" namespace caffe { template <typename Dtype> void BasePrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } if (this->sec_data_) { top[2]->ReshapeLike(batch->data2_); caffe_copy(batch->data2_.count(), batch->data2_.gpu_data(), top[2]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); } // namespace caffe
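// A minimal sketch with hypothetical names, not the Caffe implementation:
// Forward_gpu above pops a pre-filled batch from a "full" queue, issues the
// copies, synchronizes the stream so the buffers are no longer in flight, and
// recycles the batch through a "free" queue. The hand-off relies on a blocking
// queue along these lines.
#include <condition_variable>
#include <mutex>
#include <queue>

template <typename T>
class SimpleBlockingQueue {
 public:
  void push(T v) {
    {
      std::lock_guard<std::mutex> lock(m_);
      q_.push(std::move(v));
    }
    cv_.notify_one();
  }
  T pop() {
    std::unique_lock<std::mutex> lock(m_);
    cv_.wait(lock, [this] { return !q_.empty(); });  // block until a batch arrives
    T v = std::move(q_.front());
    q_.pop();
    return v;
  }
 private:
  std::mutex m_;
  std::condition_variable cv_;
  std::queue<T> q_;
};

// Consumer side, as in Forward_gpu: pop from the full queue, copy, then
// cudaStreamSynchronize(cudaStreamDefault) before pushing to the free queue,
// so the prefetch thread never overwrites data that is still being copied.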
a162ed8a83da8d4988f7da4911bf2f210f162066.hip
// !!! This is a file automatically generated by hipify!!! /* ============================================================================ Name : testers.cu Author : BlazingDB Version : Copyright : Your copyright notice Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU ============================================================================ */ #include <algorithm> #include <iostream> #include <numeric> #include <vector> #include <ctime> #include <thread> #include <thrust/reduce.h> #include <thrust/device_vector.h> #include "BlazingCachedAllocator.cuh" template <typename T> __host__ __device__ T reciprocal(const T &x) { return ((T) 1/x); } template <typename T> class ReciprocalFunctor { public: __host__ __device__ T operator()(const T &x) { return reciprocal(x); } }; template <typename T, class OpClass> T transformAndSumCPU(std::vector<T> data, OpClass op) { std::vector<T> temp(data.size()); std::transform(data.begin(), data.end(), temp.begin(), op); return std::accumulate(temp.begin(), temp.end(), (T)0); } template <typename T, class OpClass> void transformAndSumGPU(std::vector<T> data, OpClass op, T & result) { std::clock_t begin = std::clock(); thrust::device_vector<T> temp( data.end() - data.begin()); std::clock_t end = std::clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; std::cout<<"Seconds Allocating Slow"<<elapsed_secs<<std::endl; begin = std::clock(); thrust::copy( data.begin(), data.end(),temp.begin()); end = std::clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; std::cout<<"Seconds Copying Slow"<<elapsed_secs<<std::endl; begin = std::clock(); thrust::transform(temp.begin(), temp.end(), temp.begin(), op); end = std::clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; std::cout<<"Seonds transforming Slow"<<elapsed_secs<<std::endl; begin = std::clock(); result = thrust::reduce(temp.begin(), temp.end()); end = std::clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; std::cout<<"Seonds Reducing Slow"<<elapsed_secs<<std::endl; } template <typename T, class OpClass> void transformAndSumGPUCustomTempAlloc(std::vector<T> data, OpClass op,T & result, hipStream_t stream) { std::clock_t begin = std::clock(); thrust::device_vector<T > temp( data.end() - data.begin()); std::clock_t end = std::clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; std::cout<<"Seconds Allocating "<<elapsed_secs<<std::endl; //begin = std::clock(); thrust::copy(thrust::hip::par.on(stream), data.begin(), data.end(),temp.begin()); // end = std::clock(); // elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; //std::cout<<"Seconds Copying "<<elapsed_secs<<std::endl; // begin = std::clock(); thrust::transform(thrust::hip::par(cachedDeviceAllocator).on(stream),temp.begin(), temp.end(), temp.begin(), op); //end = std::clock(); //elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; //std::cout<<"Seconds transforming "<<elapsed_secs<<std::endl; //begin = std::clock(); result = thrust::reduce(thrust::hip::par(cachedDeviceAllocator).on(stream),temp.begin(), temp.end()); //end = std::clock(); //elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; //std::cout<<"Seconds Reducing "<<elapsed_secs<<std::endl; } template<typename T> void initialize(std::vector<T> &data, unsigned workSize) { /* Initialize the vector */ for (unsigned i = 0; i < workSize; i++) data.push_back( ((T)0.1)*(i+1) ); } int iterations = 100; template<typename T> void doCompute(unsigned workSize) { std::vector<T> hostData; std::vector<hipStream_t> 
streams(iterations);
  for (int i = 0; i < iterations; i++) {
    hipStreamCreate(&streams[i]);
  }
  initialize(hostData, workSize);
  T cpuResults = transformAndSumCPU(hostData, ReciprocalFunctor<T>());
  std::clock_t begin = std::clock();
  T gpuResults = 0;
  std::vector<std::thread> threads(iterations);
  for (int i = 0; i < iterations; i++) {
    //transformAndSumGPU returns void and writes its result through the reference parameter
    transformAndSumGPU(hostData, ReciprocalFunctor<T>(), gpuResults);
  }
  std::clock_t end = std::clock();
  double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
  begin = std::clock();
  for (int i = 0; i < iterations; i++) {
    //capture i by value so each thread uses its own stream; note that all
    //threads still write into the same shared gpuResults variable
    threads[i] = std::thread([&, i] {
      transformAndSumGPUCustomTempAlloc(hostData, ReciprocalFunctor<T>(), gpuResults, streams[i]);
    });
  }
  for (int i = 0; i < iterations; i++) {
    threads[i].join();
  }
  end = std::clock();
  double elapsed_secs_custom = double(end - begin) / CLOCKS_PER_SEC;
  std::cout << "transformAndSumCPU = " << cpuResults << std::endl;
  std::cout << "transformAndSumGPU = " << gpuResults << " in this many seconds when single threaded " << elapsed_secs << std::endl;
  std::cout << "transformAndSumGPUCustomTempAlloc = " << gpuResults << " in this many seconds " << elapsed_secs_custom << std::endl;
  for (int i = 0; i < iterations; i++) {
    hipStreamDestroy(streams[i]);
  }
}

int main(void) {
  int *x;
  hipMalloc((void **) &x, 100000);
  hipFree(x);
  doCompute<float>(1024 * 1024 * 5);
  return 0;
}
a162ed8a83da8d4988f7da4911bf2f210f162066.cu
/* ============================================================================ Name : testers.cu Author : BlazingDB Version : Copyright : Your copyright notice Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU ============================================================================ */ #include <algorithm> #include <iostream> #include <numeric> #include <vector> #include <ctime> #include <thread> #include <thrust/reduce.h> #include <thrust/device_vector.h> #include "BlazingCachedAllocator.cuh" template <typename T> __host__ __device__ T reciprocal(const T &x) { return ((T) 1/x); } template <typename T> class ReciprocalFunctor { public: __host__ __device__ T operator()(const T &x) { return reciprocal(x); } }; template <typename T, class OpClass> T transformAndSumCPU(std::vector<T> data, OpClass op) { std::vector<T> temp(data.size()); std::transform(data.begin(), data.end(), temp.begin(), op); return std::accumulate(temp.begin(), temp.end(), (T)0); } template <typename T, class OpClass> void transformAndSumGPU(std::vector<T> data, OpClass op, T & result) { std::clock_t begin = std::clock(); thrust::device_vector<T> temp( data.end() - data.begin()); std::clock_t end = std::clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; std::cout<<"Seconds Allocating Slow"<<elapsed_secs<<std::endl; begin = std::clock(); thrust::copy( data.begin(), data.end(),temp.begin()); end = std::clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; std::cout<<"Seconds Copying Slow"<<elapsed_secs<<std::endl; begin = std::clock(); thrust::transform(temp.begin(), temp.end(), temp.begin(), op); end = std::clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; std::cout<<"Seonds transforming Slow"<<elapsed_secs<<std::endl; begin = std::clock(); result = thrust::reduce(temp.begin(), temp.end()); end = std::clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; std::cout<<"Seonds Reducing Slow"<<elapsed_secs<<std::endl; } template <typename T, class OpClass> void transformAndSumGPUCustomTempAlloc(std::vector<T> data, OpClass op,T & result, cudaStream_t stream) { std::clock_t begin = std::clock(); thrust::device_vector<T > temp( data.end() - data.begin()); std::clock_t end = std::clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; std::cout<<"Seconds Allocating "<<elapsed_secs<<std::endl; //begin = std::clock(); thrust::copy(thrust::cuda::par.on(stream), data.begin(), data.end(),temp.begin()); // end = std::clock(); // elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; //std::cout<<"Seconds Copying "<<elapsed_secs<<std::endl; // begin = std::clock(); thrust::transform(thrust::cuda::par(cachedDeviceAllocator).on(stream),temp.begin(), temp.end(), temp.begin(), op); //end = std::clock(); //elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; //std::cout<<"Seconds transforming "<<elapsed_secs<<std::endl; //begin = std::clock(); result = thrust::reduce(thrust::cuda::par(cachedDeviceAllocator).on(stream),temp.begin(), temp.end()); //end = std::clock(); //elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; //std::cout<<"Seconds Reducing "<<elapsed_secs<<std::endl; } template<typename T> void initialize(std::vector<T> &data, unsigned workSize) { /* Initialize the vector */ for (unsigned i = 0; i < workSize; i++) data.push_back( ((T)0.1)*(i+1) ); } int iterations = 100; template<typename T> void doCompute(unsigned workSize) { std::vector<T> hostData; std::vector<cudaStream_t> streams(iterations); for(int i = 0; i < iterations; i++){ 
cudaStreamCreate(&streams[i]);
  }
  initialize(hostData, workSize);
  T cpuResults = transformAndSumCPU(hostData, ReciprocalFunctor<T>());
  std::clock_t begin = std::clock();
  T gpuResults = 0;
  std::vector<std::thread> threads(iterations);
  for (int i = 0; i < iterations; i++) {
    //transformAndSumGPU returns void and writes its result through the reference parameter
    transformAndSumGPU(hostData, ReciprocalFunctor<T>(), gpuResults);
  }
  std::clock_t end = std::clock();
  double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
  begin = std::clock();
  for (int i = 0; i < iterations; i++) {
    //capture i by value so each thread uses its own stream; note that all
    //threads still write into the same shared gpuResults variable
    threads[i] = std::thread([&, i] {
      transformAndSumGPUCustomTempAlloc(hostData, ReciprocalFunctor<T>(), gpuResults, streams[i]);
    });
  }
  for (int i = 0; i < iterations; i++) {
    threads[i].join();
  }
  end = std::clock();
  double elapsed_secs_custom = double(end - begin) / CLOCKS_PER_SEC;
  std::cout << "transformAndSumCPU = " << cpuResults << std::endl;
  std::cout << "transformAndSumGPU = " << gpuResults << " in this many seconds when single threaded " << elapsed_secs << std::endl;
  std::cout << "transformAndSumGPUCustomTempAlloc = " << gpuResults << " in this many seconds " << elapsed_secs_custom << std::endl;
  for (int i = 0; i < iterations; i++) {
    cudaStreamDestroy(streams[i]);
  }
}

int main(void) {
  int *x;
  cudaMalloc((void **) &x, 100000);
  cudaFree(x);
  doCompute<float>(1024 * 1024 * 5);
  return 0;
}
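// A sketch, not part of the original benchmark: std::clock (used above)
// measures host CPU time, so asynchronous Thrust work on a stream is only
// captured once the stream is synchronized. CUDA events time the device work
// itself. time_on_stream is a hypothetical helper introduced for illustration.
#include <cuda_runtime.h>

float time_on_stream(cudaStream_t stream, void (*enqueue_work)(cudaStream_t)) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, stream);
  enqueue_work(stream);            // e.g. the thrust::transform/reduce calls above
  cudaEventRecord(stop, stream);
  cudaEventSynchronize(stop);      // wait for the timed work to finish
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;                       // elapsed milliseconds on the device
}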
f05c624cf4891b84f2f9d36b0348f6387eb89a37.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

void helloCPU()
{
  printf("Hello from the CPU.\n");
}

/*
 * The addition of `__global__` signifies that this function
 * should be launched on the GPU.
 */
__global__ void helloGPU()
{
  printf("Hello from the GPU.\n");
}

int main()
{
  helloCPU();

  /*
   * Adding an execution configuration with the <<<...>>> syntax
   * launches this function as a kernel on the GPU.
   */
  hipLaunchKernelGGL(( helloGPU), dim3(1), dim3(1), 0, 0, );

  /*
   * `hipDeviceSynchronize` will block the CPU stream until
   * all GPU kernels have completed.
   */
  hipDeviceSynchronize();
}
f05c624cf4891b84f2f9d36b0348f6387eb89a37.cu
#include <stdio.h>

void helloCPU()
{
  printf("Hello from the CPU.\n");
}

/*
 * The addition of `__global__` signifies that this function
 * should be launched on the GPU.
 */
__global__ void helloGPU()
{
  printf("Hello from the GPU.\n");
}

int main()
{
  helloCPU();

  /*
   * Adding an execution configuration with the <<<...>>> syntax
   * launches this function as a kernel on the GPU.
   */
  helloGPU<<<1, 1>>>();

  /*
   * `cudaDeviceSynchronize` will block the CPU stream until
   * all GPU kernels have completed.
   */
  cudaDeviceSynchronize();
}
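// A follow-on sketch, not part of the original lesson file: kernel launch
// errors are reported asynchronously, so the usual pattern is to check
// cudaGetLastError() right after the launch as well as the return value of
// cudaDeviceSynchronize(). helloGPUChecked is a hypothetical name.
#include <stdio.h>

__global__ void helloGPUChecked() { printf("Hello from the GPU.\n"); }

int main() {
  helloGPUChecked<<<1, 1>>>();
  cudaError_t launchErr = cudaGetLastError();     // e.g. invalid launch configuration
  cudaError_t syncErr = cudaDeviceSynchronize();  // errors raised while the kernel ran
  if (launchErr != cudaSuccess || syncErr != cudaSuccess) {
    printf("CUDA error: %s / %s\n",
           cudaGetErrorString(launchErr), cudaGetErrorString(syncErr));
    return 1;
  }
  return 0;
}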
74d9ece1c6e73c22f6c25a2c1ed61d85a3582e24.hip
// !!! This is a file automatically generated by hipify!!! #include <color_spinor_field.h> #include <color_spinor_field_order.h> #include <dslash_quda.h> #include <index_helper.cuh> #include <dslash_quda.h> #include <include/kernels/dslash_domain_wall_m5.cuh> namespace quda { /* FIXME - fix flops counters - check dagger operators are correct - there might need to be a shift by 1 in which coefficients are used and conjugation of coefficients - use kappa notation and not b/c for consistency with other codes and sanity */ template <typename Float, int nColor, typename Arg> class Dslash5 : public TunableVectorYZ { protected: Arg &arg; const ColorSpinorField &meta; static constexpr bool shared = true; // whether to use shared memory cache blocking for M5inv /** Whether to use variable or fixed coefficient algorithm. Must be true if using ZMOBIUS */ static constexpr bool var_inverse = true; long long flops() const { long long Ls = meta.X(4); long long bulk = (Ls - 2) * (meta.Volume() / Ls); long long wall = 2 * meta.Volume() / Ls; long long n = meta.Ncolor() * meta.Nspin(); long long flops_ = 0; switch (arg.type) { case DSLASH5_DWF: flops_ = n * (8ll * bulk + 10ll * wall + (arg.xpay ? 4ll * meta.Volume() : 0)); break; case DSLASH5_MOBIUS_PRE: flops_ = n * (8ll * bulk + 10ll * wall + 14ll * meta.Volume() + (arg.xpay ? 8ll * meta.Volume() : 0)); break; case DSLASH5_MOBIUS: flops_ = n * (8ll * bulk + 10ll * wall + 8ll * meta.Volume() + (arg.xpay ? 8ll * meta.Volume() : 0)); break; case M5_INV_DWF: case M5_INV_MOBIUS: // FIXME flops // flops_ = ((2 + 8 * n) * Ls + (arg.xpay ? 4ll : 0)) * meta.Volume(); flops_ = (144 * Ls + (arg.xpay ? 4ll : 0)) * meta.Volume(); break; case M5_INV_ZMOBIUS: // flops_ = ((12 + 16 * n) * Ls + (arg.xpay ? 8ll : 0)) * meta.Volume(); flops_ = (144 * Ls + (arg.xpay ? 8ll : 0)) * meta.Volume(); break; default: errorQuda("Unknown Dslash5Type %d", arg.type); } return flops_; } long long bytes() const { long long Ls = meta.X(4); switch (arg.type) { case DSLASH5_DWF: return arg.out.Bytes() + 2 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case DSLASH5_MOBIUS_PRE: return arg.out.Bytes() + 3 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case DSLASH5_MOBIUS: return arg.out.Bytes() + 3 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_DWF: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_MOBIUS: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_ZMOBIUS: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); default: errorQuda("Unknown Dslash5Type %d", arg.type); } return 0ll; } bool tuneGridDim() const { return false; } unsigned int minThreads() const { return arg.volume_4d_cb; } int blockStep() const { return 4; } int blockMin() const { return 4; } unsigned int sharedBytesPerThread() const { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { // spin components in shared depend on inversion algorithm int nSpin = var_inverse ? 
meta.Nspin() / 2 : meta.Nspin(); return 2 * nSpin * nColor * sizeof(typename mapper<Float>::type); } else { return 0; } } // overloaded to return max dynamic shared memory if doing shared-memory inverse unsigned int maxSharedBytesPerBlock() const { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { return maxDynamicSharedBytesPerBlock(); } else { return TunableVectorYZ::maxSharedBytesPerBlock(); } } public: Dslash5(Arg &arg, const ColorSpinorField &meta) : TunableVectorYZ(arg.Ls, arg.nParity), arg(arg), meta(meta) { strcpy(aux, meta.AuxString()); if (arg.dagger) strcat(aux, ",Dagger"); if (arg.xpay) strcat(aux, ",xpay"); switch (arg.type) { case DSLASH5_DWF: strcat(aux, ",DSLASH5_DWF"); break; case DSLASH5_MOBIUS_PRE: strcat(aux, ",DSLASH5_MOBIUS_PRE"); break; case DSLASH5_MOBIUS: strcat(aux, ",DSLASH5_MOBIUS"); break; case M5_INV_DWF: strcat(aux, ",M5_INV_DWF"); break; case M5_INV_MOBIUS: strcat(aux, ",M5_INV_MOBIUS"); break; case M5_INV_ZMOBIUS: strcat(aux, ",M5_INV_ZMOBIUS"); break; default: errorQuda("Unknown Dslash5Type %d", arg.type); } } virtual ~Dslash5() {} template <typename T> inline void launch(T *f, const TuneParam &tp, Arg &arg, const hipStream_t &stream) { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { // if inverse kernel uses shared memory then maximize total shared memory pool setMaxDynamicSharedBytesPerBlock(f); } void *args[] = {&arg}; qudaLaunchKernel((const void *)f, tp.grid, tp.block, args, tp.shared_bytes, stream); } void apply(const hipStream_t &stream) { if (meta.Location() == QUDA_CPU_FIELD_LOCATION) { errorQuda("CPU variant not instantiated"); } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (arg.type == DSLASH5_DWF) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_DWF, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_DWF, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_DWF, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_DWF, Arg>, tp, arg, stream); } else if (arg.type == DSLASH5_MOBIUS_PRE) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream); } else if (arg.type == DSLASH5_MOBIUS) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_MOBIUS, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_MOBIUS, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_MOBIUS, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_MOBIUS, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_DWF) { if (arg.xpay) arg.dagger ? launch(dslash5invGPU<Float, nColor, true, true, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? 
launch(dslash5invGPU<Float, nColor, true, false, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_MOBIUS) { if (arg.xpay) arg.dagger ? launch( dslash5invGPU<Float, nColor, true, true, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? launch( dslash5invGPU<Float, nColor, true, false, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_ZMOBIUS) { if (arg.xpay) arg.dagger ? launch( dslash5invGPU<Float, nColor, true, true, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? launch( dslash5invGPU<Float, nColor, true, false, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream); } } } void initTuneParam(TuneParam &param) const { TunableVectorYZ::initTuneParam(param); if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { param.block.y = arg.Ls; // Ls must be contained in the block param.grid.y = 1; param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z; } } void defaultTuneParam(TuneParam &param) const { TunableVectorYZ::defaultTuneParam(param); if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { param.block.y = arg.Ls; // Ls must be contained in the block param.grid.y = 1; param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z; } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } }; template <typename Float, int nColor> void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { Dslash5Arg<Float, nColor> arg(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); Dslash5<Float, nColor, Dslash5Arg<Float, nColor>> dslash(arg, in); dslash.apply(streams[Nstream - 1]); } // template on the number of colors template <typename Float> void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { switch (in.Ncolor()) { case 3: ApplyDslash5<Float, 3>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; default: errorQuda("Unsupported number of colors %d\n", in.Ncolor()); } } // Apply the 5th dimension dslash operator to a colorspinor field // out = Dslash5*in void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { #ifdef GPU_DOMAIN_WALL_DIRAC if (in.PCType() != QUDA_4D_PC) errorQuda("Only 4-d preconditioned fields are supported"); checkLocation(out, in); // check all locations match switch (checkPrecision(out, in)) { case QUDA_DOUBLE_PRECISION: ApplyDslash5<double>(out, in, x, m_f, m_5, b_5, c_5, a, 
dagger, type); break; case QUDA_SINGLE_PRECISION: ApplyDslash5<float>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; case QUDA_HALF_PRECISION: ApplyDslash5<short>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; case QUDA_QUARTER_PRECISION: ApplyDslash5<char>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; default: errorQuda("Unsupported precision %d\n", in.Precision()); } #else errorQuda("Domain wall dslash has not been built"); #endif } } // namespace quda
74d9ece1c6e73c22f6c25a2c1ed61d85a3582e24.cu
#include <color_spinor_field.h> #include <color_spinor_field_order.h> #include <dslash_quda.h> #include <index_helper.cuh> #include <dslash_quda.h> #include <include/kernels/dslash_domain_wall_m5.cuh> namespace quda { /* FIXME - fix flops counters - check dagger operators are correct - there might need to be a shift by 1 in which coefficients are used and conjugation of coefficients - use kappa notation and not b/c for consistency with other codes and sanity */ template <typename Float, int nColor, typename Arg> class Dslash5 : public TunableVectorYZ { protected: Arg &arg; const ColorSpinorField &meta; static constexpr bool shared = true; // whether to use shared memory cache blocking for M5inv /** Whether to use variable or fixed coefficient algorithm. Must be true if using ZMOBIUS */ static constexpr bool var_inverse = true; long long flops() const { long long Ls = meta.X(4); long long bulk = (Ls - 2) * (meta.Volume() / Ls); long long wall = 2 * meta.Volume() / Ls; long long n = meta.Ncolor() * meta.Nspin(); long long flops_ = 0; switch (arg.type) { case DSLASH5_DWF: flops_ = n * (8ll * bulk + 10ll * wall + (arg.xpay ? 4ll * meta.Volume() : 0)); break; case DSLASH5_MOBIUS_PRE: flops_ = n * (8ll * bulk + 10ll * wall + 14ll * meta.Volume() + (arg.xpay ? 8ll * meta.Volume() : 0)); break; case DSLASH5_MOBIUS: flops_ = n * (8ll * bulk + 10ll * wall + 8ll * meta.Volume() + (arg.xpay ? 8ll * meta.Volume() : 0)); break; case M5_INV_DWF: case M5_INV_MOBIUS: // FIXME flops // flops_ = ((2 + 8 * n) * Ls + (arg.xpay ? 4ll : 0)) * meta.Volume(); flops_ = (144 * Ls + (arg.xpay ? 4ll : 0)) * meta.Volume(); break; case M5_INV_ZMOBIUS: // flops_ = ((12 + 16 * n) * Ls + (arg.xpay ? 8ll : 0)) * meta.Volume(); flops_ = (144 * Ls + (arg.xpay ? 8ll : 0)) * meta.Volume(); break; default: errorQuda("Unknown Dslash5Type %d", arg.type); } return flops_; } long long bytes() const { long long Ls = meta.X(4); switch (arg.type) { case DSLASH5_DWF: return arg.out.Bytes() + 2 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case DSLASH5_MOBIUS_PRE: return arg.out.Bytes() + 3 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case DSLASH5_MOBIUS: return arg.out.Bytes() + 3 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_DWF: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_MOBIUS: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_ZMOBIUS: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); default: errorQuda("Unknown Dslash5Type %d", arg.type); } return 0ll; } bool tuneGridDim() const { return false; } unsigned int minThreads() const { return arg.volume_4d_cb; } int blockStep() const { return 4; } int blockMin() const { return 4; } unsigned int sharedBytesPerThread() const { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { // spin components in shared depend on inversion algorithm int nSpin = var_inverse ? 
meta.Nspin() / 2 : meta.Nspin(); return 2 * nSpin * nColor * sizeof(typename mapper<Float>::type); } else { return 0; } } // overloaded to return max dynamic shared memory if doing shared-memory inverse unsigned int maxSharedBytesPerBlock() const { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { return maxDynamicSharedBytesPerBlock(); } else { return TunableVectorYZ::maxSharedBytesPerBlock(); } } public: Dslash5(Arg &arg, const ColorSpinorField &meta) : TunableVectorYZ(arg.Ls, arg.nParity), arg(arg), meta(meta) { strcpy(aux, meta.AuxString()); if (arg.dagger) strcat(aux, ",Dagger"); if (arg.xpay) strcat(aux, ",xpay"); switch (arg.type) { case DSLASH5_DWF: strcat(aux, ",DSLASH5_DWF"); break; case DSLASH5_MOBIUS_PRE: strcat(aux, ",DSLASH5_MOBIUS_PRE"); break; case DSLASH5_MOBIUS: strcat(aux, ",DSLASH5_MOBIUS"); break; case M5_INV_DWF: strcat(aux, ",M5_INV_DWF"); break; case M5_INV_MOBIUS: strcat(aux, ",M5_INV_MOBIUS"); break; case M5_INV_ZMOBIUS: strcat(aux, ",M5_INV_ZMOBIUS"); break; default: errorQuda("Unknown Dslash5Type %d", arg.type); } } virtual ~Dslash5() {} template <typename T> inline void launch(T *f, const TuneParam &tp, Arg &arg, const cudaStream_t &stream) { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { // if inverse kernel uses shared memory then maximize total shared memory pool setMaxDynamicSharedBytesPerBlock(f); } void *args[] = {&arg}; qudaLaunchKernel((const void *)f, tp.grid, tp.block, args, tp.shared_bytes, stream); } void apply(const cudaStream_t &stream) { if (meta.Location() == QUDA_CPU_FIELD_LOCATION) { errorQuda("CPU variant not instantiated"); } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (arg.type == DSLASH5_DWF) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_DWF, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_DWF, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_DWF, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_DWF, Arg>, tp, arg, stream); } else if (arg.type == DSLASH5_MOBIUS_PRE) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream); } else if (arg.type == DSLASH5_MOBIUS) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_MOBIUS, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_MOBIUS, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_MOBIUS, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_MOBIUS, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_DWF) { if (arg.xpay) arg.dagger ? launch(dslash5invGPU<Float, nColor, true, true, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? 
launch(dslash5invGPU<Float, nColor, true, false, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_MOBIUS) { if (arg.xpay) arg.dagger ? launch( dslash5invGPU<Float, nColor, true, true, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? launch( dslash5invGPU<Float, nColor, true, false, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_ZMOBIUS) { if (arg.xpay) arg.dagger ? launch( dslash5invGPU<Float, nColor, true, true, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? launch( dslash5invGPU<Float, nColor, true, false, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream); } } } void initTuneParam(TuneParam &param) const { TunableVectorYZ::initTuneParam(param); if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { param.block.y = arg.Ls; // Ls must be contained in the block param.grid.y = 1; param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z; } } void defaultTuneParam(TuneParam &param) const { TunableVectorYZ::defaultTuneParam(param); if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { param.block.y = arg.Ls; // Ls must be contained in the block param.grid.y = 1; param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z; } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } }; template <typename Float, int nColor> void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { Dslash5Arg<Float, nColor> arg(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); Dslash5<Float, nColor, Dslash5Arg<Float, nColor>> dslash(arg, in); dslash.apply(streams[Nstream - 1]); } // template on the number of colors template <typename Float> void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { switch (in.Ncolor()) { case 3: ApplyDslash5<Float, 3>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; default: errorQuda("Unsupported number of colors %d\n", in.Ncolor()); } } // Apply the 5th dimension dslash operator to a colorspinor field // out = Dslash5*in void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { #ifdef GPU_DOMAIN_WALL_DIRAC if (in.PCType() != QUDA_4D_PC) errorQuda("Only 4-d preconditioned fields are supported"); checkLocation(out, in); // check all locations match switch (checkPrecision(out, in)) { case QUDA_DOUBLE_PRECISION: ApplyDslash5<double>(out, in, x, m_f, m_5, b_5, c_5, a, 
dagger, type); break; case QUDA_SINGLE_PRECISION: ApplyDslash5<float>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; case QUDA_HALF_PRECISION: ApplyDslash5<short>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; case QUDA_QUARTER_PRECISION: ApplyDslash5<char>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; default: errorQuda("Unsupported precision %d\n", in.Precision()); } #else errorQuda("Domain wall dslash has not been built"); #endif } } // namespace quda
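// A miniature sketch with hypothetical names, not QUDA code: Dslash5::apply
// above folds the runtime flags (dagger, xpay) into compile-time template
// parameters so every kernel instantiation is fully specialized. The same
// dispatch idiom in its smallest form:
#include <cuda_runtime.h>

template <bool dagger, bool xpay>
__global__ void miniKernel(float *out, const float *in, int n, float a) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  float v = dagger ? -in[i] : in[i];  // stand-in for the dagger variant
  if (xpay) v += a * in[i];           // stand-in for the xpay accumulation
  out[i] = v;
}

void launchMini(float *out, const float *in, int n, float a,
                bool dagger, bool xpay, cudaStream_t stream) {
  dim3 block(256), grid((n + block.x - 1) / block.x);
  if (dagger) {
    if (xpay) miniKernel<true, true><<<grid, block, 0, stream>>>(out, in, n, a);
    else      miniKernel<true, false><<<grid, block, 0, stream>>>(out, in, n, a);
  } else {
    if (xpay) miniKernel<false, true><<<grid, block, 0, stream>>>(out, in, n, a);
    else      miniKernel<false, false><<<grid, block, 0, stream>>>(out, in, n, a);
  }
}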
10a9c97e032b1f1c2ef30b0310a7dc3986babbb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file test.c * \brief Implementation of test.h library. */ #include <stdio.h> #include <stdlib.h> #include <gmp.h> #include "mplib.h" #include "montgomery.h" #include "ecm.h" #include "test.h" #include <cudaProfiler.h> void pro_curve_point_gmp_test(int THRESHOLD) { mpz_t mp_n, mp_a, mp_b, mp_x, mp_y, mp_z, mp_y2, mp_x2, mp_x3, mp_by2, mp_by2z, mp_ax2, mp_ax2z, mp_z2, mp_xz2, mp_d, mp_mod, mp_dif; ui_t nl; int i, j, res = 0, trues = 0, falses = 0, singulars = 0, *d_flag; int flag[SIZE]; mpz_init(mp_n); mpz_init(mp_a); mpz_init(mp_b); mpz_init(mp_x); mpz_init(mp_y); mpz_init(mp_z); mpz_init(mp_y2); mpz_init(mp_x2); mpz_init(mp_x3); mpz_init(mp_by2); mpz_init(mp_by2z); mpz_init(mp_ax2); mpz_init(mp_ax2z); mpz_init(mp_z2); mpz_init(mp_xz2); mpz_init(mp_d); mpz_init(mp_mod); mpz_init(mp_dif); mpz_set_ui(mp_mod, 0L); mpz_set_ui(mp_dif, 0L); srand(time(NULL)); nl = 10; ui d_n, d_mu, d_d, d_controlArr, d; ui_t n[nl], mu[nl + 1]; d = (ui)malloc(sizeof(ui_t) * SIZE * nl); hipMalloc(&d_n, nl * sizeof(ui_t)); hipMalloc(&d_mu, (nl + 1) * sizeof(ui_t)); hipMalloc(&d_flag, SIZE * sizeof(ui_t)); hipMalloc(&d_controlArr, SIZE * sizeof(ui_t)); hipMalloc(&d_d, SIZE * nl * sizeof(ui_t)); big_rand(n, nl); n[0] -= 37; big_print(stdout, n, nl, "n", NULL); hipMemcpy(d_n, n, sizeof(ui_t) * nl, hipMemcpyHostToDevice); big_get_mu(mu, n, nl); hipMemcpy(d_mu, mu, sizeof(ui_t) * (nl + 1), hipMemcpyHostToDevice); size_t pValue; hipError_t error; hipDeviceSetLimit(hipLimitStackSize, 10000 * sizeof(ui_t)); hipDeviceGetLimit(&pValue, hipLimitStackSize); hipDeviceSetLimit(hipLimitDevRuntimePendingLaunchCount, 10000 * sizeof(ui_t)); hipDeviceGetLimit(&pValue, hipLimitDevRuntimePendingLaunchCount); hipDeviceGetLimit(&pValue, hipLimitMallocHeapSize); hipDeviceSetLimit(hipLimitMallocHeapSize , pValue * 50); hipDeviceGetLimit(&pValue, hipLimitMallocHeapSize); //TODO: static or dynamic? 
MONTG_CURVE c; PRO_POINT p; hipMallocManaged(&c, SIZE * sizeof(MONTG_CURVE_t)); hipMallocManaged(&p, SIZE * sizeof(PRO_POINT_t)); for(i = 0; i < SIZE; i++){ hipMallocManaged(&(c[i].A), nl * sizeof(MONTG_CURVE_t)); hipMallocManaged(&(c[i].B), nl * sizeof(MONTG_CURVE_t)); hipMallocManaged(&(p[i].X), nl * sizeof(MONTG_CURVE_t)); hipMallocManaged(&(p[i].Y), nl * sizeof(MONTG_CURVE_t)); hipMallocManaged(&(p[i].Z), nl * sizeof(MONTG_CURVE_t)); } hipLaunchKernelGGL(( proCurvePoint), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, d_d, c, p, d_n, nl, d_mu, nl + 1, d_flag); hipDeviceSynchronize(); error = hipGetLastError(); if(error != hipSuccess) fprintf(stderr, "ERROR: %s\n", hipGetErrorString(error)); hipMemcpy(d, d_d, sizeof(ui_t) * SIZE * nl, hipMemcpyDeviceToHost); hipMemcpy(flag, d_flag, sizeof(ui_t) * SIZE, hipMemcpyDeviceToHost); // int count = 0; // for(i = 0; i < SIZE; i++){ // //printf("%d: %d\n", i, flag[i]); // if(flag[i] == 1) // count++; // } // printf("count : %d\n", count); for(i = 0; i < SIZE; i++){ mpz_import(mp_n, nl, -1, 4, 0, 0, n); // n mod d = 0 check; if(flag[i] == 0) { mpz_import(mp_d, nl, -1, 4, 0, 0, d); mpz_mod(mp_mod, mp_n, mp_d); res = mpz_cmp_ui(mp_mod, 0); if(res == 0) { trues++; } else { falses++; printf("False at index: %d\n", i); } //(BY2Z - (X3 + AX2Z + XZ2)) mod n eq 0; } else if(flag[i] == 1){ mpz_import(mp_a, nl, -1, 4, 0, 0, c[i].A); mpz_import(mp_b, nl, -1, 4, 0, 0, c[i].B); mpz_import(mp_x, nl, -1, 4, 0, 0, p[i].X); mpz_import(mp_y, nl, -1, 4, 0, 0, p[i].Y); mpz_import(mp_z, nl, -1, 4, 0, 0, p[i].Z); mpz_mul(mp_y2, mp_y, mp_y); mpz_mul(mp_by2, mp_b, mp_y2); mpz_mul(mp_by2z, mp_by2, mp_z); mpz_mul(mp_x2, mp_x, mp_x); mpz_mul(mp_x3, mp_x2, mp_x); mpz_mul(mp_ax2, mp_a, mp_x2); mpz_mul(mp_ax2z, mp_ax2, mp_z); mpz_mul(mp_z2, mp_z, mp_z); mpz_mul(mp_xz2, mp_x, mp_z2); mpz_sub(mp_dif, mp_by2z, mp_x3); mpz_sub(mp_dif, mp_dif, mp_ax2z); mpz_sub(mp_dif, mp_dif, mp_xz2); mpz_mod(mp_dif, mp_dif, mp_n); res = mpz_cmp_ui(mp_dif, 0); if(res == 0) { trues++; } else{ falses++; printf("False at index: %d\n", i); } } else{ singulars++; printf("Singularity at index: %d\n", i); } } printf("TRUE: %d\n", trues); printf("FALSE: %d\n", falses); printf("SINGULAR: %d\n", singulars); i = 0; int t_index = 0; // while(i < SIZE * nl){ printf("t_index: %d: ", t_index); t_index++; j = i; while(j < (i + nl)){ printf("%u ", d[j]); j++; } printf("\n"); i += nl; } hipDeviceReset(); } void aff_curve_point_gmp_test(int THRESHOLD) { MONTG_CURVE c = (MONTG_CURVE)malloc(sizeof(MONTG_CURVE_t) * 1); AFF_POINT p = (AFF_POINT)malloc(sizeof(AFF_POINT_t) * 1); mpz_t mp_n, mp_a, mp_b, mp_x, mp_y, mp_y2, mp_x2, mp_x3, mp_by2, mp_ax2, mp_d, mp_mod, mp_dif; ui_t nl; int i, j, res = 0, flag = 0, trues = 0, falses = 0, singulars = 0; mpz_init(mp_n); mpz_init(mp_a); mpz_init(mp_b); mpz_init(mp_x); mpz_init(mp_y); mpz_init(mp_y2); mpz_init(mp_x2); mpz_init(mp_x3); mpz_init(mp_by2); mpz_init(mp_ax2); mpz_init(mp_d); mpz_init(mp_mod); mpz_init(mp_dif); mpz_set_ui(mp_mod, 0L); mpz_set_ui(mp_dif, 0L); for (i = 0; i < THRESHOLD; i++) { nl = (ui_t)(rand() % 100 + 1); ui_t n[nl], mu[nl + 1], d[nl]; flag = 0; big_rand(n, nl); big_get_mu(mu, n, nl); for (j = 0; j < nl; j++) { d[j] = 0; } aff_curve_point(d, c, p, n, nl, mu, nl + 1, &flag); mpz_import(mp_n, nl, -1, 4, 0, 0, n); if(flag == 0) { mpz_import(mp_d, nl, -1, 4, 0, 0, d); mpz_mod(mp_mod, mp_n, mp_d); res = mpz_cmp_ui(mp_mod, 0); if(res == 0) { trues++; } else { falses++; printf("False at index: %d\n", i); } } else if(flag == 1){ mpz_import(mp_a, nl, -1, 4, 0, 0, c->A); 
mpz_import(mp_b, nl, -1, 4, 0, 0, c->B); mpz_import(mp_x, nl, -1, 4, 0, 0, p->x); mpz_import(mp_y, nl, -1, 4, 0, 0, p->y); mpz_mul(mp_y2, mp_y, mp_y); mpz_mul(mp_by2, mp_b, mp_y2); mpz_mul(mp_x2, mp_x, mp_x); mpz_mul(mp_x3, mp_x2, mp_x); mpz_mul(mp_ax2, mp_a, mp_x2); mpz_sub(mp_dif, mp_by2, mp_x3); mpz_sub(mp_dif, mp_dif, mp_ax2); mpz_sub(mp_dif, mp_dif, mp_x); mpz_mod(mp_dif, mp_dif, mp_n); res = mpz_cmp_ui(mp_dif, 0); if(res == 0) { trues++; } else { falses++; printf("False at index: %d\n", i); } } else { singulars++; printf("Singularity at index: %d\n", i); } } printf("TRUE: %d\n", trues); printf("FALSE: %d\n", falses); printf("SINGULAR: %d\n", singulars); } void pro_add_gmp_test(int THRESHOLD) { MONTG_CURVE c = (MONTG_CURVE)malloc(sizeof(MONTG_CURVE_t) * 1); PRO_POINT p = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p1 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p2 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT pd = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); mpz_t mp_n, mp_x1, mp_x2, mp_xd, mp_z1, mp_z2, mp_zd, mp_a, mp_b, mp_c, mp_d, mp_da, mp_cb, mp_e, mp_f, mp_e2, mp_f2, mp_g, mp_h, mp_X, mp_Z; ui_t nl; int i, trues = 0, falses = 0; mpz_init(mp_n); mpz_init(mp_x1); mpz_init(mp_x2); mpz_init(mp_xd); mpz_init(mp_z1); mpz_init(mp_z2); mpz_init(mp_zd); mpz_init(mp_a); mpz_init(mp_b); mpz_init(mp_c); mpz_init(mp_d); mpz_init(mp_da); mpz_init(mp_cb); mpz_init(mp_e); mpz_init(mp_f); mpz_init(mp_e2); mpz_init(mp_f2); mpz_init(mp_g); mpz_init(mp_h); mpz_init(mp_X); mpz_init(mp_Z); for (i = 0; i < THRESHOLD; i++) { nl = (ui_t)(rand() % 100 + 1); ui_t n[nl], mu[nl + 1], X1[nl], X2[nl], Xd[nl], Z1[nl], Z2[nl], Zd[nl]; mpz_set_ui(mp_X, 0L); mpz_set_ui(mp_Z, 0L); big_rand(n, nl); big_get_mu(mu, n, nl); big_mod_rand(X1, nl, n, nl, mu, nl + 1); big_mod_rand(X2, nl, n, nl, mu, nl + 1); big_mod_rand(Xd, nl, n, nl, mu, nl + 1); big_mod_rand(Z1, nl, n, nl, mu, nl + 1); big_mod_rand(Z2, nl, n, nl, mu, nl + 1); big_mod_rand(Zd, nl, n, nl, mu, nl + 1); mpz_import(mp_n, nl, -1, 4, 0, 0, n); mpz_import(mp_x1, nl, -1, 4, 0, 0, X1); mpz_import(mp_x2, nl, -1, 4, 0, 0, X2); mpz_import(mp_xd, nl, -1, 4, 0, 0, Xd); mpz_import(mp_z1, nl, -1, 4, 0, 0, Z1); mpz_import(mp_z2, nl, -1, 4, 0, 0, Z2); mpz_import(mp_zd, nl, -1, 4, 0, 0, Zd); p1->X = X1; p2->X = X2; pd->X = Xd; p1->Z = Z1; p2->Z = Z2; pd->Z = Zd; pro_add(p, p1, p2, pd, n, nl, mu, nl + 1); mpz_import(mp_X, nl, -1, 4, 0, 0, p->X); mpz_import(mp_Z, nl, -1, 4, 0, 0, p->Z); mpz_add(mp_a, mp_x2, mp_z2); mpz_mod(mp_a, mp_a, mp_n); mpz_sub(mp_b, mp_x2, mp_z2); while(mpz_sgn(mp_b) == -1) { mpz_add(mp_b, mp_b, mp_n); } mpz_add(mp_c, mp_x1, mp_z1); mpz_mod(mp_c, mp_c, mp_n); mpz_sub(mp_d, mp_x1, mp_z1); while(mpz_sgn(mp_d) == -1) { mpz_add(mp_d, mp_d, mp_n); } mpz_mul(mp_da, mp_d, mp_a); mpz_mod(mp_da, mp_da, mp_n); mpz_mul(mp_cb, mp_c, mp_b); mpz_mod(mp_cb, mp_cb, mp_n); mpz_add(mp_e, mp_da, mp_cb); mpz_mod(mp_e, mp_e, mp_n); mpz_sub(mp_f, mp_da, mp_cb); while(mpz_sgn(mp_f) == -1) { mpz_add(mp_f, mp_f, mp_n); } mpz_mul(mp_e2, mp_e, mp_e); mpz_mod(mp_e2, mp_e2, mp_n); mpz_mul(mp_f2, mp_f, mp_f); mpz_mod(mp_f2, mp_f2, mp_n); mpz_mul(mp_g, mp_zd, mp_e2); mpz_mod(mp_g, mp_g, mp_n); mpz_mul(mp_h, mp_xd, mp_f2); mpz_mod(mp_h, mp_h, mp_n); int res = mpz_cmp(mp_g, mp_X) + mpz_cmp(mp_h, mp_Z); if(res == 0) { trues++; } else { falses++; } } printf("TRUE: %d\n", trues); printf("FALSE: %d\n", falses); } void pro_add_magma_test(int THRESHOLD) { FILE *fp = fopen("/home/ozbayelif/Development/FIWE/ecm/pro_add_test.magma", "a"); PRO_POINT p = 
(PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p1 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p2 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT pd = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); ui_t nl; int i; fprintf(fp, "clear;\n"); fprintf(fp, "/****************************************************************************/\n"); fprintf(fp, "ADDM:=function(X1, Z1, X2, Z2, Xd, Zd, n)\n"); fprintf(fp, "return (Zd * (((X1 - Z1) * (X2 + Z2)) + ((X1 + Z1) * (X2 - Z2)))^2) mod n, (Xd * (((X1 - Z1) * (X2 + Z2)) - ((X1 + Z1) * (X2 - Z2)))^2) mod n;\n"); fprintf(fp, "end function;\n"); fprintf(fp, "trues := 0;\n"); fprintf(fp, "falses := 0;\n"); for (i = 0; i < THRESHOLD; i++) { nl = (ui_t)(rand() % 100 + 1); ui_t n[nl], mu[nl + 1], X1[nl], X2[nl], Xd[nl], Z1[nl], Z2[nl], Zd[nl]; big_rand(n, nl); big_get_mu(mu, n, nl); big_mod_rand(X1, nl, n, nl, mu, nl + 1); big_mod_rand(X2, nl, n, nl, mu, nl + 1); big_mod_rand(Xd, nl, n, nl, mu, nl + 1); big_mod_rand(Z1, nl, n, nl, mu, nl + 1); big_mod_rand(Z2, nl, n, nl, mu, nl + 1); big_mod_rand(Zd, nl, n, nl, mu, nl + 1); p1->X = X1; p2->X = X2; pd->X = Xd; p1->Z = Z1; p2->Z = Z2; pd->Z = Zd; pro_add(p, p1, p2, pd, n, nl, mu, nl + 1); big_print(fp, n, nl, "n", NULL); big_print(fp, p1->X, nl, "X1", NULL); big_print(fp, p1->Z, nl, "Z1", NULL); big_print(fp, p2->X, nl, "X2", NULL); big_print(fp, p2->Z, nl, "Z2", NULL); big_print(fp, pd->X, nl, "Xd", NULL); big_print(fp, pd->Z, nl, "Zd", NULL); big_print(fp, p->X, nl, "pX", NULL); big_print(fp, p->Z, nl, "pZ", NULL); fprintf(fp, "qX, qZ := ADDM(X1, Z1, X2, Z2, Xd, Zd, n);\n"); fprintf(fp, "if qX eq pX then\n"); fprintf(fp, "trues := trues + 1;\n"); fprintf(fp, "else\n"); fprintf(fp, "falses := falses + 1;\n"); fprintf(fp, "end if;\n"); } fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_add_results.magma\", trues);\n"); fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_add_results.magma\", falses);\n"); fclose(fp); } void pro_dbl_magma_test(int THRESHOLD) { FILE *fp = fopen("/home/ozbayelif/Development/FIWE/ecm/pro_dbl_test.magma", "a"); PRO_POINT p = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p1 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); ui_t nl; int i; fprintf(fp, "clear;\n"); fprintf(fp, "/****************************************************************************/\n"); fprintf(fp, "DBLM:=function(X1, Z1, A24)\n"); fprintf(fp, "return ((X1+Z1)*(X1+Z1))*((X1-Z1)*(X1-Z1)),(((X1+Z1)*(X1+Z1))-((X1-Z1)*(X1-Z1)))*(((X1-Z1)*(X1-Z1))+A24*(((X1+Z1)*(X1+Z1)) - ((X1-Z1)*(X1-Z1))));\n"); fprintf(fp, "end function;\n"); fprintf(fp, "trues := 0;\n"); fprintf(fp, "falses := 0;\n"); for (i = 0; i < THRESHOLD; i++) { nl = (ui_t)(rand() % 100 + 1); ui_t n[nl], mu[nl + 1], X1[nl], Z1[nl], A24[nl]; big_rand(n, nl); big_get_mu(mu, n, nl); big_mod_rand(X1, nl, n, nl, mu, nl + 1); big_mod_rand(Z1, nl, n, nl, mu, nl + 1); big_mod_rand(A24, nl, n, nl, mu, nl + 1); p1->X = X1; p1->Z = Z1; pro_dbl(p, p1, A24, n, nl, mu, nl + 1); big_print(fp, n, nl, "n", NULL); fprintf(fp, "R:=Integers(n);\n"); big_print(fp, p1->X, nl, "X1", "R"); big_print(fp, p1->Z, nl, "Z1", "R"); big_print(fp, A24, nl, "A24", "R"); big_print(fp, p->X, nl, "pX", "R"); big_print(fp, p->Z, nl, "pZ", "R"); fprintf(fp, "qX, qZ := DBLM(X1, Z1, A24);\n"); fprintf(fp, "if qX eq pX then\n"); fprintf(fp, "trues := trues + 1;\n"); fprintf(fp, "else\n"); fprintf(fp, "falses := falses + 1;\n"); fprintf(fp, "end if;\n"); } fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_dbl_results.magma\", 
trues);\n"); fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_dbl_results.magma\", falses);\n"); fclose(fp); } void pro_ladder_gmp_test(int THRESHOLD) { int i, nl, kl, ll, *flag, trues = 0, falses = 0; ui n, mu, A24, d, k, l; nl = (ui_t)(rand() % 10 + 1), kl = 1, ll = 1; hipMallocManaged(&flag, SIZE * sizeof(int)); hipMallocManaged(&n, nl * sizeof(ui_t)); hipMallocManaged(&mu, (nl + 1) * sizeof(ui_t)); hipMallocManaged(&A24, nl * sizeof(ui_t)); hipMallocManaged(&d, SIZE * nl * sizeof(ui_t)); hipMallocManaged(&k, SIZE * kl * sizeof(ui_t)); hipMallocManaged(&l, SIZE * ll * sizeof(ui_t)); mpz_t mp_n, mp_p3X, mp_p3Z, mp_p5X, mp_p5Z, mp_Xlk, mp_Xkl; MONTG_CURVE c; hipMallocManaged(&c, sizeof(MONTG_CURVE_t)); hipMallocManaged(&c->A, sizeof(ui_t) * nl); hipMallocManaged(&c->B, sizeof(ui_t) * nl); PRO_POINT p1, p2, p3, p4, p5; hipMallocManaged(&p1, sizeof(PRO_POINT_t)); hipMallocManaged(&p1->X, sizeof(ui_t) * nl); hipMallocManaged(&p1->Y, sizeof(ui_t) * nl); hipMallocManaged(&p1->Z, sizeof(ui_t) * nl); hipMallocManaged(&p2, sizeof(PRO_POINT_t)); hipMallocManaged(&p2->X, sizeof(ui_t) * nl); hipMallocManaged(&p2->Y, sizeof(ui_t) * nl); hipMallocManaged(&p2->Z, sizeof(ui_t) * nl); hipMallocManaged(&p3, sizeof(PRO_POINT_t)); hipMallocManaged(&p3->X, sizeof(ui_t) * nl); hipMallocManaged(&p3->Y, sizeof(ui_t) * nl); hipMallocManaged(&p3->Z, sizeof(ui_t) * nl); hipMallocManaged(&p4, sizeof(PRO_POINT_t)); hipMallocManaged(&p4->X, sizeof(ui_t) * nl); hipMallocManaged(&p4->Y, sizeof(ui_t) * nl); hipMallocManaged(&p4->Z, sizeof(ui_t) * nl); hipMallocManaged(&p5, sizeof(PRO_POINT_t)); hipMallocManaged(&p5->X, sizeof(ui_t) * nl); hipMallocManaged(&p5->Y, sizeof(ui_t) * nl); hipMallocManaged(&p5->Z, sizeof(ui_t) * nl); mpz_init(mp_n); mpz_init(mp_p3X); mpz_init(mp_p3Z); mpz_init(mp_p5X); mpz_init(mp_p5Z); mpz_init(mp_Xlk); mpz_init(mp_Xkl); mpz_set_ui(mp_n, 0L); mpz_set_ui(mp_p3X, 0L); mpz_set_ui(mp_p3Z, 0L); mpz_set_ui(mp_p5X, 0L); mpz_set_ui(mp_p5Z, 0L); mpz_set_ui(mp_Xlk, 0L); mpz_set_ui(mp_Xkl, 0L); PRO_POINT R0, R0_, R1, R1_; hipMallocManaged(&R0, sizeof(PRO_POINT)); hipMallocManaged(&R0_, sizeof(PRO_POINT)); hipMallocManaged(&R1, sizeof(PRO_POINT)); hipMallocManaged(&R1_, sizeof(PRO_POINT)); hipMallocManaged(&R0->X, sizeof(ui_t) * nl); hipMallocManaged(&R0_->X, sizeof(ui_t) * nl); hipMallocManaged(&R1->X, sizeof(ui_t) * nl); hipMallocManaged(&R1_->X, sizeof(ui_t) * nl); hipMallocManaged(&R0->Z, sizeof(ui_t) * nl); hipMallocManaged(&R0_->Z, sizeof(ui_t) * nl); hipMallocManaged(&R1->Z, sizeof(ui_t) * nl); hipMallocManaged(&R1_->Z, sizeof(ui_t) * nl); for (i = 0; i < THRESHOLD; i++) { big_rand(n, nl); n[0]--; big_get_mu(mu, n, nl); big_rand(k, kl); big_rand(l, ll); pro_curve_point(d, c, p1, n, nl, mu, nl + 1, flag); if(flag[0] != 1){ i--; continue; } big_get_A24(A24, c->A, n, nl, mu, nl + 1, flag); if(flag[0] != 1) { i--; continue; }; //big_print(stdout, p->X, nl, "X", NULL); // proLadder<<<1, 1>>>(p2, p1, R0, R0_, R1, R1, A24, k, kl, n, nl, mu, nl + 1); // hipDeviceSynchronize(); // // //big_print(stdout, p2->X, nl, "X", NULL); // // proLadder<<<1, 1>>>(p3, p2, R0, R0_, R1, R1, A24, k, kl, n, nl, mu, nl + 1); // hipDeviceSynchronize(); // // //big_print(stdout, p3->X, nl, "X", NULL); // // proLadder<<<1, 1>>>(p4, p1, R0, R0_, R1, R1, A24, k, kl, n, nl, mu, nl + 1); // hipDeviceSynchronize(); // // //big_print(stdout, p4->X, nl, "X", NULL); // // proLadder<<<1, 1>>>(p5, p4, R0, R0_, R1, R1, A24, k, kl, n, nl, mu, nl + 1); // hipDeviceSynchronize(); //big_print(stdout, p5->X, nl, "X", NULL); 
mpz_import(mp_n, nl, -1, 4, 0, 0, n); mpz_import(mp_p3X, nl, -1, 4, 0, 0, p3->X); mpz_import(mp_p5X, nl, -1, 4, 0, 0, p5->X); mpz_import(mp_p3Z, nl, -1, 4, 0, 0, p3->Z); mpz_import(mp_p5Z, nl, -1, 4, 0, 0, p5->Z); mpz_mul(mp_Xlk, mp_p3X, mp_p5Z); // X1*Z2 mpz_mod(mp_Xlk, mp_Xlk, mp_n); // X1*Z2 mod n mpz_mul(mp_Xkl, mp_p5X, mp_p3Z); // X2*Z1 mpz_mod(mp_Xkl, mp_Xkl, mp_n); // X2*Z1 mod n if(mpz_cmp(mp_Xlk, mp_Xkl) == 0) { trues++; } else { falses++; } } printf("TRUE: %d\n", trues); printf("FALSE: %d\n", falses); } void pro_ladder_magma_test(int THRESHOLD) { FILE *fp = fopen("/home/ozbayelif/Development/FIWE/ecm/pro_ladder_test.magma", "a"); // FILE *fp = stdout; MONTG_CURVE c = (MONTG_CURVE)malloc(sizeof(MONTG_CURVE_t) * 1); PRO_POINT p = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p1 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p2 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p3 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p4 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p5 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); int i, nl, kl, ll, flag; nl = (ui_t)5, kl = 1, ll = 1; ui_t mu[nl + 1], A24[nl], d[nl], k[kl], l[ll]; ui_t n[] = {3411243619, 3283606458, 2946840869, 2642350139, 82690173}; // Prime big_get_mu(mu, n, nl); fprintf(fp, "clear;\n\n"); fprintf(fp, "load \"/home/ozbayelif/Development/FIWE/ecm/montgomery.m\";\n\n"); fprintf(fp, "trues:=0;\n\n"); fprintf(fp, "falses:=0;\n\n"); big_print(fp, n, nl, "n", NULL); fprintf(fp, "F:=GF(n);\n\n"); for (i = 0; i < THRESHOLD; i++) { big_rand(k, kl); big_rand(l, ll); big_print(fp, k, kl, "k", NULL); big_print(fp, l, ll, "l", NULL); //pro_curve_point(d, c, p1, n, nl, mu, nl + 1, &flag); if(flag != 1) { i--; continue; } big_print(fp, c->A, nl, "A", "F"); big_print(fp, c->B, nl, "B", "F"); fprintf(fp, "S<X,Y,Z>:=ProjectiveSpace(F,2);\n\n"); fprintf(fp, "C<X,Y,Z>:=Curve(S,[B*Y^2*Z-(X^3+A*X^2*Z+X*Z^2)]);\n\n"); fprintf(fp, "W,MtoW:=EllipticCurve(C,C![0,1,0]);\n\n"); fprintf(fp, "WtoM:=Inverse(MtoW);\n\n"); big_print(fp, p1->X, nl, "X1", "F"); big_print(fp, p1->Y, nl, "Y1", "F"); big_print(fp, p1->Z, nl, "Z1", "F"); fprintf(fp, "P1:=C![X1,Y1,Z1];\n\n"); big_get_A24(A24, c->A, n, nl, mu, nl + 1, &flag); if(flag != 1) { i--; continue; } big_print(fp, A24, nl, "A24", NULL); fprintf(fp, "assert A24 eq (A+2)/4;\n\n"); pro_ladder(p2, p1, A24, k, kl, n, nl, mu, nl + 1); // p2 = k*P big_print(fp, p2->X, nl, "Xk_", NULL); big_print(fp, p2->Z, nl, "Zk_", NULL); fprintf(fp, "Xk,Zk:=LADDM(X1,Z1,k,A24);\n\n"); fprintf(fp, "assert (Xk_ eq Xk and Zk_ eq Zk);\n\n"); pro_ladder(p3, p2, A24, l, ll, n, nl, mu, nl + 1); // p3 = l*(k*P) big_print(fp, p3->X, nl, "Xlk_", NULL); big_print(fp, p3->Z, nl, "Zlk_", NULL); fprintf(fp, "Xlk,Zlk:=LADDM(Xk,Zk,l,A24);\n\n"); fprintf(fp, "assert (Xlk_ eq Xlk and Zlk_ eq Zlk);\n\n"); pro_ladder(p4, p1, A24, l, ll, n, nl, mu, nl + 1); // p4 = l*P big_print(fp, p4->X, nl, "Xl_", NULL); big_print(fp, p4->Z, nl, "Zl_", NULL); fprintf(fp, "Xl,Zl:=LADDM(X1,Z1,l,A24);\n\n"); fprintf(fp, "assert (Xl_ eq Xl and Zl_ eq Zl);\n\n"); pro_ladder(p5, p4, A24, k, kl, n, nl, mu, nl + 1); // p5 = k*(l*P) big_print(fp, p5->X, nl, "Xkl_", NULL); big_print(fp, p5->Z, nl, "Zkl_", NULL); fprintf(fp, "Xkl,Zkl:=LADDM(Xl,Zl,k,A24);\n\n"); fprintf(fp, "assert (Xkl_ eq Xkl and Zkl_ eq Zkl);\n\n"); fprintf(fp, "Xlk_:=F!Xlk_/Zlk_;\n\n"); fprintf(fp, "Xkl_:=F!Xkl_/Zkl_;\n\n"); fprintf(fp, "if Xlk_ eq Xkl_ then\n"); fprintf(fp, "trues:=trues + 1;\n"); fprintf(fp, "else\n"); fprintf(fp, "falses:=falses + 1;\n"); 
fprintf(fp, "end if;\n"); } fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_ladder_results.magma\", trues);\n"); fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_ladder_results.magma\", falses);\n"); fclose(fp); } void ecm_gmp_test(int THRESHOLD) { int i, nl, res, trues = 0, falses = 0, success = 0, fail = 0, success_type[3] = {0}; mpz_t mp_n, mp_d, mp_mod; mpz_init(mp_n); mpz_init(mp_mod); mpz_init(mp_d); mpz_set_ui(mp_d, 0L); ui n, d, returnArr, mu, h_returnArr[0]; nl = 10; ui_t mul = nl + 1; ui_t h_n[nl], h_d[SIZE * nl], h_mu[mul], bl; // generate_B_smooth(h_n, nl); big_rand(h_n, nl); h_n[0]--; big_get_mu(h_mu, h_n, nl); bl = (nl >> 1) + 1; hipMalloc(&n, sizeof(ui_t) * nl); hipMalloc(&mu, sizeof(ui_t) * mul); hipMemcpy(n, h_n, sizeof(ui_t) * nl, hipMemcpyHostToDevice); hipMemcpy(mu, h_mu, sizeof(ui_t) * mul, hipMemcpyHostToDevice); for(i = 0; i < SIZE * nl; i++) h_d[i] = 0; hipMalloc(&d, sizeof(ui_t) * nl * SIZE); hipMemcpy(d, h_d, sizeof(ui_t) * nl, hipMemcpyHostToDevice); hipMalloc(&returnArr, sizeof(ui_t) * SIZE); size_t pValue; hipDeviceSetLimit(hipLimitStackSize, 10000 * sizeof(ui_t)); hipDeviceGetLimit(&pValue, hipLimitStackSize); hipDeviceSetLimit(hipLimitDevRuntimePendingLaunchCount, 10000 * sizeof(ui_t)); hipDeviceGetLimit(&pValue, hipLimitDevRuntimePendingLaunchCount); hipDeviceGetLimit(&pValue, hipLimitMallocHeapSize); hipDeviceSetLimit(hipLimitMallocHeapSize , pValue * 25); hipDeviceGetLimit(&pValue, hipLimitMallocHeapSize); hipLaunchKernelGGL(( fiwEcm), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, d, n, nl, mu, mul, returnArr); hipDeviceSynchronize(); hipMemcpy(h_returnArr, returnArr, sizeof(ui_t) * SIZE, hipMemcpyDeviceToHost); printf("%u\t", h_returnArr[0]); printf("%u\n", h_returnArr[1]); hipError_t error = hipGetLastError(); if(error != hipSuccess) fprintf(stderr, "ERROR: %s\n", hipGetErrorString(error)); big_print(stdout, h_n, nl, "n", NULL); hipMemcpy(h_d, d, sizeof(ui_t) * SIZE * nl, hipMemcpyDeviceToHost); i = 0; int j; int t_index = 0; // while(i < SIZE * nl){ printf("t_index: %d: ", t_index); t_index++; j = i; while(j < (i + nl)){ printf("%u ", h_d[j]); j++; } printf("\n"); i += nl; } // for(i = 0; i < THRESHOLD; i++) { // // ui_t n[nl], d[nl]; // // //big_print(stdout, n, nl, "n", NULL); // // int ret = ecm(d, n, nl); // if(ret != 0) { // success++; // success_type[ret - 1]++; // mpz_import(mp_n, nl, -1, 4, 0, 0, n); // mpz_import(mp_d, nl, -1, 4, 0, 0, d); // mpz_mod(mp_mod, mp_n, mp_d); // res = mpz_cmp_ui(mp_mod, 0L); // if(res == 0) { // big_print(stdout, d, nl, "d", NULL); // true++; // } else { // false++; // printf("False at index: %d\n", i); // } // } else { // printf("fail \n"); // fail++; // } // } // printf("TRUE: %d\n", trues); // printf("FALSE: %d\n", falses); // printf("SUCCESS: %d\n", success); // printf("FAIL: %d\n", fail); // printf("FOUND IN pro_curve_point: %d\n", success_type[0]); // printf("FOUND IN ladder: %d\n", success_type[1]); // printf("FOUND IN A24: %d\n", success_type[2]); }
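/*
 * Illustrative sketch (not part of the original FIWE/ecm sources): the tests above,
 * and their CUDA counterparts that follow, allocate, copy, and launch kernels but only
 * occasionally inspect hipGetLastError()/cudaGetLastError(). A small self-contained
 * checking macro such as the one below could wrap every runtime call. The macro name
 * CUDA_CHECK and the demo kernel are assumptions for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                      \
  do {                                                                        \
    cudaError_t err_ = (call);                                                \
    if (err_ != cudaSuccess) {                                                \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                             \
              cudaGetErrorString(err_), __FILE__, __LINE__);                  \
      exit(EXIT_FAILURE);                                                     \
    }                                                                         \
  } while (0)

__global__ void dummy(unsigned int *x) { x[threadIdx.x] += 1; }

int main(void) {
  unsigned int *d_x;
  CUDA_CHECK(cudaMalloc(&d_x, 32 * sizeof(unsigned int)));
  CUDA_CHECK(cudaMemset(d_x, 0, 32 * sizeof(unsigned int)));
  dummy<<<1, 32>>>(d_x);
  CUDA_CHECK(cudaGetLastError());        // catches launch-configuration errors
  CUDA_CHECK(cudaDeviceSynchronize());   // catches asynchronous kernel errors
  CUDA_CHECK(cudaFree(d_x));
  return 0;
}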
10a9c97e032b1f1c2ef30b0310a7dc3986babbb9.cu
/** * \file test.c * \brief Implementation of test.h library. */ #include <stdio.h> #include <stdlib.h> #include <gmp.h> #include "mplib.h" #include "montgomery.h" #include "ecm.h" #include "test.h" #include <cudaProfiler.h> void pro_curve_point_gmp_test(int THRESHOLD) { mpz_t mp_n, mp_a, mp_b, mp_x, mp_y, mp_z, mp_y2, mp_x2, mp_x3, mp_by2, mp_by2z, mp_ax2, mp_ax2z, mp_z2, mp_xz2, mp_d, mp_mod, mp_dif; ui_t nl; int i, j, res = 0, trues = 0, falses = 0, singulars = 0, *d_flag; int flag[SIZE]; mpz_init(mp_n); mpz_init(mp_a); mpz_init(mp_b); mpz_init(mp_x); mpz_init(mp_y); mpz_init(mp_z); mpz_init(mp_y2); mpz_init(mp_x2); mpz_init(mp_x3); mpz_init(mp_by2); mpz_init(mp_by2z); mpz_init(mp_ax2); mpz_init(mp_ax2z); mpz_init(mp_z2); mpz_init(mp_xz2); mpz_init(mp_d); mpz_init(mp_mod); mpz_init(mp_dif); mpz_set_ui(mp_mod, 0L); mpz_set_ui(mp_dif, 0L); srand(time(NULL)); nl = 10; ui d_n, d_mu, d_d, d_controlArr, d; ui_t n[nl], mu[nl + 1]; d = (ui)malloc(sizeof(ui_t) * SIZE * nl); cudaMalloc(&d_n, nl * sizeof(ui_t)); cudaMalloc(&d_mu, (nl + 1) * sizeof(ui_t)); cudaMalloc(&d_flag, SIZE * sizeof(ui_t)); cudaMalloc(&d_controlArr, SIZE * sizeof(ui_t)); cudaMalloc(&d_d, SIZE * nl * sizeof(ui_t)); big_rand(n, nl); n[0] -= 37; big_print(stdout, n, nl, "n", NULL); cudaMemcpy(d_n, n, sizeof(ui_t) * nl, cudaMemcpyHostToDevice); big_get_mu(mu, n, nl); cudaMemcpy(d_mu, mu, sizeof(ui_t) * (nl + 1), cudaMemcpyHostToDevice); size_t pValue; cudaError_t error; cudaDeviceSetLimit(cudaLimitStackSize, 10000 * sizeof(ui_t)); cudaDeviceGetLimit(&pValue, cudaLimitStackSize); cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, 10000 * sizeof(ui_t)); cudaDeviceGetLimit(&pValue, cudaLimitDevRuntimePendingLaunchCount); cudaDeviceGetLimit(&pValue, cudaLimitMallocHeapSize); cudaDeviceSetLimit(cudaLimitMallocHeapSize , pValue * 50); cudaDeviceGetLimit(&pValue, cudaLimitMallocHeapSize); //TODO: static or dynamic? 
MONTG_CURVE c; PRO_POINT p; cudaMallocManaged(&c, SIZE * sizeof(MONTG_CURVE_t)); cudaMallocManaged(&p, SIZE * sizeof(PRO_POINT_t)); for(i = 0; i < SIZE; i++){ cudaMallocManaged(&(c[i].A), nl * sizeof(MONTG_CURVE_t)); cudaMallocManaged(&(c[i].B), nl * sizeof(MONTG_CURVE_t)); cudaMallocManaged(&(p[i].X), nl * sizeof(MONTG_CURVE_t)); cudaMallocManaged(&(p[i].Y), nl * sizeof(MONTG_CURVE_t)); cudaMallocManaged(&(p[i].Z), nl * sizeof(MONTG_CURVE_t)); } proCurvePoint<<<BLOCKNUM, THREADNUM>>>(d_d, c, p, d_n, nl, d_mu, nl + 1, d_flag); cudaDeviceSynchronize(); error = cudaGetLastError(); if(error != cudaSuccess) fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(error)); cudaMemcpy(d, d_d, sizeof(ui_t) * SIZE * nl, cudaMemcpyDeviceToHost); cudaMemcpy(flag, d_flag, sizeof(ui_t) * SIZE, cudaMemcpyDeviceToHost); // int count = 0; // for(i = 0; i < SIZE; i++){ // //printf("%d: %d\n", i, flag[i]); // if(flag[i] == 1) // count++; // } // printf("count : %d\n", count); for(i = 0; i < SIZE; i++){ mpz_import(mp_n, nl, -1, 4, 0, 0, n); // n mod d = 0 check; if(flag[i] == 0) { mpz_import(mp_d, nl, -1, 4, 0, 0, d); mpz_mod(mp_mod, mp_n, mp_d); res = mpz_cmp_ui(mp_mod, 0); if(res == 0) { trues++; } else { falses++; printf("False at index: %d\n", i); } //(BY2Z - (X3 + AX2Z + XZ2)) mod n eq 0; } else if(flag[i] == 1){ mpz_import(mp_a, nl, -1, 4, 0, 0, c[i].A); mpz_import(mp_b, nl, -1, 4, 0, 0, c[i].B); mpz_import(mp_x, nl, -1, 4, 0, 0, p[i].X); mpz_import(mp_y, nl, -1, 4, 0, 0, p[i].Y); mpz_import(mp_z, nl, -1, 4, 0, 0, p[i].Z); mpz_mul(mp_y2, mp_y, mp_y); mpz_mul(mp_by2, mp_b, mp_y2); mpz_mul(mp_by2z, mp_by2, mp_z); mpz_mul(mp_x2, mp_x, mp_x); mpz_mul(mp_x3, mp_x2, mp_x); mpz_mul(mp_ax2, mp_a, mp_x2); mpz_mul(mp_ax2z, mp_ax2, mp_z); mpz_mul(mp_z2, mp_z, mp_z); mpz_mul(mp_xz2, mp_x, mp_z2); mpz_sub(mp_dif, mp_by2z, mp_x3); mpz_sub(mp_dif, mp_dif, mp_ax2z); mpz_sub(mp_dif, mp_dif, mp_xz2); mpz_mod(mp_dif, mp_dif, mp_n); res = mpz_cmp_ui(mp_dif, 0); if(res == 0) { trues++; } else{ falses++; printf("False at index: %d\n", i); } } else{ singulars++; printf("Singularity at index: %d\n", i); } } printf("TRUE: %d\n", trues); printf("FALSE: %d\n", falses); printf("SINGULAR: %d\n", singulars); i = 0; int t_index = 0; // while(i < SIZE * nl){ printf("t_index: %d: ", t_index); t_index++; j = i; while(j < (i + nl)){ printf("%u ", d[j]); j++; } printf("\n"); i += nl; } cudaDeviceReset(); } void aff_curve_point_gmp_test(int THRESHOLD) { MONTG_CURVE c = (MONTG_CURVE)malloc(sizeof(MONTG_CURVE_t) * 1); AFF_POINT p = (AFF_POINT)malloc(sizeof(AFF_POINT_t) * 1); mpz_t mp_n, mp_a, mp_b, mp_x, mp_y, mp_y2, mp_x2, mp_x3, mp_by2, mp_ax2, mp_d, mp_mod, mp_dif; ui_t nl; int i, j, res = 0, flag = 0, trues = 0, falses = 0, singulars = 0; mpz_init(mp_n); mpz_init(mp_a); mpz_init(mp_b); mpz_init(mp_x); mpz_init(mp_y); mpz_init(mp_y2); mpz_init(mp_x2); mpz_init(mp_x3); mpz_init(mp_by2); mpz_init(mp_ax2); mpz_init(mp_d); mpz_init(mp_mod); mpz_init(mp_dif); mpz_set_ui(mp_mod, 0L); mpz_set_ui(mp_dif, 0L); for (i = 0; i < THRESHOLD; i++) { nl = (ui_t)(rand() % 100 + 1); ui_t n[nl], mu[nl + 1], d[nl]; flag = 0; big_rand(n, nl); big_get_mu(mu, n, nl); for (j = 0; j < nl; j++) { d[j] = 0; } aff_curve_point(d, c, p, n, nl, mu, nl + 1, &flag); mpz_import(mp_n, nl, -1, 4, 0, 0, n); if(flag == 0) { mpz_import(mp_d, nl, -1, 4, 0, 0, d); mpz_mod(mp_mod, mp_n, mp_d); res = mpz_cmp_ui(mp_mod, 0); if(res == 0) { trues++; } else { falses++; printf("False at index: %d\n", i); } } else if(flag == 1){ mpz_import(mp_a, nl, -1, 4, 0, 0, c->A); mpz_import(mp_b, nl, 
-1, 4, 0, 0, c->B); mpz_import(mp_x, nl, -1, 4, 0, 0, p->x); mpz_import(mp_y, nl, -1, 4, 0, 0, p->y); mpz_mul(mp_y2, mp_y, mp_y); mpz_mul(mp_by2, mp_b, mp_y2); mpz_mul(mp_x2, mp_x, mp_x); mpz_mul(mp_x3, mp_x2, mp_x); mpz_mul(mp_ax2, mp_a, mp_x2); mpz_sub(mp_dif, mp_by2, mp_x3); mpz_sub(mp_dif, mp_dif, mp_ax2); mpz_sub(mp_dif, mp_dif, mp_x); mpz_mod(mp_dif, mp_dif, mp_n); res = mpz_cmp_ui(mp_dif, 0); if(res == 0) { trues++; } else { falses++; printf("False at index: %d\n", i); } } else { singulars++; printf("Singularity at index: %d\n", i); } } printf("TRUE: %d\n", trues); printf("FALSE: %d\n", falses); printf("SINGULAR: %d\n", singulars); } void pro_add_gmp_test(int THRESHOLD) { MONTG_CURVE c = (MONTG_CURVE)malloc(sizeof(MONTG_CURVE_t) * 1); PRO_POINT p = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p1 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p2 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT pd = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); mpz_t mp_n, mp_x1, mp_x2, mp_xd, mp_z1, mp_z2, mp_zd, mp_a, mp_b, mp_c, mp_d, mp_da, mp_cb, mp_e, mp_f, mp_e2, mp_f2, mp_g, mp_h, mp_X, mp_Z; ui_t nl; int i, trues = 0, falses = 0; mpz_init(mp_n); mpz_init(mp_x1); mpz_init(mp_x2); mpz_init(mp_xd); mpz_init(mp_z1); mpz_init(mp_z2); mpz_init(mp_zd); mpz_init(mp_a); mpz_init(mp_b); mpz_init(mp_c); mpz_init(mp_d); mpz_init(mp_da); mpz_init(mp_cb); mpz_init(mp_e); mpz_init(mp_f); mpz_init(mp_e2); mpz_init(mp_f2); mpz_init(mp_g); mpz_init(mp_h); mpz_init(mp_X); mpz_init(mp_Z); for (i = 0; i < THRESHOLD; i++) { nl = (ui_t)(rand() % 100 + 1); ui_t n[nl], mu[nl + 1], X1[nl], X2[nl], Xd[nl], Z1[nl], Z2[nl], Zd[nl]; mpz_set_ui(mp_X, 0L); mpz_set_ui(mp_Z, 0L); big_rand(n, nl); big_get_mu(mu, n, nl); big_mod_rand(X1, nl, n, nl, mu, nl + 1); big_mod_rand(X2, nl, n, nl, mu, nl + 1); big_mod_rand(Xd, nl, n, nl, mu, nl + 1); big_mod_rand(Z1, nl, n, nl, mu, nl + 1); big_mod_rand(Z2, nl, n, nl, mu, nl + 1); big_mod_rand(Zd, nl, n, nl, mu, nl + 1); mpz_import(mp_n, nl, -1, 4, 0, 0, n); mpz_import(mp_x1, nl, -1, 4, 0, 0, X1); mpz_import(mp_x2, nl, -1, 4, 0, 0, X2); mpz_import(mp_xd, nl, -1, 4, 0, 0, Xd); mpz_import(mp_z1, nl, -1, 4, 0, 0, Z1); mpz_import(mp_z2, nl, -1, 4, 0, 0, Z2); mpz_import(mp_zd, nl, -1, 4, 0, 0, Zd); p1->X = X1; p2->X = X2; pd->X = Xd; p1->Z = Z1; p2->Z = Z2; pd->Z = Zd; pro_add(p, p1, p2, pd, n, nl, mu, nl + 1); mpz_import(mp_X, nl, -1, 4, 0, 0, p->X); mpz_import(mp_Z, nl, -1, 4, 0, 0, p->Z); mpz_add(mp_a, mp_x2, mp_z2); mpz_mod(mp_a, mp_a, mp_n); mpz_sub(mp_b, mp_x2, mp_z2); while(mpz_sgn(mp_b) == -1) { mpz_add(mp_b, mp_b, mp_n); } mpz_add(mp_c, mp_x1, mp_z1); mpz_mod(mp_c, mp_c, mp_n); mpz_sub(mp_d, mp_x1, mp_z1); while(mpz_sgn(mp_d) == -1) { mpz_add(mp_d, mp_d, mp_n); } mpz_mul(mp_da, mp_d, mp_a); mpz_mod(mp_da, mp_da, mp_n); mpz_mul(mp_cb, mp_c, mp_b); mpz_mod(mp_cb, mp_cb, mp_n); mpz_add(mp_e, mp_da, mp_cb); mpz_mod(mp_e, mp_e, mp_n); mpz_sub(mp_f, mp_da, mp_cb); while(mpz_sgn(mp_f) == -1) { mpz_add(mp_f, mp_f, mp_n); } mpz_mul(mp_e2, mp_e, mp_e); mpz_mod(mp_e2, mp_e2, mp_n); mpz_mul(mp_f2, mp_f, mp_f); mpz_mod(mp_f2, mp_f2, mp_n); mpz_mul(mp_g, mp_zd, mp_e2); mpz_mod(mp_g, mp_g, mp_n); mpz_mul(mp_h, mp_xd, mp_f2); mpz_mod(mp_h, mp_h, mp_n); int res = mpz_cmp(mp_g, mp_X) + mpz_cmp(mp_h, mp_Z); if(res == 0) { trues++; } else { falses++; } } printf("TRUE: %d\n", trues); printf("FALSE: %d\n", falses); } void pro_add_magma_test(int THRESHOLD) { FILE *fp = fopen("/home/ozbayelif/Development/FIWE/ecm/pro_add_test.magma", "a"); PRO_POINT p = 
(PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p1 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p2 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT pd = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); ui_t nl; int i; fprintf(fp, "clear;\n"); fprintf(fp, "/****************************************************************************/\n"); fprintf(fp, "ADDM:=function(X1, Z1, X2, Z2, Xd, Zd, n)\n"); fprintf(fp, "return (Zd * (((X1 - Z1) * (X2 + Z2)) + ((X1 + Z1) * (X2 - Z2)))^2) mod n, (Xd * (((X1 - Z1) * (X2 + Z2)) - ((X1 + Z1) * (X2 - Z2)))^2) mod n;\n"); fprintf(fp, "end function;\n"); fprintf(fp, "trues := 0;\n"); fprintf(fp, "falses := 0;\n"); for (i = 0; i < THRESHOLD; i++) { nl = (ui_t)(rand() % 100 + 1); ui_t n[nl], mu[nl + 1], X1[nl], X2[nl], Xd[nl], Z1[nl], Z2[nl], Zd[nl]; big_rand(n, nl); big_get_mu(mu, n, nl); big_mod_rand(X1, nl, n, nl, mu, nl + 1); big_mod_rand(X2, nl, n, nl, mu, nl + 1); big_mod_rand(Xd, nl, n, nl, mu, nl + 1); big_mod_rand(Z1, nl, n, nl, mu, nl + 1); big_mod_rand(Z2, nl, n, nl, mu, nl + 1); big_mod_rand(Zd, nl, n, nl, mu, nl + 1); p1->X = X1; p2->X = X2; pd->X = Xd; p1->Z = Z1; p2->Z = Z2; pd->Z = Zd; pro_add(p, p1, p2, pd, n, nl, mu, nl + 1); big_print(fp, n, nl, "n", NULL); big_print(fp, p1->X, nl, "X1", NULL); big_print(fp, p1->Z, nl, "Z1", NULL); big_print(fp, p2->X, nl, "X2", NULL); big_print(fp, p2->Z, nl, "Z2", NULL); big_print(fp, pd->X, nl, "Xd", NULL); big_print(fp, pd->Z, nl, "Zd", NULL); big_print(fp, p->X, nl, "pX", NULL); big_print(fp, p->Z, nl, "pZ", NULL); fprintf(fp, "qX, qZ := ADDM(X1, Z1, X2, Z2, Xd, Zd, n);\n"); fprintf(fp, "if qX eq pX then\n"); fprintf(fp, "trues := trues + 1;\n"); fprintf(fp, "else\n"); fprintf(fp, "falses := falses + 1;\n"); fprintf(fp, "end if;\n"); } fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_add_results.magma\", trues);\n"); fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_add_results.magma\", falses);\n"); fclose(fp); } void pro_dbl_magma_test(int THRESHOLD) { FILE *fp = fopen("/home/ozbayelif/Development/FIWE/ecm/pro_dbl_test.magma", "a"); PRO_POINT p = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p1 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); ui_t nl; int i; fprintf(fp, "clear;\n"); fprintf(fp, "/****************************************************************************/\n"); fprintf(fp, "DBLM:=function(X1, Z1, A24)\n"); fprintf(fp, "return ((X1+Z1)*(X1+Z1))*((X1-Z1)*(X1-Z1)),(((X1+Z1)*(X1+Z1))-((X1-Z1)*(X1-Z1)))*(((X1-Z1)*(X1-Z1))+A24*(((X1+Z1)*(X1+Z1)) - ((X1-Z1)*(X1-Z1))));\n"); fprintf(fp, "end function;\n"); fprintf(fp, "trues := 0;\n"); fprintf(fp, "falses := 0;\n"); for (i = 0; i < THRESHOLD; i++) { nl = (ui_t)(rand() % 100 + 1); ui_t n[nl], mu[nl + 1], X1[nl], Z1[nl], A24[nl]; big_rand(n, nl); big_get_mu(mu, n, nl); big_mod_rand(X1, nl, n, nl, mu, nl + 1); big_mod_rand(Z1, nl, n, nl, mu, nl + 1); big_mod_rand(A24, nl, n, nl, mu, nl + 1); p1->X = X1; p1->Z = Z1; pro_dbl(p, p1, A24, n, nl, mu, nl + 1); big_print(fp, n, nl, "n", NULL); fprintf(fp, "R:=Integers(n);\n"); big_print(fp, p1->X, nl, "X1", "R"); big_print(fp, p1->Z, nl, "Z1", "R"); big_print(fp, A24, nl, "A24", "R"); big_print(fp, p->X, nl, "pX", "R"); big_print(fp, p->Z, nl, "pZ", "R"); fprintf(fp, "qX, qZ := DBLM(X1, Z1, A24);\n"); fprintf(fp, "if qX eq pX then\n"); fprintf(fp, "trues := trues + 1;\n"); fprintf(fp, "else\n"); fprintf(fp, "falses := falses + 1;\n"); fprintf(fp, "end if;\n"); } fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_dbl_results.magma\", 
trues);\n"); fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_dbl_results.magma\", falses);\n"); fclose(fp); } void pro_ladder_gmp_test(int THRESHOLD) { int i, nl, kl, ll, *flag, trues = 0, falses = 0; ui n, mu, A24, d, k, l; nl = (ui_t)(rand() % 10 + 1), kl = 1, ll = 1; cudaMallocManaged(&flag, SIZE * sizeof(int)); cudaMallocManaged(&n, nl * sizeof(ui_t)); cudaMallocManaged(&mu, (nl + 1) * sizeof(ui_t)); cudaMallocManaged(&A24, nl * sizeof(ui_t)); cudaMallocManaged(&d, SIZE * nl * sizeof(ui_t)); cudaMallocManaged(&k, SIZE * kl * sizeof(ui_t)); cudaMallocManaged(&l, SIZE * ll * sizeof(ui_t)); mpz_t mp_n, mp_p3X, mp_p3Z, mp_p5X, mp_p5Z, mp_Xlk, mp_Xkl; MONTG_CURVE c; cudaMallocManaged(&c, sizeof(MONTG_CURVE_t)); cudaMallocManaged(&c->A, sizeof(ui_t) * nl); cudaMallocManaged(&c->B, sizeof(ui_t) * nl); PRO_POINT p1, p2, p3, p4, p5; cudaMallocManaged(&p1, sizeof(PRO_POINT_t)); cudaMallocManaged(&p1->X, sizeof(ui_t) * nl); cudaMallocManaged(&p1->Y, sizeof(ui_t) * nl); cudaMallocManaged(&p1->Z, sizeof(ui_t) * nl); cudaMallocManaged(&p2, sizeof(PRO_POINT_t)); cudaMallocManaged(&p2->X, sizeof(ui_t) * nl); cudaMallocManaged(&p2->Y, sizeof(ui_t) * nl); cudaMallocManaged(&p2->Z, sizeof(ui_t) * nl); cudaMallocManaged(&p3, sizeof(PRO_POINT_t)); cudaMallocManaged(&p3->X, sizeof(ui_t) * nl); cudaMallocManaged(&p3->Y, sizeof(ui_t) * nl); cudaMallocManaged(&p3->Z, sizeof(ui_t) * nl); cudaMallocManaged(&p4, sizeof(PRO_POINT_t)); cudaMallocManaged(&p4->X, sizeof(ui_t) * nl); cudaMallocManaged(&p4->Y, sizeof(ui_t) * nl); cudaMallocManaged(&p4->Z, sizeof(ui_t) * nl); cudaMallocManaged(&p5, sizeof(PRO_POINT_t)); cudaMallocManaged(&p5->X, sizeof(ui_t) * nl); cudaMallocManaged(&p5->Y, sizeof(ui_t) * nl); cudaMallocManaged(&p5->Z, sizeof(ui_t) * nl); mpz_init(mp_n); mpz_init(mp_p3X); mpz_init(mp_p3Z); mpz_init(mp_p5X); mpz_init(mp_p5Z); mpz_init(mp_Xlk); mpz_init(mp_Xkl); mpz_set_ui(mp_n, 0L); mpz_set_ui(mp_p3X, 0L); mpz_set_ui(mp_p3Z, 0L); mpz_set_ui(mp_p5X, 0L); mpz_set_ui(mp_p5Z, 0L); mpz_set_ui(mp_Xlk, 0L); mpz_set_ui(mp_Xkl, 0L); PRO_POINT R0, R0_, R1, R1_; cudaMallocManaged(&R0, sizeof(PRO_POINT)); cudaMallocManaged(&R0_, sizeof(PRO_POINT)); cudaMallocManaged(&R1, sizeof(PRO_POINT)); cudaMallocManaged(&R1_, sizeof(PRO_POINT)); cudaMallocManaged(&R0->X, sizeof(ui_t) * nl); cudaMallocManaged(&R0_->X, sizeof(ui_t) * nl); cudaMallocManaged(&R1->X, sizeof(ui_t) * nl); cudaMallocManaged(&R1_->X, sizeof(ui_t) * nl); cudaMallocManaged(&R0->Z, sizeof(ui_t) * nl); cudaMallocManaged(&R0_->Z, sizeof(ui_t) * nl); cudaMallocManaged(&R1->Z, sizeof(ui_t) * nl); cudaMallocManaged(&R1_->Z, sizeof(ui_t) * nl); for (i = 0; i < THRESHOLD; i++) { big_rand(n, nl); n[0]--; big_get_mu(mu, n, nl); big_rand(k, kl); big_rand(l, ll); pro_curve_point(d, c, p1, n, nl, mu, nl + 1, flag); if(flag[0] != 1){ i--; continue; } big_get_A24(A24, c->A, n, nl, mu, nl + 1, flag); if(flag[0] != 1) { i--; continue; }; //big_print(stdout, p->X, nl, "X", NULL); // proLadder<<<1, 1>>>(p2, p1, R0, R0_, R1, R1, A24, k, kl, n, nl, mu, nl + 1); // cudaDeviceSynchronize(); // // //big_print(stdout, p2->X, nl, "X", NULL); // // proLadder<<<1, 1>>>(p3, p2, R0, R0_, R1, R1, A24, k, kl, n, nl, mu, nl + 1); // cudaDeviceSynchronize(); // // //big_print(stdout, p3->X, nl, "X", NULL); // // proLadder<<<1, 1>>>(p4, p1, R0, R0_, R1, R1, A24, k, kl, n, nl, mu, nl + 1); // cudaDeviceSynchronize(); // // //big_print(stdout, p4->X, nl, "X", NULL); // // proLadder<<<1, 1>>>(p5, p4, R0, R0_, R1, R1, A24, k, kl, n, nl, mu, nl + 1); // cudaDeviceSynchronize(); 
//big_print(stdout, p5->X, nl, "X", NULL); mpz_import(mp_n, nl, -1, 4, 0, 0, n); mpz_import(mp_p3X, nl, -1, 4, 0, 0, p3->X); mpz_import(mp_p5X, nl, -1, 4, 0, 0, p5->X); mpz_import(mp_p3Z, nl, -1, 4, 0, 0, p3->Z); mpz_import(mp_p5Z, nl, -1, 4, 0, 0, p5->Z); mpz_mul(mp_Xlk, mp_p3X, mp_p5Z); // X1*Z2 mpz_mod(mp_Xlk, mp_Xlk, mp_n); // X1*Z2 mod n mpz_mul(mp_Xkl, mp_p5X, mp_p3Z); // X2*Z1 mpz_mod(mp_Xkl, mp_Xkl, mp_n); // X2*Z1 mod n if(mpz_cmp(mp_Xlk, mp_Xkl) == 0) { trues++; } else { falses++; } } printf("TRUE: %d\n", trues); printf("FALSE: %d\n", falses); } void pro_ladder_magma_test(int THRESHOLD) { FILE *fp = fopen("/home/ozbayelif/Development/FIWE/ecm/pro_ladder_test.magma", "a"); // FILE *fp = stdout; MONTG_CURVE c = (MONTG_CURVE)malloc(sizeof(MONTG_CURVE_t) * 1); PRO_POINT p = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p1 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p2 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p3 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p4 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); PRO_POINT p5 = (PRO_POINT)malloc(sizeof(PRO_POINT_t) * 1); int i, nl, kl, ll, flag; nl = (ui_t)5, kl = 1, ll = 1; ui_t mu[nl + 1], A24[nl], d[nl], k[kl], l[ll]; ui_t n[] = {3411243619, 3283606458, 2946840869, 2642350139, 82690173}; // Prime big_get_mu(mu, n, nl); fprintf(fp, "clear;\n\n"); fprintf(fp, "load \"/home/ozbayelif/Development/FIWE/ecm/montgomery.m\";\n\n"); fprintf(fp, "trues:=0;\n\n"); fprintf(fp, "falses:=0;\n\n"); big_print(fp, n, nl, "n", NULL); fprintf(fp, "F:=GF(n);\n\n"); for (i = 0; i < THRESHOLD; i++) { big_rand(k, kl); big_rand(l, ll); big_print(fp, k, kl, "k", NULL); big_print(fp, l, ll, "l", NULL); //pro_curve_point(d, c, p1, n, nl, mu, nl + 1, &flag); if(flag != 1) { i--; continue; } big_print(fp, c->A, nl, "A", "F"); big_print(fp, c->B, nl, "B", "F"); fprintf(fp, "S<X,Y,Z>:=ProjectiveSpace(F,2);\n\n"); fprintf(fp, "C<X,Y,Z>:=Curve(S,[B*Y^2*Z-(X^3+A*X^2*Z+X*Z^2)]);\n\n"); fprintf(fp, "W,MtoW:=EllipticCurve(C,C![0,1,0]);\n\n"); fprintf(fp, "WtoM:=Inverse(MtoW);\n\n"); big_print(fp, p1->X, nl, "X1", "F"); big_print(fp, p1->Y, nl, "Y1", "F"); big_print(fp, p1->Z, nl, "Z1", "F"); fprintf(fp, "P1:=C![X1,Y1,Z1];\n\n"); big_get_A24(A24, c->A, n, nl, mu, nl + 1, &flag); if(flag != 1) { i--; continue; } big_print(fp, A24, nl, "A24", NULL); fprintf(fp, "assert A24 eq (A+2)/4;\n\n"); pro_ladder(p2, p1, A24, k, kl, n, nl, mu, nl + 1); // p2 = k*P big_print(fp, p2->X, nl, "Xk_", NULL); big_print(fp, p2->Z, nl, "Zk_", NULL); fprintf(fp, "Xk,Zk:=LADDM(X1,Z1,k,A24);\n\n"); fprintf(fp, "assert (Xk_ eq Xk and Zk_ eq Zk);\n\n"); pro_ladder(p3, p2, A24, l, ll, n, nl, mu, nl + 1); // p3 = l*(k*P) big_print(fp, p3->X, nl, "Xlk_", NULL); big_print(fp, p3->Z, nl, "Zlk_", NULL); fprintf(fp, "Xlk,Zlk:=LADDM(Xk,Zk,l,A24);\n\n"); fprintf(fp, "assert (Xlk_ eq Xlk and Zlk_ eq Zlk);\n\n"); pro_ladder(p4, p1, A24, l, ll, n, nl, mu, nl + 1); // p4 = l*P big_print(fp, p4->X, nl, "Xl_", NULL); big_print(fp, p4->Z, nl, "Zl_", NULL); fprintf(fp, "Xl,Zl:=LADDM(X1,Z1,l,A24);\n\n"); fprintf(fp, "assert (Xl_ eq Xl and Zl_ eq Zl);\n\n"); pro_ladder(p5, p4, A24, k, kl, n, nl, mu, nl + 1); // p5 = k*(l*P) big_print(fp, p5->X, nl, "Xkl_", NULL); big_print(fp, p5->Z, nl, "Zkl_", NULL); fprintf(fp, "Xkl,Zkl:=LADDM(Xl,Zl,k,A24);\n\n"); fprintf(fp, "assert (Xkl_ eq Xkl and Zkl_ eq Zkl);\n\n"); fprintf(fp, "Xlk_:=F!Xlk_/Zlk_;\n\n"); fprintf(fp, "Xkl_:=F!Xkl_/Zkl_;\n\n"); fprintf(fp, "if Xlk_ eq Xkl_ then\n"); fprintf(fp, "trues:=trues + 1;\n"); fprintf(fp, 
"else\n"); fprintf(fp, "falses:=falses + 1;\n"); fprintf(fp, "end if;\n"); } fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_ladder_results.magma\", trues);\n"); fprintf(fp, "Write(\"/home/ozbayelif/Development/FIWE/ecm/pro_ladder_results.magma\", falses);\n"); fclose(fp); } void ecm_gmp_test(int THRESHOLD) { int i, nl, res, trues = 0, falses = 0, success = 0, fail = 0, success_type[3] = {0}; mpz_t mp_n, mp_d, mp_mod; mpz_init(mp_n); mpz_init(mp_mod); mpz_init(mp_d); mpz_set_ui(mp_d, 0L); ui n, d, returnArr, mu, h_returnArr[0]; nl = 10; ui_t mul = nl + 1; ui_t h_n[nl], h_d[SIZE * nl], h_mu[mul], bl; // generate_B_smooth(h_n, nl); big_rand(h_n, nl); h_n[0]--; big_get_mu(h_mu, h_n, nl); bl = (nl >> 1) + 1; cudaMalloc(&n, sizeof(ui_t) * nl); cudaMalloc(&mu, sizeof(ui_t) * mul); cudaMemcpy(n, h_n, sizeof(ui_t) * nl, cudaMemcpyHostToDevice); cudaMemcpy(mu, h_mu, sizeof(ui_t) * mul, cudaMemcpyHostToDevice); for(i = 0; i < SIZE * nl; i++) h_d[i] = 0; cudaMalloc(&d, sizeof(ui_t) * nl * SIZE); cudaMemcpy(d, h_d, sizeof(ui_t) * nl, cudaMemcpyHostToDevice); cudaMalloc(&returnArr, sizeof(ui_t) * SIZE); size_t pValue; cudaDeviceSetLimit(cudaLimitStackSize, 10000 * sizeof(ui_t)); cudaDeviceGetLimit(&pValue, cudaLimitStackSize); cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, 10000 * sizeof(ui_t)); cudaDeviceGetLimit(&pValue, cudaLimitDevRuntimePendingLaunchCount); cudaDeviceGetLimit(&pValue, cudaLimitMallocHeapSize); cudaDeviceSetLimit(cudaLimitMallocHeapSize , pValue * 25); cudaDeviceGetLimit(&pValue, cudaLimitMallocHeapSize); fiwEcm<<<BLOCKNUM, THREADNUM>>>(d, n, nl, mu, mul, returnArr); cudaDeviceSynchronize(); cudaMemcpy(h_returnArr, returnArr, sizeof(ui_t) * SIZE, cudaMemcpyDeviceToHost); printf("%u\t", h_returnArr[0]); printf("%u\n", h_returnArr[1]); cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(error)); big_print(stdout, h_n, nl, "n", NULL); cudaMemcpy(h_d, d, sizeof(ui_t) * SIZE * nl, cudaMemcpyDeviceToHost); i = 0; int j; int t_index = 0; // while(i < SIZE * nl){ printf("t_index: %d: ", t_index); t_index++; j = i; while(j < (i + nl)){ printf("%u ", h_d[j]); j++; } printf("\n"); i += nl; } // for(i = 0; i < THRESHOLD; i++) { // // ui_t n[nl], d[nl]; // // //big_print(stdout, n, nl, "n", NULL); // // int ret = ecm(d, n, nl); // if(ret != 0) { // success++; // success_type[ret - 1]++; // mpz_import(mp_n, nl, -1, 4, 0, 0, n); // mpz_import(mp_d, nl, -1, 4, 0, 0, d); // mpz_mod(mp_mod, mp_n, mp_d); // res = mpz_cmp_ui(mp_mod, 0L); // if(res == 0) { // big_print(stdout, d, nl, "d", NULL); // true++; // } else { // false++; // printf("False at index: %d\n", i); // } // } else { // printf("fail \n"); // fail++; // } // } // printf("TRUE: %d\n", trues); // printf("FALSE: %d\n", falses); // printf("SUCCESS: %d\n", success); // printf("FAIL: %d\n", fail); // printf("FOUND IN pro_curve_point: %d\n", success_type[0]); // printf("FOUND IN ladder: %d\n", success_type[1]); // printf("FOUND IN A24: %d\n", success_type[2]); }
9538e313db0d0395e04cecb657c0aa3e686f3bc9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "DS_timer.h"
#include <stdio.h>
#include <iostream>

#define LOOP_I(x) for(int i = 0 ; i<x; i++)
#define NUM_BLOCK 128*1024
#define NUM_T_IN_B 1024
#define ARRAY_SIZE NUM_T_IN_B*NUM_BLOCK
#define NUM_STREAMS 4

__global__ void myKernel(int *_in, int *_out)
{
  int tID = blockDim.x * blockIdx.x + threadIdx.x;
  int temp = 0;
  for (int i = 0; i < 250; i++) {
    temp = (temp + _in[tID] * 5) % 10;
  }
  _out[tID] = temp;
}

int main()
{
  DS_timer timer(5);
  timer.setTimerName(0, "Single Stream");
  timer.setTimerName(1, "*host -> device");
  timer.setTimerName(2, "*kernel execution");
  timer.setTimerName(3, "*device -> host");
  timer.setTimerName(4, "Multiple Stream");

  int *in = NULL, *out = NULL, *out2 = NULL, *dIn = NULL, *dOut = NULL;

  // pinned host buffers so the async copies below can overlap with kernels
  hipHostMalloc(&in, sizeof(int)*ARRAY_SIZE);   memset(in, 0, sizeof(int)*ARRAY_SIZE);
  hipHostMalloc(&out, sizeof(int)*ARRAY_SIZE);  memset(out, 0, sizeof(int)*ARRAY_SIZE);
  hipHostMalloc(&out2, sizeof(int)*ARRAY_SIZE); memset(out2, 0, sizeof(int)*ARRAY_SIZE);

  hipMalloc(&dIn, sizeof(int)*ARRAY_SIZE);
  hipMalloc(&dOut, sizeof(int)*ARRAY_SIZE);

  LOOP_I(ARRAY_SIZE) in[i] = rand() % 10;

  // single stream: copy in, run kernel, copy out, each phase timed separately
  timer.onTimer(0);

  timer.onTimer(1);
  hipMemcpy(dIn, in, sizeof(int)*ARRAY_SIZE, hipMemcpyHostToDevice);
  timer.offTimer(1);

  timer.onTimer(2);
  myKernel<<<NUM_BLOCK, NUM_T_IN_B>>>(dIn, dOut);
  hipDeviceSynchronize();
  timer.offTimer(2);

  timer.onTimer(3);
  hipMemcpy(out, dOut, sizeof(int)*ARRAY_SIZE, hipMemcpyDeviceToHost);
  timer.offTimer(3);

  timer.offTimer(0);

  // multiple streams: split the array into chunks, one copy/kernel/copy sequence per stream
  hipStream_t stream[NUM_STREAMS];
  LOOP_I(NUM_STREAMS) hipStreamCreate(&stream[i]);

  int chunkSize = ARRAY_SIZE / NUM_STREAMS;

  timer.onTimer(4);
  LOOP_I(NUM_STREAMS) {
    int offset = chunkSize * i;

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);

    hipMemcpyAsync(dIn + offset, in + offset, sizeof(int)*chunkSize, hipMemcpyHostToDevice, stream[i]);
    myKernel<<<NUM_BLOCK / NUM_STREAMS, NUM_T_IN_B, 0, stream[i]>>>(dIn + offset, dOut + offset);
    hipMemcpyAsync(out2 + offset, dOut + offset, sizeof(int)*chunkSize, hipMemcpyDeviceToHost, stream[i]);

    hipEventRecord(stop);
    hipEventSynchronize(stop);

    float time;
    hipEventElapsedTime(&time, start, stop);
    printf("Stream[%d] : %lf ms\n", i, time);

    hipEventDestroy(start);
    hipEventDestroy(stop);
  }
  hipDeviceSynchronize();
  timer.offTimer(4);

  LOOP_I(NUM_STREAMS) hipStreamDestroy(stream[i]);

  hipFree(dIn); hipFree(dOut);
  hipHostFree(in); hipHostFree(out); hipHostFree(out2);

  timer.printTimer();
  system("pause");
  return 0;
}
9538e313db0d0395e04cecb657c0aa3e686f3bc9.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "DS_timer.h"
#include <stdio.h>
#include <iostream>

#define LOOP_I(x) for(int i = 0 ; i<x; i++)
#define NUM_BLOCK 128*1024
#define NUM_T_IN_B 1024
#define ARRAY_SIZE NUM_T_IN_B*NUM_BLOCK
#define NUM_STREAMS 4

__global__ void myKernel(int *_in, int *_out)
{
  int tID = blockDim.x * blockIdx.x + threadIdx.x;
  int temp = 0;
  for (int i = 0; i < 250; i++) {
    temp = (temp + _in[tID] * 5) % 10;
  }
  _out[tID] = temp;
}

int main()
{
  DS_timer timer(5);
  timer.setTimerName(0, "Single Stream");
  timer.setTimerName(1, "*host -> device");
  timer.setTimerName(2, "*kernel execution");
  timer.setTimerName(3, "*device -> host");
  timer.setTimerName(4, "Multiple Stream");

  int *in = NULL, *out = NULL, *out2 = NULL, *dIn = NULL, *dOut = NULL;

  // pinned host buffers so the async copies below can overlap with kernels
  cudaMallocHost(&in, sizeof(int)*ARRAY_SIZE);   memset(in, 0, sizeof(int)*ARRAY_SIZE);
  cudaMallocHost(&out, sizeof(int)*ARRAY_SIZE);  memset(out, 0, sizeof(int)*ARRAY_SIZE);
  cudaMallocHost(&out2, sizeof(int)*ARRAY_SIZE); memset(out2, 0, sizeof(int)*ARRAY_SIZE);

  cudaMalloc(&dIn, sizeof(int)*ARRAY_SIZE);
  cudaMalloc(&dOut, sizeof(int)*ARRAY_SIZE);

  LOOP_I(ARRAY_SIZE) in[i] = rand() % 10;

  // single stream: copy in, run kernel, copy out, each phase timed separately
  timer.onTimer(0);

  timer.onTimer(1);
  cudaMemcpy(dIn, in, sizeof(int)*ARRAY_SIZE, cudaMemcpyHostToDevice);
  timer.offTimer(1);

  timer.onTimer(2);
  myKernel<<<NUM_BLOCK, NUM_T_IN_B>>>(dIn, dOut);
  cudaDeviceSynchronize();
  timer.offTimer(2);

  timer.onTimer(3);
  cudaMemcpy(out, dOut, sizeof(int)*ARRAY_SIZE, cudaMemcpyDeviceToHost);
  timer.offTimer(3);

  timer.offTimer(0);

  // multiple streams: split the array into chunks, one copy/kernel/copy sequence per stream
  cudaStream_t stream[NUM_STREAMS];
  LOOP_I(NUM_STREAMS) cudaStreamCreate(&stream[i]);

  int chunkSize = ARRAY_SIZE / NUM_STREAMS;

  timer.onTimer(4);
  LOOP_I(NUM_STREAMS) {
    int offset = chunkSize * i;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    cudaMemcpyAsync(dIn + offset, in + offset, sizeof(int)*chunkSize, cudaMemcpyHostToDevice, stream[i]);
    myKernel<<<NUM_BLOCK / NUM_STREAMS, NUM_T_IN_B, 0, stream[i]>>>(dIn + offset, dOut + offset);
    cudaMemcpyAsync(out2 + offset, dOut + offset, sizeof(int)*chunkSize, cudaMemcpyDeviceToHost, stream[i]);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float time;
    cudaEventElapsedTime(&time, start, stop);
    printf("Stream[%d] : %lf ms\n", i, time);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
  }
  cudaDeviceSynchronize();
  timer.offTimer(4);

  LOOP_I(NUM_STREAMS) cudaStreamDestroy(stream[i]);

  cudaFree(dIn); cudaFree(dOut);
  cudaFreeHost(in); cudaFreeHost(out); cudaFreeHost(out2);

  timer.printTimer();
  system("pause");
  return 0;
}
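/*
 * Illustrative sketch (not part of the original pair): in the multi-stream loop above,
 * cudaEventRecord(start)/cudaEventRecord(stop) are issued without a stream argument, so the
 * events land in the legacy default stream, and synchronizing on stop inside the loop keeps
 * the per-stream chunks from overlapping. A per-stream timing pattern that records the events
 * in stream[i] and only synchronizes after all streams have been issued is sketched below;
 * the kernel, sizes, and names are placeholders.
 */
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void scale(float *d, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) d[i] *= 2.0f;
}

int main(void) {
  const int nStreams = 4, chunk = 1 << 20;
  float *h, *d;
  cudaMallocHost(&h, nStreams * chunk * sizeof(float));   // pinned host buffer
  cudaMalloc(&d, nStreams * chunk * sizeof(float));

  cudaStream_t s[nStreams];
  cudaEvent_t start[nStreams], stop[nStreams];
  for (int i = 0; i < nStreams; i++) {
    cudaStreamCreate(&s[i]);
    cudaEventCreate(&start[i]);
    cudaEventCreate(&stop[i]);
  }

  // issue all copy/kernel/copy sequences first so the streams can overlap
  for (int i = 0; i < nStreams; i++) {
    int off = i * chunk;
    cudaEventRecord(start[i], s[i]);                      // event enqueued in this stream
    cudaMemcpyAsync(d + off, h + off, chunk * sizeof(float), cudaMemcpyHostToDevice, s[i]);
    scale<<<(chunk + 255) / 256, 256, 0, s[i]>>>(d + off, chunk);
    cudaMemcpyAsync(h + off, d + off, chunk * sizeof(float), cudaMemcpyDeviceToHost, s[i]);
    cudaEventRecord(stop[i], s[i]);
  }

  // then wait and report per-stream times
  for (int i = 0; i < nStreams; i++) {
    float ms;
    cudaEventSynchronize(stop[i]);
    cudaEventElapsedTime(&ms, start[i], stop[i]);
    printf("Stream[%d] : %f ms\n", i, ms);
    cudaEventDestroy(start[i]); cudaEventDestroy(stop[i]); cudaStreamDestroy(s[i]);
  }
  cudaFree(d); cudaFreeHost(h);
  return 0;
}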
dfd7a2879211716ed369694d98a0b85f5dc9187e.hip
// !!! This is a file automatically generated by hipify!!! // ************************************************ // original authors: Lee Howes and David B. Thomas // ************************************************ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> #include "loopback.h" #include "kernels.hip" int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: %s <dump> <repeat>\n", argv[0]); return 1; } // display device results when enabled const int dump = atoi(argv[1]); const int repeat = atoi(argv[2]); const size_t loopback_size = sizeof(float) * LOOKBACK_NUM_PARAMETER_VALUES; const size_t seed_size = sizeof(unsigned int) * TAUSWORTHE_NUM_SEEDS; float *lookback_VOL_0 = (float *) malloc(loopback_size); float *lookback_A_0 = (float *) malloc(loopback_size); float *lookback_A_1 = (float *) malloc(loopback_size); float *lookback_A_2 = (float *) malloc(loopback_size); float *lookback_S_0 = (float *) malloc(loopback_size); float *lookback_EPS_0 = (float *) malloc(loopback_size); float *lookback_MU = (float *) malloc(loopback_size); float *lookbackSimulationResultsMean = (float *) malloc(loopback_size); float *lookbackSimulationResultsVariance = (float *) malloc(loopback_size); for (unsigned i = 0; i < LOOKBACK_NUM_PARAMETER_VALUES; i++) { lookback_VOL_0[i] = Rand(); lookback_A_0[i] = Rand(); lookback_A_1[i] = Rand(); lookback_A_2[i] = Rand(); lookback_S_0[i] = Rand(); lookback_EPS_0[i] = Rand(); lookback_MU[i] = Rand(); } unsigned int *tauswortheSeeds = (unsigned int *) malloc(seed_size); for (unsigned i = 0; i < TAUSWORTHE_NUM_SEEDS; i++) tauswortheSeeds[i] = (uint)rand() + 16; float *d_lookback_VOL_0, *d_lookback_A_0, *d_lookback_A_1, *d_lookback_A_2, *d_lookback_S_0, *d_lookback_EPS_0, *d_lookback_MU; float *d_lookbackSimulationResultsMean, *d_lookbackSimulationResultsVariance; unsigned int *d_tauswortheSeeds; hipMalloc((void**) &d_tauswortheSeeds, seed_size); hipMalloc((void**) &d_lookback_VOL_0, loopback_size); hipMalloc((void**) &d_lookback_A_0, loopback_size); hipMalloc((void**) &d_lookback_A_1, loopback_size); hipMalloc((void**) &d_lookback_A_2, loopback_size); hipMalloc((void**) &d_lookback_S_0, loopback_size); hipMalloc((void**) &d_lookback_EPS_0, loopback_size); hipMalloc((void**) &d_lookback_MU, loopback_size); hipMalloc((void**) &d_lookbackSimulationResultsMean, loopback_size); hipMalloc((void**) &d_lookbackSimulationResultsVariance, loopback_size); hipMemcpy(d_tauswortheSeeds, tauswortheSeeds, seed_size, hipMemcpyHostToDevice); hipMemcpy(d_lookback_VOL_0, lookback_VOL_0, loopback_size, hipMemcpyHostToDevice); hipMemcpy(d_lookback_A_0, lookback_A_0, loopback_size, hipMemcpyHostToDevice); hipMemcpy(d_lookback_A_1, lookback_A_1, loopback_size, hipMemcpyHostToDevice); hipMemcpy(d_lookback_A_2, lookback_A_2, loopback_size, hipMemcpyHostToDevice); hipMemcpy(d_lookback_S_0, lookback_S_0, loopback_size, hipMemcpyHostToDevice); hipMemcpy(d_lookback_EPS_0, lookback_EPS_0, loopback_size, hipMemcpyHostToDevice); hipMemcpy(d_lookback_MU, lookback_MU, loopback_size, hipMemcpyHostToDevice); // Execute the Tausworthe version of the lookback option dim3 grid (LOOKBACK_TAUSWORTHE_NUM_BLOCKS, 1, 1); dim3 threads (LOOKBACK_TAUSWORTHE_NUM_THREADS, 1, 1); const unsigned num_cycles = LOOKBACK_MAX_T; hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( tausworthe_lookback) , dim3(grid), dim3(threads) , 0, 0, num_cycles, d_tauswortheSeeds, 
d_lookbackSimulationResultsMean, d_lookbackSimulationResultsVariance, d_lookback_VOL_0, d_lookback_EPS_0, d_lookback_A_0, d_lookback_A_1, d_lookback_A_2, d_lookback_S_0, d_lookback_MU); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / repeat); hipMemcpy(lookbackSimulationResultsMean, d_lookbackSimulationResultsMean, loopback_size, hipMemcpyDeviceToHost); hipMemcpy(lookbackSimulationResultsVariance, d_lookbackSimulationResultsVariance, loopback_size, hipMemcpyDeviceToHost); if (dump) { for (unsigned i = 0; i < LOOKBACK_NUM_PARAMETER_VALUES; i++) printf("%d %.3f %.3f\n", i, lookbackSimulationResultsMean[i], lookbackSimulationResultsVariance[i]); } free(lookback_VOL_0); free(lookback_A_0); free(lookback_A_1); free(lookback_A_2); free(lookback_S_0); free(lookback_EPS_0); free(lookback_MU); free(lookbackSimulationResultsMean); free(lookbackSimulationResultsVariance); free(tauswortheSeeds); hipFree(d_tauswortheSeeds); hipFree(d_lookback_VOL_0); hipFree(d_lookback_A_0); hipFree(d_lookback_A_1); hipFree(d_lookback_A_2); hipFree(d_lookback_S_0); hipFree(d_lookback_EPS_0); hipFree(d_lookback_MU); hipFree(d_lookbackSimulationResultsMean); hipFree(d_lookbackSimulationResultsVariance); return 0; }
dfd7a2879211716ed369694d98a0b85f5dc9187e.cu
// ************************************************ // original authors: Lee Howes and David B. Thomas // ************************************************ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <cuda.h> #include "loopback.h" #include "kernels.cu" int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: %s <dump> <repeat>\n", argv[0]); return 1; } // display device results when enabled const int dump = atoi(argv[1]); const int repeat = atoi(argv[2]); const size_t loopback_size = sizeof(float) * LOOKBACK_NUM_PARAMETER_VALUES; const size_t seed_size = sizeof(unsigned int) * TAUSWORTHE_NUM_SEEDS; float *lookback_VOL_0 = (float *) malloc(loopback_size); float *lookback_A_0 = (float *) malloc(loopback_size); float *lookback_A_1 = (float *) malloc(loopback_size); float *lookback_A_2 = (float *) malloc(loopback_size); float *lookback_S_0 = (float *) malloc(loopback_size); float *lookback_EPS_0 = (float *) malloc(loopback_size); float *lookback_MU = (float *) malloc(loopback_size); float *lookbackSimulationResultsMean = (float *) malloc(loopback_size); float *lookbackSimulationResultsVariance = (float *) malloc(loopback_size); for (unsigned i = 0; i < LOOKBACK_NUM_PARAMETER_VALUES; i++) { lookback_VOL_0[i] = Rand(); lookback_A_0[i] = Rand(); lookback_A_1[i] = Rand(); lookback_A_2[i] = Rand(); lookback_S_0[i] = Rand(); lookback_EPS_0[i] = Rand(); lookback_MU[i] = Rand(); } unsigned int *tauswortheSeeds = (unsigned int *) malloc(seed_size); for (unsigned i = 0; i < TAUSWORTHE_NUM_SEEDS; i++) tauswortheSeeds[i] = (uint)rand() + 16; float *d_lookback_VOL_0, *d_lookback_A_0, *d_lookback_A_1, *d_lookback_A_2, *d_lookback_S_0, *d_lookback_EPS_0, *d_lookback_MU; float *d_lookbackSimulationResultsMean, *d_lookbackSimulationResultsVariance; unsigned int *d_tauswortheSeeds; cudaMalloc((void**) &d_tauswortheSeeds, seed_size); cudaMalloc((void**) &d_lookback_VOL_0, loopback_size); cudaMalloc((void**) &d_lookback_A_0, loopback_size); cudaMalloc((void**) &d_lookback_A_1, loopback_size); cudaMalloc((void**) &d_lookback_A_2, loopback_size); cudaMalloc((void**) &d_lookback_S_0, loopback_size); cudaMalloc((void**) &d_lookback_EPS_0, loopback_size); cudaMalloc((void**) &d_lookback_MU, loopback_size); cudaMalloc((void**) &d_lookbackSimulationResultsMean, loopback_size); cudaMalloc((void**) &d_lookbackSimulationResultsVariance, loopback_size); cudaMemcpy(d_tauswortheSeeds, tauswortheSeeds, seed_size, cudaMemcpyHostToDevice); cudaMemcpy(d_lookback_VOL_0, lookback_VOL_0, loopback_size, cudaMemcpyHostToDevice); cudaMemcpy(d_lookback_A_0, lookback_A_0, loopback_size, cudaMemcpyHostToDevice); cudaMemcpy(d_lookback_A_1, lookback_A_1, loopback_size, cudaMemcpyHostToDevice); cudaMemcpy(d_lookback_A_2, lookback_A_2, loopback_size, cudaMemcpyHostToDevice); cudaMemcpy(d_lookback_S_0, lookback_S_0, loopback_size, cudaMemcpyHostToDevice); cudaMemcpy(d_lookback_EPS_0, lookback_EPS_0, loopback_size, cudaMemcpyHostToDevice); cudaMemcpy(d_lookback_MU, lookback_MU, loopback_size, cudaMemcpyHostToDevice); // Execute the Tausworthe version of the lookback option dim3 grid (LOOKBACK_TAUSWORTHE_NUM_BLOCKS, 1, 1); dim3 threads (LOOKBACK_TAUSWORTHE_NUM_THREADS, 1, 1); const unsigned num_cycles = LOOKBACK_MAX_T; cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { tausworthe_lookback <<< grid, threads >>> ( num_cycles, d_tauswortheSeeds, d_lookbackSimulationResultsMean, d_lookbackSimulationResultsVariance, d_lookback_VOL_0, 
d_lookback_EPS_0, d_lookback_A_0, d_lookback_A_1, d_lookback_A_2, d_lookback_S_0, d_lookback_MU); } cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / repeat); cudaMemcpy(lookbackSimulationResultsMean, d_lookbackSimulationResultsMean, loopback_size, cudaMemcpyDeviceToHost); cudaMemcpy(lookbackSimulationResultsVariance, d_lookbackSimulationResultsVariance, loopback_size, cudaMemcpyDeviceToHost); if (dump) { for (unsigned i = 0; i < LOOKBACK_NUM_PARAMETER_VALUES; i++) printf("%d %.3f %.3f\n", i, lookbackSimulationResultsMean[i], lookbackSimulationResultsVariance[i]); } free(lookback_VOL_0); free(lookback_A_0); free(lookback_A_1); free(lookback_A_2); free(lookback_S_0); free(lookback_EPS_0); free(lookback_MU); free(lookbackSimulationResultsMean); free(lookbackSimulationResultsVariance); free(tauswortheSeeds); cudaFree(d_tauswortheSeeds); cudaFree(d_lookback_VOL_0); cudaFree(d_lookback_A_0); cudaFree(d_lookback_A_1); cudaFree(d_lookback_A_2); cudaFree(d_lookback_S_0); cudaFree(d_lookback_EPS_0); cudaFree(d_lookback_MU); cudaFree(d_lookbackSimulationResultsMean); cudaFree(d_lookbackSimulationResultsVariance); return 0; }
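/*
 * Illustrative sketch (not part of the original benchmark): the synchronize / chrono /
 * launch-loop / synchronize pattern used above can be factored into a small reusable helper
 * so other kernels are timed the same way. The helper name time_launches_s and the demo
 * kernel are assumptions for illustration only.
 */
#include <chrono>
#include <cstdio>
#include <cuda_runtime.h>

template <typename Launch>
float time_launches_s(int repeat, Launch launch) {
  cudaDeviceSynchronize();                       // exclude any prior device work
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeat; i++) launch();
  cudaDeviceSynchronize();                       // wait for all timed launches
  auto end = std::chrono::steady_clock::now();
  auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  return (ns * 1e-9f) / repeat;                  // average seconds per launch
}

__global__ void demo(float *x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] += 1.0f;
}

int main(void) {
  const int n = 1 << 20;
  float *d;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));
  float s = time_launches_s(100, [&] { demo<<<(n + 255) / 256, 256>>>(d, n); });
  printf("Average kernel execution time %f (s)\n", s);
  cudaFree(d);
  return 0;
}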
c0160249ab3c2815dac047d21570919e069f2b06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 Netherlands eScience Center * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <math.h> #define NSTREAMS 256 // 256 #define TOTAL_SIZE (1 << 27) //1 GB of doubles #define ITERATIONS 10 //modes for testing #define HOSTTODEVICE 1 #define DEVICETOHOST 2 #define HYBRID 3 #define CUDA_CHECK_ERROR(errorMessage) do { \ hipError_t err = hipGetLastError(); \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, hipGetErrorString( err) );\ exit(EXIT_FAILURE); \ } \ err = hipDeviceSynchronize(); \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, hipGetErrorString( err) );\ exit(EXIT_FAILURE); \ } } while (0) extern "C" { void start_timer (); void stop_timer (float *); void measure (int size, int mode); __global__ void mappedMemoryCopy (double *dst, double *src, int n); } hipStream_t stream[NSTREAMS]; double *hostptr = 0; double *devptr = 0; double Lo; double G; double g; int main () { hipError_t err; int k; hipSetDeviceFlags (hipDeviceMapHost); hipSetDevice (0); hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After device initialization"); //setup streams for (k = 0; k < NSTREAMS; k++) { err = hipStreamCreate (&stream[k]); if (err != hipSuccess) { fprintf (stderr, "Error in hipStreamCreate: %s\n", hipGetErrorString (err)); } } err = hipHostMalloc ((void **) &hostptr, TOTAL_SIZE * sizeof (double), hipHostMallocMapped); if (err != hipSuccess) { fprintf (stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString (err)); } //fill the memory for (int i = 0; i < TOTAL_SIZE; i++) { hostptr[i] = (i & 0xff) * 1.0; } err = hipMalloc ((void **) &devptr, TOTAL_SIZE * sizeof (double)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc: %s\n", hipGetErrorString (err)); } hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After memory setup"); printf ("Measure HostToDevice\n"); measure (TOTAL_SIZE, HOSTTODEVICE); printf ("Measure DeviceToHost\n"); measure (TOTAL_SIZE, DEVICETOHOST); printf ("Measure Hybrid\n"); measure (TOTAL_SIZE, HYBRID); //clean up hipHostFree (hostptr); hipFree (devptr); } void measure (int size, int mode) { hipError_t err; int i, k; //setup timers and parameters float time; float total_time = 0.0f; int nStreams; int elementsPerStream; //setup grids for kernel launches dim3 single_block (1, 1); dim3 single_thread (1, 1); dim3 threads (256, 1); dim3 grid (1, 1); int max_blocks = (64 * 1024) - 1; int blocks = (int) ceilf ((float) size / (float) 256.0); grid.x = min (blocks, max_blocks); grid.y = (int) ceilf ((float) blocks / (float) max_blocks); hipDeviceSynchronize (); CUDA_CHECK_ERROR ("Before starting new measurement"); //First do some transfers to wake up the device nStreams = 8; elementsPerStream = TOTAL_SIZE / nStreams; hipDeviceSynchronize (); for (i = 0; i < ITERATIONS; i++) { for (k = 0; k < nStreams; k++) { err = 
hipMemcpyAsync (devptr + k * elementsPerStream, hostptr + k * elementsPerStream, elementsPerStream * sizeof (double), hipMemcpyHostToDevice, stream[k]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device: %s\n", hipGetErrorString (err)); } err = hipMemcpyAsync (hostptr + k * elementsPerStream, devptr + k * elementsPerStream, elementsPerStream * sizeof (double), hipMemcpyDeviceToHost, stream[k + 1]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device: %s\n", hipGetErrorString (err)); } } } hipDeviceSynchronize (); //Now estimate model parameters L+o, G, and g //estimate L+o using a very small message, thus assuming kG = 0. for (i = 0; i < ITERATIONS; i++) { hipDeviceSynchronize (); start_timer (); if (mode == HOSTTODEVICE) { hipMemcpyAsync (devptr, hostptr, 1 * sizeof (double), hipMemcpyHostToDevice, stream[1]); } else if (mode == DEVICETOHOST) { hipMemcpyAsync (hostptr, devptr, 1 * sizeof (double), hipMemcpyDeviceToHost, stream[1]); } else if (mode == HYBRID) { hipLaunchKernelGGL(( mappedMemoryCopy) , dim3(single_block), dim3(single_thread), 0, stream[1] , hostptr, devptr, 1); } hipDeviceSynchronize (); stop_timer (&time); total_time += time; } Lo = (double) total_time / (ITERATIONS); printf ("L+o=%.10f\n", Lo); total_time = 0.0; //Now estimate G nStreams = 1; if (mode == HYBRID) { nStreams = 128; max_blocks = (64 * 1024) - 1; blocks = (int) ceilf ((int) ceilf ((float) size / (float) nStreams) / (float) 256.0); grid.x = min (blocks, max_blocks); grid.y = (int) ceilf ((float) blocks / (float) max_blocks); } long long totalElements = (long long) 0; double sum_time = 0.0; for (int j = 0; j < 10; j++) { elementsPerStream = size / nStreams; totalElements += elementsPerStream * nStreams; for (i = 0; i < ITERATIONS; i++) { hipDeviceSynchronize (); start_timer (); for (k = 0; k < nStreams; k++) { if (mode == HOSTTODEVICE) { err = hipMemcpyAsync (devptr + k * elementsPerStream, hostptr + k * elementsPerStream, elementsPerStream * sizeof (double), hipMemcpyHostToDevice, stream[k]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device: %s\n", hipGetErrorString (err)); } } else if (mode == DEVICETOHOST) { err = hipMemcpyAsync (hostptr + k * elementsPerStream, devptr + k * elementsPerStream, elementsPerStream * sizeof (double), hipMemcpyDeviceToHost, stream[k]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device: %s\n", hipGetErrorString (err)); } } else if (mode == HYBRID) { err = hipMemcpyAsync (devptr + k * elementsPerStream, hostptr + k * elementsPerStream, elementsPerStream * sizeof (double), hipMemcpyHostToDevice, stream[k + 1]); if (err != hipSuccess) fprintf (stderr, "Error in hipMemcpy host to device: %s\n", hipGetErrorString (err)); hipLaunchKernelGGL(( mappedMemoryCopy) , dim3(grid), dim3(threads), 0, stream[k] , hostptr + k * elementsPerStream, devptr + k * elementsPerStream, elementsPerStream); } } hipDeviceSynchronize (); stop_timer (&time); total_time += time; } sum_time += (double) total_time / (ITERATIONS); total_time = 0.0; } double wG = sum_time - (10 * ITERATIONS) * Lo; if (mode == HYBRID) { wG = wG - (10 * ITERATIONS) * (nStreams - 1) * g; } double w = (double) totalElements * (double) sizeof (double); G = wG / w; printf ("G=%20.17e, G=%.10f, BW=%.6f MB/s\n", G, G, (1000.0 / G) / (1 << 20)); //Now estimate g if (mode == HYBRID) { return; } //don't measure g for hybrid nStreams = 32; elementsPerStream = size / nStreams; g = 0.0; for (i = 0; i < ITERATIONS; i++) { 
hipDeviceSynchronize (); start_timer (); for (k = 0; k < nStreams; k++) { if (mode == HOSTTODEVICE) { err = hipMemcpyAsync (devptr + k * elementsPerStream, hostptr + k * elementsPerStream, elementsPerStream * sizeof (double), hipMemcpyHostToDevice, stream[k]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device: %s\n", hipGetErrorString (err)); } } else if (mode == DEVICETOHOST) { err = hipMemcpyAsync (hostptr + k * elementsPerStream, devptr + k * elementsPerStream, elementsPerStream * sizeof (double), hipMemcpyDeviceToHost, stream[k]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device: %s\n", hipGetErrorString (err)); } } } hipDeviceSynchronize (); stop_timer (&time); total_time += time; } //L+o+(wG*nStreams)+(g*(nStreams-1)) = float g_time = total_time / (float) (ITERATIONS); total_time = 0.0; //-(L+o) double tmp = (double) g_time - Lo; //-(wG*nStreams) tmp = tmp - (G * (double) (size * sizeof (double))); //= (g*(nStreams-1)) g = tmp / (double) (nStreams - 1); printf ("g=%f\n", g); } __global__ void mappedMemoryCopy (double *dst, double *src, int n) { //obtain index int i = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { double temp = src[i]; dst[i] = temp; } }
c0160249ab3c2815dac047d21570919e069f2b06.cu
/* * Copyright 2014 Netherlands eScience Center * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <math.h> #define NSTREAMS 256 // 256 #define TOTAL_SIZE (1 << 27) //1 GB of doubles #define ITERATIONS 10 //modes for testing #define HOSTTODEVICE 1 #define DEVICETOHOST 2 #define HYBRID 3 #define CUDA_CHECK_ERROR(errorMessage) do { \ cudaError_t err = cudaGetLastError(); \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) );\ exit(EXIT_FAILURE); \ } \ err = cudaThreadSynchronize(); \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) );\ exit(EXIT_FAILURE); \ } } while (0) extern "C" { void start_timer (); void stop_timer (float *); void measure (int size, int mode); __global__ void mappedMemoryCopy (double *dst, double *src, int n); } cudaStream_t stream[NSTREAMS]; double *hostptr = 0; double *devptr = 0; double Lo; double G; double g; int main () { cudaError_t err; int k; cudaSetDeviceFlags (cudaDeviceMapHost); cudaSetDevice (0); cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After device initialization"); //setup streams for (k = 0; k < NSTREAMS; k++) { err = cudaStreamCreate (&stream[k]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaStreamCreate: %s\n", cudaGetErrorString (err)); } } err = cudaHostAlloc ((void **) &hostptr, TOTAL_SIZE * sizeof (double), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString (err)); } //fill the memory for (int i = 0; i < TOTAL_SIZE; i++) { hostptr[i] = (i & 0xff) * 1.0; } err = cudaMalloc ((void **) &devptr, TOTAL_SIZE * sizeof (double)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc: %s\n", cudaGetErrorString (err)); } cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After memory setup"); printf ("Measure HostToDevice\n"); measure (TOTAL_SIZE, HOSTTODEVICE); printf ("Measure DeviceToHost\n"); measure (TOTAL_SIZE, DEVICETOHOST); printf ("Measure Hybrid\n"); measure (TOTAL_SIZE, HYBRID); //clean up cudaFreeHost (hostptr); cudaFree (devptr); } void measure (int size, int mode) { cudaError_t err; int i, k; //setup timers and parameters float time; float total_time = 0.0f; int nStreams; int elementsPerStream; //setup grids for kernel launches dim3 single_block (1, 1); dim3 single_thread (1, 1); dim3 threads (256, 1); dim3 grid (1, 1); int max_blocks = (64 * 1024) - 1; int blocks = (int) ceilf ((float) size / (float) 256.0); grid.x = min (blocks, max_blocks); grid.y = (int) ceilf ((float) blocks / (float) max_blocks); cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("Before starting new measurement"); //First do some transfers to wake up the device nStreams = 8; elementsPerStream = TOTAL_SIZE / nStreams; cudaDeviceSynchronize (); for (i = 0; i < ITERATIONS; i++) { for (k = 0; k < nStreams; k++) { err = cudaMemcpyAsync (devptr + k * elementsPerStream, hostptr + k * 
elementsPerStream, elementsPerStream * sizeof (double), cudaMemcpyHostToDevice, stream[k]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device: %s\n", cudaGetErrorString (err)); } err = cudaMemcpyAsync (hostptr + k * elementsPerStream, devptr + k * elementsPerStream, elementsPerStream * sizeof (double), cudaMemcpyDeviceToHost, stream[k + 1]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device: %s\n", cudaGetErrorString (err)); } } } cudaDeviceSynchronize (); //Now estimate model parameters L+o, G, and g //estimate L+o using a very small message, thus assuming kG = 0. for (i = 0; i < ITERATIONS; i++) { cudaDeviceSynchronize (); start_timer (); if (mode == HOSTTODEVICE) { cudaMemcpyAsync (devptr, hostptr, 1 * sizeof (double), cudaMemcpyHostToDevice, stream[1]); } else if (mode == DEVICETOHOST) { cudaMemcpyAsync (hostptr, devptr, 1 * sizeof (double), cudaMemcpyDeviceToHost, stream[1]); } else if (mode == HYBRID) { mappedMemoryCopy <<< single_block, single_thread, 0, stream[1] >>> (hostptr, devptr, 1); } cudaDeviceSynchronize (); stop_timer (&time); total_time += time; } Lo = (double) total_time / (ITERATIONS); printf ("L+o=%.10f\n", Lo); total_time = 0.0; //Now estimate G nStreams = 1; if (mode == HYBRID) { nStreams = 128; max_blocks = (64 * 1024) - 1; blocks = (int) ceilf ((int) ceilf ((float) size / (float) nStreams) / (float) 256.0); grid.x = min (blocks, max_blocks); grid.y = (int) ceilf ((float) blocks / (float) max_blocks); } long long totalElements = (long long) 0; double sum_time = 0.0; for (int j = 0; j < 10; j++) { elementsPerStream = size / nStreams; totalElements += elementsPerStream * nStreams; for (i = 0; i < ITERATIONS; i++) { cudaDeviceSynchronize (); start_timer (); for (k = 0; k < nStreams; k++) { if (mode == HOSTTODEVICE) { err = cudaMemcpyAsync (devptr + k * elementsPerStream, hostptr + k * elementsPerStream, elementsPerStream * sizeof (double), cudaMemcpyHostToDevice, stream[k]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device: %s\n", cudaGetErrorString (err)); } } else if (mode == DEVICETOHOST) { err = cudaMemcpyAsync (hostptr + k * elementsPerStream, devptr + k * elementsPerStream, elementsPerStream * sizeof (double), cudaMemcpyDeviceToHost, stream[k]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device: %s\n", cudaGetErrorString (err)); } } else if (mode == HYBRID) { err = cudaMemcpyAsync (devptr + k * elementsPerStream, hostptr + k * elementsPerStream, elementsPerStream * sizeof (double), cudaMemcpyHostToDevice, stream[k + 1]); if (err != cudaSuccess) fprintf (stderr, "Error in cudaMemcpy host to device: %s\n", cudaGetErrorString (err)); mappedMemoryCopy <<< grid, threads, 0, stream[k] >>> (hostptr + k * elementsPerStream, devptr + k * elementsPerStream, elementsPerStream); } } cudaDeviceSynchronize (); stop_timer (&time); total_time += time; } sum_time += (double) total_time / (ITERATIONS); total_time = 0.0; } double wG = sum_time - (10 * ITERATIONS) * Lo; if (mode == HYBRID) { wG = wG - (10 * ITERATIONS) * (nStreams - 1) * g; } double w = (double) totalElements * (double) sizeof (double); G = wG / w; printf ("G=%20.17e, G=%.10f, BW=%.6f MB/s\n", G, G, (1000.0 / G) / (1 << 20)); //Now estimate g if (mode == HYBRID) { return; } //don't measure g for hybrid nStreams = 32; elementsPerStream = size / nStreams; g = 0.0; for (i = 0; i < ITERATIONS; i++) { cudaDeviceSynchronize (); start_timer (); for (k = 0; k < nStreams; k++) { if (mode == HOSTTODEVICE) { err 
= cudaMemcpyAsync (devptr + k * elementsPerStream, hostptr + k * elementsPerStream, elementsPerStream * sizeof (double), cudaMemcpyHostToDevice, stream[k]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device: %s\n", cudaGetErrorString (err)); } } else if (mode == DEVICETOHOST) { err = cudaMemcpyAsync (hostptr + k * elementsPerStream, devptr + k * elementsPerStream, elementsPerStream * sizeof (double), cudaMemcpyDeviceToHost, stream[k]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device: %s\n", cudaGetErrorString (err)); } } } cudaDeviceSynchronize (); stop_timer (&time); total_time += time; } //L+o+(wG*nStreams)+(g*(nStreams-1)) = float g_time = total_time / (float) (ITERATIONS); total_time = 0.0; //-(L+o) double tmp = (double) g_time - Lo; //-(wG*nStreams) tmp = tmp - (G * (double) (size * sizeof (double))); //= (g*(nStreams-1)) g = tmp / (double) (nStreams - 1); printf ("g=%f\n", g); } __global__ void mappedMemoryCopy (double *dst, double *src, int n) { //obtain index int i = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { double temp = src[i]; dst[i] = temp; } }
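/*
 * Illustrative sketch (not part of the original measurement code): the program above reports
 * bandwidth indirectly, from the fitted per-byte cost G. A direct cross-check is to time one
 * large pinned-memory copy with events and report bytes per second; the 64 MiB transfer size
 * below is an arbitrary assumption.
 */
#include <stdio.h>
#include <cuda_runtime.h>

int main(void) {
  const size_t bytes = 64u << 20;   // 64 MiB
  double *h, *d;
  cudaHostAlloc((void **) &h, bytes, cudaHostAllocDefault);   // pinned host memory
  cudaMalloc((void **) &d, bytes);

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);
  cudaMemcpy(d, h, bytes, cudaMemcpyHostToDevice);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);

  float ms;
  cudaEventElapsedTime(&ms, start, stop);
  printf("HostToDevice: %.3f ms, BW=%.2f MB/s\n",
         ms, (bytes / (ms * 1e-3)) / (1 << 20));

  cudaEventDestroy(start); cudaEventDestroy(stop);
  cudaFree(d); cudaFreeHost(h);
  return 0;
}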
1313524f9b35ccec50ef4acab6acd83858358ef9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/loss/tv_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/format.hpp" namespace caffe { static __global__ void tvloss_forward(int count, int channels, int height, int width, const float * data, float *loss) { CUDA_KERNEL_LOOP(i, count) { int h = i / width % height; int w = i % width; float per_loss = 0; if (w < width - 1) per_loss += (data[i] - data[i+1]) * (data[i] - data[i+1]); if (h < height - 1) per_loss += (data[i] - data[i+width]) * (data[i] - data[i+width]); loss[i] = per_loss; } } static __global__ void tvloss_backward(int count, int channels, int height, int width, const float * data, float *diff) { CUDA_KERNEL_LOOP(i, count) { int h = i / width % height; int w = i % width; float per_diff = 0; if (w < width-1) per_diff += 2*(data[i] - data[i+1]); if (w > 0) per_diff -= 2*(data[i-1] - data[i]); if (h < height-1) per_diff += 2*(data[i] - data[i+width]); if (h > 0) per_diff -= 2*(data[i-width] - data[i]); diff[i] = per_diff; } } void TVLossLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); hipLaunchKernelGGL(( tvloss_forward), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom[0]->count(),channels,height,width,bottom[0]->gpu_data(),loss_.mutable_gpu_data()); float loss; caffe_gpu_asum(bottom[0]->count(),loss_.gpu_data(),&loss); top[0]->mutable_cpu_data()[0] = loss / float(bottom[0]->count()); } void TVLossLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); hipLaunchKernelGGL(( tvloss_backward), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom[0]->count(),channels,height,width,bottom[0]->gpu_data(),bottom[0]->mutable_gpu_diff()); float loss_weight_ = top[0]->cpu_diff()[0] / float(bottom[0]->count()); caffe_gpu_scal(bottom[0]->count(),loss_weight_,bottom[0]->mutable_gpu_diff()); } void TVLossLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { } } // namespace caffe
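The two kernels above implement an anisotropic total-variation penalty: each pixel contributes the squared difference to its right and bottom neighbours, and the backward pass accumulates the matching 2*(difference) terms from up to four neighbours. The following is a minimal single-plane CPU reference of the same computation, useful for checking the kernels; the row-major H x W layout matches the index arithmetic above, but the function names are illustrative only.

// CPU reference for one H x W plane of the TV loss above (names are not from caffe).
#include <vector>
#include <cstdio>

static float tv_forward(const std::vector<float>& x, int H, int W) {
  float loss = 0.f;
  for (int h = 0; h < H; ++h)
    for (int w = 0; w < W; ++w) {
      int i = h * W + w;
      if (w < W - 1) loss += (x[i] - x[i + 1]) * (x[i] - x[i + 1]); // right neighbour
      if (h < H - 1) loss += (x[i] - x[i + W]) * (x[i] - x[i + W]); // bottom neighbour
    }
  return loss; // the layer then divides by count() and scales by the top diff
}

static void tv_backward(const std::vector<float>& x, std::vector<float>& dx, int H, int W) {
  for (int h = 0; h < H; ++h)
    for (int w = 0; w < W; ++w) {
      int i = h * W + w;
      float d = 0.f;
      if (w < W - 1) d += 2.f * (x[i] - x[i + 1]);
      if (w > 0)     d -= 2.f * (x[i - 1] - x[i]);
      if (h < H - 1) d += 2.f * (x[i] - x[i + W]);
      if (h > 0)     d -= 2.f * (x[i - W] - x[i]);
      dx[i] = d; // d(loss)/d(x[i]) before the loss-weight scaling
    }
}

int main() {
  const int H = 2, W = 3;
  std::vector<float> x = {0, 1, 3, 2, 2, 0}, dx(H * W);
  std::printf("loss = %f\n", tv_forward(x, H, W));
  tv_backward(x, dx, H, W);
  return 0;
}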
1313524f9b35ccec50ef4acab6acd83858358ef9.cu
#include <vector> #include "caffe/layers/loss/tv_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/format.hpp" namespace caffe { static __global__ void tvloss_forward(int count, int channels, int height, int width, const float * data, float *loss) { CUDA_KERNEL_LOOP(i, count) { int h = i / width % height; int w = i % width; float per_loss = 0; if (w < width - 1) per_loss += (data[i] - data[i+1]) * (data[i] - data[i+1]); if (h < height - 1) per_loss += (data[i] - data[i+width]) * (data[i] - data[i+width]); loss[i] = per_loss; } } static __global__ void tvloss_backward(int count, int channels, int height, int width, const float * data, float *diff) { CUDA_KERNEL_LOOP(i, count) { int h = i / width % height; int w = i % width; float per_diff = 0; if (w < width-1) per_diff += 2*(data[i] - data[i+1]); if (w > 0) per_diff -= 2*(data[i-1] - data[i]); if (h < height-1) per_diff += 2*(data[i] - data[i+width]); if (h > 0) per_diff -= 2*(data[i-width] - data[i]); diff[i] = per_diff; } } void TVLossLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); tvloss_forward<<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>> (bottom[0]->count(),channels,height,width,bottom[0]->gpu_data(),loss_.mutable_gpu_data()); float loss; caffe_gpu_asum(bottom[0]->count(),loss_.gpu_data(),&loss); top[0]->mutable_cpu_data()[0] = loss / float(bottom[0]->count()); } void TVLossLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); tvloss_backward<<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>> (bottom[0]->count(),channels,height,width,bottom[0]->gpu_data(),bottom[0]->mutable_gpu_diff()); float loss_weight_ = top[0]->cpu_diff()[0] / float(bottom[0]->count()); caffe_gpu_scal(bottom[0]->count(),loss_weight_,bottom[0]->mutable_gpu_diff()); } void TVLossLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { } } // namespace caffe
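Apart from the added hip_runtime header and the hipify banner, the .hip and .cu versions of this layer differ only in launch syntax: hipify rewrites the triple-chevron launch into hipLaunchKernelGGL with explicit grid, block, shared-memory and stream arguments. A minimal sketch of that correspondence on a made-up kernel; the hipified form is shown as a comment so the snippet stays plain CUDA.

// Illustrative only; the kernel and sizes are not from the layer above.
#include <cuda_runtime.h>

__global__ void scale(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1 << 20;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));

  // CUDA form: <<<grid, block, dynamicSharedBytes, stream>>>
  scale<<<(n + 255) / 256, 256, 0, 0>>>(d, 2.0f, n);

  // Equivalent hipified form (as emitted in the .hip file above):
  //   hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, d, 2.0f, n);

  cudaDeviceSynchronize();
  cudaFree(d);
  return 0;
}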
b5946db98240373f54350663499e7b8ea7a20695.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2012-2013 Indian Institute of Technology Kanpur. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY INDIAN INSTITUTE OF TECHNOLOGY KANPUR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INDIAN INSTITUTE OF TECHNOLOGY KANPUR OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of Indian Institute of Technology Kanpur. */ /********************************************************************************** Lock-free priority queue for CUDA; tested for CUDA 4.2 on 32-bit Ubuntu 10.10 and 64-bit Ubuntu 12.04. Developed at IIT Kanpur. Inputs: Percentage of add operations (e.g., 30 for 30% add) Output: Prints the total time (in milliseconds) to execute the the sequence of operations Compilation flags: -O3 -arch sm_20 -I ~/NVIDIA_GPU_Computing_SDK/C/common/inc/ -DNUM_ITEMS=num_ops -DFACTOR=num_ops_per_thread -DKEYS=num_keys NUM_ITEMS is the total number of operations (mix of add and deleteMin) to execute. FACTOR is the number of operations per thread. KEYS is the number of integer keys assumed in the range [10, 9+KEYS]. The paper cited below states that the key range is [0, KEYS-1]. However, we have shifted the range by +10 so that the head sentinel key (the minimum key) can be chosen as zero. Any positive shift other than +10 would also work. The include path ~/NVIDIA_GPU_Computing_SDK/C/common/inc/ is needed for cutil.h. Related work: Prabhakar Misra and Mainak Chaudhuri. Performance Evaluation of Concurrent Lock-free Data Structures on GPUs. In Proceedings of the 18th IEEE International Conference on Parallel and Distributed Systems, December 2012. 
***************************************************************************************/ #include"cutil.h" // Comment this if cutil.h is not available #include"cuda_runtime.h" #include"stdio.h" #include"stdlib.h" #include"time.h" #include"assert.h" #if __WORDSIZE == 64 typedef unsigned long long LL; #else typedef unsigned int LL; #endif // Maximum level of a node in the skip list #define MAX_LEVEL 32 // Number of threads per block #define NUM_THREADS 512 // Supported operations #define ADD (0) #define DELETE_MIN (1) class Node; // Definition of generic node class class __attribute__((aligned (16))) Node { public: int topLevel; // Level of the node LL priority; // Priority value of the node (this is the key value) LL marked; // Special mark field used by DeleteMin LL next[MAX_LEVEL+1]; // Array of next links // Create a next field from a reference and mark bit __device__ __host__ LL CreateRef( Node* ref, bool mark) { LL val=(LL)ref; val=val|mark; return val; } __device__ __host__ void SetRef(int index, Node* ref, bool mark) { next[index]=CreateRef(ref, mark); } // Extract the reference from a next field __device__ Node* GetReference(int index) { LL ref=next[index]; return (Node*)((ref>>1)<<1); } // Extract the reference and mark bit from a next field __device__ Node* Get(int index, bool* marked) { marked[0]=next[index]%2; return (Node*)((next[index]>>1)<<1); } // CompareAndSet wrapper __device__ bool CompareAndSet(int index, Node* expectedRef, Node* newRef, bool oldMark, bool newMark) { LL oldVal = (LL)expectedRef|oldMark; LL newVal = (LL)newRef|newMark; LL oldValOut=atomicCAS(&(next[index]), oldVal, newVal); if (oldValOut==oldVal) return true; return false; } // Constructor for sentinel nodes Node(LL k) { priority=k; topLevel=MAX_LEVEL; marked =0; int i; for(i=0;i<MAX_LEVEL+1;i++){ next[i]=CreateRef((Node*)NULL, false); } } }; // Definition of a lock-free skip list class PrioritySkipList { public: Node* head; // Head sentinel Node* tail; // Tail sentinel PrioritySkipList() { Node* h=new Node(0); #if __WORDSIZE == 64 Node* t=new Node((LL)0xffffffffffffffff); #else Node* t=new Node((LL)0xffffffff); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&head, sizeof(Node))); #else hipMalloc((void**)&head, sizeof(Node)); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&tail, sizeof(Node))); #else hipMalloc((void**)&tail, sizeof(Node)); #endif int i; for(i=0;i<h->topLevel+1;i++){ h->SetRef(i, tail, false); } #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(head, h, sizeof(Node), hipMemcpyHostToDevice)); #else hipMemcpy(head, h, sizeof(Node), hipMemcpyHostToDevice); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(tail, t, sizeof(Node), hipMemcpyHostToDevice)); #else hipMemcpy(tail, t, sizeof(Node), hipMemcpyHostToDevice); #endif } // Method used by DeleteMin __device__ Node* FindAndMarkMin() { Node* curr =NULL; curr=head->GetReference(0); while(curr!=tail){ if(!curr->marked){ if(0==atomicCAS((LL*)&(curr->marked), 0, 1)) return curr; } else curr=curr->GetReference(0); } return NULL; } __device__ bool find(LL, Node**, Node**); // Helping method __device__ bool Add(Node*); __device__ bool Delete(LL); }; __device__ Node** nodes; // Pool of pre-allocated nodes __device__ unsigned int pointerIndex=0; // Index into pool of free nodes __device__ LL* randoms; // Array storing the levels of the nodes in the free pool // Function for creating a new node when requested by an add operation __device__ Node* GetNewNode(LL priority) { LL ind=atomicInc(&pointerIndex, NUM_ITEMS); Node* n=nodes[ind]; 
n->marked=0; n->priority=priority; n->topLevel=randoms[ind]; int i; for(i=0;i<n->topLevel+1;i++){ n->SetRef(i, NULL, false); } return n; } // Definition of a priority queue based on a lock-free skip list class SkipQueue { public: PrioritySkipList* skipList; // The priority queue SkipQueue(){ skipList=NULL; } // The method for adding new nodes __device__ bool Add(LL item) { Node* newNode=GetNewNode(item); return skipList->Add(newNode); } // The method for deleting the minimum __device__ LL DeleteMin() { Node* n=skipList->FindAndMarkMin(); if(n!=NULL){ skipList->Delete(n->priority); return n->priority; } return 0; } }; __device__ SkipQueue* l; // The lock-free priority queue // Kernel for initializing device memory __global__ void init(SkipQueue* l1, Node** n, LL* levels) { randoms=levels; nodes=n; l=l1; } // Find the window of node with key=priority // On the way clean up logically deleted nodes (those with set marked bit) __device__ bool PrioritySkipList::find(LL priority, Node** preds, Node** succs) { // preds and succs are arrays of pointers int bottomLevel=0; bool marked[]={false}; bool snip; Node* pred=NULL; Node* curr=NULL; Node* succ=NULL; bool beenThereDoneThat; while(true){ beenThereDoneThat = false; pred=head; int level; for(level=MAX_LEVEL;level>=bottomLevel;level--){ curr=pred->GetReference(level); while(true){ succ=curr->Get(level, marked); while(marked[0]){ snip=pred->CompareAndSet(level, curr, succ, false, false); beenThereDoneThat = true; if(!snip) break; curr=pred->GetReference(level); succ=curr->Get(level, marked); beenThereDoneThat = false; } if (beenThereDoneThat && !snip) break; if(curr->priority<priority){ pred=curr; curr=succ; } else{ break; } } if (beenThereDoneThat && !snip) break; preds[level]=pred; succs[level]=curr; } if (beenThereDoneThat && !snip) continue; return((curr->priority==priority)); } } // Called by DeleteMin __device__ bool PrioritySkipList::Delete(LL priority) { int bottomLevel=0; Node* preds[MAX_LEVEL+1]; Node* succs[MAX_LEVEL+1]; Node* succ; bool marked[]={false}; while(true){ bool found=find(priority, preds, succs); if(!found){ return false; } else{ Node* nodeToDelete=succs[bottomLevel]; int level; for(level=nodeToDelete->topLevel;level>=bottomLevel+1;level--){ succ=nodeToDelete->Get(level, marked); while(marked[0]==false){ nodeToDelete->CompareAndSet(level, succ, succ, false, true); succ=nodeToDelete->Get(level, marked); } } succ=nodeToDelete->Get(bottomLevel, marked); while(true){ bool iMarkedIt=nodeToDelete->CompareAndSet(bottomLevel, succ, succ, false, true); succ=succs[bottomLevel]->Get(bottomLevel, marked); if(iMarkedIt==true){ find(priority, preds, succs); return true; } else if(marked[0]==true){ return false; } } } } } __device__ bool PrioritySkipList::Add(Node* newNode) { LL priority=newNode->priority; int topLevel=newNode->topLevel; int bottomLevel=0; Node* preds[MAX_LEVEL+1]; Node* succs[MAX_LEVEL+1]; while(true){ bool found=find(priority, preds, succs); if(found){ return false; } else{ int level; for(level=bottomLevel;level<=topLevel;level++){ Node* succ=succs[level]; newNode->SetRef(level, succ, false); } Node* pred=preds[bottomLevel]; Node* succ=succs[bottomLevel]; bool t; t=pred->CompareAndSet(bottomLevel, succ, newNode, false, false); if(!t){ continue; } for(level=bottomLevel+1;level<=topLevel;level++){ while(true){ pred=preds[level]; succ=succs[level]; if(pred->CompareAndSet(level, succ, newNode, false, false)){ break; } find(priority, preds, succs); } } return true; } } } // The main kernel __global__ void kernel(LL* items, LL* 
op, LL* result) { // The array items holds the sequence of keys // The array op holds the sequence of operations // The array result, at the end, will hold the outcome of the operations int tid,i; for(i=0;i<FACTOR;i++){ // FACTOR is the number of operations per thread tid=i*gridDim.x*blockDim.x+blockIdx.x*blockDim.x+threadIdx.x; if(tid>NUM_ITEMS) return; // Grab the operation and the associated key and execute if(op[tid]==ADD){ result[tid]=l->Add(items[tid]); } if(op[tid]==DELETE_MIN){ result[tid]=l->DeleteMin(); } } } // Generate the level of a newly created node LL Randomlevel() { LL v=1; double p=0.5; while(((rand()/(double)(RAND_MAX))<p) && (v<MAX_LEVEL)) v++; return v; } int main(int argc,char** argv) { if (argc != 2) { printf("Need one argument: percent add ops (e.g., 30 for 30%% add ops).\nAborting...\n"); exit(1); } if (atoi(argv[1]) > 100) { printf("Input more than 100%%.\nAborting...\n"); exit(1); } int adds=(NUM_ITEMS*atoi(argv[1]))/100; LL op[NUM_ITEMS]; // Sequence of operations LL items[NUM_ITEMS]; // Sequence of keys (relevant only if op is add) LL result[NUM_ITEMS]; // Sequence of outcomes LL levels[NUM_ITEMS]; // Pre-generated levels of newly created nodes (relevant only if op is add) // Populate sequence of operations and priorities int i; srand(0); for(i=0;i<adds;i++){ op[i]=ADD; items[i]=10+rand()%KEYS; } for(;i<NUM_ITEMS;i++){ op[i]=DELETE_MIN; } // Pre-generate levels of newly created nodes (relevant only if op[i] is add) srand(0); for(i=0;i<NUM_ITEMS;i++){ levels[i]=Randomlevel()-1; } // Allocate device memory LL* Citems; LL* Cop; LL* Cresult; LL* Clevels; #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS)); #else hipMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS)); #else hipMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS)); #else hipMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&Clevels, sizeof(LL)*NUM_ITEMS)); #else hipMalloc((void**)&Clevels, sizeof(LL)*NUM_ITEMS); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(Clevels, levels, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice)); #else hipMemcpy(Clevels, levels, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(Citems, items, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice)); #else hipMemcpy(Citems, items, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice)); #else hipMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice); #endif Node* pointers[adds]; Node** Cpointers; // Allocate the pool of free nodes for(i=0;i<adds;i++){ #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&pointers[i], sizeof(Node))); #else hipMalloc((void**)&pointers[i], sizeof(Node)); #endif } #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&Cpointers, sizeof(Node*)*adds)); #else hipMalloc((void**)&Cpointers, sizeof(Node*)*adds); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(Cpointers, pointers, sizeof(Node*)*adds, hipMemcpyHostToDevice)); #else hipMemcpy(Cpointers, pointers, sizeof(Node*)*adds, hipMemcpyHostToDevice); #endif // Allocate the skip list PrioritySkipList* Clist; PrioritySkipList* list=new PrioritySkipList(); #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&Clist, sizeof(PrioritySkipList))); #else hipMalloc((void**)&Clist, 
sizeof(PrioritySkipList)); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(Clist, list, sizeof(PrioritySkipList), hipMemcpyHostToDevice)); #else hipMemcpy(Clist, list, sizeof(PrioritySkipList), hipMemcpyHostToDevice); #endif // Allocate the priority queue SkipQueue* Cq; SkipQueue* q=new SkipQueue(); q->skipList=Clist; #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMalloc((void**)&Cq, sizeof(PrioritySkipList))); #else hipMalloc((void**)&Cq, sizeof(PrioritySkipList)); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(Cq, q, sizeof(PrioritySkipList), hipMemcpyHostToDevice)); #else hipMemcpy(Cq, q, sizeof(PrioritySkipList), hipMemcpyHostToDevice); #endif // Calculate the number of thread blocks // NUM_ITEMS = total number of operations to execute // NUM_THREADS = number of threads per block // FACTOR = number of operations per thread int blocks=(NUM_ITEMS%(NUM_THREADS*FACTOR)==0)?NUM_ITEMS/(NUM_THREADS*FACTOR):(NUM_ITEMS/(NUM_THREADS*FACTOR))+1; // Error checking code hipError_t error= hipGetLastError(); if(hipSuccess!=error){ printf("error0:CUDA ERROR (%d) {%s}\n",error,hipGetErrorString(error)); exit(-1); } // Initialize the device memory hipLaunchKernelGGL(( init), dim3(1),dim3(32), 0, 0, Cq,Cpointers,Clevels); hipDeviceSynchronize(); // Launch main kernel hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); hipLaunchKernelGGL(( kernel), dim3(blocks),dim3(NUM_THREADS), 0, 0, Citems, Cop, Cresult); hipEventRecord(stop,0); hipEventSynchronize(stop); float time; hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); // Print kernel execution time in milliseconds printf("%lf\n",time); // Check for errors error= hipGetLastError(); if(hipSuccess!=error){ printf("error1:CUDA ERROR (%d) {%s}\n",error,hipGetErrorString(error)); exit(-1); } // Move results back to host memory #ifdef _CUTIL_H_ CUDA_SAFE_CALL(hipMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, hipMemcpyDeviceToHost)); #else hipMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, hipMemcpyDeviceToHost); #endif return 0; }
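The launch configuration in main() above gives each thread FACTOR operations, so the block count is ceil(NUM_ITEMS / (NUM_THREADS*FACTOR)) and the kernel recovers one global operation index per iteration. A small host-side sketch of that arithmetic with made-up values (the three constants are compile-time -D definitions in the original).

// Launch arithmetic of the kernel above, with illustrative values.
#include <cstdio>

int main() {
  const int NUM_ITEMS = 100000; // total operations (illustrative)
  const int NUM_THREADS = 512;  // threads per block
  const int FACTOR = 4;         // operations per thread

  const int perBlock = NUM_THREADS * FACTOR;
  const int blocks = (NUM_ITEMS % perBlock == 0) ? NUM_ITEMS / perBlock
                                                 : NUM_ITEMS / perBlock + 1;

  // In iteration i, thread (b, t) handles operation
  //   tid = i*gridDim.x*blockDim.x + b*blockDim.x + t,
  // i.e. each iteration sweeps one contiguous slab of gridDim.x*blockDim.x items.
  std::printf("blocks = %d, slots = %d (>= %d items)\n",
              blocks, blocks * NUM_THREADS * FACTOR, NUM_ITEMS);
  return 0;
}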
b5946db98240373f54350663499e7b8ea7a20695.cu
/* Copyright 2012-2013 Indian Institute of Technology Kanpur. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY INDIAN INSTITUTE OF TECHNOLOGY KANPUR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INDIAN INSTITUTE OF TECHNOLOGY KANPUR OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of Indian Institute of Technology Kanpur. */ /********************************************************************************** Lock-free priority queue for CUDA; tested for CUDA 4.2 on 32-bit Ubuntu 10.10 and 64-bit Ubuntu 12.04. Developed at IIT Kanpur. Inputs: Percentage of add operations (e.g., 30 for 30% add) Output: Prints the total time (in milliseconds) to execute the the sequence of operations Compilation flags: -O3 -arch sm_20 -I ~/NVIDIA_GPU_Computing_SDK/C/common/inc/ -DNUM_ITEMS=num_ops -DFACTOR=num_ops_per_thread -DKEYS=num_keys NUM_ITEMS is the total number of operations (mix of add and deleteMin) to execute. FACTOR is the number of operations per thread. KEYS is the number of integer keys assumed in the range [10, 9+KEYS]. The paper cited below states that the key range is [0, KEYS-1]. However, we have shifted the range by +10 so that the head sentinel key (the minimum key) can be chosen as zero. Any positive shift other than +10 would also work. The include path ~/NVIDIA_GPU_Computing_SDK/C/common/inc/ is needed for cutil.h. Related work: Prabhakar Misra and Mainak Chaudhuri. Performance Evaluation of Concurrent Lock-free Data Structures on GPUs. In Proceedings of the 18th IEEE International Conference on Parallel and Distributed Systems, December 2012. 
***************************************************************************************/ #include"cutil.h" // Comment this if cutil.h is not available #include"cuda_runtime.h" #include"stdio.h" #include"stdlib.h" #include"time.h" #include"assert.h" #if __WORDSIZE == 64 typedef unsigned long long LL; #else typedef unsigned int LL; #endif // Maximum level of a node in the skip list #define MAX_LEVEL 32 // Number of threads per block #define NUM_THREADS 512 // Supported operations #define ADD (0) #define DELETE_MIN (1) class Node; // Definition of generic node class class __attribute__((aligned (16))) Node { public: int topLevel; // Level of the node LL priority; // Priority value of the node (this is the key value) LL marked; // Special mark field used by DeleteMin LL next[MAX_LEVEL+1]; // Array of next links // Create a next field from a reference and mark bit __device__ __host__ LL CreateRef( Node* ref, bool mark) { LL val=(LL)ref; val=val|mark; return val; } __device__ __host__ void SetRef(int index, Node* ref, bool mark) { next[index]=CreateRef(ref, mark); } // Extract the reference from a next field __device__ Node* GetReference(int index) { LL ref=next[index]; return (Node*)((ref>>1)<<1); } // Extract the reference and mark bit from a next field __device__ Node* Get(int index, bool* marked) { marked[0]=next[index]%2; return (Node*)((next[index]>>1)<<1); } // CompareAndSet wrapper __device__ bool CompareAndSet(int index, Node* expectedRef, Node* newRef, bool oldMark, bool newMark) { LL oldVal = (LL)expectedRef|oldMark; LL newVal = (LL)newRef|newMark; LL oldValOut=atomicCAS(&(next[index]), oldVal, newVal); if (oldValOut==oldVal) return true; return false; } // Constructor for sentinel nodes Node(LL k) { priority=k; topLevel=MAX_LEVEL; marked =0; int i; for(i=0;i<MAX_LEVEL+1;i++){ next[i]=CreateRef((Node*)NULL, false); } } }; // Definition of a lock-free skip list class PrioritySkipList { public: Node* head; // Head sentinel Node* tail; // Tail sentinel PrioritySkipList() { Node* h=new Node(0); #if __WORDSIZE == 64 Node* t=new Node((LL)0xffffffffffffffff); #else Node* t=new Node((LL)0xffffffff); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&head, sizeof(Node))); #else cudaMalloc((void**)&head, sizeof(Node)); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&tail, sizeof(Node))); #else cudaMalloc((void**)&tail, sizeof(Node)); #endif int i; for(i=0;i<h->topLevel+1;i++){ h->SetRef(i, tail, false); } #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(head, h, sizeof(Node), cudaMemcpyHostToDevice)); #else cudaMemcpy(head, h, sizeof(Node), cudaMemcpyHostToDevice); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(tail, t, sizeof(Node), cudaMemcpyHostToDevice)); #else cudaMemcpy(tail, t, sizeof(Node), cudaMemcpyHostToDevice); #endif } // Method used by DeleteMin __device__ Node* FindAndMarkMin() { Node* curr =NULL; curr=head->GetReference(0); while(curr!=tail){ if(!curr->marked){ if(0==atomicCAS((LL*)&(curr->marked), 0, 1)) return curr; } else curr=curr->GetReference(0); } return NULL; } __device__ bool find(LL, Node**, Node**); // Helping method __device__ bool Add(Node*); __device__ bool Delete(LL); }; __device__ Node** nodes; // Pool of pre-allocated nodes __device__ unsigned int pointerIndex=0; // Index into pool of free nodes __device__ LL* randoms; // Array storing the levels of the nodes in the free pool // Function for creating a new node when requested by an add operation __device__ Node* GetNewNode(LL priority) { LL ind=atomicInc(&pointerIndex, NUM_ITEMS); Node* 
n=nodes[ind]; n->marked=0; n->priority=priority; n->topLevel=randoms[ind]; int i; for(i=0;i<n->topLevel+1;i++){ n->SetRef(i, NULL, false); } return n; } // Definition of a priority queue based on a lock-free skip list class SkipQueue { public: PrioritySkipList* skipList; // The priority queue SkipQueue(){ skipList=NULL; } // The method for adding new nodes __device__ bool Add(LL item) { Node* newNode=GetNewNode(item); return skipList->Add(newNode); } // The method for deleting the minimum __device__ LL DeleteMin() { Node* n=skipList->FindAndMarkMin(); if(n!=NULL){ skipList->Delete(n->priority); return n->priority; } return 0; } }; __device__ SkipQueue* l; // The lock-free priority queue // Kernel for initializing device memory __global__ void init(SkipQueue* l1, Node** n, LL* levels) { randoms=levels; nodes=n; l=l1; } // Find the window of node with key=priority // On the way clean up logically deleted nodes (those with set marked bit) __device__ bool PrioritySkipList::find(LL priority, Node** preds, Node** succs) { // preds and succs are arrays of pointers int bottomLevel=0; bool marked[]={false}; bool snip; Node* pred=NULL; Node* curr=NULL; Node* succ=NULL; bool beenThereDoneThat; while(true){ beenThereDoneThat = false; pred=head; int level; for(level=MAX_LEVEL;level>=bottomLevel;level--){ curr=pred->GetReference(level); while(true){ succ=curr->Get(level, marked); while(marked[0]){ snip=pred->CompareAndSet(level, curr, succ, false, false); beenThereDoneThat = true; if(!snip) break; curr=pred->GetReference(level); succ=curr->Get(level, marked); beenThereDoneThat = false; } if (beenThereDoneThat && !snip) break; if(curr->priority<priority){ pred=curr; curr=succ; } else{ break; } } if (beenThereDoneThat && !snip) break; preds[level]=pred; succs[level]=curr; } if (beenThereDoneThat && !snip) continue; return((curr->priority==priority)); } } // Called by DeleteMin __device__ bool PrioritySkipList::Delete(LL priority) { int bottomLevel=0; Node* preds[MAX_LEVEL+1]; Node* succs[MAX_LEVEL+1]; Node* succ; bool marked[]={false}; while(true){ bool found=find(priority, preds, succs); if(!found){ return false; } else{ Node* nodeToDelete=succs[bottomLevel]; int level; for(level=nodeToDelete->topLevel;level>=bottomLevel+1;level--){ succ=nodeToDelete->Get(level, marked); while(marked[0]==false){ nodeToDelete->CompareAndSet(level, succ, succ, false, true); succ=nodeToDelete->Get(level, marked); } } succ=nodeToDelete->Get(bottomLevel, marked); while(true){ bool iMarkedIt=nodeToDelete->CompareAndSet(bottomLevel, succ, succ, false, true); succ=succs[bottomLevel]->Get(bottomLevel, marked); if(iMarkedIt==true){ find(priority, preds, succs); return true; } else if(marked[0]==true){ return false; } } } } } __device__ bool PrioritySkipList::Add(Node* newNode) { LL priority=newNode->priority; int topLevel=newNode->topLevel; int bottomLevel=0; Node* preds[MAX_LEVEL+1]; Node* succs[MAX_LEVEL+1]; while(true){ bool found=find(priority, preds, succs); if(found){ return false; } else{ int level; for(level=bottomLevel;level<=topLevel;level++){ Node* succ=succs[level]; newNode->SetRef(level, succ, false); } Node* pred=preds[bottomLevel]; Node* succ=succs[bottomLevel]; bool t; t=pred->CompareAndSet(bottomLevel, succ, newNode, false, false); if(!t){ continue; } for(level=bottomLevel+1;level<=topLevel;level++){ while(true){ pred=preds[level]; succ=succs[level]; if(pred->CompareAndSet(level, succ, newNode, false, false)){ break; } find(priority, preds, succs); } } return true; } } } // The main kernel __global__ void 
kernel(LL* items, LL* op, LL* result) { // The array items holds the sequence of keys // The array op holds the sequence of operations // The array result, at the end, will hold the outcome of the operations int tid,i; for(i=0;i<FACTOR;i++){ // FACTOR is the number of operations per thread tid=i*gridDim.x*blockDim.x+blockIdx.x*blockDim.x+threadIdx.x; if(tid>NUM_ITEMS) return; // Grab the operation and the associated key and execute if(op[tid]==ADD){ result[tid]=l->Add(items[tid]); } if(op[tid]==DELETE_MIN){ result[tid]=l->DeleteMin(); } } } // Generate the level of a newly created node LL Randomlevel() { LL v=1; double p=0.5; while(((rand()/(double)(RAND_MAX))<p) && (v<MAX_LEVEL)) v++; return v; } int main(int argc,char** argv) { if (argc != 2) { printf("Need one argument: percent add ops (e.g., 30 for 30%% add ops).\nAborting...\n"); exit(1); } if (atoi(argv[1]) > 100) { printf("Input more than 100%%.\nAborting...\n"); exit(1); } int adds=(NUM_ITEMS*atoi(argv[1]))/100; LL op[NUM_ITEMS]; // Sequence of operations LL items[NUM_ITEMS]; // Sequence of keys (relevant only if op is add) LL result[NUM_ITEMS]; // Sequence of outcomes LL levels[NUM_ITEMS]; // Pre-generated levels of newly created nodes (relevant only if op is add) // Populate sequence of operations and priorities int i; srand(0); for(i=0;i<adds;i++){ op[i]=ADD; items[i]=10+rand()%KEYS; } for(;i<NUM_ITEMS;i++){ op[i]=DELETE_MIN; } // Pre-generate levels of newly created nodes (relevant only if op[i] is add) srand(0); for(i=0;i<NUM_ITEMS;i++){ levels[i]=Randomlevel()-1; } // Allocate device memory LL* Citems; LL* Cop; LL* Cresult; LL* Clevels; #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS)); #else cudaMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS)); #else cudaMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS)); #else cudaMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&Clevels, sizeof(LL)*NUM_ITEMS)); #else cudaMalloc((void**)&Clevels, sizeof(LL)*NUM_ITEMS); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(Clevels, levels, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice)); #else cudaMemcpy(Clevels, levels, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(Citems, items, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice)); #else cudaMemcpy(Citems, items, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice)); #else cudaMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice); #endif Node* pointers[adds]; Node** Cpointers; // Allocate the pool of free nodes for(i=0;i<adds;i++){ #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&pointers[i], sizeof(Node))); #else cudaMalloc((void**)&pointers[i], sizeof(Node)); #endif } #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&Cpointers, sizeof(Node*)*adds)); #else cudaMalloc((void**)&Cpointers, sizeof(Node*)*adds); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(Cpointers, pointers, sizeof(Node*)*adds, cudaMemcpyHostToDevice)); #else cudaMemcpy(Cpointers, pointers, sizeof(Node*)*adds, cudaMemcpyHostToDevice); #endif // Allocate the skip list PrioritySkipList* Clist; PrioritySkipList* list=new PrioritySkipList(); #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&Clist, 
sizeof(PrioritySkipList))); #else cudaMalloc((void**)&Clist, sizeof(PrioritySkipList)); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(Clist, list, sizeof(PrioritySkipList), cudaMemcpyHostToDevice)); #else cudaMemcpy(Clist, list, sizeof(PrioritySkipList), cudaMemcpyHostToDevice); #endif // Allocate the priority queue SkipQueue* Cq; SkipQueue* q=new SkipQueue(); q->skipList=Clist; #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMalloc((void**)&Cq, sizeof(PrioritySkipList))); #else cudaMalloc((void**)&Cq, sizeof(PrioritySkipList)); #endif #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(Cq, q, sizeof(PrioritySkipList), cudaMemcpyHostToDevice)); #else cudaMemcpy(Cq, q, sizeof(PrioritySkipList), cudaMemcpyHostToDevice); #endif // Calculate the number of thread blocks // NUM_ITEMS = total number of operations to execute // NUM_THREADS = number of threads per block // FACTOR = number of operations per thread int blocks=(NUM_ITEMS%(NUM_THREADS*FACTOR)==0)?NUM_ITEMS/(NUM_THREADS*FACTOR):(NUM_ITEMS/(NUM_THREADS*FACTOR))+1; // Error checking code cudaError_t error= cudaGetLastError(); if(cudaSuccess!=error){ printf("error0:CUDA ERROR (%d) {%s}\n",error,cudaGetErrorString(error)); exit(-1); } // Initialize the device memory init<<<1,32>>>(Cq,Cpointers,Clevels); cudaThreadSynchronize(); // Launch main kernel cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); kernel<<<blocks,NUM_THREADS>>>(Citems, Cop, Cresult); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float time; cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); // Print kernel execution time in milliseconds printf("%lf\n",time); // Check for errors error= cudaGetLastError(); if(cudaSuccess!=error){ printf("error1:CUDA ERROR (%d) {%s}\n",error,cudaGetErrorString(error)); exit(-1); } // Move results back to host memory #ifdef _CUTIL_H_ CUDA_SAFE_CALL(cudaMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, cudaMemcpyDeviceToHost)); #else cudaMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, cudaMemcpyDeviceToHost); #endif return 0; }
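The skip-list nodes in both versions above pack a node reference and a logical-deletion mark into one LL word: the pointer occupies the upper bits and bit 0 carries the mark, so a single atomicCAS in Node::CompareAndSet updates both atomically. A minimal host-side sketch of just the packing scheme, assuming a 64-bit build (where LL is 64-bit) and that bit 0 of a node address is always free thanks to the 16-byte alignment of Node; the struct and helper names are illustrative.

// Pointer + mark-bit packing as used by Node::CreateRef / Node::Get above.
#include <cstdint>
#include <cstdio>

struct FakeNode { int key; };

static uint64_t pack(FakeNode* p, bool mark) {
  return reinterpret_cast<uint64_t>(p) | static_cast<uint64_t>(mark);
}
static FakeNode* ref_of(uint64_t v)  { return reinterpret_cast<FakeNode*>((v >> 1) << 1); }
static bool      mark_of(uint64_t v) { return v & 1u; }

int main() {
  alignas(16) static FakeNode n{42};
  uint64_t v = pack(&n, true); // reference to n, logically deleted
  std::printf("key=%d marked=%d\n", ref_of(v)->key, (int)mark_of(v));
  // On the device, atomicCAS on the packed word (Node::CompareAndSet) swings the
  // reference and the mark together, which is what keeps the structure lock-free.
  return 0;
}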
235ab0d0bb540ee659743b33cfa0bb9ace6a12e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // kernels.cu // Burgers3d-GPU // // Created by Manuel Diaz on 7/26/16. // Copyright 2016 Manuel Diaz. All rights reserved. // extern "C" { #include "Burgers.h" } /*******************************/ /* Define Textures & Constanst */ /*******************************/ __constant__ REAL d_kx; __constant__ REAL d_ky; __constant__ REAL d_kz; texture<int2, 2, hipReadModeElementType> tex_u; #define checkCuda(error) __checkCuda(error, __FILE__, __LINE__) /*********************************************/ /* A method for checking error in CUDA calls */ /*********************************************/ inline void __checkCuda(hipError_t error, const char *file, const int line) { #if defined(DISPL) if (error != hipSuccess) { printf("checkCuda error at %s:%i: %s\n", file, line, hipGetErrorString(hipGetLastError())); exit(-1); } #endif return; } /*****************/ /* FLUX FUNCTION */ /*****************/ __device__ REAL Flux( const REAL u){ return C1o2*u*u; } /***********************/ /* WENO RECONSTRUCTION */ /***********************/ // ************************************************************************* // Input: v(i) = [v(i-2) v(i-1) v(i) v(i+1) v(i+2) v(i+3)]; // Output: res = df/dx; // // Based on: // C.W. Shu's Lectures notes on: 'ENO and WENO schemes for Hyperbolic // Conservation Laws' // // coded by Manuel Diaz, 02.10.2012, NTU Taiwan. // ************************************************************************* // // Domain cells (I{i}) reference: // // | | u(i) | | // | u(i-1) |___________| | // |___________| | u(i+1) | // | | |___________| // ...|-----0-----|-----0-----|-----0-----|... // | i-1 | i | i+1 | // |- +|- +|- +| // i-3/2 i-1/2 i+1/2 i+3/2 // // ENO stencils (S{r}) reference: // // |___________S2__________| // | | // |___________S1__________| | // | | | using only f^{+} // |___________S0__________| | | // ..|---o---|---o---|---o---|---o---|---o---|... // | I{i-2}| I{i-1}| I{i} | I{i+1}| I{i+2}| // -| // i+1/2 // // |___________S0__________| // | | // | |___________S1__________| using only f^{-} // | | | // | | |___________S2__________| // ..|---o---|---o---|---o---|---o---|---o---|... 
// | I{i-1}| I{i} | I{i+1}| I{i+2}| I{i+3}| // |+ // i+1/2 // // WENO stencil: S{i} = [ I{i-2},...,I{i+3} ] // ************************************************************************* __device__ REAL WENO5reconstruction(const REAL * __restrict__ u) { REAL B0, B1, B2, a0, a1, a2, alphasum, dflux; REAL umm,um,uo,up,upp; // Split data for f_{i}^{+} umm=C1o2*(Flux(u[0]) + fabs(u[0])*u[0]); um =C1o2*(Flux(u[1]) + fabs(u[1])*u[1]); uo =C1o2*(Flux(u[2]) + fabs(u[2])*u[2]); up =C1o2*(Flux(u[3]) + fabs(u[3])*u[3]); upp=C1o2*(Flux(u[4]) + fabs(u[4])*u[4]); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights a0 = D0N/((EPS + B0)*(EPS + B0)); a1 = D1N/((EPS + B1)*(EPS + B1)); a2 = D2N/((EPS + B2)*(EPS + B2)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $v_{i+1/2}^{-}$; dflux =(a0*(2*umm- 7*um + 11*uo) + a1*( -um + 5*uo + 2*up) + a2*( 2*uo+ 5*up - upp ))*(C1o6*alphasum); // split data for f_{i}^{-} umm=C1o2*(Flux(u[1]) - fabs(u[1])*u[1]); um =C1o2*(Flux(u[2]) - fabs(u[2])*u[2]); uo =C1o2*(Flux(u[3]) - fabs(u[3])*u[3]); up =C1o2*(Flux(u[4]) - fabs(u[4])*u[4]); upp=C1o2*(Flux(u[5]) - fabs(u[5])*u[5]); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights a0 = D0P/((EPS + B0)*(EPS + B0)); a1 = D1P/((EPS + B1)*(EPS + B1)); a2 = D2P/((EPS + B2)*(EPS + B2)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $v_{i+1/2}^{+}$; dflux+=(a0*( -umm + 5*um + 2*uo ) + a1*( 2*um + 5*uo - up ) + a2*(11*uo - 7*up + 2*upp))*(C1o6*alphasum); // Compute the numerical flux v_{i+1/2} return dflux; } __device__ REAL WENO5Zreconstruction(const REAL * __restrict__ u) { REAL B0, B1, B2, a0, a1, a2, tau5, alphasum, dflux; REAL umm,um,uo,up,upp; // Split data for f_{i}^{+} umm=C1o2*(Flux(u[0]) + fabs(u[0])*u[0]); um =C1o2*(Flux(u[1]) + fabs(u[1])*u[1]); uo =C1o2*(Flux(u[2]) + fabs(u[2])*u[2]); up =C1o2*(Flux(u[3]) + fabs(u[3])*u[3]); upp=C1o2*(Flux(u[4]) + fabs(u[4])*u[4]); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0N*(1.+tau5/(B0+EPS)); a1 = D1N*(1.+tau5/(B1+EPS)); a2 = D2N*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $v_{i+1/2}^{-}$; dflux =(a0*(2*umm- 7*um + 11*uo) + a1*( -um + 5*uo + 2*up) + a2*( 2*uo+ 5*up - upp ))*(C1o6*alphasum); // split data for f_{i}^{-} umm=C1o2*(Flux(u[1]) - fabs(u[1])*u[1]); um =C1o2*(Flux(u[2]) - fabs(u[2])*u[2]); uo =C1o2*(Flux(u[3]) - fabs(u[3])*u[3]); up =C1o2*(Flux(u[4]) - fabs(u[4])*u[4]); upp=C1o2*(Flux(u[5]) - fabs(u[5])*u[5]); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0P*(1.+tau5/(B0+EPS)); a1 = D1P*(1.+tau5/(B1+EPS)); a2 = 
D2P*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $v_{i+1/2}^{+}$; dflux+=(a0*( -umm + 5*um + 2*uo ) + a1*( 2*um + 5*uo - up ) + a2*(11*uo - 7*up + 2*upp))*(C1o6*alphasum); // Compute the numerical flux v_{i+1/2} return dflux; } /*****************/ /* Compute du/dx */ // <==== parallel strategy: compute serialy by rows or by columns! /*****************/ __global__ void Compute_dF( REAL * __restrict__ Lu, const unsigned int pitch, const unsigned int nx, const unsigned int ny, const REAL dx, const int k) { // Shared variables __shared__ REAL sfu[WIDTH][TILE+1]; // faces = inner nodes + 1 // Temporary variables REAL u0,u1,u2,u3,u4,u5; REAL B0,B1,B2,a0,a1,a2; REAL umm,um,uo,up,upp,tau5,alphasum; // Indexes unsigned int i,j,si,sj,o; int2 Data; // Global threads indexes i = TILE * blockIdx.x + threadIdx.x; j = blockDim.y * blockIdx.y + threadIdx.y; // shared memory indexes si = threadIdx.x; sj = threadIdx.y; // Global index o = i+pitch*j+pitch*ny*k; if (i < nx){ // Load data from texture memory Data = tex2D(tex_u,i-3,j); u0 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i-2,j); u1 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i-1,j); u2 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u, i ,j); u3 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i+1,j); u4 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i+2,j); u5 = __hiloint2double(Data.y, Data.x); // Reconstruct Face fluxes // Split data for f_{i}^{+} umm=C1o2*(Flux(u0) + fabs(u0)*u0); um =C1o2*(Flux(u1) + fabs(u1)*u1); uo =C1o2*(Flux(u2) + fabs(u2)*u2); up =C1o2*(Flux(u3) + fabs(u3)*u3); upp=C1o2*(Flux(u4) + fabs(u4)*u4); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0N*(1.+tau5/(B0+EPS)); a1 = D1N*(1.+tau5/(B1+EPS)); a2 = D2N*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $u_{i+1/2}^{-}$; sfu[sj][si] = ( a0*(2*umm- 7*um + 11*uo) + a1*( -um + 5*uo + 2*up ) + a2*( 2*uo+ 5*up - upp ))*(C1o6*alphasum); // split data for f_{i}^{-} umm=C1o2*(Flux(u1) - fabs(u1)*u1); um =C1o2*(Flux(u2) - fabs(u2)*u2); uo =C1o2*(Flux(u3) - fabs(u3)*u3); up =C1o2*(Flux(u4) - fabs(u4)*u4); upp=C1o2*(Flux(u5) - fabs(u5)*u5); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0P*(1.+tau5/(B0+EPS)); a1 = D1P*(1.+tau5/(B1+EPS)); a2 = D2P*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $u_{i+1/2}^{+}$; sfu[sj][si] += ( a0*( -umm + 5*um + 2*uo ) + a1*( 2*um + 5*uo - up ) + a2*(11*uo - 7*up + 2*upp))*(C1o6*alphasum); __syncthreads(); // Compute Lq = (f_{i+1/2}-f_{i-1/2})/dx if ( si<TILE ){ Lu[o] = -(sfu[sj][si+1] - sfu[sj][si])/dx; } } } /*****************/ /* Compute du/dy */ // <==== parallel strategy: compute serialy by rows or by columns! 
/*****************/ __global__ void Compute_dG( REAL * __restrict__ Lu, const unsigned int pitch, const unsigned int nx, const unsigned int ny, const REAL dy, const int k) { // Shared variables __shared__ REAL sfu[TILE+1][WIDTH]; // faces = inner nodes + 1 // Temporary variables REAL u0,u1,u2,u3,u4,u5; REAL B0,B1,B2,a0,a1,a2; REAL umm,um,uo,up,upp,tau5,alphasum; // Indexes unsigned int i,j,si,sj,o; int2 Data; // local threads indexes i = blockDim.x * blockIdx.x + threadIdx.x; j = TILE * blockIdx.y + threadIdx.y; // shared memory indexes si = threadIdx.x; sj = threadIdx.y; // Global index o = i+pitch*j+pitch*ny*k; if (j < ny){ // Load data from texture memory Data = tex2D(tex_u,i,j-3); u0 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i,j-2); u1 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i,j-1); u2 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i, j ); u3 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i,j+1); u4 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i,j+2); u5 = __hiloint2double(Data.y, Data.x); // Reconstruct Face fluxes // Split data for f_{i}^{+} umm=C1o2*(Flux(u0) + fabs(u0)*u0); um =C1o2*(Flux(u1) + fabs(u1)*u1); uo =C1o2*(Flux(u2) + fabs(u2)*u2); up =C1o2*(Flux(u3) + fabs(u3)*u3); upp=C1o2*(Flux(u4) + fabs(u4)*u4); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0N*(1.+tau5/(B0+EPS)); a1 = D1N*(1.+tau5/(B1+EPS)); a2 = D2N*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $u_{i+1/2}^{-}$; sfu[sj][si] = ( a0*(2*umm- 7*um + 11*uo) + a1*( -um + 5*uo + 2*up ) + a2*( 2*uo+ 5*up - upp ))*(C1o6*alphasum); // split data for f_{i}^{-} umm=C1o2*(Flux(u1) - fabs(u1)*u1); um =C1o2*(Flux(u2) - fabs(u2)*u2); uo =C1o2*(Flux(u3) - fabs(u3)*u3); up =C1o2*(Flux(u4) - fabs(u4)*u4); upp=C1o2*(Flux(u5) - fabs(u5)*u5); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0P*(1.+tau5/(B0+EPS)); a1 = D1P*(1.+tau5/(B1+EPS)); a2 = D2P*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $u_{i+1/2}^{+}$; sfu[sj][si] += ( a0*( -umm + 5*um + 2*uo ) + a1*( 2*um + 5*uo - up ) + a2*(11*uo - 7*up + 2*upp))*(C1o6*alphasum); __syncthreads(); // Compute Lq = (f_{i+1/2}-f_{i-1/2})/dx if ( sj<TILE ){ Lu[o] -= (sfu[sj+1][si] - sfu[sj][si])/dy; } } } /*****************/ /* Compute du/dz */ // <==== parallel strategy: compute serialy by rows or by columns! 
/*****************/ __global__ void Compute_dH( const REAL * __restrict__ w, REAL * __restrict__ Lw, const unsigned int pitch, const unsigned int nx, const unsigned int ny, const unsigned int nz, const REAL dz) { // Shared variables __shared__ REAL s_w[WIDTH][TILE+6]; // 3-wide halo __shared__ REAL sfw[WIDTH][TILE+1]; // faces = inner nodes + 1 // Indexes unsigned int i,k,I,J,K,si,sk,o; // Global threads indexes I = blockDim.x * blockIdx.x + threadIdx.x; K = TILE * blockIdx.y + threadIdx.y; J = blockIdx.z; // Local thead indexes i = threadIdx.x; k = threadIdx.y; // Local share memory indexes si = threadIdx.x; // local i for shared memory access sk = threadIdx.y+3; // local j for shared memory access + halo offset // Global index o = I+pitch*J+pitch*ny*K; if (K < nz){ // Load data into shared memory s_w[si][sk]=w[o]; // Load boundary values if ( (k<3) && (K<3) ){ s_w[si][sk-3]=0.; // set Dirichlet BCs } else if (k < 3){ s_w[si][sk-3]=w[o-3*pitch*ny]; // get data from neighbour } // Load boundary values if ( (k>TILE-2) && (K>nz-2) ){ s_w[si][sk+2]=0.; // set Dirichlet BCs } else if (k > TILE-2){ s_w[si][sk+2]=w[o+2*pitch*ny]; // get data from neighbour } __syncthreads(); // Compute face fluxes sfw[i][k]=WENO5Zreconstruction(&s_w[si][sk-3]); // fp_{i+1/2} __syncthreads(); // Compute Lq = (f_{i+1/2}-f_{i-1/2})/dz if ( k<TILE ){ Lw[o] -= (sfw[i][k+1] - sfw[i][k])/dz; } } } /********************/ /* Laplace Operator */ /********************/ __global__ void Compute_Laplace( const REAL * __restrict__ u, REAL * __restrict__ Lu, const unsigned int px, // pitch in the x-direction const unsigned int nx, const unsigned int ny, const unsigned int nz) { REAL above2; REAL above; REAL center; REAL below; REAL below2; unsigned int i, j, k, o, xy, px2, xy2; xy = px*ny; px2 = 2*px; xy2 = 2*xy; i = threadIdx.x + blockIdx.x * blockDim.x; j = threadIdx.y + blockIdx.y * blockDim.y; // For initial slice k=3; o=i+px*j+xy*k; if (i>2 && i<nx-3 && j>2 && j<ny-3) { below2=u[o-xy2]; below=u[o-xy]; center=u[o]; above=u[o+xy]; above2=u[o+xy2]; Lu[o]+= d_kx * (- u[o-2] +16*u[o-1] - 30*center + 16*u[o+1] - u[o+2] ) + d_ky * (-u[o-px2]+16*u[o-px]- 30*center + 16*u[o+px]- u[o+px2])+ d_kz * (- below2 +16* below - 30*center + 16* above - above2 ); // For the rest of the slide for(k = 4; k < nz-3; k++) { o=o+xy; below2=below; below=center; center=above; above=above2; above2=u[o+xy2]; Lu[o]+= d_kx * (- u[o-2] +16*u[o-1] - 30*center + 16*u[o+1] - u[o+2] ) + d_ky * (-u[o-px2]+16*u[o-px]- 30*center + 16*u[o+px]- u[o+px2])+ d_kz * (- below2 +16* below - 30*center + 16* above - above2 ); } } // else : do nothing! 
} /**************************/ /* Async Laplace Operator */ /**************************/ __global__ void Compute_Laplace_Async( const REAL * __restrict__ u, REAL * __restrict__ Lu, unsigned int px, // pitch in the x-direction unsigned int Nx, unsigned int Ny, unsigned int _Nz, unsigned int kstart, unsigned int kstop, unsigned int loop_z) { register REAL above2; register REAL above; register REAL center; register REAL below; register REAL below2; unsigned int i, j, k, o, z, XY, px2, XY2; i = blockIdx.x * blockDim.x + threadIdx.x; j = blockIdx.y * blockDim.y + threadIdx.y; k = blockIdx.z * loop_z; k = MAX(kstart,k); XY=px*Ny; px2=px+px; XY2=XY+XY; o=i+px*j+XY*k; if (i>2 && i<Nx-3 && j>2 && j<Ny-3) { below2=u[o-XY2]; below=u[o-XY]; center=u[o]; above=u[o+XY]; above2=u[o+XY2]; Lu[o]+= d_kx*(- u[o-2] +16* u[o-1]-30*center+16*u[o+1] - u[o+2] ) + d_ky*(-u[o-px2]+16*u[o-px]-30*center+16*u[o+px]-u[o+px2]) + d_kz*(- below2 +16* below -30*center+16* above - above2 ); for(z = 1; z < loop_z; z++) { k += 1; if (k < MIN(kstop,_Nz+1)) { o=o+XY; below2=below; below=center; center=above; above=above2; above2=u[o+XY2]; Lu[o]+= d_kx*(- u[o-2] +16*u[o-1] -30*center+16*u[o+1] - u[o+2] ) + d_ky*(-u[o-px2]+16*u[o-px]-30*center+16*u[o+px]-u[o+px2]) + d_kz*(- below2 +16* below -30*center+16* above - above2 ); } } } // else : do nothing! } /***********************/ /* Runge Kutta Methods */ // <==== this is perfectly parallel! /***********************/ __global__ void Compute_RK( REAL * __restrict__ u, const REAL * __restrict__ uo, const REAL * __restrict__ Lu, const unsigned int step, const unsigned int pitch, const unsigned int nx, const unsigned int ny, const unsigned int nz, const REAL dt) { // local threads indexes int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; // compute single index unsigned int o=i+pitch*j+pitch*ny*k; // Compute Runge-Kutta step, update only internal cells if (i>2 && i<nx-3 && j>2 && j<ny-3 && k>2 && k<nz-3) { switch (step) { case 1: // step 1 u[o] = uo[o]+dt*(Lu[o]); break; case 2: // step 2 u[o] = 0.75*uo[o]+C1o4*(u[o]+dt*(Lu[o])); break; case 3: // step 3 u[o] = (uo[o]+2*(u[o]+dt*(Lu[o])))/3; break; } } // else : do nothing! 
} /*********************/ /* Function Wrappers */ /*********************/ extern "C" void CopyToConstantMemory(const REAL kx, const REAL ky, const REAL kz) { checkCuda(hipMemcpyToSymbol(d_kx, &kx, sizeof(REAL), 0, hipMemcpyHostToDevice)); checkCuda(hipMemcpyToSymbol(d_ky, &ky, sizeof(REAL), 0, hipMemcpyHostToDevice)); checkCuda(hipMemcpyToSymbol(d_kz, &kz, sizeof(REAL), 0, hipMemcpyHostToDevice)); } extern "C" void InitializeTextures() { // Texture indexing depends on the address Mode // index -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10 11 // Clamp 0 0 0 0 0 0 0 0 1 2 3 3 3 3 3 3 3 3 3 // Border 0 0 0 0 0 0 0 0 1 2 3 0 0 0 0 0 0 0 0 // Wrap 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 // Mirror 1 2 3 3 2 1 0 0 1 2 3 3 2 1 0 0 1 2 3 // Texture basic Setup tex_u.addressMode[0] = hipAddressModeClamp; tex_u.addressMode[1] = hipAddressModeClamp; tex_u.filterMode = hipFilterModePoint; tex_u.normalized = false; } extern "C" void Call_Adv_L(dim3 numBlocks_x, dim3 threadsPerBlock_x, dim3 numBlocks_y, dim3 threadsPerBlock_y, hipStream_t aStream, size_t pitch_bytes, unsigned int pitch, hipChannelFormatDesc *desc, unsigned int nx, unsigned int ny, unsigned int nz, REAL dx, REAL dy, REAL *u, REAL *Lu) { for(int k=3; k<=nz-3; k++){ // Bind texture and Run checkCuda(hipBindTexture2D(0,&tex_u,&u[k*ny*pitch],desc,nx,ny,pitch_bytes)); hipLaunchKernelGGL(( Compute_dF), dim3(numBlocks_x),dim3(threadsPerBlock_x),0,aStream, Lu,pitch,nx,ny,dx,k); hipLaunchKernelGGL(( Compute_dG), dim3(numBlocks_y),dim3(threadsPerBlock_y),0,aStream, Lu,pitch,nx,ny,dy,k); } } // extern "C" void Call_Adv_x(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t aStream, // size_t pitch_bytes, unsigned int pitch, hipChannelFormatDesc *desc, unsigned int nx, unsigned int ny, unsigned int nz, REAL dx, REAL *u, REAL *Lu) // { // for(int k=3; k<=nz-3; k++){ // // Bind texture and Run // checkCuda(hipBindTexture2D(0,&tex_u,&u[k*ny*pitch],desc,nx,ny,pitch_bytes)); // Compute_dF<<<numBlocks,threadsPerBlock,0,aStream>>>(Lu,pitch,nx,ny,dx,k); // } // } // extern "C" void Call_Adv_y(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t aStream, // size_t pitch_bytes, unsigned int pitch, hipChannelFormatDesc *desc, unsigned int nx, unsigned int ny, unsigned int nz, REAL dy, REAL *u, REAL *Lu) // { // for(int k=3; k<=nz-3; k++){ // // Bind Texture and Run // checkCuda(hipBindTexture2D(0,&tex_u,&u[k*ny*pitch],desc,nx,ny,pitch_bytes)); // Compute_dG<<<numBlocks,threadsPerBlock,0,aStream>>>(Lu,pitch,nx,ny,dy,k); // } // } extern "C" void Call_Adv_z(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t aStream, unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, REAL dz, REAL *u, REAL *Lu) { hipLaunchKernelGGL(( Compute_dH), dim3(numBlocks),dim3(threadsPerBlock),0,aStream, u,Lu,pitch,nx,ny,nz,dz); } extern "C" void Call_Diff_(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t aStream, unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, REAL *u, REAL *Lu) { // Compute_Laplace<<<numBlocks,threadsPerBlock,0,aStream>>>(u,Lu,pitch,nx,ny,nz); hipLaunchKernelGGL(( Compute_Laplace_Async), dim3(numBlocks),dim3(threadsPerBlock),0,aStream, u,Lu,pitch,nx,ny,nz,3,nz-2,LOOP); } extern "C" void Call_sspRK(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t aStream, unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, unsigned int step, REAL dt, REAL *u, REAL *uo, REAL *Lu) { hipLaunchKernelGGL(( Compute_RK), dim3(numBlocks),dim3(threadsPerBlock),0,aStream, u,uo,Lu,step,pitch,nx,ny,nz,dt); }
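Compute_dF and Compute_dG above fetch double-precision data through a 2D texture of int2 and rebuild each value with __hiloint2double, because doubles cannot be fetched from textures directly. Below is that idiom in isolation, as a minimal CUDA sketch; it uses the same legacy texture-reference API as the file above (newer CUDA toolkits have removed it), and the kernel name and array sizes are made up.

// Double-through-int2 texture fetch, as used by Compute_dF / Compute_dG above.
#include <cuda_runtime.h>
#include <cstdio>

texture<int2, 2, cudaReadModeElementType> tex_d;

__global__ void fetch_one(double* out, int x, int y) {
  int2 v = tex2D(tex_d, x, y);        // the two 32-bit halves of the double
  *out = __hiloint2double(v.y, v.x);  // reassemble: high word in .y, low word in .x
}

int main() {
  const int W = 4, H = 2;
  double h_u[H][W];
  for (int j = 0; j < H; ++j)
    for (int i = 0; i < W; ++i) h_u[j][i] = 10.0 * j + i;

  double* d_u = nullptr; size_t pitch = 0;
  cudaMallocPitch(&d_u, &pitch, W * sizeof(double), H);
  cudaMemcpy2D(d_u, pitch, h_u, W * sizeof(double),
               W * sizeof(double), H, cudaMemcpyHostToDevice);

  cudaChannelFormatDesc desc = cudaCreateChannelDesc<int2>();
  cudaBindTexture2D(0, tex_d, d_u, desc, W, H, pitch);

  double* d_out = nullptr; double h_out = 0.0;
  cudaMalloc(&d_out, sizeof(double));
  fetch_one<<<1, 1>>>(d_out, 2, 1);   // element (row 1, col 2): expect 12.0
  cudaMemcpy(&h_out, d_out, sizeof(double), cudaMemcpyDeviceToHost);
  std::printf("fetched %f\n", h_out);

  cudaUnbindTexture(tex_d);
  cudaFree(d_out); cudaFree(d_u);
  return 0;
}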
235ab0d0bb540ee659743b33cfa0bb9ace6a12e8.cu
// // kernels.cu // Burgers3d-GPU // // Created by Manuel Diaz on 7/26/16. // Copyright © 2016 Manuel Diaz. All rights reserved. // extern "C" { #include "Burgers.h" } /*******************************/ /* Define Textures & Constanst */ /*******************************/ __constant__ REAL d_kx; __constant__ REAL d_ky; __constant__ REAL d_kz; texture<int2, 2, cudaReadModeElementType> tex_u; #define checkCuda(error) __checkCuda(error, __FILE__, __LINE__) /*********************************************/ /* A method for checking error in CUDA calls */ /*********************************************/ inline void __checkCuda(cudaError_t error, const char *file, const int line) { #if defined(DISPL) if (error != cudaSuccess) { printf("checkCuda error at %s:%i: %s\n", file, line, cudaGetErrorString(cudaGetLastError())); exit(-1); } #endif return; } /*****************/ /* FLUX FUNCTION */ /*****************/ __device__ REAL Flux( const REAL u){ return C1o2*u*u; } /***********************/ /* WENO RECONSTRUCTION */ /***********************/ // ************************************************************************* // Input: v(i) = [v(i-2) v(i-1) v(i) v(i+1) v(i+2) v(i+3)]; // Output: res = df/dx; // // Based on: // C.W. Shu's Lectures notes on: 'ENO and WENO schemes for Hyperbolic // Conservation Laws' // // coded by Manuel Diaz, 02.10.2012, NTU Taiwan. // ************************************************************************* // // Domain cells (I{i}) reference: // // | | u(i) | | // | u(i-1) |___________| | // |___________| | u(i+1) | // | | |___________| // ...|-----0-----|-----0-----|-----0-----|... // | i-1 | i | i+1 | // |- +|- +|- +| // i-3/2 i-1/2 i+1/2 i+3/2 // // ENO stencils (S{r}) reference: // // |___________S2__________| // | | // |___________S1__________| | // | | | using only f^{+} // |___________S0__________| | | // ..|---o---|---o---|---o---|---o---|---o---|... // | I{i-2}| I{i-1}| I{i} | I{i+1}| I{i+2}| // -| // i+1/2 // // |___________S0__________| // | | // | |___________S1__________| using only f^{-} // | | | // | | |___________S2__________| // ..|---o---|---o---|---o---|---o---|---o---|... 
// | I{i-1}| I{i} | I{i+1}| I{i+2}| I{i+3}| // |+ // i+1/2 // // WENO stencil: S{i} = [ I{i-2},...,I{i+3} ] // ************************************************************************* __device__ REAL WENO5reconstruction(const REAL * __restrict__ u) { REAL B0, B1, B2, a0, a1, a2, alphasum, dflux; REAL umm,um,uo,up,upp; // Split data for f_{i}^{+} umm=C1o2*(Flux(u[0]) + fabs(u[0])*u[0]); um =C1o2*(Flux(u[1]) + fabs(u[1])*u[1]); uo =C1o2*(Flux(u[2]) + fabs(u[2])*u[2]); up =C1o2*(Flux(u[3]) + fabs(u[3])*u[3]); upp=C1o2*(Flux(u[4]) + fabs(u[4])*u[4]); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights a0 = D0N/((EPS + B0)*(EPS + B0)); a1 = D1N/((EPS + B1)*(EPS + B1)); a2 = D2N/((EPS + B2)*(EPS + B2)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $v_{i+1/2}^{-}$; dflux =(a0*(2*umm- 7*um + 11*uo) + a1*( -um + 5*uo + 2*up) + a2*( 2*uo+ 5*up - upp ))*(C1o6*alphasum); // split data for f_{i}^{-} umm=C1o2*(Flux(u[1]) - fabs(u[1])*u[1]); um =C1o2*(Flux(u[2]) - fabs(u[2])*u[2]); uo =C1o2*(Flux(u[3]) - fabs(u[3])*u[3]); up =C1o2*(Flux(u[4]) - fabs(u[4])*u[4]); upp=C1o2*(Flux(u[5]) - fabs(u[5])*u[5]); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights a0 = D0P/((EPS + B0)*(EPS + B0)); a1 = D1P/((EPS + B1)*(EPS + B1)); a2 = D2P/((EPS + B2)*(EPS + B2)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $v_{i+1/2}^{+}$; dflux+=(a0*( -umm + 5*um + 2*uo ) + a1*( 2*um + 5*uo - up ) + a2*(11*uo - 7*up + 2*upp))*(C1o6*alphasum); // Compute the numerical flux v_{i+1/2} return dflux; } __device__ REAL WENO5Zreconstruction(const REAL * __restrict__ u) { REAL B0, B1, B2, a0, a1, a2, tau5, alphasum, dflux; REAL umm,um,uo,up,upp; // Split data for f_{i}^{+} umm=C1o2*(Flux(u[0]) + fabs(u[0])*u[0]); um =C1o2*(Flux(u[1]) + fabs(u[1])*u[1]); uo =C1o2*(Flux(u[2]) + fabs(u[2])*u[2]); up =C1o2*(Flux(u[3]) + fabs(u[3])*u[3]); upp=C1o2*(Flux(u[4]) + fabs(u[4])*u[4]); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0N*(1.+tau5/(B0+EPS)); a1 = D1N*(1.+tau5/(B1+EPS)); a2 = D2N*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $v_{i+1/2}^{-}$; dflux =(a0*(2*umm- 7*um + 11*uo) + a1*( -um + 5*uo + 2*up) + a2*( 2*uo+ 5*up - upp ))*(C1o6*alphasum); // split data for f_{i}^{-} umm=C1o2*(Flux(u[1]) - fabs(u[1])*u[1]); um =C1o2*(Flux(u[2]) - fabs(u[2])*u[2]); uo =C1o2*(Flux(u[3]) - fabs(u[3])*u[3]); up =C1o2*(Flux(u[4]) - fabs(u[4])*u[4]); upp=C1o2*(Flux(u[5]) - fabs(u[5])*u[5]); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0P*(1.+tau5/(B0+EPS)); a1 = D1P*(1.+tau5/(B1+EPS)); a2 = 
D2P*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $v_{i+1/2}^{+}$; dflux+=(a0*( -umm + 5*um + 2*uo ) + a1*( 2*um + 5*uo - up ) + a2*(11*uo - 7*up + 2*upp))*(C1o6*alphasum); // Compute the numerical flux v_{i+1/2} return dflux; } /*****************/ /* Compute du/dx */ // <==== parallel strategy: compute serialy by rows or by columns! /*****************/ __global__ void Compute_dF( REAL * __restrict__ Lu, const unsigned int pitch, const unsigned int nx, const unsigned int ny, const REAL dx, const int k) { // Shared variables __shared__ REAL sfu[WIDTH][TILE+1]; // faces = inner nodes + 1 // Temporary variables REAL u0,u1,u2,u3,u4,u5; REAL B0,B1,B2,a0,a1,a2; REAL umm,um,uo,up,upp,tau5,alphasum; // Indexes unsigned int i,j,si,sj,o; int2 Data; // Global threads indexes i = TILE * blockIdx.x + threadIdx.x; j = blockDim.y * blockIdx.y + threadIdx.y; // shared memory indexes si = threadIdx.x; sj = threadIdx.y; // Global index o = i+pitch*j+pitch*ny*k; if (i < nx){ // Load data from texture memory Data = tex2D(tex_u,i-3,j); u0 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i-2,j); u1 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i-1,j); u2 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u, i ,j); u3 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i+1,j); u4 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i+2,j); u5 = __hiloint2double(Data.y, Data.x); // Reconstruct Face fluxes // Split data for f_{i}^{+} umm=C1o2*(Flux(u0) + fabs(u0)*u0); um =C1o2*(Flux(u1) + fabs(u1)*u1); uo =C1o2*(Flux(u2) + fabs(u2)*u2); up =C1o2*(Flux(u3) + fabs(u3)*u3); upp=C1o2*(Flux(u4) + fabs(u4)*u4); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0N*(1.+tau5/(B0+EPS)); a1 = D1N*(1.+tau5/(B1+EPS)); a2 = D2N*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $u_{i+1/2}^{-}$; sfu[sj][si] = ( a0*(2*umm- 7*um + 11*uo) + a1*( -um + 5*uo + 2*up ) + a2*( 2*uo+ 5*up - upp ))*(C1o6*alphasum); // split data for f_{i}^{-} umm=C1o2*(Flux(u1) - fabs(u1)*u1); um =C1o2*(Flux(u2) - fabs(u2)*u2); uo =C1o2*(Flux(u3) - fabs(u3)*u3); up =C1o2*(Flux(u4) - fabs(u4)*u4); upp=C1o2*(Flux(u5) - fabs(u5)*u5); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0P*(1.+tau5/(B0+EPS)); a1 = D1P*(1.+tau5/(B1+EPS)); a2 = D2P*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $u_{i+1/2}^{+}$; sfu[sj][si] += ( a0*( -umm + 5*um + 2*uo ) + a1*( 2*um + 5*uo - up ) + a2*(11*uo - 7*up + 2*upp))*(C1o6*alphasum); __syncthreads(); // Compute Lq = (f_{i+1/2}-f_{i-1/2})/dx if ( si<TILE ){ Lu[o] = -(sfu[sj][si+1] - sfu[sj][si])/dx; } } } /*****************/ /* Compute du/dy */ // <==== parallel strategy: compute serialy by rows or by columns! 
/*****************/ __global__ void Compute_dG( REAL * __restrict__ Lu, const unsigned int pitch, const unsigned int nx, const unsigned int ny, const REAL dy, const int k) { // Shared variables __shared__ REAL sfu[TILE+1][WIDTH]; // faces = inner nodes + 1 // Temporary variables REAL u0,u1,u2,u3,u4,u5; REAL B0,B1,B2,a0,a1,a2; REAL umm,um,uo,up,upp,tau5,alphasum; // Indexes unsigned int i,j,si,sj,o; int2 Data; // local threads indexes i = blockDim.x * blockIdx.x + threadIdx.x; j = TILE * blockIdx.y + threadIdx.y; // shared memory indexes si = threadIdx.x; sj = threadIdx.y; // Global index o = i+pitch*j+pitch*ny*k; if (j < ny){ // Load data from texture memory Data = tex2D(tex_u,i,j-3); u0 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i,j-2); u1 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i,j-1); u2 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i, j ); u3 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i,j+1); u4 = __hiloint2double(Data.y, Data.x); Data = tex2D(tex_u,i,j+2); u5 = __hiloint2double(Data.y, Data.x); // Reconstruct Face fluxes // Split data for f_{i}^{+} umm=C1o2*(Flux(u0) + fabs(u0)*u0); um =C1o2*(Flux(u1) + fabs(u1)*u1); uo =C1o2*(Flux(u2) + fabs(u2)*u2); up =C1o2*(Flux(u3) + fabs(u3)*u3); upp=C1o2*(Flux(u4) + fabs(u4)*u4); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0N*(1.+tau5/(B0+EPS)); a1 = D1N*(1.+tau5/(B1+EPS)); a2 = D2N*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $u_{i+1/2}^{-}$; sfu[sj][si] = ( a0*(2*umm- 7*um + 11*uo) + a1*( -um + 5*uo + 2*up ) + a2*( 2*uo+ 5*up - upp ))*(C1o6*alphasum); // split data for f_{i}^{-} umm=C1o2*(Flux(u1) - fabs(u1)*u1); um =C1o2*(Flux(u2) - fabs(u2)*u2); uo =C1o2*(Flux(u3) - fabs(u3)*u3); up =C1o2*(Flux(u4) - fabs(u4)*u4); upp=C1o2*(Flux(u5) - fabs(u5)*u5); // Smooth Indicators (Beta factors) B0 = C13o12*(umm-2*um+uo )*(umm-2*um +uo ) + C1o4*(umm-4*um+3*uo)*(umm-4*um+3*uo); B1 = C13o12*(um -2*uo+up )*(um -2*uo +up ) + C1o4*(um-up)*(um-up); B2 = C13o12*(uo -2*up+upp)*(uo -2*up +upp) + C1o4*(3*uo-4*up+upp)*(3*uo-4*up+upp); // Alpha weights tau5 = fabs(B0-B2); a0 = D0P*(1.+tau5/(B0+EPS)); a1 = D1P*(1.+tau5/(B1+EPS)); a2 = D2P*(1.+tau5/(B2+EPS)); alphasum = 1./(a0 + a1 + a2); // Numerical Flux at cell boundary, $u_{i+1/2}^{+}$; sfu[sj][si] += ( a0*( -umm + 5*um + 2*uo ) + a1*( 2*um + 5*uo - up ) + a2*(11*uo - 7*up + 2*upp))*(C1o6*alphasum); __syncthreads(); // Compute Lq = (f_{i+1/2}-f_{i-1/2})/dx if ( sj<TILE ){ Lu[o] -= (sfu[sj+1][si] - sfu[sj][si])/dy; } } } /*****************/ /* Compute du/dz */ // <==== parallel strategy: compute serialy by rows or by columns! 
/*****************/ __global__ void Compute_dH( const REAL * __restrict__ w, REAL * __restrict__ Lw, const unsigned int pitch, const unsigned int nx, const unsigned int ny, const unsigned int nz, const REAL dz) { // Shared variables __shared__ REAL s_w[WIDTH][TILE+6]; // 3-wide halo __shared__ REAL sfw[WIDTH][TILE+1]; // faces = inner nodes + 1 // Indexes unsigned int i,k,I,J,K,si,sk,o; // Global threads indexes I = blockDim.x * blockIdx.x + threadIdx.x; K = TILE * blockIdx.y + threadIdx.y; J = blockIdx.z; // Local thead indexes i = threadIdx.x; k = threadIdx.y; // Local share memory indexes si = threadIdx.x; // local i for shared memory access sk = threadIdx.y+3; // local j for shared memory access + halo offset // Global index o = I+pitch*J+pitch*ny*K; if (K < nz){ // Load data into shared memory s_w[si][sk]=w[o]; // Load boundary values if ( (k<3) && (K<3) ){ s_w[si][sk-3]=0.; // set Dirichlet BCs } else if (k < 3){ s_w[si][sk-3]=w[o-3*pitch*ny]; // get data from neighbour } // Load boundary values if ( (k>TILE-2) && (K>nz-2) ){ s_w[si][sk+2]=0.; // set Dirichlet BCs } else if (k > TILE-2){ s_w[si][sk+2]=w[o+2*pitch*ny]; // get data from neighbour } __syncthreads(); // Compute face fluxes sfw[i][k]=WENO5Zreconstruction(&s_w[si][sk-3]); // fp_{i+1/2} __syncthreads(); // Compute Lq = (f_{i+1/2}-f_{i-1/2})/dz if ( k<TILE ){ Lw[o] -= (sfw[i][k+1] - sfw[i][k])/dz; } } } /********************/ /* Laplace Operator */ /********************/ __global__ void Compute_Laplace( const REAL * __restrict__ u, REAL * __restrict__ Lu, const unsigned int px, // pitch in the x-direction const unsigned int nx, const unsigned int ny, const unsigned int nz) { REAL above2; REAL above; REAL center; REAL below; REAL below2; unsigned int i, j, k, o, xy, px2, xy2; xy = px*ny; px2 = 2*px; xy2 = 2*xy; i = threadIdx.x + blockIdx.x * blockDim.x; j = threadIdx.y + blockIdx.y * blockDim.y; // For initial slice k=3; o=i+px*j+xy*k; if (i>2 && i<nx-3 && j>2 && j<ny-3) { below2=u[o-xy2]; below=u[o-xy]; center=u[o]; above=u[o+xy]; above2=u[o+xy2]; Lu[o]+= d_kx * (- u[o-2] +16*u[o-1] - 30*center + 16*u[o+1] - u[o+2] ) + d_ky * (-u[o-px2]+16*u[o-px]- 30*center + 16*u[o+px]- u[o+px2])+ d_kz * (- below2 +16* below - 30*center + 16* above - above2 ); // For the rest of the slide for(k = 4; k < nz-3; k++) { o=o+xy; below2=below; below=center; center=above; above=above2; above2=u[o+xy2]; Lu[o]+= d_kx * (- u[o-2] +16*u[o-1] - 30*center + 16*u[o+1] - u[o+2] ) + d_ky * (-u[o-px2]+16*u[o-px]- 30*center + 16*u[o+px]- u[o+px2])+ d_kz * (- below2 +16* below - 30*center + 16* above - above2 ); } } // else : do nothing! 
} /**************************/ /* Async Laplace Operator */ /**************************/ __global__ void Compute_Laplace_Async( const REAL * __restrict__ u, REAL * __restrict__ Lu, unsigned int px, // pitch in the x-direction unsigned int Nx, unsigned int Ny, unsigned int _Nz, unsigned int kstart, unsigned int kstop, unsigned int loop_z) { register REAL above2; register REAL above; register REAL center; register REAL below; register REAL below2; unsigned int i, j, k, o, z, XY, px2, XY2; i = blockIdx.x * blockDim.x + threadIdx.x; j = blockIdx.y * blockDim.y + threadIdx.y; k = blockIdx.z * loop_z; k = MAX(kstart,k); XY=px*Ny; px2=px+px; XY2=XY+XY; o=i+px*j+XY*k; if (i>2 && i<Nx-3 && j>2 && j<Ny-3) { below2=u[o-XY2]; below=u[o-XY]; center=u[o]; above=u[o+XY]; above2=u[o+XY2]; Lu[o]+= d_kx*(- u[o-2] +16* u[o-1]-30*center+16*u[o+1] - u[o+2] ) + d_ky*(-u[o-px2]+16*u[o-px]-30*center+16*u[o+px]-u[o+px2]) + d_kz*(- below2 +16* below -30*center+16* above - above2 ); for(z = 1; z < loop_z; z++) { k += 1; if (k < MIN(kstop,_Nz+1)) { o=o+XY; below2=below; below=center; center=above; above=above2; above2=u[o+XY2]; Lu[o]+= d_kx*(- u[o-2] +16*u[o-1] -30*center+16*u[o+1] - u[o+2] ) + d_ky*(-u[o-px2]+16*u[o-px]-30*center+16*u[o+px]-u[o+px2]) + d_kz*(- below2 +16* below -30*center+16* above - above2 ); } } } // else : do nothing! } /***********************/ /* Runge Kutta Methods */ // <==== this is perfectly parallel! /***********************/ __global__ void Compute_RK( REAL * __restrict__ u, const REAL * __restrict__ uo, const REAL * __restrict__ Lu, const unsigned int step, const unsigned int pitch, const unsigned int nx, const unsigned int ny, const unsigned int nz, const REAL dt) { // local threads indexes int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; // compute single index unsigned int o=i+pitch*j+pitch*ny*k; // Compute Runge-Kutta step, update only internal cells if (i>2 && i<nx-3 && j>2 && j<ny-3 && k>2 && k<nz-3) { switch (step) { case 1: // step 1 u[o] = uo[o]+dt*(Lu[o]); break; case 2: // step 2 u[o] = 0.75*uo[o]+C1o4*(u[o]+dt*(Lu[o])); break; case 3: // step 3 u[o] = (uo[o]+2*(u[o]+dt*(Lu[o])))/3; break; } } // else : do nothing! 
} /*********************/ /* Function Wrappers */ /*********************/ extern "C" void CopyToConstantMemory(const REAL kx, const REAL ky, const REAL kz) { checkCuda(cudaMemcpyToSymbol(d_kx, &kx, sizeof(REAL), 0, cudaMemcpyHostToDevice)); checkCuda(cudaMemcpyToSymbol(d_ky, &ky, sizeof(REAL), 0, cudaMemcpyHostToDevice)); checkCuda(cudaMemcpyToSymbol(d_kz, &kz, sizeof(REAL), 0, cudaMemcpyHostToDevice)); } extern "C" void InitializeTextures() { // Texture indexing depends on the address Mode // index -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10 11 // Clamp 0 0 0 0 0 0 0 0 1 2 3 3 3 3 3 3 3 3 3 // Border 0 0 0 0 0 0 0 0 1 2 3 0 0 0 0 0 0 0 0 // Wrap 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 // Mirror 1 2 3 3 2 1 0 0 1 2 3 3 2 1 0 0 1 2 3 // Texture basic Setup tex_u.addressMode[0] = cudaAddressModeClamp; tex_u.addressMode[1] = cudaAddressModeClamp; tex_u.filterMode = cudaFilterModePoint; tex_u.normalized = false; } extern "C" void Call_Adv_L(dim3 numBlocks_x, dim3 threadsPerBlock_x, dim3 numBlocks_y, dim3 threadsPerBlock_y, cudaStream_t aStream, size_t pitch_bytes, unsigned int pitch, cudaChannelFormatDesc *desc, unsigned int nx, unsigned int ny, unsigned int nz, REAL dx, REAL dy, REAL *u, REAL *Lu) { for(int k=3; k<=nz-3; k++){ // Bind texture and Run checkCuda(cudaBindTexture2D(0,&tex_u,&u[k*ny*pitch],desc,nx,ny,pitch_bytes)); Compute_dF<<<numBlocks_x,threadsPerBlock_x,0,aStream>>>(Lu,pitch,nx,ny,dx,k); Compute_dG<<<numBlocks_y,threadsPerBlock_y,0,aStream>>>(Lu,pitch,nx,ny,dy,k); } } // extern "C" void Call_Adv_x(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream, // size_t pitch_bytes, unsigned int pitch, cudaChannelFormatDesc *desc, unsigned int nx, unsigned int ny, unsigned int nz, REAL dx, REAL *u, REAL *Lu) // { // for(int k=3; k<=nz-3; k++){ // // Bind texture and Run // checkCuda(cudaBindTexture2D(0,&tex_u,&u[k*ny*pitch],desc,nx,ny,pitch_bytes)); // Compute_dF<<<numBlocks,threadsPerBlock,0,aStream>>>(Lu,pitch,nx,ny,dx,k); // } // } // extern "C" void Call_Adv_y(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream, // size_t pitch_bytes, unsigned int pitch, cudaChannelFormatDesc *desc, unsigned int nx, unsigned int ny, unsigned int nz, REAL dy, REAL *u, REAL *Lu) // { // for(int k=3; k<=nz-3; k++){ // // Bind Texture and Run // checkCuda(cudaBindTexture2D(0,&tex_u,&u[k*ny*pitch],desc,nx,ny,pitch_bytes)); // Compute_dG<<<numBlocks,threadsPerBlock,0,aStream>>>(Lu,pitch,nx,ny,dy,k); // } // } extern "C" void Call_Adv_z(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream, unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, REAL dz, REAL *u, REAL *Lu) { Compute_dH<<<numBlocks,threadsPerBlock,0,aStream>>>(u,Lu,pitch,nx,ny,nz,dz); } extern "C" void Call_Diff_(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream, unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, REAL *u, REAL *Lu) { // Compute_Laplace<<<numBlocks,threadsPerBlock,0,aStream>>>(u,Lu,pitch,nx,ny,nz); Compute_Laplace_Async<<<numBlocks,threadsPerBlock,0,aStream>>>(u,Lu,pitch,nx,ny,nz,3,nz-2,LOOP); } extern "C" void Call_sspRK(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream, unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, unsigned int step, REAL dt, REAL *u, REAL *uo, REAL *Lu) { Compute_RK<<<numBlocks,threadsPerBlock,0,aStream>>>(u,uo,Lu,step,pitch,nx,ny,nz,dt); }
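For context on the Burgers3d pair above: the extern "C" wrappers are meant to be chained once per SSP-RK3 stage, with Call_Adv_L run first because Compute_dF assigns Lu while the z-advection and diffusion kernels accumulate into it. The host-side sketch below is only an illustration under stated assumptions: Burgers.h declares REAL and the wrappers, CopyToConstantMemory/InitializeTextures have already been called during setup, d_u, d_uo and d_Lu are pitched device buffers of pitch_bytes*ny*nz bytes, and every launch shape is a placeholder. None of this host code appears in the original sources.

#include "Burgers.h"  // assumed to declare REAL and the extern "C" wrappers above

// Sketch of one SSP-RK3 time step driving the wrappers above (illustrative only).
void AdvanceOneStep(dim3 gx, dim3 bx, dim3 gy, dim3 by,      // Compute_dF / Compute_dG shapes
                    dim3 gz, dim3 bz, dim3 g3, dim3 b3,      // Compute_dH / Laplace / RK shapes
                    cudaStream_t s, size_t pitch_bytes, unsigned int pitch,
                    cudaChannelFormatDesc *desc,
                    unsigned int nx, unsigned int ny, unsigned int nz,
                    REAL dx, REAL dy, REAL dz, REAL dt,
                    REAL *d_u, REAL *d_uo, REAL *d_Lu)
{
    // Keep u^n around: Compute_RK blends u, uo and Lu at every stage.
    cudaMemcpyAsync(d_uo, d_u, pitch_bytes * ny * nz, cudaMemcpyDeviceToDevice, s);
    for (unsigned int step = 1; step <= 3; step++) {
        // x/y advection first: Compute_dF assigns Lu, the later kernels accumulate into it.
        Call_Adv_L(gx, bx, gy, by, s, pitch_bytes, pitch, desc, nx, ny, nz, dx, dy, d_u, d_Lu);
        Call_Adv_z(gz, bz, s, pitch, nx, ny, nz, dz, d_u, d_Lu);
        Call_Diff_(g3, b3, s, pitch, nx, ny, nz, d_u, d_Lu);
        Call_sspRK(g3, b3, s, pitch, nx, ny, nz, step, dt, d_u, d_uo, d_Lu);
    }
}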
cce51d881671ce6c228d9c17040abe27844bda07.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file mpcd/CellListGPU.cu * \brief Defines GPU functions and kernels used by mpcd::CellListGPU */ #include "CellListGPU.cuh" namespace mpcd { namespace gpu { namespace kernel { //! Kernel to compute the MPCD cell list on the GPU /*! * \param d_cell_np Array of number of particles per cell * \param d_cell_list 2D array of MPCD particles in each cell * \param d_conditions Conditions flags for error reporting * \param d_vel MPCD particle velocities * \param d_embed_cell_ids Cell indexes of embedded particles * \param d_pos MPCD particle positions * \param d_pos_embed Particle positions * \param d_embed_member_idx Indexes of embedded particles in \a d_pos_embed * \param periodic Flags if local simulation is periodic * \param origin_idx Global origin index for the local box * \param grid_shift Random grid shift vector * \param global_lo Lower bound of global orthorhombic simulation box * \param n_global_cell Global dimensions of the cell list, including padding * \param cell_size Cell width * \param cell_np_max Maximum number of particles per cell * \param cell_indexer 3D indexer for cell id * \param cell_list_indexer 2D indexer for particle position in cell * \param N_mpcd Number of MPCD particles * \param N_tot Total number of particle (MPCD + embedded) * * \b Implementation * One thread is launched per particle. The particle is floored into a bin subject to a random grid * shift. The number of particles in that bin is atomically incremented. If the addition of the * particle will not overflow the allocated memory, the particle is written into that bin. * Otherwise, a flag is set to resize the cell list and recompute. The MPCD particle's cell id is * stashed into the velocity array. 
*/ __global__ void compute_cell_list(unsigned int* d_cell_np, unsigned int* d_cell_list, uint3* d_conditions, Scalar4* d_vel, unsigned int* d_embed_cell_ids, const Scalar4* d_pos, const Scalar4* d_pos_embed, const unsigned int* d_embed_member_idx, const uchar3 periodic, const int3 origin_idx, const Scalar3 grid_shift, const Scalar3 global_lo, const uint3 n_global_cell, const Scalar cell_size, const unsigned int cell_np_max, const Index3D cell_indexer, const Index2D cell_list_indexer, const unsigned int N_mpcd, const unsigned int N_tot) { // one thread per particle unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N_tot) return; Scalar4 postype_i; if (idx < N_mpcd) { postype_i = d_pos[idx]; } else { postype_i = d_pos_embed[d_embed_member_idx[idx - N_mpcd]]; } const Scalar3 pos_i = make_scalar3(postype_i.x, postype_i.y, postype_i.z); if (isnan(pos_i.x) || isnan(pos_i.y) || isnan(pos_i.z)) { (*d_conditions).y = idx + 1; return; } // bin particle with grid shift assuming orthorhombic box (already validated) const Scalar3 delta = (pos_i - grid_shift) - global_lo; int3 global_bin = make_int3(::floor(delta.x / cell_size), ::floor(delta.y / cell_size), ::floor(delta.z / cell_size)); // wrap cell back through the boundaries (grid shifting may send +/- 1 outside of range) // this is done using periodic from the "local" box, since this will be periodic // only when there is one rank along the dimension if (periodic.x) { if (global_bin.x == (int)n_global_cell.x) global_bin.x = 0; else if (global_bin.x == -1) global_bin.x = n_global_cell.x - 1; } if (periodic.y) { if (global_bin.y == (int)n_global_cell.y) global_bin.y = 0; else if (global_bin.y == -1) global_bin.y = n_global_cell.y - 1; } if (periodic.z) { if (global_bin.z == (int)n_global_cell.z) global_bin.z = 0; else if (global_bin.z == -1) global_bin.z = n_global_cell.z - 1; } // compute the local cell int3 bin = make_int3(global_bin.x - origin_idx.x, global_bin.y - origin_idx.y, global_bin.z - origin_idx.z); // validate and make sure no particles blew out of the box if ((bin.x < 0 || bin.x >= (int)cell_indexer.getW()) || (bin.y < 0 || bin.y >= (int)cell_indexer.getH()) || (bin.z < 0 || bin.z >= (int)cell_indexer.getD())) { (*d_conditions).z = idx + 1; return; } const unsigned int bin_idx = cell_indexer(bin.x, bin.y, bin.z); const unsigned int offset = atomicInc(&d_cell_np[bin_idx], 0xffffffff); if (offset < cell_np_max) { d_cell_list[cell_list_indexer(offset, bin_idx)] = idx; } else { // overflow atomicMax(&(*d_conditions).x, offset + 1); } // stash the current particle bin into the velocity array if (idx < N_mpcd) { d_vel[idx].w = __int_as_scalar(bin_idx); } else { d_embed_cell_ids[idx - N_mpcd] = bin_idx; } } /*! * \param d_migrate_flag Flag signaling migration is required (output) * \param d_pos Embedded particle positions * \param d_group Indexes into \a d_pos for particles in embedded group * \param box Box covered by this domain * \param num_dim Dimensionality of system * \param N Number of particles in group * * \b Implementation * Using one thread per particle, each particle position is compared to the * bounds of the simulation box. If a particle lies outside the box, \a d_migrate_flag * has its bits set using an atomicMax transaction. The caller should then trigger * a communication step to migrate particles to their appropriate ranks. 
*/ __global__ void cell_check_migrate_embed(unsigned int* d_migrate_flag, const Scalar4* d_pos, const unsigned int* d_group, const BoxDim box, const unsigned int num_dim, const unsigned int N) { // one thread per particle in group unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= N) return; const unsigned int idx = d_group[tid]; const Scalar4 postype = d_pos[idx]; const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); const Scalar3 lo = box.getLo(); const Scalar3 hi = box.getHi(); const uchar3 periodic = box.getPeriodic(); if ((!periodic.x && (pos.x >= hi.x || pos.x < lo.x)) || (!periodic.y && (pos.y >= hi.y || pos.y < lo.y)) || (!periodic.z && num_dim == 3 && (pos.z >= hi.z || pos.z < lo.z))) { atomicMax(d_migrate_flag, 1); } } __global__ void cell_apply_sort(unsigned int* d_cell_list, const unsigned int* d_rorder, const unsigned int* d_cell_np, const Index2D cli, const unsigned int N_mpcd, const unsigned int N_cli) { // one thread per cell-list entry const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N_cli) return; // convert the entry 1D index into a 2D index const unsigned int cell = idx / cli.getW(); const unsigned int offset = idx - (cell * cli.getW()); /* here comes some terrible execution divergence */ // check if the cell is filled const unsigned int np = d_cell_np[cell]; if (offset < np) { // check if this is an MPCD particle const unsigned int pid = d_cell_list[idx]; if (pid < N_mpcd) { d_cell_list[idx] = d_rorder[pid]; } } } } // end namespace kernel } // end namespace gpu } // end namespace mpcd /*! * \param d_cell_np Array of number of particles per cell * \param d_cell_list 2D array of MPCD particles in each cell * \param d_conditions Conditions flags for error reporting * \param d_vel MPCD particle velocities * \param d_embed_cell_ids Cell indexes of embedded particles * \param d_pos MPCD particle positions * \param d_pos_embed Particle positions * \param d_embed_member_idx Indexes of embedded particles in \a d_pos_embed * \param periodic Flags if local simulation is periodic * \param origin_idx Global origin index for the local box * \param grid_shift Random grid shift vector * \param global_lo Lower bound of global orthorhombic simulation box * \param n_global_cell Global dimensions of the cell list, including padding * \param cell_size Cell width * \param cell_np_max Maximum number of particles per cell * \param cell_indexer 3D indexer for cell id * \param cell_list_indexer 2D indexer for particle position in cell * \param N_mpcd Number of MPCD particles * \param N_tot Total number of particle (MPCD + embedded) * \param block_size Number of threads per block * * \returns hipSuccess on completion, or an error on failure */ hipError_t mpcd::gpu::compute_cell_list(unsigned int* d_cell_np, unsigned int* d_cell_list, uint3* d_conditions, Scalar4* d_vel, unsigned int* d_embed_cell_ids, const Scalar4* d_pos, const Scalar4* d_pos_embed, const unsigned int* d_embed_member_idx, const uchar3& periodic, const int3& origin_idx, const Scalar3& grid_shift, const Scalar3& global_lo, const uint3& n_global_cell, const Scalar cell_size, const unsigned int cell_np_max, const Index3D& cell_indexer, const Index2D& cell_list_indexer, const unsigned int N_mpcd, const unsigned int N_tot, const unsigned int block_size) { // set the number of particles in each cell to zero hipError_t error = hipMemset(d_cell_np, 0, sizeof(unsigned int) * cell_indexer.getNumElements()); if (error != hipSuccess) return error; unsigned int max_block_size; 
hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::compute_cell_list); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N_tot / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::compute_cell_list), dim3(grid), dim3(run_block_size), 0, 0, d_cell_np, d_cell_list, d_conditions, d_vel, d_embed_cell_ids, d_pos, d_pos_embed, d_embed_member_idx, periodic, origin_idx, grid_shift, global_lo, n_global_cell, cell_size, cell_np_max, cell_indexer, cell_list_indexer, N_mpcd, N_tot); return hipSuccess; } /*! * \param d_migrate_flag Flag signaling migration is required (output) * \param d_pos Embedded particle positions * \param d_group Indexes into \a d_pos for particles in embedded group * \param box Box covered by this domain * \param N Number of particles in group * \param block_size Number of threads per block * * \sa mpcd::gpu::kernel::cell_check_migrate_embed */ hipError_t mpcd::gpu::cell_check_migrate_embed(unsigned int* d_migrate_flag, const Scalar4* d_pos, const unsigned int* d_group, const BoxDim& box, const unsigned int num_dim, const unsigned int N, const unsigned int block_size) { // ensure that the flag is always zeroed even if the caller forgets hipMemset(d_migrate_flag, 0, sizeof(unsigned int)); unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::cell_check_migrate_embed); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::cell_check_migrate_embed), dim3(grid), dim3(run_block_size), 0, 0, d_migrate_flag, d_pos, d_group, box, num_dim, N); return hipSuccess; } hipError_t mpcd::gpu::cell_apply_sort(unsigned int* d_cell_list, const unsigned int* d_rorder, const unsigned int* d_cell_np, const Index2D& cli, const unsigned int N_mpcd, const unsigned int block_size) { unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::cell_apply_sort); max_block_size = attr.maxThreadsPerBlock; const unsigned int N_cli = cli.getNumElements(); unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N_cli / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::cell_apply_sort), dim3(grid), dim3(run_block_size), 0, 0, d_cell_list, d_rorder, d_cell_np, cli, N_mpcd, N_cli); return hipSuccess; }
cce51d881671ce6c228d9c17040abe27844bda07.cu
// Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file mpcd/CellListGPU.cu * \brief Defines GPU functions and kernels used by mpcd::CellListGPU */ #include "CellListGPU.cuh" namespace mpcd { namespace gpu { namespace kernel { //! Kernel to compute the MPCD cell list on the GPU /*! * \param d_cell_np Array of number of particles per cell * \param d_cell_list 2D array of MPCD particles in each cell * \param d_conditions Conditions flags for error reporting * \param d_vel MPCD particle velocities * \param d_embed_cell_ids Cell indexes of embedded particles * \param d_pos MPCD particle positions * \param d_pos_embed Particle positions * \param d_embed_member_idx Indexes of embedded particles in \a d_pos_embed * \param periodic Flags if local simulation is periodic * \param origin_idx Global origin index for the local box * \param grid_shift Random grid shift vector * \param global_lo Lower bound of global orthorhombic simulation box * \param n_global_cell Global dimensions of the cell list, including padding * \param cell_size Cell width * \param cell_np_max Maximum number of particles per cell * \param cell_indexer 3D indexer for cell id * \param cell_list_indexer 2D indexer for particle position in cell * \param N_mpcd Number of MPCD particles * \param N_tot Total number of particle (MPCD + embedded) * * \b Implementation * One thread is launched per particle. The particle is floored into a bin subject to a random grid * shift. The number of particles in that bin is atomically incremented. If the addition of the * particle will not overflow the allocated memory, the particle is written into that bin. * Otherwise, a flag is set to resize the cell list and recompute. The MPCD particle's cell id is * stashed into the velocity array. 
*/ __global__ void compute_cell_list(unsigned int* d_cell_np, unsigned int* d_cell_list, uint3* d_conditions, Scalar4* d_vel, unsigned int* d_embed_cell_ids, const Scalar4* d_pos, const Scalar4* d_pos_embed, const unsigned int* d_embed_member_idx, const uchar3 periodic, const int3 origin_idx, const Scalar3 grid_shift, const Scalar3 global_lo, const uint3 n_global_cell, const Scalar cell_size, const unsigned int cell_np_max, const Index3D cell_indexer, const Index2D cell_list_indexer, const unsigned int N_mpcd, const unsigned int N_tot) { // one thread per particle unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N_tot) return; Scalar4 postype_i; if (idx < N_mpcd) { postype_i = d_pos[idx]; } else { postype_i = d_pos_embed[d_embed_member_idx[idx - N_mpcd]]; } const Scalar3 pos_i = make_scalar3(postype_i.x, postype_i.y, postype_i.z); if (isnan(pos_i.x) || isnan(pos_i.y) || isnan(pos_i.z)) { (*d_conditions).y = idx + 1; return; } // bin particle with grid shift assuming orthorhombic box (already validated) const Scalar3 delta = (pos_i - grid_shift) - global_lo; int3 global_bin = make_int3(std::floor(delta.x / cell_size), std::floor(delta.y / cell_size), std::floor(delta.z / cell_size)); // wrap cell back through the boundaries (grid shifting may send +/- 1 outside of range) // this is done using periodic from the "local" box, since this will be periodic // only when there is one rank along the dimension if (periodic.x) { if (global_bin.x == (int)n_global_cell.x) global_bin.x = 0; else if (global_bin.x == -1) global_bin.x = n_global_cell.x - 1; } if (periodic.y) { if (global_bin.y == (int)n_global_cell.y) global_bin.y = 0; else if (global_bin.y == -1) global_bin.y = n_global_cell.y - 1; } if (periodic.z) { if (global_bin.z == (int)n_global_cell.z) global_bin.z = 0; else if (global_bin.z == -1) global_bin.z = n_global_cell.z - 1; } // compute the local cell int3 bin = make_int3(global_bin.x - origin_idx.x, global_bin.y - origin_idx.y, global_bin.z - origin_idx.z); // validate and make sure no particles blew out of the box if ((bin.x < 0 || bin.x >= (int)cell_indexer.getW()) || (bin.y < 0 || bin.y >= (int)cell_indexer.getH()) || (bin.z < 0 || bin.z >= (int)cell_indexer.getD())) { (*d_conditions).z = idx + 1; return; } const unsigned int bin_idx = cell_indexer(bin.x, bin.y, bin.z); const unsigned int offset = atomicInc(&d_cell_np[bin_idx], 0xffffffff); if (offset < cell_np_max) { d_cell_list[cell_list_indexer(offset, bin_idx)] = idx; } else { // overflow atomicMax(&(*d_conditions).x, offset + 1); } // stash the current particle bin into the velocity array if (idx < N_mpcd) { d_vel[idx].w = __int_as_scalar(bin_idx); } else { d_embed_cell_ids[idx - N_mpcd] = bin_idx; } } /*! * \param d_migrate_flag Flag signaling migration is required (output) * \param d_pos Embedded particle positions * \param d_group Indexes into \a d_pos for particles in embedded group * \param box Box covered by this domain * \param num_dim Dimensionality of system * \param N Number of particles in group * * \b Implementation * Using one thread per particle, each particle position is compared to the * bounds of the simulation box. If a particle lies outside the box, \a d_migrate_flag * has its bits set using an atomicMax transaction. The caller should then trigger * a communication step to migrate particles to their appropriate ranks. 
*/ __global__ void cell_check_migrate_embed(unsigned int* d_migrate_flag, const Scalar4* d_pos, const unsigned int* d_group, const BoxDim box, const unsigned int num_dim, const unsigned int N) { // one thread per particle in group unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= N) return; const unsigned int idx = d_group[tid]; const Scalar4 postype = d_pos[idx]; const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); const Scalar3 lo = box.getLo(); const Scalar3 hi = box.getHi(); const uchar3 periodic = box.getPeriodic(); if ((!periodic.x && (pos.x >= hi.x || pos.x < lo.x)) || (!periodic.y && (pos.y >= hi.y || pos.y < lo.y)) || (!periodic.z && num_dim == 3 && (pos.z >= hi.z || pos.z < lo.z))) { atomicMax(d_migrate_flag, 1); } } __global__ void cell_apply_sort(unsigned int* d_cell_list, const unsigned int* d_rorder, const unsigned int* d_cell_np, const Index2D cli, const unsigned int N_mpcd, const unsigned int N_cli) { // one thread per cell-list entry const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N_cli) return; // convert the entry 1D index into a 2D index const unsigned int cell = idx / cli.getW(); const unsigned int offset = idx - (cell * cli.getW()); /* here comes some terrible execution divergence */ // check if the cell is filled const unsigned int np = d_cell_np[cell]; if (offset < np) { // check if this is an MPCD particle const unsigned int pid = d_cell_list[idx]; if (pid < N_mpcd) { d_cell_list[idx] = d_rorder[pid]; } } } } // end namespace kernel } // end namespace gpu } // end namespace mpcd /*! * \param d_cell_np Array of number of particles per cell * \param d_cell_list 2D array of MPCD particles in each cell * \param d_conditions Conditions flags for error reporting * \param d_vel MPCD particle velocities * \param d_embed_cell_ids Cell indexes of embedded particles * \param d_pos MPCD particle positions * \param d_pos_embed Particle positions * \param d_embed_member_idx Indexes of embedded particles in \a d_pos_embed * \param periodic Flags if local simulation is periodic * \param origin_idx Global origin index for the local box * \param grid_shift Random grid shift vector * \param global_lo Lower bound of global orthorhombic simulation box * \param n_global_cell Global dimensions of the cell list, including padding * \param cell_size Cell width * \param cell_np_max Maximum number of particles per cell * \param cell_indexer 3D indexer for cell id * \param cell_list_indexer 2D indexer for particle position in cell * \param N_mpcd Number of MPCD particles * \param N_tot Total number of particle (MPCD + embedded) * \param block_size Number of threads per block * * \returns cudaSuccess on completion, or an error on failure */ cudaError_t mpcd::gpu::compute_cell_list(unsigned int* d_cell_np, unsigned int* d_cell_list, uint3* d_conditions, Scalar4* d_vel, unsigned int* d_embed_cell_ids, const Scalar4* d_pos, const Scalar4* d_pos_embed, const unsigned int* d_embed_member_idx, const uchar3& periodic, const int3& origin_idx, const Scalar3& grid_shift, const Scalar3& global_lo, const uint3& n_global_cell, const Scalar cell_size, const unsigned int cell_np_max, const Index3D& cell_indexer, const Index2D& cell_list_indexer, const unsigned int N_mpcd, const unsigned int N_tot, const unsigned int block_size) { // set the number of particles in each cell to zero cudaError_t error = cudaMemset(d_cell_np, 0, sizeof(unsigned int) * cell_indexer.getNumElements()); if (error != cudaSuccess) return error; unsigned int max_block_size; 
cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::compute_cell_list); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N_tot / run_block_size + 1); mpcd::gpu::kernel::compute_cell_list<<<grid, run_block_size>>>(d_cell_np, d_cell_list, d_conditions, d_vel, d_embed_cell_ids, d_pos, d_pos_embed, d_embed_member_idx, periodic, origin_idx, grid_shift, global_lo, n_global_cell, cell_size, cell_np_max, cell_indexer, cell_list_indexer, N_mpcd, N_tot); return cudaSuccess; } /*! * \param d_migrate_flag Flag signaling migration is required (output) * \param d_pos Embedded particle positions * \param d_group Indexes into \a d_pos for particles in embedded group * \param box Box covered by this domain * \param N Number of particles in group * \param block_size Number of threads per block * * \sa mpcd::gpu::kernel::cell_check_migrate_embed */ cudaError_t mpcd::gpu::cell_check_migrate_embed(unsigned int* d_migrate_flag, const Scalar4* d_pos, const unsigned int* d_group, const BoxDim& box, const unsigned int num_dim, const unsigned int N, const unsigned int block_size) { // ensure that the flag is always zeroed even if the caller forgets cudaMemset(d_migrate_flag, 0, sizeof(unsigned int)); unsigned int max_block_size; cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::cell_check_migrate_embed); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N / run_block_size + 1); mpcd::gpu::kernel::cell_check_migrate_embed<<<grid, run_block_size>>>(d_migrate_flag, d_pos, d_group, box, num_dim, N); return cudaSuccess; } cudaError_t mpcd::gpu::cell_apply_sort(unsigned int* d_cell_list, const unsigned int* d_rorder, const unsigned int* d_cell_np, const Index2D& cli, const unsigned int N_mpcd, const unsigned int block_size) { unsigned int max_block_size; cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::cell_apply_sort); max_block_size = attr.maxThreadsPerBlock; const unsigned int N_cli = cli.getNumElements(); unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N_cli / run_block_size + 1); mpcd::gpu::kernel::cell_apply_sort<<<grid, run_block_size>>>(d_cell_list, d_rorder, d_cell_np, cli, N_mpcd, N_cli); return cudaSuccess; }
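For context on the CellListGPU pair above: when a bin receives more than cell_np_max particles, the kernel records the required capacity in d_conditions.x (the .y and .z fields flag NaN and out-of-range positions), and the documented intent is that the host resizes the cell list and rebuilds. The fragment below is a generic illustration of that retry pattern using only the mpcd::gpu::compute_cell_list signature defined in this file; the surrounding device buffers, indexers and the reallocation step are assumed to exist in scope and are not HOOMD-blue API shown here.

// Illustrative host-side rebuild loop (not from the original sources).
unsigned int cell_np_max = 4;            // initial guess for the per-cell capacity (assumption)
bool overflow = true;
while (overflow)
    {
    uint3 conditions = make_uint3(0, 0, 0);
    cudaMemcpy(d_conditions, &conditions, sizeof(uint3), cudaMemcpyHostToDevice);

    mpcd::gpu::compute_cell_list(d_cell_np, d_cell_list, d_conditions, d_vel, d_embed_cell_ids,
                                 d_pos, d_pos_embed, d_embed_member_idx, periodic, origin_idx,
                                 grid_shift, global_lo, n_global_cell, cell_size, cell_np_max,
                                 cell_indexer, cell_list_indexer, N_mpcd, N_tot, block_size);

    cudaMemcpy(&conditions, d_conditions, sizeof(uint3), cudaMemcpyDeviceToHost);
    if (conditions.y != 0 || conditions.z != 0)
        {
        // a particle was NaN or left the local cell grid: report an error instead of retrying
        break;
        }
    overflow = (conditions.x > cell_np_max);
    if (overflow)
        {
        cell_np_max = conditions.x;      // grow to the largest occupancy that was observed
        // reallocate d_cell_list and rebuild cell_list_indexer with the new capacity (not shown)
        }
    }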
1235e31374e8b39b5fd4043ad8d9d0bb1b971d46.hip
// !!! This is a file automatically generated by hipify!!! /** Copyright 2022 BioMap (Beijing) Intelligence Technology Limited Copyright 2022 HPC-AI Technology Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. **/ // part of code modified from https://github.com/NVIDIA/apex #include <hip/hip_cooperative_groups.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <torch/extension.h> #include <THH/THHDeviceUtils.cuh> #include "ATen/ATen.h" #include "ATen/AccumulateType.h" #include "ATen/hip/HIPContext.h" #include "compat.h" #include "type_shim.h" #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) \ CHECK_CUDA(x); \ CHECK_CONTIGUOUS(x) inline __device__ void WelfordOnline(float val, float* mean, float* m2, float* count) { *count += 1; float delta1 = val - *mean; *mean += delta1 / (*count); float delta2 = val - *mean; *m2 += delta1 * delta2; } inline __device__ void WelfordOnline(float b_mean, float b_m2, float b_count, float* mean, float* m2, float* count) { if (b_count == 0) { return; } float new_count = *count + b_count; float nb_n = b_count / new_count; float delta = b_mean - *mean; *mean += delta * nb_n; *m2 += b_m2 + delta * delta * (*count) * nb_n; *count = new_count; } __inline__ __device__ void WelfordWarpAllReduce(float thread_mean, float thread_m2, float thread_count, float* mean, float* m2, float* count) { *mean = thread_mean; *m2 = thread_m2; *count = thread_count; for (int mask = 1; mask < 32; mask *= 2) { float b_mean = __shfl_down_sync(0xffffffff, *mean, mask); float b_m2 = __shfl_down_sync(0xffffffff, *m2, mask); float b_count = __shfl_down_sync(0xffffffff, *count, mask); WelfordOnline(b_mean, b_m2, b_count, mean, m2, count); } *mean = __shfl_sync(0xffffffff, *mean, 0, 32); *m2 = __shfl_sync(0xffffffff, *m2, 0, 32); *count = __shfl_sync(0xffffffff, *count, 0, 32); } template <typename T> __global__ void fastfold_layernorm(T* input, T* output, T* gamma, T* beta, float* mean, float* invvar, int rows, int cols, double epsilon) { int threadidx_x = threadIdx.x / 32; int threadidx_y = threadIdx.x % 32; int row_offset = blockIdx.x * 4 + threadidx_x; int cols_per_thread = (cols + 31) / 32; int cols_this_thread = cols_per_thread; int last_y = (cols / cols_per_thread); if (threadidx_y == last_y) { cols_this_thread = cols - cols_per_thread * last_y; } else if (threadidx_y > last_y) { cols_this_thread = 0; } int lane_id = threadidx_y; if (row_offset < rows) { float buf[32]; float thread_mean = 0.f; float thread_m2 = 0.f; float thread_count = 0.f; float warp_mean; float warp_m2; float warp_count; T* row_input = input + row_offset * cols; T* row_output = output + row_offset * cols; #pragma unroll for (int i = 0; i < cols_this_thread; i++) { buf[i] = static_cast<float>(row_input[lane_id * cols_per_thread + i]); } #pragma unroll for (int i = 0; i < cols_this_thread; i++) { WelfordOnline(buf[i], &thread_mean, &thread_m2, &thread_count); } WelfordWarpAllReduce(thread_mean, thread_m2, 
thread_count, &warp_mean, &warp_m2, &warp_count); float row_mean = warp_mean; float row_variance = max(warp_m2 / warp_count, 0.f); float row_inv_var = rsqrt(row_variance + epsilon); if (lane_id == 0) { mean[row_offset] = row_mean; invvar[row_offset] = row_inv_var; } #pragma unroll for (int i = 0; i < cols_this_thread; ++i) { buf[i] = (buf[i] - row_mean) * row_inv_var; } #pragma unroll for (int i = 0; i < cols_this_thread; ++i) { row_output[lane_id * cols_per_thread + i] = static_cast<T>(buf[i]) * gamma[lane_id * cols_per_thread + i] + beta[lane_id * cols_per_thread + i]; } } } template <typename T> __global__ void fastfold_layernorm2(T* input, T* output, T* gamma, T* beta, float* mean, float* invvar, int rows, int cols, double epsilon) { int threadidx_x = threadIdx.x / 32; int threadidx_y = threadIdx.x % 32; int row_offset = blockIdx.x * 4 + threadidx_x; int cols_per_thread = (cols - 1) / 32; int cols_this_thread = cols_per_thread; int last_y = (cols / cols_per_thread); if (threadidx_y == last_y) { cols_this_thread = cols - cols_per_thread * last_y; } else if (threadidx_y > last_y) { cols_this_thread = 0; } int lane_id = threadidx_y; if (row_offset < rows) { float buf[32]; float thread_mean = 0.f; float thread_m2 = 0.f; float thread_count = 0.f; float warp_mean; float warp_m2; float warp_count; T* row_input = input + row_offset * cols; T* row_output = output + row_offset * cols; #pragma unroll for (int i = 0; i < cols_this_thread; i++) { buf[i] = static_cast<float>(row_input[lane_id * cols_per_thread + i]); } #pragma unroll for (int i = 0; i < cols_this_thread; i++) { WelfordOnline(buf[i], &thread_mean, &thread_m2, &thread_count); } WelfordWarpAllReduce(thread_mean, thread_m2, thread_count, &warp_mean, &warp_m2, &warp_count); float row_mean = warp_mean; float row_variance = max(warp_m2 / warp_count, 0.f); float row_inv_var = rsqrt(row_variance + epsilon); if (lane_id == 0) { mean[row_offset] = row_mean; invvar[row_offset] = row_inv_var; } #pragma unroll for (int i = 0; i < cols_this_thread; ++i) { buf[i] = (buf[i] - row_mean) * row_inv_var; } #pragma unroll for (int i = 0; i < cols_this_thread; ++i) { row_output[lane_id * cols_per_thread + i] = static_cast<T>(buf[i]) * gamma[lane_id * cols_per_thread + i] + beta[lane_id * cols_per_thread + i]; } } } void cuda_layer_norm(at::Tensor* output, at::Tensor* mean, at::Tensor* invvar, at::Tensor* input, int rows, int cols, at::IntArrayRef normalized_shape, at::Tensor* gamma, at::Tensor* beta, double epsilon) { int grid = (rows + 3) / 4; dim3 block(128); if (output->dtype() == torch::kFloat32) { hipLaunchKernelGGL(( fastfold_layernorm<float>), dim3(grid), dim3(block), 0, 0, (float*)input->data_ptr(), (float*)output->data_ptr(), (float*)gamma->data_ptr(), (float*)beta->data_ptr(), (float*)mean->data_ptr(), (float*)invvar->data_ptr(), rows, cols, epsilon); } else if (output->dtype() == torch::kFloat16) { hipLaunchKernelGGL(( fastfold_layernorm<at::Half>), dim3(grid), dim3(block), 0, 0, (at::Half*)input->data_ptr(), (at::Half*)output->data_ptr(), (at::Half*)gamma->data_ptr(), (at::Half*)beta->data_ptr(), (float*)mean->data_ptr(), (float*)invvar->data_ptr(), rows, cols, epsilon); } else if (output->dtype() == torch::kBFloat16) { hipLaunchKernelGGL(( fastfold_layernorm<at::BFloat16>), dim3(grid), dim3(block), 0, 0, (at::BFloat16*)input->data_ptr(), (at::BFloat16*)output->data_ptr(), (at::BFloat16*)gamma->data_ptr(), (at::BFloat16*)beta->data_ptr(), (float*)mean->data_ptr(), (float*)invvar->data_ptr(), rows, cols, epsilon); } } template <typename T> 
struct SharedMemory; template <> struct SharedMemory<float> { __device__ float* getPointer() { extern __shared__ float s_float[]; return s_float; } }; template <typename T, typename U, typename V> __device__ void cuLoadWriteStridedInputs(const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const V* dout, const int i1_end, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar) { int i1 = i1_block + thr_load_row_off; if (i1 < i1_end) { U curr_mean = mean[i1]; U curr_invvar = invvar[i1]; for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1 * n2 + i2; int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; if (i2 < n2) { U curr_input = static_cast<U>(input[load_idx]); U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] = curr_dout; warp_buf2[write_idx] = curr_dout * (curr_input - curr_mean) * curr_invvar; } else { warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } else { for (int k = 0; k < blockDim.y; ++k) { int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } template <typename T, typename U, typename V> __device__ void cuLoadAddStridedInputs(const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const V* dout, const int i1_end, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar) { int i1 = i1_block + thr_load_row_off; if (i1 < i1_end) { U curr_mean = mean[i1]; U curr_invvar = invvar[i1]; for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1 * n2 + i2; int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; if (i2 < n2) { U curr_input = static_cast<U>(input[load_idx]); U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] += curr_dout; warp_buf2[write_idx] += curr_dout * (curr_input - curr_mean) * curr_invvar; } } } } template <typename T, typename U, typename V> __global__ void cuComputePartGradGammaBeta(const V* __restrict__ dout, const T* __restrict__ input, const int n1, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar, U epsilon, U* part_grad_gamma, U* part_grad_beta) { const int numsegs_n1 = (n1 + blockDim.y * blockDim.y - 1) / (blockDim.y * blockDim.y); const int segs_per_block = (numsegs_n1 + gridDim.y - 1) / gridDim.y; const int i1_beg = blockIdx.y * segs_per_block * blockDim.y * blockDim.y; const int i1_beg_plus_one = (blockIdx.y + 1) * segs_per_block * blockDim.y * blockDim.y; const int i1_end = i1_beg_plus_one < n1 ? 
i1_beg_plus_one : n1; const int row_stride = blockDim.x + 1; const int thr_load_col_off = (threadIdx.x * blockDim.y) & (blockDim.x - 1); const int thr_load_row_off = (threadIdx.x * blockDim.y) / blockDim.x + threadIdx.y * blockDim.y; const int i2_off = blockIdx.x * blockDim.x + thr_load_col_off; SharedMemory<U> shared; U* buf = shared.getPointer(); // buf has at least blockDim.x * blockDim.y * blockDim.y + // (blockDim.y - 1)*(blockDim.x/blockDim.y) elements U* warp_buf1 = (U*)buf; U* warp_buf2 = warp_buf1 + blockDim.y * blockDim.y * row_stride; // compute partial sums from strided inputs // do this to increase number of loads in flight cuLoadWriteStridedInputs(i1_beg, thr_load_row_off, thr_load_col_off, i2_off, row_stride, warp_buf1, warp_buf2, input, dout, i1_end, n2, mean, invvar); for (int i1_block = i1_beg + blockDim.y * blockDim.y; i1_block < i1_end; i1_block += blockDim.y * blockDim.y) { cuLoadAddStridedInputs(i1_block, thr_load_row_off, thr_load_col_off, i2_off, row_stride, warp_buf1, warp_buf2, input, dout, i1_end, n2, mean, invvar); } __syncthreads(); // inter-warp reductions // sum within each warp U acc1 = U(0); U acc2 = U(0); for (int k = 0; k < blockDim.y; ++k) { int row1 = threadIdx.y + k * blockDim.y; int idx1 = row1 * row_stride + threadIdx.x; acc1 += warp_buf1[idx1]; acc2 += warp_buf2[idx1]; } warp_buf1[threadIdx.y * row_stride + threadIdx.x] = acc1; warp_buf2[threadIdx.y * row_stride + threadIdx.x] = acc2; __syncthreads(); // sum all warps for (int offset = blockDim.y / 2; offset > 1; offset /= 2) { if (threadIdx.y < offset) { int row1 = threadIdx.y; int row2 = threadIdx.y + offset; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; warp_buf1[idx1] += warp_buf1[idx2]; warp_buf2[idx1] += warp_buf2[idx2]; } __syncthreads(); } int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (threadIdx.y == 0 && i2 < n2) { int row1 = threadIdx.y; int row2 = threadIdx.y + 1; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; part_grad_beta[blockIdx.y * n2 + i2] = warp_buf1[idx1] + warp_buf1[idx2]; part_grad_gamma[blockIdx.y * n2 + i2] = warp_buf2[idx1] + warp_buf2[idx2]; } } template <typename U, typename V> __global__ void cuComputeGradGammaBeta(const U* part_grad_gamma, const U* part_grad_beta, const int part_size, const int n1, const int n2, V* grad_gamma, V* grad_beta) { // sum partial gradients for gamma and beta SharedMemory<U> shared; U* buf = shared.getPointer(); int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i2 < n2) { // each warp does sequential reductions until reduced part_size is num_warps int num_warp_reductions = part_size / blockDim.y; U sum_gamma = U(0); U sum_beta = U(0); const U* part_grad_gamma_ptr = part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2; const U* part_grad_beta_ptr = part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2; for (int warp_offset = 0; warp_offset < num_warp_reductions; ++warp_offset) { sum_gamma += part_grad_gamma_ptr[warp_offset * n2]; sum_beta += part_grad_beta_ptr[warp_offset * n2]; } // inter-warp reductions const int nbsize3 = blockDim.x * blockDim.y / 2; for (int offset = blockDim.y / 2; offset >= 1; offset /= 2) { // top half write to shared memory if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[write_idx] = sum_gamma; buf[write_idx + nbsize3] = sum_beta; } __syncthreads(); // bottom half sums if (threadIdx.y < offset) { const int read_idx = threadIdx.y * 
blockDim.x + threadIdx.x; sum_gamma += buf[read_idx]; sum_beta += buf[read_idx + nbsize3]; } __syncthreads(); } // write out fully summed gradients if (threadIdx.y == 0) { grad_gamma[i2] = sum_gamma; grad_beta[i2] = sum_beta; } } } template <typename T, typename U, typename V> __global__ void cuComputeGradInput(const V* __restrict__ dout, const T* __restrict__ input, const int n1, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar, U epsilon, const V* gamma, T* grad_input) { for (auto i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) { U sum_loss1 = U(0); U sum_loss2 = U(0); const U c_mean = mean[i1]; const U c_invvar = invvar[i1]; const T* k_input = input + i1 * n2; const V* k_dout = dout + i1 * n2; const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; if (gamma != NULL) { int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_h = static_cast<U>(k_input[l + k]); const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss * gamma[l + k]; sum_loss2 += c_loss * gamma[l + k] * (c_h - c_mean) * c_invvar; } } for (; l < n2; ++l) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss * gamma[l]; sum_loss2 += c_loss * gamma[l] * (c_h - c_mean) * c_invvar; } } else { int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_h = static_cast<U>(k_input[l + k]); const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss; sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } } for (; l < n2; ++l) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss; sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } } // intra-warp reductions for (int mask = blockDim.x / 2; mask > 0; mask /= 2) { sum_loss1 += WARP_SHFL_XOR(sum_loss1, mask); sum_loss2 += WARP_SHFL_XOR(sum_loss2, mask); } // inter-warp reductions if (blockDim.y > 1) { SharedMemory<U> shared; U* buf = shared.getPointer(); for (int offset = blockDim.y / 2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int wrt_i = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[2 * wrt_i] = sum_loss1; buf[2 * wrt_i + 1] = sum_loss2; } __syncthreads(); // lower half merges if (threadIdx.y < offset) { const int read_i = threadIdx.y * blockDim.x + threadIdx.x; sum_loss1 += buf[2 * read_i]; sum_loss2 += buf[2 * read_i + 1]; } __syncthreads(); } if (threadIdx.y == 0) { buf[2 * threadIdx.x] = sum_loss1; buf[2 * threadIdx.x + 1] = sum_loss2; } __syncthreads(); if (threadIdx.y != 0) { sum_loss1 = buf[2 * threadIdx.x]; sum_loss2 = buf[2 * threadIdx.x + 1]; } } // all threads now have the two sums over l U fH = (U)n2; U term1 = (U(1) / fH) * c_invvar; T* k_grad_input = grad_input + i1 * n2; if (gamma != NULL) { for (int l = thrx; l < n2; l += numx) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss * gamma[l]; f_grad_input -= sum_loss1; f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } else { for (int l = thrx; l < n2; l += numx) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss; f_grad_input -= sum_loss1; f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } } 
} template <typename T, typename U, typename V> void HostLayerNormGradient(const V* dout, const U* mean, const U* invvar, at::Tensor* input, int n1, int n2, const V* gamma, const V* beta, double epsilon, T* grad_input, V* grad_gamma, V* grad_beta) { auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); if (gamma != NULL && beta != NULL) { // compute grad_gamma(j) and grad_beta(j) const int part_size = 16; const dim3 threads2(32, 4, 1); const dim3 blocks2((n2 + threads2.x - 1) / threads2.x, part_size, 1); const int nshared2_a = 2 * sizeof(U) * threads2.y * threads2.y * (threads2.x + 1); const int nshared2_b = threads2.x * threads2.y * sizeof(U); const int nshared2 = nshared2_a > nshared2_b ? nshared2_a : nshared2_b; at::Tensor part_grad_gamma = at::empty({part_size, n2}, input->options().dtype(at::ScalarType::Float)); at::Tensor part_grad_beta = at::empty_like(part_grad_gamma); hipLaunchKernelGGL(( cuComputePartGradGammaBeta), dim3(blocks2), dim3(threads2), nshared2, stream, dout, input->DATA_PTR<T>(), n1, n2, mean, invvar, U(epsilon), part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>()); const dim3 threads3(32, 8, 1); const dim3 blocks3((n2 + threads2.x - 1) / threads2.x, 1, 1); const int nshared3 = threads3.x * threads3.y * sizeof(U); hipLaunchKernelGGL(( cuComputeGradGammaBeta), dim3(blocks3), dim3(threads3), nshared3, stream, part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>(), part_size, n1, n2, grad_gamma, grad_beta); } // compute grad_input const uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; const dim3 blocks1(1, ::min((uint64_t)n1, maxGridY), 1); const dim3 threads1(32, 4, 1); int nshared = threads1.y > 1 ? threads1.y * threads1.x * sizeof(U) : 0; hipLaunchKernelGGL(( cuComputeGradInput), dim3(blocks1), dim3(threads1), nshared, stream, dout, input->DATA_PTR<T>(), n1, n2, mean, invvar, U(epsilon), gamma, grad_input); } void cuda_layer_norm_gradient(at::Tensor* dout, at::Tensor* mean, at::Tensor* invvar, at::Tensor* input, int n1, int n2, at::IntArrayRef normalized_shape, at::Tensor* gamma, at::Tensor* beta, double epsilon, at::Tensor* grad_input, at::Tensor* grad_gamma, at::Tensor* grad_beta) { using namespace at; DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES( input->scalar_type(), gamma->scalar_type(), "cuda_layer_norm_gradient_kernel", HostLayerNormGradient(dout->DATA_PTR<scalar_t_out>(), mean->DATA_PTR<float>(), invvar->DATA_PTR<float>(), input, n1, n2, // TMJ pass NULL argument for gamma, beta, grad_gamma and grad_beta // if gamma Tensor is NULL on input. gamma != NULL ? gamma->DATA_PTR<scalar_t_out>() : NULL, gamma != NULL ? beta->DATA_PTR<scalar_t_out>() : NULL, epsilon, grad_input->DATA_PTR<scalar_t_in>(), gamma != NULL ? grad_gamma->DATA_PTR<scalar_t_out>() : NULL, gamma != NULL ? grad_beta->DATA_PTR<scalar_t_out>() : NULL);) }
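The grad_input expression implemented by cuComputeGradInput above is compact but hard to read inside the warp and shared-memory reductions. Below is a minimal single-row host sketch of the same arithmetic (illustrative only: plain float instead of the templated T/U/V, gamma assumed non-null, and the function name is invented for the example).

#include <vector>

// Reference for one row: gx = (invvar/H) * (H*dy*gamma - sum(dy*gamma)
//                              - (x-mean)*invvar * sum(dy*gamma*(x-mean)*invvar))
std::vector<float> layerNormGradInputRow(const std::vector<float>& x,
                                         const std::vector<float>& dy,
                                         const std::vector<float>& gamma,
                                         float mean, float invvar)
{
    const int H = static_cast<int>(x.size());
    float sum1 = 0.f, sum2 = 0.f;  // sum_loss1 / sum_loss2 in the kernel
    for (int l = 0; l < H; ++l) {
        sum1 += dy[l] * gamma[l];
        sum2 += dy[l] * gamma[l] * (x[l] - mean) * invvar;
    }
    std::vector<float> gx(H);
    for (int l = 0; l < H; ++l) {
        float g = H * dy[l] * gamma[l] - sum1 - (x[l] - mean) * invvar * sum2;
        gx[l] = g * invvar / H;  // term1 = (1/H) * invvar in the kernel
    }
    return gx;
}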
1235e31374e8b39b5fd4043ad8d9d0bb1b971d46.cu
/** Copyright 2022 BioMap (Beijing) Intelligence Technology Limited Copyright 2022 HPC-AI Technology Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. **/ // part of code modified from https://github.com/NVIDIA/apex #include <cooperative_groups.h> #include <cuda.h> #include <cuda_runtime.h> #include <torch/extension.h> #include <THC/THCDeviceUtils.cuh> #include "ATen/ATen.h" #include "ATen/AccumulateType.h" #include "ATen/cuda/CUDAContext.h" #include "compat.h" #include "type_shim.h" #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) \ CHECK_CUDA(x); \ CHECK_CONTIGUOUS(x) inline __device__ void WelfordOnline(float val, float* mean, float* m2, float* count) { *count += 1; float delta1 = val - *mean; *mean += delta1 / (*count); float delta2 = val - *mean; *m2 += delta1 * delta2; } inline __device__ void WelfordOnline(float b_mean, float b_m2, float b_count, float* mean, float* m2, float* count) { if (b_count == 0) { return; } float new_count = *count + b_count; float nb_n = b_count / new_count; float delta = b_mean - *mean; *mean += delta * nb_n; *m2 += b_m2 + delta * delta * (*count) * nb_n; *count = new_count; } __inline__ __device__ void WelfordWarpAllReduce(float thread_mean, float thread_m2, float thread_count, float* mean, float* m2, float* count) { *mean = thread_mean; *m2 = thread_m2; *count = thread_count; for (int mask = 1; mask < 32; mask *= 2) { float b_mean = __shfl_down_sync(0xffffffff, *mean, mask); float b_m2 = __shfl_down_sync(0xffffffff, *m2, mask); float b_count = __shfl_down_sync(0xffffffff, *count, mask); WelfordOnline(b_mean, b_m2, b_count, mean, m2, count); } *mean = __shfl_sync(0xffffffff, *mean, 0, 32); *m2 = __shfl_sync(0xffffffff, *m2, 0, 32); *count = __shfl_sync(0xffffffff, *count, 0, 32); } template <typename T> __global__ void fastfold_layernorm(T* input, T* output, T* gamma, T* beta, float* mean, float* invvar, int rows, int cols, double epsilon) { int threadidx_x = threadIdx.x / 32; int threadidx_y = threadIdx.x % 32; int row_offset = blockIdx.x * 4 + threadidx_x; int cols_per_thread = (cols + 31) / 32; int cols_this_thread = cols_per_thread; int last_y = (cols / cols_per_thread); if (threadidx_y == last_y) { cols_this_thread = cols - cols_per_thread * last_y; } else if (threadidx_y > last_y) { cols_this_thread = 0; } int lane_id = threadidx_y; if (row_offset < rows) { float buf[32]; float thread_mean = 0.f; float thread_m2 = 0.f; float thread_count = 0.f; float warp_mean; float warp_m2; float warp_count; T* row_input = input + row_offset * cols; T* row_output = output + row_offset * cols; #pragma unroll for (int i = 0; i < cols_this_thread; i++) { buf[i] = static_cast<float>(row_input[lane_id * cols_per_thread + i]); } #pragma unroll for (int i = 0; i < cols_this_thread; i++) { WelfordOnline(buf[i], &thread_mean, &thread_m2, &thread_count); } WelfordWarpAllReduce(thread_mean, thread_m2, thread_count, &warp_mean, &warp_m2, &warp_count); float row_mean = warp_mean; 
float row_variance = max(warp_m2 / warp_count, 0.f); float row_inv_var = rsqrt(row_variance + epsilon); if (lane_id == 0) { mean[row_offset] = row_mean; invvar[row_offset] = row_inv_var; } #pragma unroll for (int i = 0; i < cols_this_thread; ++i) { buf[i] = (buf[i] - row_mean) * row_inv_var; } #pragma unroll for (int i = 0; i < cols_this_thread; ++i) { row_output[lane_id * cols_per_thread + i] = static_cast<T>(buf[i]) * gamma[lane_id * cols_per_thread + i] + beta[lane_id * cols_per_thread + i]; } } } template <typename T> __global__ void fastfold_layernorm2(T* input, T* output, T* gamma, T* beta, float* mean, float* invvar, int rows, int cols, double epsilon) { int threadidx_x = threadIdx.x / 32; int threadidx_y = threadIdx.x % 32; int row_offset = blockIdx.x * 4 + threadidx_x; int cols_per_thread = (cols - 1) / 32; int cols_this_thread = cols_per_thread; int last_y = (cols / cols_per_thread); if (threadidx_y == last_y) { cols_this_thread = cols - cols_per_thread * last_y; } else if (threadidx_y > last_y) { cols_this_thread = 0; } int lane_id = threadidx_y; if (row_offset < rows) { float buf[32]; float thread_mean = 0.f; float thread_m2 = 0.f; float thread_count = 0.f; float warp_mean; float warp_m2; float warp_count; T* row_input = input + row_offset * cols; T* row_output = output + row_offset * cols; #pragma unroll for (int i = 0; i < cols_this_thread; i++) { buf[i] = static_cast<float>(row_input[lane_id * cols_per_thread + i]); } #pragma unroll for (int i = 0; i < cols_this_thread; i++) { WelfordOnline(buf[i], &thread_mean, &thread_m2, &thread_count); } WelfordWarpAllReduce(thread_mean, thread_m2, thread_count, &warp_mean, &warp_m2, &warp_count); float row_mean = warp_mean; float row_variance = max(warp_m2 / warp_count, 0.f); float row_inv_var = rsqrt(row_variance + epsilon); if (lane_id == 0) { mean[row_offset] = row_mean; invvar[row_offset] = row_inv_var; } #pragma unroll for (int i = 0; i < cols_this_thread; ++i) { buf[i] = (buf[i] - row_mean) * row_inv_var; } #pragma unroll for (int i = 0; i < cols_this_thread; ++i) { row_output[lane_id * cols_per_thread + i] = static_cast<T>(buf[i]) * gamma[lane_id * cols_per_thread + i] + beta[lane_id * cols_per_thread + i]; } } } void cuda_layer_norm(at::Tensor* output, at::Tensor* mean, at::Tensor* invvar, at::Tensor* input, int rows, int cols, at::IntArrayRef normalized_shape, at::Tensor* gamma, at::Tensor* beta, double epsilon) { int grid = (rows + 3) / 4; dim3 block(128); if (output->dtype() == torch::kFloat32) { fastfold_layernorm<float><<<grid, block>>>( (float*)input->data_ptr(), (float*)output->data_ptr(), (float*)gamma->data_ptr(), (float*)beta->data_ptr(), (float*)mean->data_ptr(), (float*)invvar->data_ptr(), rows, cols, epsilon); } else if (output->dtype() == torch::kFloat16) { fastfold_layernorm<at::Half><<<grid, block>>>( (at::Half*)input->data_ptr(), (at::Half*)output->data_ptr(), (at::Half*)gamma->data_ptr(), (at::Half*)beta->data_ptr(), (float*)mean->data_ptr(), (float*)invvar->data_ptr(), rows, cols, epsilon); } else if (output->dtype() == torch::kBFloat16) { fastfold_layernorm<at::BFloat16><<<grid, block>>>( (at::BFloat16*)input->data_ptr(), (at::BFloat16*)output->data_ptr(), (at::BFloat16*)gamma->data_ptr(), (at::BFloat16*)beta->data_ptr(), (float*)mean->data_ptr(), (float*)invvar->data_ptr(), rows, cols, epsilon); } } template <typename T> struct SharedMemory; template <> struct SharedMemory<float> { __device__ float* getPointer() { extern __shared__ float s_float[]; return s_float; } }; template <typename T, typename U, 
typename V> __device__ void cuLoadWriteStridedInputs(const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const V* dout, const int i1_end, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar) { int i1 = i1_block + thr_load_row_off; if (i1 < i1_end) { U curr_mean = mean[i1]; U curr_invvar = invvar[i1]; for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1 * n2 + i2; int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; if (i2 < n2) { U curr_input = static_cast<U>(input[load_idx]); U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] = curr_dout; warp_buf2[write_idx] = curr_dout * (curr_input - curr_mean) * curr_invvar; } else { warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } else { for (int k = 0; k < blockDim.y; ++k) { int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } template <typename T, typename U, typename V> __device__ void cuLoadAddStridedInputs(const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const V* dout, const int i1_end, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar) { int i1 = i1_block + thr_load_row_off; if (i1 < i1_end) { U curr_mean = mean[i1]; U curr_invvar = invvar[i1]; for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1 * n2 + i2; int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; if (i2 < n2) { U curr_input = static_cast<U>(input[load_idx]); U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] += curr_dout; warp_buf2[write_idx] += curr_dout * (curr_input - curr_mean) * curr_invvar; } } } } template <typename T, typename U, typename V> __global__ void cuComputePartGradGammaBeta(const V* __restrict__ dout, const T* __restrict__ input, const int n1, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar, U epsilon, U* part_grad_gamma, U* part_grad_beta) { const int numsegs_n1 = (n1 + blockDim.y * blockDim.y - 1) / (blockDim.y * blockDim.y); const int segs_per_block = (numsegs_n1 + gridDim.y - 1) / gridDim.y; const int i1_beg = blockIdx.y * segs_per_block * blockDim.y * blockDim.y; const int i1_beg_plus_one = (blockIdx.y + 1) * segs_per_block * blockDim.y * blockDim.y; const int i1_end = i1_beg_plus_one < n1 ? 
i1_beg_plus_one : n1; const int row_stride = blockDim.x + 1; const int thr_load_col_off = (threadIdx.x * blockDim.y) & (blockDim.x - 1); const int thr_load_row_off = (threadIdx.x * blockDim.y) / blockDim.x + threadIdx.y * blockDim.y; const int i2_off = blockIdx.x * blockDim.x + thr_load_col_off; SharedMemory<U> shared; U* buf = shared.getPointer(); // buf has at least blockDim.x * blockDim.y * blockDim.y + // (blockDim.y - 1)*(blockDim.x/blockDim.y) elements U* warp_buf1 = (U*)buf; U* warp_buf2 = warp_buf1 + blockDim.y * blockDim.y * row_stride; // compute partial sums from strided inputs // do this to increase number of loads in flight cuLoadWriteStridedInputs(i1_beg, thr_load_row_off, thr_load_col_off, i2_off, row_stride, warp_buf1, warp_buf2, input, dout, i1_end, n2, mean, invvar); for (int i1_block = i1_beg + blockDim.y * blockDim.y; i1_block < i1_end; i1_block += blockDim.y * blockDim.y) { cuLoadAddStridedInputs(i1_block, thr_load_row_off, thr_load_col_off, i2_off, row_stride, warp_buf1, warp_buf2, input, dout, i1_end, n2, mean, invvar); } __syncthreads(); // inter-warp reductions // sum within each warp U acc1 = U(0); U acc2 = U(0); for (int k = 0; k < blockDim.y; ++k) { int row1 = threadIdx.y + k * blockDim.y; int idx1 = row1 * row_stride + threadIdx.x; acc1 += warp_buf1[idx1]; acc2 += warp_buf2[idx1]; } warp_buf1[threadIdx.y * row_stride + threadIdx.x] = acc1; warp_buf2[threadIdx.y * row_stride + threadIdx.x] = acc2; __syncthreads(); // sum all warps for (int offset = blockDim.y / 2; offset > 1; offset /= 2) { if (threadIdx.y < offset) { int row1 = threadIdx.y; int row2 = threadIdx.y + offset; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; warp_buf1[idx1] += warp_buf1[idx2]; warp_buf2[idx1] += warp_buf2[idx2]; } __syncthreads(); } int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (threadIdx.y == 0 && i2 < n2) { int row1 = threadIdx.y; int row2 = threadIdx.y + 1; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; part_grad_beta[blockIdx.y * n2 + i2] = warp_buf1[idx1] + warp_buf1[idx2]; part_grad_gamma[blockIdx.y * n2 + i2] = warp_buf2[idx1] + warp_buf2[idx2]; } } template <typename U, typename V> __global__ void cuComputeGradGammaBeta(const U* part_grad_gamma, const U* part_grad_beta, const int part_size, const int n1, const int n2, V* grad_gamma, V* grad_beta) { // sum partial gradients for gamma and beta SharedMemory<U> shared; U* buf = shared.getPointer(); int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i2 < n2) { // each warp does sequential reductions until reduced part_size is num_warps int num_warp_reductions = part_size / blockDim.y; U sum_gamma = U(0); U sum_beta = U(0); const U* part_grad_gamma_ptr = part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2; const U* part_grad_beta_ptr = part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2; for (int warp_offset = 0; warp_offset < num_warp_reductions; ++warp_offset) { sum_gamma += part_grad_gamma_ptr[warp_offset * n2]; sum_beta += part_grad_beta_ptr[warp_offset * n2]; } // inter-warp reductions const int nbsize3 = blockDim.x * blockDim.y / 2; for (int offset = blockDim.y / 2; offset >= 1; offset /= 2) { // top half write to shared memory if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[write_idx] = sum_gamma; buf[write_idx + nbsize3] = sum_beta; } __syncthreads(); // bottom half sums if (threadIdx.y < offset) { const int read_idx = threadIdx.y * 
blockDim.x + threadIdx.x; sum_gamma += buf[read_idx]; sum_beta += buf[read_idx + nbsize3]; } __syncthreads(); } // write out fully summed gradients if (threadIdx.y == 0) { grad_gamma[i2] = sum_gamma; grad_beta[i2] = sum_beta; } } } template <typename T, typename U, typename V> __global__ void cuComputeGradInput(const V* __restrict__ dout, const T* __restrict__ input, const int n1, const int n2, const U* __restrict__ mean, const U* __restrict__ invvar, U epsilon, const V* gamma, T* grad_input) { for (auto i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) { U sum_loss1 = U(0); U sum_loss2 = U(0); const U c_mean = mean[i1]; const U c_invvar = invvar[i1]; const T* k_input = input + i1 * n2; const V* k_dout = dout + i1 * n2; const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; if (gamma != NULL) { int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_h = static_cast<U>(k_input[l + k]); const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss * gamma[l + k]; sum_loss2 += c_loss * gamma[l + k] * (c_h - c_mean) * c_invvar; } } for (; l < n2; ++l) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss * gamma[l]; sum_loss2 += c_loss * gamma[l] * (c_h - c_mean) * c_invvar; } } else { int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_h = static_cast<U>(k_input[l + k]); const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss; sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } } for (; l < n2; ++l) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss; sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } } // intra-warp reductions for (int mask = blockDim.x / 2; mask > 0; mask /= 2) { sum_loss1 += WARP_SHFL_XOR(sum_loss1, mask); sum_loss2 += WARP_SHFL_XOR(sum_loss2, mask); } // inter-warp reductions if (blockDim.y > 1) { SharedMemory<U> shared; U* buf = shared.getPointer(); for (int offset = blockDim.y / 2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int wrt_i = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[2 * wrt_i] = sum_loss1; buf[2 * wrt_i + 1] = sum_loss2; } __syncthreads(); // lower half merges if (threadIdx.y < offset) { const int read_i = threadIdx.y * blockDim.x + threadIdx.x; sum_loss1 += buf[2 * read_i]; sum_loss2 += buf[2 * read_i + 1]; } __syncthreads(); } if (threadIdx.y == 0) { buf[2 * threadIdx.x] = sum_loss1; buf[2 * threadIdx.x + 1] = sum_loss2; } __syncthreads(); if (threadIdx.y != 0) { sum_loss1 = buf[2 * threadIdx.x]; sum_loss2 = buf[2 * threadIdx.x + 1]; } } // all threads now have the two sums over l U fH = (U)n2; U term1 = (U(1) / fH) * c_invvar; T* k_grad_input = grad_input + i1 * n2; if (gamma != NULL) { for (int l = thrx; l < n2; l += numx) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss * gamma[l]; f_grad_input -= sum_loss1; f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } else { for (int l = thrx; l < n2; l += numx) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss; f_grad_input -= sum_loss1; f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } } 
} template <typename T, typename U, typename V> void HostLayerNormGradient(const V* dout, const U* mean, const U* invvar, at::Tensor* input, int n1, int n2, const V* gamma, const V* beta, double epsilon, T* grad_input, V* grad_gamma, V* grad_beta) { auto stream = at::cuda::getCurrentCUDAStream().stream(); if (gamma != NULL && beta != NULL) { // compute grad_gamma(j) and grad_beta(j) const int part_size = 16; const dim3 threads2(32, 4, 1); const dim3 blocks2((n2 + threads2.x - 1) / threads2.x, part_size, 1); const int nshared2_a = 2 * sizeof(U) * threads2.y * threads2.y * (threads2.x + 1); const int nshared2_b = threads2.x * threads2.y * sizeof(U); const int nshared2 = nshared2_a > nshared2_b ? nshared2_a : nshared2_b; at::Tensor part_grad_gamma = at::empty({part_size, n2}, input->options().dtype(at::ScalarType::Float)); at::Tensor part_grad_beta = at::empty_like(part_grad_gamma); cuComputePartGradGammaBeta<<<blocks2, threads2, nshared2, stream>>>( dout, input->DATA_PTR<T>(), n1, n2, mean, invvar, U(epsilon), part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>()); const dim3 threads3(32, 8, 1); const dim3 blocks3((n2 + threads2.x - 1) / threads2.x, 1, 1); const int nshared3 = threads3.x * threads3.y * sizeof(U); cuComputeGradGammaBeta<<<blocks3, threads3, nshared3, stream>>>( part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>(), part_size, n1, n2, grad_gamma, grad_beta); } // compute grad_input const uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; const dim3 blocks1(1, std::min((uint64_t)n1, maxGridY), 1); const dim3 threads1(32, 4, 1); int nshared = threads1.y > 1 ? threads1.y * threads1.x * sizeof(U) : 0; cuComputeGradInput<<<blocks1, threads1, nshared, stream>>>( dout, input->DATA_PTR<T>(), n1, n2, mean, invvar, U(epsilon), gamma, grad_input); } void cuda_layer_norm_gradient(at::Tensor* dout, at::Tensor* mean, at::Tensor* invvar, at::Tensor* input, int n1, int n2, at::IntArrayRef normalized_shape, at::Tensor* gamma, at::Tensor* beta, double epsilon, at::Tensor* grad_input, at::Tensor* grad_gamma, at::Tensor* grad_beta) { using namespace at; DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES( input->scalar_type(), gamma->scalar_type(), "cuda_layer_norm_gradient_kernel", HostLayerNormGradient(dout->DATA_PTR<scalar_t_out>(), mean->DATA_PTR<float>(), invvar->DATA_PTR<float>(), input, n1, n2, // TMJ pass NULL argument for gamma, beta, grad_gamma and grad_beta // if gamma Tensor is NULL on input. gamma != NULL ? gamma->DATA_PTR<scalar_t_out>() : NULL, gamma != NULL ? beta->DATA_PTR<scalar_t_out>() : NULL, epsilon, grad_input->DATA_PTR<scalar_t_in>(), gamma != NULL ? grad_gamma->DATA_PTR<scalar_t_out>() : NULL, gamma != NULL ? grad_beta->DATA_PTR<scalar_t_out>() : NULL);) }
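WelfordOnline in the forward kernels of this file pair is the standard single-pass mean/variance update that the warp reduction then merges. A host-side sketch with a small hand-checkable input (illustrative only, not part of the file above):

#include <cstdio>
#include <vector>

int main()
{
    std::vector<float> xs = {1.f, 2.f, 4.f, 8.f};
    float mean = 0.f, m2 = 0.f, count = 0.f;
    for (float v : xs) {  // same update as the scalar WelfordOnline overload
        count += 1.f;
        float d1 = v - mean;
        mean += d1 / count;
        float d2 = v - mean;
        m2 += d1 * d2;
    }
    printf("mean=%f var=%f\n", mean, m2 / count);  // mean=3.750000 var=7.187500
    return 0;
}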
a37d535b10968f9b334812df06a1228ae8d7d0a4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// === GPUQREngine/Source/GPUQREngine_UberKernel.cu ============================
// =============================================================================
//
// This is the actual concrete kernel invocation, transferring control flow to
// the GPU accelerator briefly. We actually launch kernels using alternating
// streams to overlap communication with computation, so the launch is actually
// asynchronous in nature. We use the CUDA events and streams model throughout
// the Scheduler to coordinate asynchronous launch behavior.
//
// =============================================================================

#define CUDA_INCLUDE
#include "Kernel/uberKernel.cu"

void GPUQREngine_UberKernel
(
    hipStream_t kernelStream,      // The stream on which to launch the kernel
    TaskDescriptor *gpuWorkQueue,  // The list of work items for the GPU
    int numTasks                   // The # of items in the work list
)
{
    /* Set the standard launch configuration. */
    dim3 threads(NUMTHREADS, 1);
    dim3 grid(numTasks, 1);

    /* Launch the kernel */
    hipLaunchKernelGGL(( qrKernel), dim3(grid), dim3(threads), 0, kernelStream, gpuWorkQueue, numTasks);
}
a37d535b10968f9b334812df06a1228ae8d7d0a4.cu
// =============================================================================
// === GPUQREngine/Source/GPUQREngine_UberKernel.cu ============================
// =============================================================================
//
// This is the actual concrete kernel invocation, transferring control flow to
// the GPU accelerator briefly. We actually launch kernels using alternating
// streams to overlap communication with computation, so the launch is actually
// asynchronous in nature. We use the CUDA events and streams model throughout
// the Scheduler to coordinate asynchronous launch behavior.
//
// =============================================================================

#define CUDA_INCLUDE
#include "Kernel/uberKernel.cu"

void GPUQREngine_UberKernel
(
    cudaStream_t kernelStream,     // The stream on which to launch the kernel
    TaskDescriptor *gpuWorkQueue,  // The list of work items for the GPU
    int numTasks                   // The # of items in the work list
)
{
    /* Set the standard launch configuration. */
    dim3 threads(NUMTHREADS, 1);
    dim3 grid(numTasks, 1);

    /* Launch the kernel */
    qrKernel<<<grid, threads, 0, kernelStream>>>(gpuWorkQueue, numTasks);
}
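The banner comment in both GPUQREngine_UberKernel files refers to launching on alternating streams so that transfers overlap with compute. A generic toy version of that pattern follows (nothing below comes from GPUQREngine; the kernel, buffer names, and sizes are invented for illustration):

#include <cuda_runtime.h>
#include <cstdio>

__global__ void addOne(float* x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] += 1.0f;
}

int main()
{
    const int n = 1 << 20, chunks = 8;
    float* h = nullptr;
    float* d[2];
    cudaStream_t s[2];
    cudaMallocHost((void**)&h, (size_t)chunks * n * sizeof(float));  // pinned, so async copies really overlap
    for (int i = 0; i < chunks * n; ++i) h[i] = 0.f;
    for (int i = 0; i < 2; ++i) {
        cudaMalloc((void**)&d[i], n * sizeof(float));
        cudaStreamCreate(&s[i]);
    }
    for (int c = 0; c < chunks; ++c) {
        int b = c & 1;  // alternate between the two streams
        cudaMemcpyAsync(d[b], h + (size_t)c * n, n * sizeof(float), cudaMemcpyHostToDevice, s[b]);
        addOne<<<(n + 255) / 256, 256, 0, s[b]>>>(d[b], n);
        cudaMemcpyAsync(h + (size_t)c * n, d[b], n * sizeof(float), cudaMemcpyDeviceToHost, s[b]);
    }
    cudaDeviceSynchronize();
    printf("h[0] = %f\n", h[0]);  // 1.000000 after the round trip
    return 0;
}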
5c86889eb600c21da8bdc71da99301e368b7ae9e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

using namespace std;

#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif

__global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    for (; tid < outScalarCount; tid += stride) {
        int linearIndex = tid;
        int outIndex0 = linearIndex / outStride0;
        linearIndex = linearIndex - outIndex0 * outStride0;
        int outIndex1 = linearIndex / outStride1;
        int outIndex2 = linearIndex - outIndex1 * outStride1;
        int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1;
        out[tid] = in[inIndex];
    }
}
5c86889eb600c21da8bdc71da99301e368b7ae9e.cu
#include "includes.h" using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif __global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < outScalarCount; tid += stride) { int linearIndex = tid; int outIndex0 = linearIndex / outStride0; linearIndex = linearIndex - outIndex0 * outStride0; int outIndex1 = linearIndex / outStride1; int outIndex2 = linearIndex - outIndex1 * outStride1; int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1; out[tid] = in[inIndex]; } }
02001840bff82ef736300466282feda8772cdc6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/LogSoftMax.cu" #else #include "../common.h" void THNN_(LogSoftMax_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int dim) { THCUNN_assertSameGPU(state, 2, input, output); THArgCheck(dim >= 0 && dim < input->nDimension, 4, "dim out of range (got %d, but input has %d dims)", dim, input->nDimension); THArgCheck(TensorUtils<THCTensor>::canUse32BitIndexMath(state, input), 4, "input tensor is too large (unsupported size. file a feature request)"); THCTensor_(resizeAs)(state, output, input); uint64_t outer_size = 1; uint64_t dim_size = input->size[dim]; uint64_t inner_size = 1; for (uint64_t i = 0; i < dim; ++i) outer_size *= input->size[i]; for (uint64_t i = dim + 1; i < input->nDimension; ++i) inner_size *= input->size[i]; // This kernel spawns a block of 1024 threads per each element in the batch. // XXX: it assumes that inner_size == 1 input = THCTensor_(newContiguous)(state, input); if (inner_size == 1 && dim_size >= 64) { dim3 grid(outer_size); dim3 block(1024); hipLaunchKernelGGL(( cunn_LogSoftMax_updateOutput_kernel<2, real, accreal>) , dim3(grid), dim3(block), block.x * sizeof(accreal), THCState_getCurrentStream(state), THCTensor_(data)(state, output), THCTensor_(data)(state, input), dim_size ); // This kernel runs in a 2D grid, where each application along y dimension has a fixed // outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size. // Reductions over dim are done in a single-threaded manner. } else { dim3 grid, block; uint32_t block_size = 1024; while (block_size > inner_size) block_size >>= 1; // block_size = floor(log2(inner_size)) int max_active_blocks; hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks, &cunn_SpatialLogSoftMax_updateOutput_kernel<real, accreal>, block_size, 0); max_active_blocks *= THCState_getCurrentDeviceProperties(state)->multiProcessorCount; LogSoftMax_getSpatialGridSize(block_size, max_active_blocks, outer_size, dim_size, inner_size, grid, block); hipLaunchKernelGGL(( cunn_SpatialLogSoftMax_updateOutput_kernel<real, accreal>) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, output), THCTensor_(data)(state, input), outer_size, dim_size, inner_size ); } THCudaCheck(hipGetLastError()); THCTensor_(free)(state, input); } void THNN_(LogSoftMax_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *output, int dim) { THArgCheck(dim >= 0 && dim < output->nDimension, 6, "dim out of range (got %d, but input has %d dims)", dim, output->nDimension); THArgCheck(TensorUtils<THCTensor>::canUse32BitIndexMath(state, output), 6, "input tensor is too large (unsupported size. file a feature request)"); THCUNN_check_nElement(state, output, gradOutput); THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput); THCTensor_(resizeAs)(state, gradInput, output); uint64_t outer_size = 1; uint64_t dim_size = output->size[dim]; uint64_t inner_size = 1; for (uint64_t i = 0; i < dim; ++i) outer_size *= output->size[i]; for (uint64_t i = dim + 1; i < output->nDimension; ++i) inner_size *= output->size[i]; output = THCTensor_(newContiguous)(state, output); gradOutput = THCTensor_(newContiguous)(state, gradOutput); // See descriptions of kernels above. 
if (inner_size == 1 && dim_size >= 64) { dim3 grid(outer_size); dim3 block(1024); hipLaunchKernelGGL(( cunn_LogSoftMax_updateGradInput_kernel<2, real, accreal>) , dim3(grid), dim3(block), block.x * sizeof(accreal), THCState_getCurrentStream(state), THCTensor_(data)(state, gradInput), THCTensor_(data)(state, output), THCTensor_(data)(state, gradOutput), dim_size ); } else { dim3 grid, block; uint32_t block_size = 1024; while (block_size > inner_size) block_size >>= 1; // block_size = floor(log2(inner_size)) int max_active_blocks; hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks, &cunn_SpatialLogSoftMax_updateGradInput_kernel<real, accreal>, block_size, 0); max_active_blocks *= THCState_getCurrentDeviceProperties(state)->multiProcessorCount; LogSoftMax_getSpatialGridSize(block_size, max_active_blocks, outer_size, dim_size, inner_size, grid, block); hipLaunchKernelGGL(( cunn_SpatialLogSoftMax_updateGradInput_kernel<real, accreal>) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, gradInput), THCTensor_(data)(state, output), THCTensor_(data)(state, gradOutput), outer_size, dim_size, inner_size ); } hipError_t errcode = hipGetLastError(); if (errcode != hipSuccess) { THError(hipGetErrorString(errcode)); } THCTensor_(free)(state, gradOutput); THCTensor_(free)(state, output); } #endif
02001840bff82ef736300466282feda8772cdc6b.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/LogSoftMax.cu" #else #include "../common.h" void THNN_(LogSoftMax_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int dim) { THCUNN_assertSameGPU(state, 2, input, output); THArgCheck(dim >= 0 && dim < input->nDimension, 4, "dim out of range (got %d, but input has %d dims)", dim, input->nDimension); THArgCheck(TensorUtils<THCTensor>::canUse32BitIndexMath(state, input), 4, "input tensor is too large (unsupported size. file a feature request)"); THCTensor_(resizeAs)(state, output, input); uint64_t outer_size = 1; uint64_t dim_size = input->size[dim]; uint64_t inner_size = 1; for (uint64_t i = 0; i < dim; ++i) outer_size *= input->size[i]; for (uint64_t i = dim + 1; i < input->nDimension; ++i) inner_size *= input->size[i]; // This kernel spawns a block of 1024 threads per each element in the batch. // XXX: it assumes that inner_size == 1 input = THCTensor_(newContiguous)(state, input); if (inner_size == 1 && dim_size >= 64) { dim3 grid(outer_size); dim3 block(1024); cunn_LogSoftMax_updateOutput_kernel<2, real, accreal> <<<grid, block, block.x * sizeof(accreal), THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), dim_size ); // This kernel runs in a 2D grid, where each application along y dimension has a fixed // outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size. // Reductions over dim are done in a single-threaded manner. } else { dim3 grid, block; uint32_t block_size = 1024; while (block_size > inner_size) block_size >>= 1; // block_size = floor(log2(inner_size)) int max_active_blocks; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks, &cunn_SpatialLogSoftMax_updateOutput_kernel<real, accreal>, block_size, 0); max_active_blocks *= THCState_getCurrentDeviceProperties(state)->multiProcessorCount; LogSoftMax_getSpatialGridSize(block_size, max_active_blocks, outer_size, dim_size, inner_size, grid, block); cunn_SpatialLogSoftMax_updateOutput_kernel<real, accreal> <<<grid, block, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), outer_size, dim_size, inner_size ); } THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, input); } void THNN_(LogSoftMax_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *output, int dim) { THArgCheck(dim >= 0 && dim < output->nDimension, 6, "dim out of range (got %d, but input has %d dims)", dim, output->nDimension); THArgCheck(TensorUtils<THCTensor>::canUse32BitIndexMath(state, output), 6, "input tensor is too large (unsupported size. file a feature request)"); THCUNN_check_nElement(state, output, gradOutput); THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput); THCTensor_(resizeAs)(state, gradInput, output); uint64_t outer_size = 1; uint64_t dim_size = output->size[dim]; uint64_t inner_size = 1; for (uint64_t i = 0; i < dim; ++i) outer_size *= output->size[i]; for (uint64_t i = dim + 1; i < output->nDimension; ++i) inner_size *= output->size[i]; output = THCTensor_(newContiguous)(state, output); gradOutput = THCTensor_(newContiguous)(state, gradOutput); // See descriptions of kernels above. 
if (inner_size == 1 && dim_size >= 64) { dim3 grid(outer_size); dim3 block(1024); cunn_LogSoftMax_updateGradInput_kernel<2, real, accreal> <<<grid, block, block.x * sizeof(accreal), THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, output), THCTensor_(data)(state, gradOutput), dim_size ); } else { dim3 grid, block; uint32_t block_size = 1024; while (block_size > inner_size) block_size >>= 1; // block_size = floor(log2(inner_size)) int max_active_blocks; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks, &cunn_SpatialLogSoftMax_updateGradInput_kernel<real, accreal>, block_size, 0); max_active_blocks *= THCState_getCurrentDeviceProperties(state)->multiProcessorCount; LogSoftMax_getSpatialGridSize(block_size, max_active_blocks, outer_size, dim_size, inner_size, grid, block); cunn_SpatialLogSoftMax_updateGradInput_kernel<real, accreal> <<<grid, block, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, output), THCTensor_(data)(state, gradOutput), outer_size, dim_size, inner_size ); } cudaError errcode = cudaGetLastError(); if (errcode != cudaSuccess) { THError(cudaGetErrorString(errcode)); } THCTensor_(free)(state, gradOutput); THCTensor_(free)(state, output); } #endif
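Both LogSoftMax variants pick the spatial block size with the same halving loop. Note that the loop yields the largest power of two not exceeding inner_size (capped at 1024), i.e. 2^floor(log2(inner_size)), rather than floor(log2(inner_size)) itself as the in-file comment suggests. A small host sketch of just that selection (illustrative only, not part of the file above):

#include <cstdint>
#include <cstdio>

uint32_t pickBlockSize(uint64_t inner_size)
{
    uint32_t block_size = 1024;
    while (block_size > inner_size) block_size >>= 1;
    return block_size;
}

int main()
{
    // 1 -> 1, 300 -> 256, 5000 -> 1024 (capped)
    printf("%u %u %u\n", pickBlockSize(1), pickBlockSize(300), pickBlockSize(5000));
    return 0;
}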
759fa258b643547b9825f5495d1d3495aa992014.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/cusolver_wrappers.h> #include <test_utils.h> #include <iomanip> #include <raft/handle.hpp> #include <raft/mr/device/allocator.hpp> #include <raft/random/rng.cuh> #include <solver/lars_impl.cuh> #include <sstream> #include <vector> namespace ML { namespace Solver { namespace Lars { template <typename math_t> class LarsTest : public ::testing::Test { protected: LarsTest() : allocator(handle.get_device_allocator()), cor(allocator, handle.get_stream(), n_cols), X(allocator, handle.get_stream(), n_cols * n_rows), G(allocator, handle.get_stream(), n_cols * n_cols), sign(allocator, handle.get_stream(), n_cols), ws(allocator, handle.get_stream(), n_cols), A(allocator, handle.get_stream(), 1) { CUDA_CHECK(hipStreamCreate(&stream)); handle.set_stream(stream); raft::update_device(cor.data(), cor_host, n_cols, stream); raft::update_device(X.data(), X_host, n_cols * n_rows, stream); raft::update_device(G.data(), G_host, n_cols * n_cols, stream); raft::update_device(sign.data(), sign_host, n_cols, stream); } void TearDown() override { CUDA_CHECK(hipStreamDestroy(stream)); } void testSelectMostCorrelated() { math_t cj; int idx; MLCommon::device_buffer<math_t> workspace(allocator, stream, n_cols); ML::Solver::Lars::selectMostCorrelated( n_active, n_cols, cor.data(), &cj, workspace, &idx, n_rows, indices, 1, stream); EXPECT_EQ(idx, 3); EXPECT_EQ(7, cj); } void testMoveToActive() { ML::Solver::Lars::moveToActive(handle.get_cublas_handle(), &n_active, 3, X.data(), n_rows, n_cols, n_rows, cor.data(), indices, G.data(), n_cols, sign.data(), stream); EXPECT_EQ(n_active, 3); EXPECT_TRUE(raft::devArrMatchHost(cor_exp, cor.data(), n_cols, raft::Compare<math_t>())); EXPECT_TRUE(raft::devArrMatchHost(G_exp, G.data(), n_cols * n_cols, raft::Compare<math_t>())); EXPECT_TRUE( raft::devArrMatch((math_t)1.0, sign.data() + n_active - 1, 1, raft::Compare<math_t>())); // Do it again with G == nullptr to test if X is properly changed n_active = 2; ML::Solver::Lars::moveToActive(handle.get_cublas_handle(), &n_active, 3, X.data(), n_rows, n_cols, n_rows, cor.data(), indices, (math_t*)nullptr, n_cols, sign.data(), stream); EXPECT_TRUE(raft::devArrMatchHost(X_exp, X.data(), n_rows * n_cols, raft::Compare<math_t>())); } void calcUExp(math_t* G, int n_cols, math_t* U_dev_exp) { auto allocator = handle.get_device_allocator(); MLCommon::device_buffer<int> devInfo(allocator, stream, 1); MLCommon::device_buffer<math_t> workspace(allocator, stream); int n_work; const int ld_U = n_cols; CUSOLVER_CHECK(raft::linalg::cusolverDnpotrf_bufferSize( handle.get_cusolver_dn_handle(), HIPBLAS_FILL_MODE_UPPER, n_cols, U_dev_exp, ld_U, &n_work)); workspace.resize(n_work, stream); // Expected solution using Cholesky factorization from scratch raft::copy(U_dev_exp, G, n_cols * ld_U, stream); 
CUSOLVER_CHECK(raft::linalg::cusolverDnpotrf(handle.get_cusolver_dn_handle(), HIPBLAS_FILL_MODE_UPPER, n_cols, U_dev_exp, ld_U, workspace.data(), n_work, devInfo.data(), stream)); } // Initialize a mix of G and U matrices to test updateCholesky void initGU(math_t* GU, math_t* G, math_t* U, int n_active, bool copy_G) { const int ld_U = n_cols; // First we copy over all elements, because the factorization only replaces // the upper triangular part. This way it will be easier to compare to the // reference solution. raft::copy(GU, G, n_cols * n_cols, stream); if (!copy_G) { // zero the new colum of G CUDA_CHECK(hipMemsetAsync(GU + (n_active - 1) * n_cols, 0, n_cols * sizeof(math_t), stream)); } for (int i = 0; i < n_active - 1; i++) { raft::copy(GU + i * ld_U, U + i * ld_U, i + 1, stream); } } void testUpdateCholesky() { const int ld_X = n_rows; const int ld_G = n_cols; const int ld_U = ld_G; auto allocator = handle.get_device_allocator(); MLCommon::device_buffer<math_t> workspace(allocator, stream); MLCommon::device_buffer<math_t> U_dev_exp(allocator, stream, n_cols * n_cols); calcUExp(G.data(), n_cols, U_dev_exp.data()); MLCommon::device_buffer<math_t> U(allocator, stream, n_cols * n_cols); n_active = 4; math_t eps = -1; // First test with U already initialized initGU(U.data(), G.data(), U_dev_exp.data(), n_active, true); ML::Solver::Lars::updateCholesky(handle, n_active, X.data(), n_rows, n_cols, ld_X, U.data(), ld_U, U.data(), ld_G, workspace, eps, stream); EXPECT_TRUE(raft::devArrMatch( U_dev_exp.data(), U.data(), n_cols * n_cols, raft::CompareApprox<math_t>(1e-5))); // Next test where G and U are separate arrays initGU(U.data(), G.data(), U_dev_exp.data(), n_active, false); ML::Solver::Lars::updateCholesky(handle, n_active, X.data(), n_rows, n_cols, ld_X, U.data(), ld_U, G.data(), ld_G, workspace, eps, stream); EXPECT_TRUE(raft::devArrMatch( U_dev_exp.data(), U.data(), n_cols * n_cols, raft::CompareApprox<math_t>(1e-5))); // Third test without Gram matrix. 
initGU(U.data(), G.data(), U_dev_exp.data(), n_active, false); ML::Solver::Lars::updateCholesky(handle, n_active, X.data(), n_rows, n_cols, ld_X, U.data(), ld_U, (math_t*)nullptr, 0, workspace, eps, stream); EXPECT_TRUE(raft::devArrMatch( U_dev_exp.data(), U.data(), n_cols * n_cols, raft::CompareApprox<math_t>(1e-4))); } void testCalcW0() { n_active = 4; const int ld_U = n_cols; auto allocator = handle.get_device_allocator(); MLCommon::device_buffer<math_t> ws(allocator, stream, n_active); MLCommon::device_buffer<math_t> U(allocator, stream, n_cols * ld_U); calcUExp(G.data(), n_cols, U.data()); ML::Solver::Lars::calcW0( handle, n_active, n_cols, sign.data(), U.data(), ld_U, ws.data(), stream); EXPECT_TRUE( raft::devArrMatchHost(ws0_exp, ws.data(), n_active, raft::CompareApprox<math_t>(1e-3))); } void testCalcA() { n_active = 4; MLCommon::device_buffer<math_t> ws(handle.get_device_allocator(), stream, n_active); raft::update_device(ws.data(), ws0_exp, n_active, stream); ML::Solver::Lars::calcA(handle, A.data(), n_active, sign.data(), ws.data(), stream); EXPECT_TRUE(raft::devArrMatch( (math_t)0.20070615686577709, A.data(), 1, raft::CompareApprox<math_t>(1e-6))); } void testEquiangular() { n_active = 4; auto allocator = handle.get_device_allocator(); MLCommon::device_buffer<math_t> workspace(allocator, stream); MLCommon::device_buffer<math_t> u_eq(allocator, stream, n_rows); MLCommon::device_buffer<math_t> U(allocator, stream, n_cols * n_cols); calcUExp(G.data(), n_cols, U.data()); initGU(G.data(), G.data(), U.data(), n_active, true); const int ld_X = n_rows; const int ld_U = n_cols; const int ld_G = n_cols; ML::Solver::Lars::calcEquiangularVec(handle, n_active, X.data(), n_rows, n_cols, ld_X, sign.data(), G.data(), ld_U, G.data(), ld_G, workspace, ws.data(), A.data(), u_eq.data(), (math_t)-1, stream); EXPECT_TRUE( raft::devArrMatchHost(ws_exp, ws.data(), n_active, raft::CompareApprox<math_t>(1e-3))); EXPECT_TRUE(raft::devArrMatch( (math_t)0.20070615686577709, A.data(), 1, raft::CompareApprox<math_t>(1e-4))); // Now test without Gram matrix, u should be calculated in this case initGU(G.data(), G.data(), U.data(), n_active, false); ML::Solver::Lars::calcEquiangularVec(handle, n_active, X.data(), n_rows, n_cols, ld_X, sign.data(), G.data(), ld_U, (math_t*)nullptr, 0, workspace, ws.data(), A.data(), u_eq.data(), (math_t)-1, stream); EXPECT_TRUE(raft::devArrMatchHost(u_eq_exp, u_eq.data(), 1, raft::CompareApprox<math_t>(1e-3))); } void testCalcMaxStep() { n_active = 2; math_t A_host = 3.6534305290498055; math_t ws_host[2] = {0.25662594, -0.01708941}; math_t u_host[4] = {0.10282127, -0.01595011, 0.07092104, -0.99204011}; math_t cor_host[4] = {137, 42, 4.7, 13.2}; const int ld_X = n_rows; const int ld_G = n_cols; MLCommon::device_buffer<math_t> u(handle.get_device_allocator(), stream, n_rows); MLCommon::device_buffer<math_t> ws(handle.get_device_allocator(), stream, n_active); MLCommon::device_buffer<math_t> gamma(handle.get_device_allocator(), stream, 1); MLCommon::device_buffer<math_t> U(handle.get_device_allocator(), stream, n_cols * n_cols); MLCommon::device_buffer<math_t> a_vec(handle.get_device_allocator(), stream, n_cols - n_active); raft::update_device(A.data(), &A_host, 1, stream); raft::update_device(ws.data(), ws_host, n_active, stream); raft::update_device(u.data(), u_host, n_rows, stream); raft::update_device(cor.data(), cor_host, n_cols, stream); const int max_iter = n_cols; math_t cj = 42; ML::Solver::Lars::calcMaxStep(handle, max_iter, n_rows, n_cols, n_active, cj, A.data(), 
cor.data(), G.data(), ld_G, X.data(), ld_X, (math_t*)nullptr, ws.data(), gamma.data(), a_vec.data(), stream); math_t gamma_exp = 0.20095407186830386; EXPECT_TRUE(raft::devArrMatch(gamma_exp, gamma.data(), 1, raft::CompareApprox<math_t>(1e-6))); math_t a_vec_exp[2] = {24.69447886, -139.66289908}; EXPECT_TRUE(raft::devArrMatchHost( a_vec_exp, a_vec.data(), a_vec.size(), raft::CompareApprox<math_t>(1e-4))); // test without G matrix, we use U as input in this case CUDA_CHECK(hipMemsetAsync(gamma.data(), 0, sizeof(math_t), stream)); CUDA_CHECK(hipMemsetAsync(a_vec.data(), 0, a_vec.size() * sizeof(math_t), stream)); ML::Solver::Lars::calcMaxStep(handle, max_iter, n_rows, n_cols, n_active, cj, A.data(), cor.data(), (math_t*)nullptr, 0, X.data(), ld_X, u.data(), ws.data(), gamma.data(), a_vec.data(), stream); EXPECT_TRUE(raft::devArrMatch(gamma_exp, gamma.data(), 1, raft::CompareApprox<math_t>(1e-6))); EXPECT_TRUE(raft::devArrMatchHost( a_vec_exp, a_vec.data(), a_vec.size(), raft::CompareApprox<math_t>(1e-4))); // Last iteration n_active = max_iter; CUDA_CHECK(hipMemsetAsync(gamma.data(), 0, sizeof(math_t), stream)); ML::Solver::Lars::calcMaxStep(handle, max_iter, n_rows, n_cols, n_active, cj, A.data(), cor.data(), (math_t*)nullptr, 0, X.data(), ld_X, u.data(), ws.data(), gamma.data(), a_vec.data(), stream); gamma_exp = 11.496044516528272; EXPECT_TRUE(raft::devArrMatch(gamma_exp, gamma.data(), 1, raft::CompareApprox<math_t>(1e-6))); } raft::handle_t handle; hipStream_t stream; std::shared_ptr<raft::mr::device::allocator> allocator; const int n_rows = 4; const int n_cols = 4; int n_active = 2; math_t cor_host[4] = {0, 137, 4, 7}; math_t cor_exp[4] = {0, 137, 7, 4}; // clang-format off // Keep in mind that we actually define column major matrices, so a row here // corresponds to a column of the matrix. 
math_t X_host[16] = { 1., 4., 9., -3., 9., 61., 131., 13., 3., 22., 111., -17., 0., 40., 40., 143.}; math_t X_exp[16] = { 1., 4., 9., -3., 9., 61., 131., 13., 0., 40., 40., 143., 3., 22., 111., -17.}; math_t G_host[16] = { 107., 1393., 1141., 91., 1393., 21132., 15689., 9539., 1141., 15689., 13103., 2889., 91., 9539., 2889., 23649.}; math_t G_exp[16] = { 107., 1393., 91., 1141., 1393., 21132., 9539., 15689., 91., 9539., 23649., 2889., 1141., 15689., 2889., 13103.}; // clang-format on int indices[4] = {3, 2, 1, 0}; int indices_exp[4] = {3, 4, 0, 1}; math_t sign_host[4] = {1, -1, 1, -1}; math_t ws0_exp[4] = {22.98636271, -2.15225918, 0.41474128, 0.72897179}; math_t ws_exp[4] = {4.61350452, -0.43197167, 0.08324113, 0.14630913}; math_t u_eq_exp[4] = {0.97548288, -0.21258388, 0.02538227, 0.05096055}; MLCommon::device_buffer<math_t> cor; MLCommon::device_buffer<math_t> X; MLCommon::device_buffer<math_t> G; MLCommon::device_buffer<math_t> sign; MLCommon::device_buffer<math_t> ws; MLCommon::device_buffer<math_t> A; }; typedef ::testing::Types<float, double> FloatTypes; TYPED_TEST_CASE(LarsTest, FloatTypes); TYPED_TEST(LarsTest, select) { this->testSelectMostCorrelated(); } TYPED_TEST(LarsTest, moveToActive) { this->testMoveToActive(); } TYPED_TEST(LarsTest, updateCholesky) { this->testUpdateCholesky(); } TYPED_TEST(LarsTest, calcW0) { this->testCalcW0(); } TYPED_TEST(LarsTest, calcA) { this->testCalcA(); } TYPED_TEST(LarsTest, equiangular) { this->testEquiangular(); } TYPED_TEST(LarsTest, maxStep) { this->testCalcMaxStep(); } template <typename math_t> class LarsTestFitPredict : public ::testing::Test { protected: LarsTestFitPredict() : allocator(handle.get_device_allocator()), X(allocator, handle.get_stream(), n_cols * n_rows), y(allocator, handle.get_stream(), n_rows), G(allocator, handle.get_stream(), n_cols * n_cols), beta(allocator, handle.get_stream(), n_cols), coef_path(allocator, handle.get_stream(), (n_cols + 1) * n_cols), alphas(allocator, handle.get_stream(), n_cols + 1), active_idx(allocator, handle.get_stream(), n_cols) { CUDA_CHECK(hipStreamCreate(&stream)); handle.set_stream(stream); raft::update_device(X.data(), X_host, n_cols * n_rows, stream); raft::update_device(y.data(), y_host, n_rows, stream); raft::update_device(G.data(), G_host, n_cols * n_cols, stream); } void TearDown() override { CUDA_CHECK(hipStreamDestroy(stream)); } void testFitGram() { int max_iter = 10; int verbosity = 0; int n_active; ML::Solver::Lars::larsFit(handle, X.data(), n_rows, n_cols, y.data(), beta.data(), active_idx.data(), alphas.data(), &n_active, G.data(), max_iter, (math_t*)nullptr, // coef_path.data(), verbosity, n_rows, n_cols, (math_t)-1); EXPECT_EQ(n_cols, n_active); EXPECT_TRUE( raft::devArrMatchHost(beta_exp, beta.data(), n_cols, raft::CompareApprox<math_t>(1e-5))); EXPECT_TRUE(raft::devArrMatchHost( alphas_exp, alphas.data(), n_cols + 1, raft::CompareApprox<math_t>(1e-4))); EXPECT_TRUE( raft::devArrMatchHost(indices_exp, active_idx.data(), n_cols, raft::Compare<int>())); } void testFitX() { int max_iter = 10; int verbosity = 0; int n_active; ML::Solver::Lars::larsFit(handle, X.data(), n_rows, n_cols, y.data(), beta.data(), active_idx.data(), alphas.data(), &n_active, (math_t*)nullptr, max_iter, (math_t*)nullptr, // coef_path.data(), verbosity, n_rows, n_cols, (math_t)-1); EXPECT_EQ(n_cols, n_active); EXPECT_TRUE( raft::devArrMatchHost(beta_exp, beta.data(), n_cols, raft::CompareApprox<math_t>(2e-4))); EXPECT_TRUE(raft::devArrMatchHost( alphas_exp, alphas.data(), n_cols + 1, 
raft::CompareApprox<math_t>(1e-4))); EXPECT_TRUE( raft::devArrMatchHost(indices_exp, active_idx.data(), n_cols, raft::Compare<int>())); } void testPredictV1() { int ld_X = n_rows; int n_active = n_cols; raft::update_device(beta.data(), beta_exp, n_active, stream); raft::update_device(active_idx.data(), indices_exp, n_active, stream); CUDA_CHECK(hipMemsetAsync(y.data(), 0, n_rows * sizeof(math_t), stream)); math_t intercept = 0; ML::Solver::Lars::larsPredict(handle, X.data(), n_rows, n_cols, ld_X, beta.data(), n_active, active_idx.data(), intercept, y.data()); EXPECT_TRUE( raft::devArrMatchHost(pred_exp, y.data(), n_rows, raft::CompareApprox<math_t>(1e-5))); } void testPredictV2() { int ld_X = n_rows; int n_active = n_cols; // We set n_cols > n_active to trigger prediction path where columns of X // are copied. int n_cols_loc = n_cols + 1; raft::update_device(beta.data(), beta_exp, n_active, stream); raft::update_device(active_idx.data(), indices_exp, n_active, stream); CUDA_CHECK(hipMemsetAsync(y.data(), 0, n_rows * sizeof(math_t), stream)); math_t intercept = 0; ML::Solver::Lars::larsPredict(handle, X.data(), n_rows, n_cols_loc, ld_X, beta.data(), n_active, active_idx.data(), intercept, y.data()); EXPECT_TRUE( raft::devArrMatchHost(pred_exp, y.data(), n_rows, raft::CompareApprox<math_t>(1e-5))); } void testFitLarge() { int n_rows = 65536; int n_cols = 10; int max_iter = n_cols; int verbosity = 0; int n_active; MLCommon::device_buffer<math_t> X(allocator, stream, n_rows * n_cols); MLCommon::device_buffer<math_t> y(allocator, stream, n_rows); beta.resize(max_iter, stream); active_idx.resize(max_iter, stream); alphas.resize(max_iter + 1, stream); raft::random::Rng r(1234); r.uniform(X.data(), n_rows * n_cols, math_t(-1.0), math_t(1.0), stream); r.uniform(y.data(), n_rows, math_t(-1.0), math_t(1.0), stream); ML::Solver::Lars::larsFit(handle, X.data(), n_rows, n_cols, y.data(), beta.data(), active_idx.data(), alphas.data(), &n_active, (math_t*)nullptr, max_iter, (math_t*)nullptr, verbosity, n_rows, n_cols, (math_t)-1); EXPECT_EQ(n_cols, n_active); } raft::handle_t handle; hipStream_t stream; std::shared_ptr<raft::mr::device::allocator> allocator; const int n_rows = 10; const int n_cols = 5; math_t cor_host[4] = {0, 137, 4, 7}; math_t cor_exp[4] = {0, 137, 7, 4}; // clang-format off // We actually define column major matrices, so a row here corresponds to a // column of the matrix. math_t X_host[50] = { -1.59595376, 1.02675861, 0.45079426, 0.32621407, 0.29018821, -1.30640121, 0.67025452, 0.30196285, 1.28636261, -1.45018015, -1.39544855, 0.90533337, -0.36980987, 0.23706301, 1.33296593, -0.524911 , -0.86187751, 0.30764958, -1.24415885, 1.61319389, -0.01500442, -2.25985187, -0.11147508, 1.08410381, 0.59451579, 0.62568849, 0.99811378, -1.09709453, -0.51940485, 0.70040887, -1.81995734, -0.24101756, 1.21308053, 0.87517302, -0.19806613, 1.50733111, 0.06332581, -0.65824129, 0.45640974, -1.19803788, 0.13838875, -1.01018604, -0.15828873, -1.26652781, 0.41229797, -0.00953721, -0.10602222, -0.51746536, -0.10397987, 2.62132051}; math_t G_host[25] = { 10. , -0.28482905, -3.98401069, 3.63094793, -5.77295066, -0.28482905, 10. , -0.68437245, -1.73251284, 3.49545153, -3.98401069, -0.68437245, 10. , 1.92006934, 3.51643227, 3.63094793, -1.73251284, 1.92006934, 10. , -4.25887055, -5.77295066, 3.49545153, 3.51643227, -4.25887055, 10. 
}; math_t y_host[10] = { -121.34354343, -170.25131089, 19.34173641, 89.75429795, 99.97210232, 83.67110463, 40.65749808, -109.1490306 , -72.97243308, 140.31957861}; // clang-format on math_t beta_exp[10] = { 7.48589389e+01, 3.90513025e+01, 3.81912823e+01, 2.69095277e+01, -4.74545001e-02}; math_t alphas_exp[6] = {8.90008255e+01, 4.00677648e+01, 2.46147690e+01, 2.06052321e+01, 3.70155968e-02, 0.0740366429090}; math_t pred_exp[10] = {-121.32409183, -170.25278892, 19.26177047, 89.73931476, 100.07545046, 83.71217894, 40.59397899, -109.19137223, -72.89633962, 140.28189898}; int indices_exp[5] = {2, 1, 3, 4, 0}; MLCommon::device_buffer<math_t> X; MLCommon::device_buffer<math_t> G; MLCommon::device_buffer<math_t> y; MLCommon::device_buffer<math_t> beta; MLCommon::device_buffer<math_t> alphas; MLCommon::device_buffer<math_t> coef_path; MLCommon::device_buffer<int> active_idx; }; TYPED_TEST_CASE(LarsTestFitPredict, FloatTypes); TYPED_TEST(LarsTestFitPredict, fitGram) { #if CUDART_VERSION >= 11020 GTEST_SKIP(); #else this->testFitGram(); #endif } TYPED_TEST(LarsTestFitPredict, fitX) { #if CUDART_VERSION >= 11020 GTEST_SKIP(); #else this->testFitX(); #endif } TYPED_TEST(LarsTestFitPredict, fitLarge) { this->testFitLarge(); } TYPED_TEST(LarsTestFitPredict, predictV1) { this->testPredictV1(); } TYPED_TEST(LarsTestFitPredict, predictV2) { this->testPredictV2(); } }; // namespace Lars }; // namespace Solver }; // namespace ML
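The LARS test fixtures note that their host arrays are column major, so each textual "row" is really a matrix column. A self-contained sketch of that indexing convention (values and names below are arbitrary, chosen only for the example):

#include <cstdio>

int main()
{
    const int n_rows = 2, n_cols = 3;
    // Stored column by column: column 0 = {1, 2}, column 1 = {3, 4}, column 2 = {5, 6}.
    float A[n_rows * n_cols] = {1, 2, 3, 4, 5, 6};
    for (int i = 0; i < n_rows; ++i) {
        for (int j = 0; j < n_cols; ++j) {
            printf("%g ", A[i + j * n_rows]);  // element (i, j); leading dimension = n_rows
        }
        printf("\n");
    }
    return 0;  // prints: 1 3 5 / 2 4 6
}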
759fa258b643547b9825f5495d1d3495aa992014.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/cusolver_wrappers.h> #include <test_utils.h> #include <iomanip> #include <raft/handle.hpp> #include <raft/mr/device/allocator.hpp> #include <raft/random/rng.cuh> #include <solver/lars_impl.cuh> #include <sstream> #include <vector> namespace ML { namespace Solver { namespace Lars { template <typename math_t> class LarsTest : public ::testing::Test { protected: LarsTest() : allocator(handle.get_device_allocator()), cor(allocator, handle.get_stream(), n_cols), X(allocator, handle.get_stream(), n_cols * n_rows), G(allocator, handle.get_stream(), n_cols * n_cols), sign(allocator, handle.get_stream(), n_cols), ws(allocator, handle.get_stream(), n_cols), A(allocator, handle.get_stream(), 1) { CUDA_CHECK(cudaStreamCreate(&stream)); handle.set_stream(stream); raft::update_device(cor.data(), cor_host, n_cols, stream); raft::update_device(X.data(), X_host, n_cols * n_rows, stream); raft::update_device(G.data(), G_host, n_cols * n_cols, stream); raft::update_device(sign.data(), sign_host, n_cols, stream); } void TearDown() override { CUDA_CHECK(cudaStreamDestroy(stream)); } void testSelectMostCorrelated() { math_t cj; int idx; MLCommon::device_buffer<math_t> workspace(allocator, stream, n_cols); ML::Solver::Lars::selectMostCorrelated( n_active, n_cols, cor.data(), &cj, workspace, &idx, n_rows, indices, 1, stream); EXPECT_EQ(idx, 3); EXPECT_EQ(7, cj); } void testMoveToActive() { ML::Solver::Lars::moveToActive(handle.get_cublas_handle(), &n_active, 3, X.data(), n_rows, n_cols, n_rows, cor.data(), indices, G.data(), n_cols, sign.data(), stream); EXPECT_EQ(n_active, 3); EXPECT_TRUE(raft::devArrMatchHost(cor_exp, cor.data(), n_cols, raft::Compare<math_t>())); EXPECT_TRUE(raft::devArrMatchHost(G_exp, G.data(), n_cols * n_cols, raft::Compare<math_t>())); EXPECT_TRUE( raft::devArrMatch((math_t)1.0, sign.data() + n_active - 1, 1, raft::Compare<math_t>())); // Do it again with G == nullptr to test if X is properly changed n_active = 2; ML::Solver::Lars::moveToActive(handle.get_cublas_handle(), &n_active, 3, X.data(), n_rows, n_cols, n_rows, cor.data(), indices, (math_t*)nullptr, n_cols, sign.data(), stream); EXPECT_TRUE(raft::devArrMatchHost(X_exp, X.data(), n_rows * n_cols, raft::Compare<math_t>())); } void calcUExp(math_t* G, int n_cols, math_t* U_dev_exp) { auto allocator = handle.get_device_allocator(); MLCommon::device_buffer<int> devInfo(allocator, stream, 1); MLCommon::device_buffer<math_t> workspace(allocator, stream); int n_work; const int ld_U = n_cols; CUSOLVER_CHECK(raft::linalg::cusolverDnpotrf_bufferSize( handle.get_cusolver_dn_handle(), CUBLAS_FILL_MODE_UPPER, n_cols, U_dev_exp, ld_U, &n_work)); workspace.resize(n_work, stream); // Expected solution using Cholesky factorization from scratch raft::copy(U_dev_exp, G, n_cols * ld_U, stream); CUSOLVER_CHECK(raft::linalg::cusolverDnpotrf(handle.get_cusolver_dn_handle(), 
CUBLAS_FILL_MODE_UPPER, n_cols, U_dev_exp, ld_U, workspace.data(), n_work, devInfo.data(), stream)); } // Initialize a mix of G and U matrices to test updateCholesky void initGU(math_t* GU, math_t* G, math_t* U, int n_active, bool copy_G) { const int ld_U = n_cols; // First we copy over all elements, because the factorization only replaces // the upper triangular part. This way it will be easier to compare to the // reference solution. raft::copy(GU, G, n_cols * n_cols, stream); if (!copy_G) { // zero the new colum of G CUDA_CHECK(cudaMemsetAsync(GU + (n_active - 1) * n_cols, 0, n_cols * sizeof(math_t), stream)); } for (int i = 0; i < n_active - 1; i++) { raft::copy(GU + i * ld_U, U + i * ld_U, i + 1, stream); } } void testUpdateCholesky() { const int ld_X = n_rows; const int ld_G = n_cols; const int ld_U = ld_G; auto allocator = handle.get_device_allocator(); MLCommon::device_buffer<math_t> workspace(allocator, stream); MLCommon::device_buffer<math_t> U_dev_exp(allocator, stream, n_cols * n_cols); calcUExp(G.data(), n_cols, U_dev_exp.data()); MLCommon::device_buffer<math_t> U(allocator, stream, n_cols * n_cols); n_active = 4; math_t eps = -1; // First test with U already initialized initGU(U.data(), G.data(), U_dev_exp.data(), n_active, true); ML::Solver::Lars::updateCholesky(handle, n_active, X.data(), n_rows, n_cols, ld_X, U.data(), ld_U, U.data(), ld_G, workspace, eps, stream); EXPECT_TRUE(raft::devArrMatch( U_dev_exp.data(), U.data(), n_cols * n_cols, raft::CompareApprox<math_t>(1e-5))); // Next test where G and U are separate arrays initGU(U.data(), G.data(), U_dev_exp.data(), n_active, false); ML::Solver::Lars::updateCholesky(handle, n_active, X.data(), n_rows, n_cols, ld_X, U.data(), ld_U, G.data(), ld_G, workspace, eps, stream); EXPECT_TRUE(raft::devArrMatch( U_dev_exp.data(), U.data(), n_cols * n_cols, raft::CompareApprox<math_t>(1e-5))); // Third test without Gram matrix. 
initGU(U.data(), G.data(), U_dev_exp.data(), n_active, false); ML::Solver::Lars::updateCholesky(handle, n_active, X.data(), n_rows, n_cols, ld_X, U.data(), ld_U, (math_t*)nullptr, 0, workspace, eps, stream); EXPECT_TRUE(raft::devArrMatch( U_dev_exp.data(), U.data(), n_cols * n_cols, raft::CompareApprox<math_t>(1e-4))); } void testCalcW0() { n_active = 4; const int ld_U = n_cols; auto allocator = handle.get_device_allocator(); MLCommon::device_buffer<math_t> ws(allocator, stream, n_active); MLCommon::device_buffer<math_t> U(allocator, stream, n_cols * ld_U); calcUExp(G.data(), n_cols, U.data()); ML::Solver::Lars::calcW0( handle, n_active, n_cols, sign.data(), U.data(), ld_U, ws.data(), stream); EXPECT_TRUE( raft::devArrMatchHost(ws0_exp, ws.data(), n_active, raft::CompareApprox<math_t>(1e-3))); } void testCalcA() { n_active = 4; MLCommon::device_buffer<math_t> ws(handle.get_device_allocator(), stream, n_active); raft::update_device(ws.data(), ws0_exp, n_active, stream); ML::Solver::Lars::calcA(handle, A.data(), n_active, sign.data(), ws.data(), stream); EXPECT_TRUE(raft::devArrMatch( (math_t)0.20070615686577709, A.data(), 1, raft::CompareApprox<math_t>(1e-6))); } void testEquiangular() { n_active = 4; auto allocator = handle.get_device_allocator(); MLCommon::device_buffer<math_t> workspace(allocator, stream); MLCommon::device_buffer<math_t> u_eq(allocator, stream, n_rows); MLCommon::device_buffer<math_t> U(allocator, stream, n_cols * n_cols); calcUExp(G.data(), n_cols, U.data()); initGU(G.data(), G.data(), U.data(), n_active, true); const int ld_X = n_rows; const int ld_U = n_cols; const int ld_G = n_cols; ML::Solver::Lars::calcEquiangularVec(handle, n_active, X.data(), n_rows, n_cols, ld_X, sign.data(), G.data(), ld_U, G.data(), ld_G, workspace, ws.data(), A.data(), u_eq.data(), (math_t)-1, stream); EXPECT_TRUE( raft::devArrMatchHost(ws_exp, ws.data(), n_active, raft::CompareApprox<math_t>(1e-3))); EXPECT_TRUE(raft::devArrMatch( (math_t)0.20070615686577709, A.data(), 1, raft::CompareApprox<math_t>(1e-4))); // Now test without Gram matrix, u should be calculated in this case initGU(G.data(), G.data(), U.data(), n_active, false); ML::Solver::Lars::calcEquiangularVec(handle, n_active, X.data(), n_rows, n_cols, ld_X, sign.data(), G.data(), ld_U, (math_t*)nullptr, 0, workspace, ws.data(), A.data(), u_eq.data(), (math_t)-1, stream); EXPECT_TRUE(raft::devArrMatchHost(u_eq_exp, u_eq.data(), 1, raft::CompareApprox<math_t>(1e-3))); } void testCalcMaxStep() { n_active = 2; math_t A_host = 3.6534305290498055; math_t ws_host[2] = {0.25662594, -0.01708941}; math_t u_host[4] = {0.10282127, -0.01595011, 0.07092104, -0.99204011}; math_t cor_host[4] = {137, 42, 4.7, 13.2}; const int ld_X = n_rows; const int ld_G = n_cols; MLCommon::device_buffer<math_t> u(handle.get_device_allocator(), stream, n_rows); MLCommon::device_buffer<math_t> ws(handle.get_device_allocator(), stream, n_active); MLCommon::device_buffer<math_t> gamma(handle.get_device_allocator(), stream, 1); MLCommon::device_buffer<math_t> U(handle.get_device_allocator(), stream, n_cols * n_cols); MLCommon::device_buffer<math_t> a_vec(handle.get_device_allocator(), stream, n_cols - n_active); raft::update_device(A.data(), &A_host, 1, stream); raft::update_device(ws.data(), ws_host, n_active, stream); raft::update_device(u.data(), u_host, n_rows, stream); raft::update_device(cor.data(), cor_host, n_cols, stream); const int max_iter = n_cols; math_t cj = 42; ML::Solver::Lars::calcMaxStep(handle, max_iter, n_rows, n_cols, n_active, cj, A.data(), 
cor.data(), G.data(), ld_G, X.data(), ld_X, (math_t*)nullptr, ws.data(), gamma.data(), a_vec.data(), stream); math_t gamma_exp = 0.20095407186830386; EXPECT_TRUE(raft::devArrMatch(gamma_exp, gamma.data(), 1, raft::CompareApprox<math_t>(1e-6))); math_t a_vec_exp[2] = {24.69447886, -139.66289908}; EXPECT_TRUE(raft::devArrMatchHost( a_vec_exp, a_vec.data(), a_vec.size(), raft::CompareApprox<math_t>(1e-4))); // test without G matrix, we use U as input in this case CUDA_CHECK(cudaMemsetAsync(gamma.data(), 0, sizeof(math_t), stream)); CUDA_CHECK(cudaMemsetAsync(a_vec.data(), 0, a_vec.size() * sizeof(math_t), stream)); ML::Solver::Lars::calcMaxStep(handle, max_iter, n_rows, n_cols, n_active, cj, A.data(), cor.data(), (math_t*)nullptr, 0, X.data(), ld_X, u.data(), ws.data(), gamma.data(), a_vec.data(), stream); EXPECT_TRUE(raft::devArrMatch(gamma_exp, gamma.data(), 1, raft::CompareApprox<math_t>(1e-6))); EXPECT_TRUE(raft::devArrMatchHost( a_vec_exp, a_vec.data(), a_vec.size(), raft::CompareApprox<math_t>(1e-4))); // Last iteration n_active = max_iter; CUDA_CHECK(cudaMemsetAsync(gamma.data(), 0, sizeof(math_t), stream)); ML::Solver::Lars::calcMaxStep(handle, max_iter, n_rows, n_cols, n_active, cj, A.data(), cor.data(), (math_t*)nullptr, 0, X.data(), ld_X, u.data(), ws.data(), gamma.data(), a_vec.data(), stream); gamma_exp = 11.496044516528272; EXPECT_TRUE(raft::devArrMatch(gamma_exp, gamma.data(), 1, raft::CompareApprox<math_t>(1e-6))); } raft::handle_t handle; cudaStream_t stream; std::shared_ptr<raft::mr::device::allocator> allocator; const int n_rows = 4; const int n_cols = 4; int n_active = 2; math_t cor_host[4] = {0, 137, 4, 7}; math_t cor_exp[4] = {0, 137, 7, 4}; // clang-format off // Keep in mind that we actually define column major matrices, so a row here // corresponds to a column of the matrix. 
math_t X_host[16] = { 1., 4., 9., -3., 9., 61., 131., 13., 3., 22., 111., -17., 0., 40., 40., 143.}; math_t X_exp[16] = { 1., 4., 9., -3., 9., 61., 131., 13., 0., 40., 40., 143., 3., 22., 111., -17.}; math_t G_host[16] = { 107., 1393., 1141., 91., 1393., 21132., 15689., 9539., 1141., 15689., 13103., 2889., 91., 9539., 2889., 23649.}; math_t G_exp[16] = { 107., 1393., 91., 1141., 1393., 21132., 9539., 15689., 91., 9539., 23649., 2889., 1141., 15689., 2889., 13103.}; // clang-format on int indices[4] = {3, 2, 1, 0}; int indices_exp[4] = {3, 4, 0, 1}; math_t sign_host[4] = {1, -1, 1, -1}; math_t ws0_exp[4] = {22.98636271, -2.15225918, 0.41474128, 0.72897179}; math_t ws_exp[4] = {4.61350452, -0.43197167, 0.08324113, 0.14630913}; math_t u_eq_exp[4] = {0.97548288, -0.21258388, 0.02538227, 0.05096055}; MLCommon::device_buffer<math_t> cor; MLCommon::device_buffer<math_t> X; MLCommon::device_buffer<math_t> G; MLCommon::device_buffer<math_t> sign; MLCommon::device_buffer<math_t> ws; MLCommon::device_buffer<math_t> A; }; typedef ::testing::Types<float, double> FloatTypes; TYPED_TEST_CASE(LarsTest, FloatTypes); TYPED_TEST(LarsTest, select) { this->testSelectMostCorrelated(); } TYPED_TEST(LarsTest, moveToActive) { this->testMoveToActive(); } TYPED_TEST(LarsTest, updateCholesky) { this->testUpdateCholesky(); } TYPED_TEST(LarsTest, calcW0) { this->testCalcW0(); } TYPED_TEST(LarsTest, calcA) { this->testCalcA(); } TYPED_TEST(LarsTest, equiangular) { this->testEquiangular(); } TYPED_TEST(LarsTest, maxStep) { this->testCalcMaxStep(); } template <typename math_t> class LarsTestFitPredict : public ::testing::Test { protected: LarsTestFitPredict() : allocator(handle.get_device_allocator()), X(allocator, handle.get_stream(), n_cols * n_rows), y(allocator, handle.get_stream(), n_rows), G(allocator, handle.get_stream(), n_cols * n_cols), beta(allocator, handle.get_stream(), n_cols), coef_path(allocator, handle.get_stream(), (n_cols + 1) * n_cols), alphas(allocator, handle.get_stream(), n_cols + 1), active_idx(allocator, handle.get_stream(), n_cols) { CUDA_CHECK(cudaStreamCreate(&stream)); handle.set_stream(stream); raft::update_device(X.data(), X_host, n_cols * n_rows, stream); raft::update_device(y.data(), y_host, n_rows, stream); raft::update_device(G.data(), G_host, n_cols * n_cols, stream); } void TearDown() override { CUDA_CHECK(cudaStreamDestroy(stream)); } void testFitGram() { int max_iter = 10; int verbosity = 0; int n_active; ML::Solver::Lars::larsFit(handle, X.data(), n_rows, n_cols, y.data(), beta.data(), active_idx.data(), alphas.data(), &n_active, G.data(), max_iter, (math_t*)nullptr, // coef_path.data(), verbosity, n_rows, n_cols, (math_t)-1); EXPECT_EQ(n_cols, n_active); EXPECT_TRUE( raft::devArrMatchHost(beta_exp, beta.data(), n_cols, raft::CompareApprox<math_t>(1e-5))); EXPECT_TRUE(raft::devArrMatchHost( alphas_exp, alphas.data(), n_cols + 1, raft::CompareApprox<math_t>(1e-4))); EXPECT_TRUE( raft::devArrMatchHost(indices_exp, active_idx.data(), n_cols, raft::Compare<int>())); } void testFitX() { int max_iter = 10; int verbosity = 0; int n_active; ML::Solver::Lars::larsFit(handle, X.data(), n_rows, n_cols, y.data(), beta.data(), active_idx.data(), alphas.data(), &n_active, (math_t*)nullptr, max_iter, (math_t*)nullptr, // coef_path.data(), verbosity, n_rows, n_cols, (math_t)-1); EXPECT_EQ(n_cols, n_active); EXPECT_TRUE( raft::devArrMatchHost(beta_exp, beta.data(), n_cols, raft::CompareApprox<math_t>(2e-4))); EXPECT_TRUE(raft::devArrMatchHost( alphas_exp, alphas.data(), n_cols + 1, 
raft::CompareApprox<math_t>(1e-4))); EXPECT_TRUE( raft::devArrMatchHost(indices_exp, active_idx.data(), n_cols, raft::Compare<int>())); } void testPredictV1() { int ld_X = n_rows; int n_active = n_cols; raft::update_device(beta.data(), beta_exp, n_active, stream); raft::update_device(active_idx.data(), indices_exp, n_active, stream); CUDA_CHECK(cudaMemsetAsync(y.data(), 0, n_rows * sizeof(math_t), stream)); math_t intercept = 0; ML::Solver::Lars::larsPredict(handle, X.data(), n_rows, n_cols, ld_X, beta.data(), n_active, active_idx.data(), intercept, y.data()); EXPECT_TRUE( raft::devArrMatchHost(pred_exp, y.data(), n_rows, raft::CompareApprox<math_t>(1e-5))); } void testPredictV2() { int ld_X = n_rows; int n_active = n_cols; // We set n_cols > n_active to trigger prediction path where columns of X // are copied. int n_cols_loc = n_cols + 1; raft::update_device(beta.data(), beta_exp, n_active, stream); raft::update_device(active_idx.data(), indices_exp, n_active, stream); CUDA_CHECK(cudaMemsetAsync(y.data(), 0, n_rows * sizeof(math_t), stream)); math_t intercept = 0; ML::Solver::Lars::larsPredict(handle, X.data(), n_rows, n_cols_loc, ld_X, beta.data(), n_active, active_idx.data(), intercept, y.data()); EXPECT_TRUE( raft::devArrMatchHost(pred_exp, y.data(), n_rows, raft::CompareApprox<math_t>(1e-5))); } void testFitLarge() { int n_rows = 65536; int n_cols = 10; int max_iter = n_cols; int verbosity = 0; int n_active; MLCommon::device_buffer<math_t> X(allocator, stream, n_rows * n_cols); MLCommon::device_buffer<math_t> y(allocator, stream, n_rows); beta.resize(max_iter, stream); active_idx.resize(max_iter, stream); alphas.resize(max_iter + 1, stream); raft::random::Rng r(1234); r.uniform(X.data(), n_rows * n_cols, math_t(-1.0), math_t(1.0), stream); r.uniform(y.data(), n_rows, math_t(-1.0), math_t(1.0), stream); ML::Solver::Lars::larsFit(handle, X.data(), n_rows, n_cols, y.data(), beta.data(), active_idx.data(), alphas.data(), &n_active, (math_t*)nullptr, max_iter, (math_t*)nullptr, verbosity, n_rows, n_cols, (math_t)-1); EXPECT_EQ(n_cols, n_active); } raft::handle_t handle; cudaStream_t stream; std::shared_ptr<raft::mr::device::allocator> allocator; const int n_rows = 10; const int n_cols = 5; math_t cor_host[4] = {0, 137, 4, 7}; math_t cor_exp[4] = {0, 137, 7, 4}; // clang-format off // We actually define column major matrices, so a row here corresponds to a // column of the matrix. math_t X_host[50] = { -1.59595376, 1.02675861, 0.45079426, 0.32621407, 0.29018821, -1.30640121, 0.67025452, 0.30196285, 1.28636261, -1.45018015, -1.39544855, 0.90533337, -0.36980987, 0.23706301, 1.33296593, -0.524911 , -0.86187751, 0.30764958, -1.24415885, 1.61319389, -0.01500442, -2.25985187, -0.11147508, 1.08410381, 0.59451579, 0.62568849, 0.99811378, -1.09709453, -0.51940485, 0.70040887, -1.81995734, -0.24101756, 1.21308053, 0.87517302, -0.19806613, 1.50733111, 0.06332581, -0.65824129, 0.45640974, -1.19803788, 0.13838875, -1.01018604, -0.15828873, -1.26652781, 0.41229797, -0.00953721, -0.10602222, -0.51746536, -0.10397987, 2.62132051}; math_t G_host[25] = { 10. , -0.28482905, -3.98401069, 3.63094793, -5.77295066, -0.28482905, 10. , -0.68437245, -1.73251284, 3.49545153, -3.98401069, -0.68437245, 10. , 1.92006934, 3.51643227, 3.63094793, -1.73251284, 1.92006934, 10. , -4.25887055, -5.77295066, 3.49545153, 3.51643227, -4.25887055, 10. 
}; math_t y_host[10] = { -121.34354343, -170.25131089, 19.34173641, 89.75429795, 99.97210232, 83.67110463, 40.65749808, -109.1490306 , -72.97243308, 140.31957861}; // clang-format on math_t beta_exp[10] = { 7.48589389e+01, 3.90513025e+01, 3.81912823e+01, 2.69095277e+01, -4.74545001e-02}; math_t alphas_exp[6] = {8.90008255e+01, 4.00677648e+01, 2.46147690e+01, 2.06052321e+01, 3.70155968e-02, 0.0740366429090}; math_t pred_exp[10] = {-121.32409183, -170.25278892, 19.26177047, 89.73931476, 100.07545046, 83.71217894, 40.59397899, -109.19137223, -72.89633962, 140.28189898}; int indices_exp[5] = {2, 1, 3, 4, 0}; MLCommon::device_buffer<math_t> X; MLCommon::device_buffer<math_t> G; MLCommon::device_buffer<math_t> y; MLCommon::device_buffer<math_t> beta; MLCommon::device_buffer<math_t> alphas; MLCommon::device_buffer<math_t> coef_path; MLCommon::device_buffer<int> active_idx; }; TYPED_TEST_CASE(LarsTestFitPredict, FloatTypes); TYPED_TEST(LarsTestFitPredict, fitGram) { #if CUDART_VERSION >= 11020 GTEST_SKIP(); #else this->testFitGram(); #endif } TYPED_TEST(LarsTestFitPredict, fitX) { #if CUDART_VERSION >= 11020 GTEST_SKIP(); #else this->testFitX(); #endif } TYPED_TEST(LarsTestFitPredict, fitLarge) { this->testFitLarge(); } TYPED_TEST(LarsTestFitPredict, predictV1) { this->testPredictV1(); } TYPED_TEST(LarsTestFitPredict, predictV2) { this->testPredictV2(); } }; // namespace Lars }; // namespace Solver }; // namespace ML
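The checks in the LARS test fixtures above validate device buffers against precomputed host references through raft's devArrMatchHost/CompareApprox helpers. The stand-alone sketch below restates that pattern without raft, purely for illustration; approxMatchHost and its relative-tolerance rule are assumptions of this sketch, not raft's implementation.

#include <cuda_runtime.h>
#include <algorithm>
#include <cmath>
#include <vector>

// approxMatchHost is a hypothetical stand-in for raft::devArrMatchHost with
// CompareApprox: copy the device result to the host and compare element-wise
// against a host-side reference with a relative tolerance.
template <typename T>
bool approxMatchHost(const T* expected_host, const T* actual_dev, int n, T eps,
                     cudaStream_t stream)
{
  std::vector<T> actual(n);
  cudaMemcpyAsync(actual.data(), actual_dev, n * sizeof(T),
                  cudaMemcpyDeviceToHost, stream);
  cudaStreamSynchronize(stream);
  for (int i = 0; i < n; i++) {
    T diff  = std::abs(actual[i] - expected_host[i]);
    T scale = std::max(std::abs(expected_host[i]), T(1));
    if (diff / scale > eps) return false;  // relative-error check, in the spirit of CompareApprox
  }
  return true;
}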
350976f2e2a6121678c8d91890ff521362893888.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <hip/hip_runtime.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include<limits> double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } struct NUM_ADD { short2 read_reference_number; int address_array; }; __global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction, int * MM_global_s) //, char * resultaa { int offset=blockIdx.x; __shared__ short2 read_reference_number; __shared__ char * read_base_array; __shared__ char4 * reference_base_array; //__shared__ int mismatch; //__shared__ int match; // __shared__ int open; // __shared__ int extend; __shared__ short2 * direction_index; __shared__ int * MM_global; __shared__ int * gap_h_global; __shared__ short2 * gap_h_size_global; while(offset<size) { if( threadIdx.x==0) { read_reference_number=num_add[offset].read_reference_number; MM_global=(int *) (MM_global_s+offset*640*3); direction_index=(short2 *) (direction+offset*640*1100); read_base_array=(char *) (data+num_add[offset].address_array); gap_h_global=(int *) (MM_global+640); reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128); gap_h_size_global=(short2 *) (gap_h_global+640); } __syncthreads(); __shared__ char reference_base_in_char[600]; int hh=(read_reference_number.y+4-1)/4; int tt=(hh+blockDim.x-1)/blockDim.x; for(int ii=0;ii<tt;ii++) { int aa=threadIdx.x+ii*blockDim.x; if(aa< hh) { char4 reference_base_in_thread; reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory reference_base_in_char[aa*4]=reference_base_in_thread.x; reference_base_in_char[aa*4+1]=reference_base_in_thread.y; reference_base_in_char[aa*4+2]=reference_base_in_thread.z; reference_base_in_char[aa*4+3]=reference_base_in_thread.w; } } __shared__ int MM[130]; __shared__ int gap_h[130]; //insertion __shared__ short2 gap_size_h[130]; //insertion __shared__ int result_col; __shared__ int result_row; __shared__ int result_col_index; __shared__ int result_row_index; if(threadIdx.x==0) { MM[0]=0; gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2; gap_size_h[0].x=0; gap_size_h[0].y=0; // match=200; // mismatch=-150; // open=-260; // extend=-11; result_col=-1000000000;//std::numeric_limits<int>::min()/2; result_row=-1000000000;//std::numeric_limits<int>::min()/2; // for(int i=0;i<read_reference_number.y;i++) // printf("%c",reference_base_in_char[i]); // printf("\n"); // for(int i=0;i<read_reference_number.x;i++) // printf("%c",read_base_array[i]); } __syncthreads(); int read_number=read_reference_number.x; int round=(read_reference_number.x+blockDim.x-1)/blockDim.x; int round_size; int aaa=0; // printf("%d %d \n",round,read_number); for(int i=0;i<round;i++) { round_size=(read_number>blockDim.x)?blockDim.x: read_number; read_number=(read_number>blockDim.x)?read_number-blockDim.x:0; //printf("%d %d \n",i,read_number); int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;; //int gap_size_v=0; //Deletion int M=0; //now int step_right; //now //deletion v int MMM=0; //short mt=0; // short2 mt_gap_size_v; char read_base; if(threadIdx.x<round_size) read_base=read_base_array[aaa+threadIdx.x]; short2 curmt; 
int ki=0;//insertion h negetive int current_reference_id=0; for(int j=0;j<round_size+read_reference_number.y-1;j++) { int aa=j-threadIdx.x; if( aa>=0 && (current_reference_id<read_reference_number.y)) { if(i>0&& threadIdx.x==0) { MM[0]=MM_global[current_reference_id]; gap_h[0]=gap_h_global[current_reference_id]; gap_size_h[0]=gap_h_size_global[current_reference_id]; } int prev_gap=M-260; //M which is cacluated by last step in the same thread gap_v+=-11; if(prev_gap>gap_v) { gap_v=prev_gap; mt_gap_size_v.y=1; } else mt_gap_size_v.y++; char reference_base_each=reference_base_in_char[current_reference_id]; M=MMM+(read_base==reference_base_each? 200:-150); prev_gap=MM[threadIdx.x]-260; step_right=gap_h[threadIdx.x]-11; if(prev_gap>step_right) { step_right=prev_gap; ki=1; } else ki=gap_size_h[threadIdx.x].x+1; bool diag=(M>=gap_v)&&(M>=step_right); curmt.y=0; if(diag) { curmt.x=0; //if(threadIdx.x==0||current_reference_id==0) // curmt.y=0; // else curmt.y=mt_gap_size_v.x+1; // curBtrack=0; } else if(step_right>=gap_v) { M=step_right; curmt.x=0-ki; // curBtrack=0-ki; } else { M=gap_v; curmt.x=mt_gap_size_v.y; //curBtrack=gap_size_v; } MMM=MM[threadIdx.x]; mt_gap_size_v.x=gap_size_h[threadIdx.x].y; direction_index[640*j+(threadIdx.x+aaa)]=curmt; //if(threadIdx.x==read_reference_number.x-3) if(threadIdx.x==round_size-1 && i<round-1) { MM_global[current_reference_id]=M; gap_h_global[current_reference_id]=step_right; short2 aab; aab.x=ki; aab.y=curmt.y; gap_h_size_global[current_reference_id]= aab; } if(current_reference_id==read_reference_number.y-1) { if(M>=result_row) { result_row=M; result_row_index=aaa+threadIdx.x; // } // printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x); } //if(i==round-1 ) printf("%d %d %d %d \n", round,aaa, current_reference_id,threadIdx.x+aa+1 ); if( i==round-1 && read_reference_number.x==(threadIdx.x+aaa+1)) { if(M>=result_col) { result_col=M; result_col_index=current_reference_id; // +1 } // printf("%d %d %d %d\n",threadIdx.x+aa,M,result_col,result_col_index); } current_reference_id++; } __syncthreads(); //to make sure that the former value of MM[threadIdx.x+1] are used by other threads. MM[threadIdx.x+1]=M; gap_h[threadIdx.x+1]=step_right; gap_size_h[threadIdx.x+1].x=ki; gap_size_h[threadIdx.x+1].y=curmt.y; __syncthreads(); // there should be two synthreads(); // to make sure that all of MM[threadIdx.x+1] have get a new value before M,D and I changed. 
} aaa+=blockDim.x; } // char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion // __shared__ int cigar_index; // int segment_length; // short2 btr; // char new_state; // int step_length; int4 result4; if(threadIdx.x==0) { // printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index); if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1)) { // final_result=result_row; result4.x=read_reference_number.y-1; result4.y=result_row_index; result4.z=read_reference_number.x-1-result_row_index; } else { // final_result=result_col; result4.x=result_col_index; result4.y=read_reference_number.x-1; result4.z=0; } //result[offset*3]=final_result; //printf("%d\n",final_result); //result4.x=fina_i; //result4.y=fina_j; //result4.z=segment_length; result[offset]=result4; } __syncthreads(); offset+=gridDim.x; } } __global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result { int offset=blockIdx.x; int4 result4;; short2 * direction_index; __shared__ char * cigar_store; __shared__ int *cigar_int_store; __shared__ char cigar_m[128]; __shared__ int cigar_int_m[128]; while(offset<size) { char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion __shared__ int cigar_index; int segment_length; short2 btr; char new_state; int step_length; if( threadIdx.x==0) { result4=result[offset]; direction_index=(short2 *) (direction+offset*640*1100); cigar_store=(char *) (cigar+offset*sizeof(char)*128); cigar_int_store=(int *) (cigar_int+offset*128); //printf("\n %d %d\n", final_i,final_j); cigar_index=0; if(result4.z>0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.z; cigar_index++; } segment_length=0; state='N'; do { btr=direction_index[(result4.x+result4.y)*640+result4.y]; if(btr.x>0) { new_state='D'; step_length=btr.x; result4.x-=step_length; } else if(btr.x<0) { new_state='I'; step_length=0-btr.x; result4.y-=step_length; } else { new_state='M'; step_length=btr.y; result4.x-=step_length; result4.y-=step_length; } if(state=='N') state=new_state; if(state==new_state) { segment_length+=step_length; } else { cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; segment_length=step_length; cigar_index++; state=new_state; } }while(result4.x>=0&&result4.y>=0); cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; cigar_index++; if(result4.y>=0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.y+1; cigar_index++; } result4.z=result4.x+1; result4.w=cigar_index; result[offset]=result4; /* for(int i=cigar_index-1;i>=0;i--) { printf("%d%c",cigar_int_m[i],cigar_m[i]); } */ } __syncthreads(); if(threadIdx.x<cigar_index && cigar_index<=blockDim.x) { // if(threadIdx.x==0) // printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]); cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x]; cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x]; // if(threadIdx.x==0) // printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]); } offset+=gridDim.x; } } struct InputData { char read_base[600]; char reference_base[600]; }; int main(int artc, char* args[]) { int total_size=0; FILE * file; file=fopen(args[1],"r"); int size; double computation_time=0;//total_time=0; timespec start,finish; /* char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { 
int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[1]); strcpy(inputdata[index].read_base,data[1]); index++; } */ /* fscanf(file,"%d",&size); while(!feof(file)) { InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { fscanf(file,"%s ",inputdata[i].reference_base); fscanf(file,"%s ",inputdata[i].read_base); } */ char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[i]); strcpy(inputdata[index].read_base,data[j]); index++; } //data preparation. char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128); NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total); char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; //.thus we donot need to worry about align int data_size=0; char * data_d_total; hipMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4); int * result_h=(int *) malloc(sizeof(int)*size*4); char * cigar_h=(char *) malloc(sizeof(char)*size*128); int * cigar_int_h=(int *) malloc(sizeof(int)*size*128); for(int i=0;i<size;i++) { char4 reference_tep[150]; int read_len=strlen(inputdata[i].read_base); int ref_len=strlen(inputdata[i].reference_base); int new_len=(ref_len+4-1)/4; total_size+=ref_len*read_len; for(int j=0;j<new_len;j++) { reference_tep[j].x=inputdata[i].reference_base[j*4]; if(j*4+1<ref_len) reference_tep[j].y=inputdata[i].reference_base[j*4+1]; if(j*4+2<ref_len) reference_tep[j].z=inputdata[i].reference_base[j*4+2]; if(j*4+3<ref_len) reference_tep[j].w=inputdata[i].reference_base[j*4+3]; } data_num_add[i].read_reference_number.x=read_len; data_num_add[i].read_reference_number.y=ref_len; data_num_add[i].address_array=data_size; memcpy(data_h,inputdata[i].read_base,read_len); data_h+=(read_len+128-1)/128*128; data_size+=(read_len+128-1)/128*128; memcpy(data_h,reference_tep,sizeof(char4)* new_len); data_h+=(new_len*sizeof(char4)+127)/128*128; data_size+=(new_len*sizeof(char4)+127)/128*128; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; hipMemcpy(data_d_total,data_h_total,data_size_to_copy,hipMemcpyHostToDevice); NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128; int4 * result_d=(int4 *) (data_d_total+data_size_to_copy); char * cigar; hipMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int))); int * cigar_int=(int *) (cigar+size*128*sizeof(char)); int * direction; hipMalloc( (int **) & direction, (size+1) * (640*1100* sizeof (int))); int * MM_global; int * gap_v_global; int * gap_v_size_global; hipMalloc((int **)&MM_global,size*(640*3*sizeof(int))); dim3 block(128); dim3 grid(size); clock_gettime(CLOCK_MONOTONIC_RAW,&start); hipLaunchKernelGGL(( calculate_cigar), dim3(grid),dim3(block), 0, 0, size,data_d,num_add_d,result_d,direction,MM_global); //result // calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result // hipMemcpy(result_h,result_d,size*sizeof(int)*4,hipMemcpyDeviceToHost); // hipMemcpy(cigar_h,cigar,128*sizeof(char)*size, hipMemcpyDeviceToHost); // 
hipMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,hipMemcpyDeviceToHost); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); computation_time+=diff(start,finish); // for(int i=0;i<size;i++) { // printf("%d %d\n",result_h[i*4],result_h[i*4+1]); } /* for(int i=0;i<size;i++) { printf("%d\n",result_h[i*4+1]); printf("["); for(int j=0;j<result_h[i*4+3];j++) { if(j!=0) printf(", "); printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]); } printf("]\n"); } */ hipFree(direction); free(data_h_total); hipFree(data_d_total); free(inputdata); hipFree(cigar); free(cigar_int_h); free(cigar_h); // fscanf(file,"%d",&size); } // printf(" computation_time= %e total_time=%e \n",computation_time,0); printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000); return 0; }
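The calculate_cigar kernel above evaluates an affine-gap alignment score per cell, with the weights hard-coded in the kernel (match 200, mismatch -150, gap open -260, gap extend -11), sweeping the matrix in anti-diagonal waves so that each thread owns one read base. The host-side sketch below restates only the per-cell scoring recurrence under those weights; the kernel's tie-breaking, backtrack bookkeeping and wavefront scheduling are not reproduced here.

#include <algorithm>

// Scores taken from the kernel above; everything else is an illustrative
// restatement, not the kernel's code.
struct AlnCell { int best, ins, del; };

inline AlnCell updateCell(const AlnCell& diag, const AlnCell& left, const AlnCell& up,
                          char readBase, char refBase)
{
  const int kMatch = 200, kMismatch = -150, kGapOpen = -260, kGapExtend = -11;
  AlnCell out;
  // Horizontal gap: open from the best score on the left or extend an open gap.
  out.ins = std::max(left.best + kGapOpen, left.ins + kGapExtend);
  // Vertical gap: open from the best score above or extend an open gap.
  out.del = std::max(up.best + kGapOpen, up.del + kGapExtend);
  // Diagonal step pays the match/mismatch score; the cell keeps the overall best.
  const int diagScore = diag.best + (readBase == refBase ? kMatch : kMismatch);
  out.best = std::max(diagScore, std::max(out.ins, out.del));
  return out;
}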
350976f2e2a6121678c8d91890ff521362893888.cu
#include <iostream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <cuda.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include<limits> double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } struct NUM_ADD { short2 read_reference_number; int address_array; }; __global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction, int * MM_global_s) //, char * resultaa { int offset=blockIdx.x; __shared__ short2 read_reference_number; __shared__ char * read_base_array; __shared__ char4 * reference_base_array; //__shared__ int mismatch; //__shared__ int match; // __shared__ int open; // __shared__ int extend; __shared__ short2 * direction_index; __shared__ int * MM_global; __shared__ int * gap_h_global; __shared__ short2 * gap_h_size_global; while(offset<size) { if( threadIdx.x==0) { read_reference_number=num_add[offset].read_reference_number; MM_global=(int *) (MM_global_s+offset*640*3); direction_index=(short2 *) (direction+offset*640*1100); read_base_array=(char *) (data+num_add[offset].address_array); gap_h_global=(int *) (MM_global+640); reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128); gap_h_size_global=(short2 *) (gap_h_global+640); } __syncthreads(); __shared__ char reference_base_in_char[600]; int hh=(read_reference_number.y+4-1)/4; int tt=(hh+blockDim.x-1)/blockDim.x; for(int ii=0;ii<tt;ii++) { int aa=threadIdx.x+ii*blockDim.x; if(aa< hh) { char4 reference_base_in_thread; reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory reference_base_in_char[aa*4]=reference_base_in_thread.x; reference_base_in_char[aa*4+1]=reference_base_in_thread.y; reference_base_in_char[aa*4+2]=reference_base_in_thread.z; reference_base_in_char[aa*4+3]=reference_base_in_thread.w; } } __shared__ int MM[130]; __shared__ int gap_h[130]; //insertion __shared__ short2 gap_size_h[130]; //insertion __shared__ int result_col; __shared__ int result_row; __shared__ int result_col_index; __shared__ int result_row_index; if(threadIdx.x==0) { MM[0]=0; gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2; gap_size_h[0].x=0; gap_size_h[0].y=0; // match=200; // mismatch=-150; // open=-260; // extend=-11; result_col=-1000000000;//std::numeric_limits<int>::min()/2; result_row=-1000000000;//std::numeric_limits<int>::min()/2; // for(int i=0;i<read_reference_number.y;i++) // printf("%c",reference_base_in_char[i]); // printf("\n"); // for(int i=0;i<read_reference_number.x;i++) // printf("%c",read_base_array[i]); } __syncthreads(); int read_number=read_reference_number.x; int round=(read_reference_number.x+blockDim.x-1)/blockDim.x; int round_size; int aaa=0; // printf("%d %d \n",round,read_number); for(int i=0;i<round;i++) { round_size=(read_number>blockDim.x)?blockDim.x: read_number; read_number=(read_number>blockDim.x)?read_number-blockDim.x:0; //printf("%d %d \n",i,read_number); int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;; //int gap_size_v=0; //Deletion int M=0; //now int step_right; //now //deletion v int MMM=0; //short mt=0; // short2 mt_gap_size_v; char read_base; if(threadIdx.x<round_size) read_base=read_base_array[aaa+threadIdx.x]; short2 curmt; int ki=0;//insertion h negetive int current_reference_id=0; for(int 
j=0;j<round_size+read_reference_number.y-1;j++) { int aa=j-threadIdx.x; if( aa>=0 && (current_reference_id<read_reference_number.y)) { if(i>0&& threadIdx.x==0) { MM[0]=MM_global[current_reference_id]; gap_h[0]=gap_h_global[current_reference_id]; gap_size_h[0]=gap_h_size_global[current_reference_id]; } int prev_gap=M-260; //M which is cacluated by last step in the same thread gap_v+=-11; if(prev_gap>gap_v) { gap_v=prev_gap; mt_gap_size_v.y=1; } else mt_gap_size_v.y++; char reference_base_each=reference_base_in_char[current_reference_id]; M=MMM+(read_base==reference_base_each? 200:-150); prev_gap=MM[threadIdx.x]-260; step_right=gap_h[threadIdx.x]-11; if(prev_gap>step_right) { step_right=prev_gap; ki=1; } else ki=gap_size_h[threadIdx.x].x+1; bool diag=(M>=gap_v)&&(M>=step_right); curmt.y=0; if(diag) { curmt.x=0; //if(threadIdx.x==0||current_reference_id==0) // curmt.y=0; // else curmt.y=mt_gap_size_v.x+1; // curBtrack=0; } else if(step_right>=gap_v) { M=step_right; curmt.x=0-ki; // curBtrack=0-ki; } else { M=gap_v; curmt.x=mt_gap_size_v.y; //curBtrack=gap_size_v; } MMM=MM[threadIdx.x]; mt_gap_size_v.x=gap_size_h[threadIdx.x].y; direction_index[640*j+(threadIdx.x+aaa)]=curmt; //if(threadIdx.x==read_reference_number.x-3) if(threadIdx.x==round_size-1 && i<round-1) { MM_global[current_reference_id]=M; gap_h_global[current_reference_id]=step_right; short2 aab; aab.x=ki; aab.y=curmt.y; gap_h_size_global[current_reference_id]= aab; } if(current_reference_id==read_reference_number.y-1) { if(M>=result_row) { result_row=M; result_row_index=aaa+threadIdx.x; // } // printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x); } //if(i==round-1 ) printf("%d %d %d %d \n", round,aaa, current_reference_id,threadIdx.x+aa+1 ); if( i==round-1 && read_reference_number.x==(threadIdx.x+aaa+1)) { if(M>=result_col) { result_col=M; result_col_index=current_reference_id; // +1 } // printf("%d %d %d %d\n",threadIdx.x+aa,M,result_col,result_col_index); } current_reference_id++; } __syncthreads(); //to make sure that the former value of MM[threadIdx.x+1] are used by other threads. MM[threadIdx.x+1]=M; gap_h[threadIdx.x+1]=step_right; gap_size_h[threadIdx.x+1].x=ki; gap_size_h[threadIdx.x+1].y=curmt.y; __syncthreads(); // there should be two synthreads(); // to make sure that all of MM[threadIdx.x+1] have get a new value before M,D and I changed. 
} aaa+=blockDim.x; } // char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion // __shared__ int cigar_index; // int segment_length; // short2 btr; // char new_state; // int step_length; int4 result4; if(threadIdx.x==0) { // printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index); if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1)) { // final_result=result_row; result4.x=read_reference_number.y-1; result4.y=result_row_index; result4.z=read_reference_number.x-1-result_row_index; } else { // final_result=result_col; result4.x=result_col_index; result4.y=read_reference_number.x-1; result4.z=0; } //result[offset*3]=final_result; //printf("%d\n",final_result); //result4.x=fina_i; //result4.y=fina_j; //result4.z=segment_length; result[offset]=result4; } __syncthreads(); offset+=gridDim.x; } } __global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result { int offset=blockIdx.x; int4 result4;; short2 * direction_index; __shared__ char * cigar_store; __shared__ int *cigar_int_store; __shared__ char cigar_m[128]; __shared__ int cigar_int_m[128]; while(offset<size) { char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion __shared__ int cigar_index; int segment_length; short2 btr; char new_state; int step_length; if( threadIdx.x==0) { result4=result[offset]; direction_index=(short2 *) (direction+offset*640*1100); cigar_store=(char *) (cigar+offset*sizeof(char)*128); cigar_int_store=(int *) (cigar_int+offset*128); //printf("\n %d %d\n", final_i,final_j); cigar_index=0; if(result4.z>0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.z; cigar_index++; } segment_length=0; state='N'; do { btr=direction_index[(result4.x+result4.y)*640+result4.y]; if(btr.x>0) { new_state='D'; step_length=btr.x; result4.x-=step_length; } else if(btr.x<0) { new_state='I'; step_length=0-btr.x; result4.y-=step_length; } else { new_state='M'; step_length=btr.y; result4.x-=step_length; result4.y-=step_length; } if(state=='N') state=new_state; if(state==new_state) { segment_length+=step_length; } else { cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; segment_length=step_length; cigar_index++; state=new_state; } }while(result4.x>=0&&result4.y>=0); cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; cigar_index++; if(result4.y>=0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.y+1; cigar_index++; } result4.z=result4.x+1; result4.w=cigar_index; result[offset]=result4; /* for(int i=cigar_index-1;i>=0;i--) { printf("%d%c",cigar_int_m[i],cigar_m[i]); } */ } __syncthreads(); if(threadIdx.x<cigar_index && cigar_index<=blockDim.x) { // if(threadIdx.x==0) // printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]); cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x]; cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x]; // if(threadIdx.x==0) // printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]); } offset+=gridDim.x; } } struct InputData { char read_base[600]; char reference_base[600]; }; int main(int artc, char* args[]) { int total_size=0; FILE * file; file=fopen(args[1],"r"); int size; double computation_time=0;//total_time=0; timespec start,finish; /* char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { 
int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[1]); strcpy(inputdata[index].read_base,data[1]); index++; } */ /* fscanf(file,"%d",&size); while(!feof(file)) { InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { fscanf(file,"%s ",inputdata[i].reference_base); fscanf(file,"%s ",inputdata[i].read_base); } */ char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[i]); strcpy(inputdata[index].read_base,data[j]); index++; } //data preparation. char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128); NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total); char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; //.thus we donot need to worry about align int data_size=0; char * data_d_total; cudaMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4); int * result_h=(int *) malloc(sizeof(int)*size*4); char * cigar_h=(char *) malloc(sizeof(char)*size*128); int * cigar_int_h=(int *) malloc(sizeof(int)*size*128); for(int i=0;i<size;i++) { char4 reference_tep[150]; int read_len=strlen(inputdata[i].read_base); int ref_len=strlen(inputdata[i].reference_base); int new_len=(ref_len+4-1)/4; total_size+=ref_len*read_len; for(int j=0;j<new_len;j++) { reference_tep[j].x=inputdata[i].reference_base[j*4]; if(j*4+1<ref_len) reference_tep[j].y=inputdata[i].reference_base[j*4+1]; if(j*4+2<ref_len) reference_tep[j].z=inputdata[i].reference_base[j*4+2]; if(j*4+3<ref_len) reference_tep[j].w=inputdata[i].reference_base[j*4+3]; } data_num_add[i].read_reference_number.x=read_len; data_num_add[i].read_reference_number.y=ref_len; data_num_add[i].address_array=data_size; memcpy(data_h,inputdata[i].read_base,read_len); data_h+=(read_len+128-1)/128*128; data_size+=(read_len+128-1)/128*128; memcpy(data_h,reference_tep,sizeof(char4)* new_len); data_h+=(new_len*sizeof(char4)+127)/128*128; data_size+=(new_len*sizeof(char4)+127)/128*128; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; cudaMemcpy(data_d_total,data_h_total,data_size_to_copy,cudaMemcpyHostToDevice); NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128; int4 * result_d=(int4 *) (data_d_total+data_size_to_copy); char * cigar; cudaMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int))); int * cigar_int=(int *) (cigar+size*128*sizeof(char)); int * direction; cudaMalloc( (int **) & direction, (size+1) * (640*1100* sizeof (int))); int * MM_global; int * gap_v_global; int * gap_v_size_global; cudaMalloc((int **)&MM_global,size*(640*3*sizeof(int))); dim3 block(128); dim3 grid(size); clock_gettime(CLOCK_MONOTONIC_RAW,&start); calculate_cigar<<<grid,block>>> (size,data_d,num_add_d,result_d,direction,MM_global); //result // calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result // cudaMemcpy(result_h,result_d,size*sizeof(int)*4,cudaMemcpyDeviceToHost); // cudaMemcpy(cigar_h,cigar,128*sizeof(char)*size, cudaMemcpyDeviceToHost); // 
cudaMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); computation_time+=diff(start,finish); // for(int i=0;i<size;i++) { // printf("%d %d\n",result_h[i*4],result_h[i*4+1]); } /* for(int i=0;i<size;i++) { printf("%d\n",result_h[i*4+1]); printf("["); for(int j=0;j<result_h[i*4+3];j++) { if(j!=0) printf(", "); printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]); } printf("]\n"); } */ cudaFree(direction); free(data_h_total); cudaFree(data_d_total); free(inputdata); cudaFree(cigar); free(cigar_int_h); free(cigar_h); // fscanf(file,"%d",&size); } // printf(" computation_time= %e total_time=%e \n",computation_time,0); printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000); return 0; }
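The .cu file above and its .hip counterpart earlier in the pair differ only in mechanical rewrites: runtime API prefixes (cudaMalloc/cudaMemcpy/cudaFree and friends become their hip equivalents) and the kernel-launch syntax. The short sketch below shows that mapping on a dummy kernel; the kernel and function names here are illustrative only, not part of either file.

#include <cuda_runtime.h>

__global__ void dummyKernel(int* out)
{
  out[blockIdx.x * blockDim.x + threadIdx.x] = 1;
}

void launchExample(int* d_out, int n)
{
  // Memory/runtime calls: cudaMalloc/cudaMemcpy/cudaFree/cudaDeviceSynchronize
  // become hipMalloc/hipMemcpy/hipFree/hipDeviceSynchronize in the .hip version.
  dim3 block(128);
  dim3 grid((n + block.x - 1) / block.x);

  // CUDA triple-chevron launch ...
  dummyKernel<<<grid, block>>>(d_out);
  // ... which hipify rewrites as:
  //   hipLaunchKernelGGL(dummyKernel, dim3(grid), dim3(block), 0, 0, d_out);

  cudaDeviceSynchronize();
}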
d05bf2307be9e49488fa4c97232f56a9c51f4dfa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/native/hip/UpSample.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/upsample_nearest1d_native.h> #include <ATen/ops/upsample_nearest1d_backward_native.h> #include <ATen/ops/_upsample_nearest_exact1d_native.h> #include <ATen/ops/_upsample_nearest_exact1d_backward_native.h> #endif namespace at::native { namespace { #define MAX_THREADS 512 // Define a typedef to dispatch to nearest_neighbor_compute_source_index or // nearest_neighbor_exact_compute_source_index typedef int (*nn_compute_source_index_fn_t)(const float, int, int); // Define a typedef to dispatch to nearest_neighbor_bw_compute_source_index or // nearest_neighbor_exact_bw_compute_source_index typedef int (*nn_bw_compute_source_index_fn_t)(const float, int, int); // see NOTE [ Nearest neighbor upsampling kernel implementation ] template <typename scalar_t, nn_compute_source_index_fn_t nn_compute_source_index_fn> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest1d_out_frame( const scalar_t* input, size_t dim_b, size_t dim_c, size_t src_dim_w, size_t dst_dim_w, scalar_t* output, float scale_factor) { int dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_w) return; int c = (dst_idx / dst_dim_w) % dim_c; int dst_x = dst_idx % dst_dim_w; int src_x = nn_compute_source_index_fn(scale_factor, dst_x, src_dim_w); int src_idx = c * src_dim_w + src_x; int src_stride = dim_c * src_dim_w; int dst_stride = dim_c * dst_dim_w; for (int b = 0; b < dim_b; b++) { output[dst_idx] = input[src_idx]; src_idx += src_stride; dst_idx += dst_stride; } } // see NOTE [ Nearest neighbor upsampling kernel implementation ] // Backward operation template <typename scalar_t, typename accscalar_t, nn_bw_compute_source_index_fn_t nn_bw_compute_source_index_fn> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest1d_backward_out_frame( const scalar_t* grad_o, size_t dim_b, size_t dim_c, size_t src_dim_w, size_t dst_dim_w, scalar_t* grad_i, float scale_factor) { int dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_w) return; int c = (dst_idx / (dst_dim_w)) % dim_c; int dst_x = dst_idx % dst_dim_w; // note that we do not want to clamp src_x to src_dim_w, since we might // intentionally want to skip in case of scale_factor < 1.0 int src_x = nn_bw_compute_source_index_fn(scale_factor, dst_x, src_dim_w); int src_x_up = nn_bw_compute_source_index_fn(scale_factor, dst_x+1, src_dim_w); for (int b = 0; b < dim_b; b++) { accscalar_t grad = 0; int src_idx = b * dim_c * src_dim_w + c * src_dim_w + src_x; for (int x = src_x; x < src_x_up; x++) { grad += grad_o[src_idx++]; } grad_i[dst_idx] = grad; dst_idx += dim_c * dst_dim_w; } } template<nn_compute_source_index_fn_t nn_compute_source_index_fn> static void upsample_nearest1d_out_cuda_template( const Tensor& output, const Tensor& input_, IntArrayRef output_size, c10::optional<double> scales) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_nearest1d_out_cuda", {input_arg, output_arg}); int output_width = output_size[0]; int nbatch = input_.size(0); int channels = input_.size(1); int 
input_width = input_.size(2); Tensor input = input_.contiguous(); if (input.numel() == 0) { return; } // upsample_nearest1d meta call makes sure `nbatch != 0` unsigned int n = output.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{ceil_div(n, bdim.x)}; // safe check for int32 indexing; implicitly restrict launch config for kernel TORCH_CHECK(output.numel() <= std::numeric_limits<int32_t>::max()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Byte, input.scalar_type(), "upsample_nearest1d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.data_ptr<scalar_t>(); auto odata = output.data_ptr<scalar_t>(); const float scale_factor = compute_scales_value<float>(scales, input_width, output_width); hipLaunchKernelGGL(( upsample_nearest1d_out_frame<scalar_t, nn_compute_source_index_fn>), dim3(gdim), dim3(bdim), 0, stream, idata, nbatch, channels, input_width, output_width, odata, scale_factor); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } template<nn_compute_source_index_fn_t nn_bw_compute_source_index_fn> static void upsample_nearest1d_backward_out_cuda_template( const Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_nearest1d_backward_out_cuda_template", {grad_output_arg, grad_input_arg}); int output_width = output_size[0]; int nbatch = input_size[0]; int channels = input_size[1]; int input_width = input_size[2]; Tensor grad_output = grad_output_.contiguous(); if (grad_input.numel() == 0) { return; } // upsample_nearest1d meta call makes sure `nbatch != 0` unsigned int n = grad_input.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{ceil_div(n, bdim.x)}; // safe check for int32 indexing; implicitly restrict launch config for kernel TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest1d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.data_ptr<scalar_t>(); auto odata = grad_output.data_ptr<scalar_t>(); const float scale_factor = compute_scales_value_backwards<float>(scales, output_width, input_width); hipLaunchKernelGGL(( upsample_nearest1d_backward_out_frame<scalar_t, accscalar_t, nn_bw_compute_source_index_fn>) , dim3(gdim), dim3(bdim), 0, stream, odata, nbatch, channels, output_width, input_width, idata, scale_factor); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } } // namespace TORCH_IMPL_FUNC(upsample_nearest1d_out_cuda) ( const Tensor& input, IntArrayRef output_size, c10::optional<double> scales, const Tensor& output ) { upsample_nearest1d_out_cuda_template<nearest_neighbor_compute_source_index>( output, input, output_size, scales); } TORCH_IMPL_FUNC(_upsample_nearest_exact1d_out_cuda) ( const Tensor& input, IntArrayRef output_size, c10::optional<double> scales, const Tensor& output ) { upsample_nearest1d_out_cuda_template<nearest_neighbor_exact_compute_source_index>(output, input, output_size, scales); } 
TORCH_IMPL_FUNC(upsample_nearest1d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales, const Tensor& grad_input ) { upsample_nearest1d_backward_out_cuda_template<nearest_neighbor_bw_compute_source_index>( grad_input, grad_output, output_size, input_size, scales); } TORCH_IMPL_FUNC(_upsample_nearest_exact1d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales, const Tensor& grad_input ) { upsample_nearest1d_backward_out_cuda_template<nearest_neighbor_exact_bw_compute_source_index>( grad_input, grad_output, output_size, input_size, scales); } } // namespace at::native
d05bf2307be9e49488fa4c97232f56a9c51f4dfa.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/cuda/UpSample.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/upsample_nearest1d_native.h> #include <ATen/ops/upsample_nearest1d_backward_native.h> #include <ATen/ops/_upsample_nearest_exact1d_native.h> #include <ATen/ops/_upsample_nearest_exact1d_backward_native.h> #endif namespace at::native { namespace { #define MAX_THREADS 512 // Define a typedef to dispatch to nearest_neighbor_compute_source_index or // nearest_neighbor_exact_compute_source_index typedef int (*nn_compute_source_index_fn_t)(const float, int, int); // Define a typedef to dispatch to nearest_neighbor_bw_compute_source_index or // nearest_neighbor_exact_bw_compute_source_index typedef int (*nn_bw_compute_source_index_fn_t)(const float, int, int); // see NOTE [ Nearest neighbor upsampling kernel implementation ] template <typename scalar_t, nn_compute_source_index_fn_t nn_compute_source_index_fn> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest1d_out_frame( const scalar_t* input, size_t dim_b, size_t dim_c, size_t src_dim_w, size_t dst_dim_w, scalar_t* output, float scale_factor) { int dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_w) return; int c = (dst_idx / dst_dim_w) % dim_c; int dst_x = dst_idx % dst_dim_w; int src_x = nn_compute_source_index_fn(scale_factor, dst_x, src_dim_w); int src_idx = c * src_dim_w + src_x; int src_stride = dim_c * src_dim_w; int dst_stride = dim_c * dst_dim_w; for (int b = 0; b < dim_b; b++) { output[dst_idx] = input[src_idx]; src_idx += src_stride; dst_idx += dst_stride; } } // see NOTE [ Nearest neighbor upsampling kernel implementation ] // Backward operation template <typename scalar_t, typename accscalar_t, nn_bw_compute_source_index_fn_t nn_bw_compute_source_index_fn> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest1d_backward_out_frame( const scalar_t* grad_o, size_t dim_b, size_t dim_c, size_t src_dim_w, size_t dst_dim_w, scalar_t* grad_i, float scale_factor) { int dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_w) return; int c = (dst_idx / (dst_dim_w)) % dim_c; int dst_x = dst_idx % dst_dim_w; // note that we do not want to clamp src_x to src_dim_w, since we might // intentionally want to skip in case of scale_factor < 1.0 int src_x = nn_bw_compute_source_index_fn(scale_factor, dst_x, src_dim_w); int src_x_up = nn_bw_compute_source_index_fn(scale_factor, dst_x+1, src_dim_w); for (int b = 0; b < dim_b; b++) { accscalar_t grad = 0; int src_idx = b * dim_c * src_dim_w + c * src_dim_w + src_x; for (int x = src_x; x < src_x_up; x++) { grad += grad_o[src_idx++]; } grad_i[dst_idx] = grad; dst_idx += dim_c * dst_dim_w; } } template<nn_compute_source_index_fn_t nn_compute_source_index_fn> static void upsample_nearest1d_out_cuda_template( const Tensor& output, const Tensor& input_, IntArrayRef output_size, c10::optional<double> scales) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_nearest1d_out_cuda", {input_arg, output_arg}); int output_width = output_size[0]; int nbatch = input_.size(0); int channels = input_.size(1); int input_width = input_.size(2); Tensor input = input_.contiguous(); if (input.numel() == 0) 
{ return; } // upsample_nearest1d meta call makes sure `nbatch != 0` unsigned int n = output.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{ceil_div(n, bdim.x)}; // safe check for int32 indexing; implicitly restrict launch config for kernel TORCH_CHECK(output.numel() <= std::numeric_limits<int32_t>::max()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Byte, input.scalar_type(), "upsample_nearest1d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.data_ptr<scalar_t>(); auto odata = output.data_ptr<scalar_t>(); const float scale_factor = compute_scales_value<float>(scales, input_width, output_width); upsample_nearest1d_out_frame<scalar_t, nn_compute_source_index_fn><<<gdim, bdim, 0, stream>>>( idata, nbatch, channels, input_width, output_width, odata, scale_factor); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } template<nn_compute_source_index_fn_t nn_bw_compute_source_index_fn> static void upsample_nearest1d_backward_out_cuda_template( const Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_nearest1d_backward_out_cuda_template", {grad_output_arg, grad_input_arg}); int output_width = output_size[0]; int nbatch = input_size[0]; int channels = input_size[1]; int input_width = input_size[2]; Tensor grad_output = grad_output_.contiguous(); if (grad_input.numel() == 0) { return; } // upsample_nearest1d meta call makes sure `nbatch != 0` unsigned int n = grad_input.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{ceil_div(n, bdim.x)}; // safe check for int32 indexing; implicitly restrict launch config for kernel TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest1d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.data_ptr<scalar_t>(); auto odata = grad_output.data_ptr<scalar_t>(); const float scale_factor = compute_scales_value_backwards<float>(scales, output_width, input_width); upsample_nearest1d_backward_out_frame<scalar_t, accscalar_t, nn_bw_compute_source_index_fn> <<<gdim, bdim, 0, stream>>>( odata, nbatch, channels, output_width, input_width, idata, scale_factor); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } // namespace TORCH_IMPL_FUNC(upsample_nearest1d_out_cuda) ( const Tensor& input, IntArrayRef output_size, c10::optional<double> scales, const Tensor& output ) { upsample_nearest1d_out_cuda_template<nearest_neighbor_compute_source_index>( output, input, output_size, scales); } TORCH_IMPL_FUNC(_upsample_nearest_exact1d_out_cuda) ( const Tensor& input, IntArrayRef output_size, c10::optional<double> scales, const Tensor& output ) { upsample_nearest1d_out_cuda_template<nearest_neighbor_exact_compute_source_index>(output, input, output_size, scales); } TORCH_IMPL_FUNC(upsample_nearest1d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales, const Tensor& grad_input 
) { upsample_nearest1d_backward_out_cuda_template<nearest_neighbor_bw_compute_source_index>( grad_input, grad_output, output_size, input_size, scales); } TORCH_IMPL_FUNC(_upsample_nearest_exact1d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales, const Tensor& grad_input ) { upsample_nearest1d_backward_out_cuda_template<nearest_neighbor_exact_bw_compute_source_index>( grad_input, grad_output, output_size, input_size, scales); } } // namespace at::native
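The forward and backward templates above share one launch recipe: one thread per element of a single batch slice, a block size capped at min(maxThreadsPerBlock, MAX_THREADS), a grid of ceil_div(n, block.x), and a TORCH_CHECK that the total element count fits in int32 because the kernels index with 32-bit integers. A minimal standalone sketch of that recipe follows; ceil_div, nearest_src_index, upsample1d_sketch and the 512-thread cap are illustrative stand-ins, not the ATen internals.

#include <algorithm>
#include <cuda_runtime.h>

__host__ __device__ inline unsigned int ceil_div(unsigned int a, unsigned int b) {
  return (a + b - 1) / b;                    // round up so every output element gets a thread
}

// Nearest-neighbor source index: scale the output index back into the input
// range and clamp to the last valid input element.
__device__ inline int nearest_src_index(int dst, float scale, int in_w) {
  int src = static_cast<int>(dst * scale);
  return src < in_w ? src : in_w - 1;
}

__global__ void upsample1d_sketch(const float* in, float* out,
                                  int in_w, int out_w, float scale) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < out_w) out[i] = in[nearest_src_index(i, scale, in_w)];
}

void launch_upsample1d_sketch(const float* d_in, float* d_out,
                              int in_w, int out_w, cudaStream_t stream) {
  int dev = 0, max_threads = 0;
  cudaGetDevice(&dev);
  cudaDeviceGetAttribute(&max_threads, cudaDevAttrMaxThreadsPerBlock, dev);
  unsigned int block = std::min<unsigned int>(max_threads, 512u);   // cap analogous to MAX_THREADS
  unsigned int grid  = ceil_div(static_cast<unsigned int>(out_w), block);
  float scale = static_cast<float>(in_w) / static_cast<float>(out_w); // analogous to compute_scales_value
  upsample1d_sketch<<<grid, block, 0, stream>>>(d_in, d_out, in_w, out_w, scale);
}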
742954c66582fe2a36d5f496fa213a7c10ba3ed4.hip
// !!! This is a file automatically generated by hipify!!! // Utilities and system includes #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP #define SIZE 60000000 #define TILE_DIM 1024 #define INNER_REPS 16 template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; T ra, rb, rc, rd; if (xIndex < SIZE) { ra=A[xIndex]; rb=A[SIZE-xIndex]; rc=A[xIndex]; rd=A[SIZE-xIndex]; // rb=A[xIndex]; #pragma unroll 16 for (int i=0;i<INNER_REPS;i++) { ra=ra*rb; rb=rb*rc; rc=rc*rd; rd=rd*ra; } C1[xIndex]=ra; C2[xIndex]=rb; C3[xIndex]=rc; C4[xIndex]=rd; } } int main(int argc, char **argv) { int outer_reps, vector_size, tile_dim; vector_size = SIZE; tile_dim = TILE_DIM; if (argc>1){ outer_reps = atoi(argv[1]); }else{ outer_reps = 1; } // execution configuration parameters dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1); // CUDA events hipEvent_t start, stop; size_t mem_size = static_cast<size_t>(sizeof(float) * vector_size); // allocate host memory float *h_iA = (float *) malloc(mem_size); float *h_oC1 = (float *) malloc(mem_size); float *h_oC2 = (float *) malloc(mem_size); float *h_oC3 = (float *) malloc(mem_size); float *h_oC4 = (float *) malloc(mem_size); // initalize host data for (int i = 0; i < vector_size; ++i) { h_iA[i] = (float) i+3; // h_iB[i] = (float) i+3; } // allocate device memory float *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4; hipMalloc((void **) &d_iA, mem_size); // hipMalloc((void **) &d_iB, mem_size); hipMalloc((void **) &d_oC1, mem_size); hipMalloc((void **) &d_oC2, mem_size); hipMalloc((void **) &d_oC3, mem_size); hipMalloc((void **) &d_oC4, mem_size); // copy host data to device hipMemcpy(d_iA, h_iA, mem_size, hipMemcpyHostToDevice); // hipMemcpy(d_iB, h_iB, mem_size, hipMemcpyHostToDevice); // print out common data for all kernels printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x); // initialize events hipEventCreate(&start); hipEventCreate(&stop); // take measurements for loop over kernel launches hipEventRecord(start, 0); for (int i=0; i < outer_reps; i++) { hipLaunchKernelGGL(( simpleKernel<float>), dim3(grid), dim3(threads), 0, 0, d_iA, d_oC1, d_oC2, d_oC3, d_oC4); } hipEventRecord(stop, 0); hipEventSynchronize(stop); float kernelTime; hipEventElapsedTime(&kernelTime, start, stop); // take measurements for loop inside kernel hipMemcpy(h_oC1, d_oC1, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC2, d_oC2, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC3, d_oC3, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC4, d_oC4, mem_size, hipMemcpyDeviceToHost); printf("teste: %f\n", h_oC1[0]); // report effective bandwidths float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps); printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", kernelBandwidth, kernelTime/outer_reps, vector_size, 1, tile_dim * 1); free(h_iA); // free(h_iB); free(h_oC1); free(h_oC2); free(h_oC3); free(h_oC4); hipFree(d_iA); // hipFree(d_iB); hipFree(d_oC1); hipFree(d_oC2); hipFree(d_oC3); hipFree(d_oC4); hipEventDestroy(start); hipEventDestroy(stop); hipDeviceReset(); printf("Test passed\n"); exit(EXIT_SUCCESS); }
742954c66582fe2a36d5f496fa213a7c10ba3ed4.cu
// Utilities and system includes #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_profiler_api.h> #define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP #define SIZE 60000000 #define TILE_DIM 1024 #define INNER_REPS 16 template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; T ra, rb, rc, rd; if (xIndex < SIZE) { ra=A[xIndex]; rb=A[SIZE-xIndex]; rc=A[xIndex]; rd=A[SIZE-xIndex]; // rb=A[xIndex]; #pragma unroll 16 for (int i=0;i<INNER_REPS;i++) { ra=ra*rb; rb=rb*rc; rc=rc*rd; rd=rd*ra; } C1[xIndex]=ra; C2[xIndex]=rb; C3[xIndex]=rc; C4[xIndex]=rd; } } int main(int argc, char **argv) { int outer_reps, vector_size, tile_dim; vector_size = SIZE; tile_dim = TILE_DIM; if (argc>1){ outer_reps = atoi(argv[1]); }else{ outer_reps = 1; } // execution configuration parameters dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1); // CUDA events cudaEvent_t start, stop; size_t mem_size = static_cast<size_t>(sizeof(float) * vector_size); // allocate host memory float *h_iA = (float *) malloc(mem_size); float *h_oC1 = (float *) malloc(mem_size); float *h_oC2 = (float *) malloc(mem_size); float *h_oC3 = (float *) malloc(mem_size); float *h_oC4 = (float *) malloc(mem_size); // initalize host data for (int i = 0; i < vector_size; ++i) { h_iA[i] = (float) i+3; // h_iB[i] = (float) i+3; } // allocate device memory float *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4; cudaMalloc((void **) &d_iA, mem_size); // cudaMalloc((void **) &d_iB, mem_size); cudaMalloc((void **) &d_oC1, mem_size); cudaMalloc((void **) &d_oC2, mem_size); cudaMalloc((void **) &d_oC3, mem_size); cudaMalloc((void **) &d_oC4, mem_size); // copy host data to device cudaMemcpy(d_iA, h_iA, mem_size, cudaMemcpyHostToDevice); // cudaMemcpy(d_iB, h_iB, mem_size, cudaMemcpyHostToDevice); // print out common data for all kernels printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x); // initialize events cudaEventCreate(&start); cudaEventCreate(&stop); // take measurements for loop over kernel launches cudaEventRecord(start, 0); for (int i=0; i < outer_reps; i++) { simpleKernel<float><<<grid, threads>>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float kernelTime; cudaEventElapsedTime(&kernelTime, start, stop); // take measurements for loop inside kernel cudaMemcpy(h_oC1, d_oC1, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC2, d_oC2, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC3, d_oC3, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC4, d_oC4, mem_size, cudaMemcpyDeviceToHost); printf("teste: %f\n", h_oC1[0]); // report effective bandwidths float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps); printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", kernelBandwidth, kernelTime/outer_reps, vector_size, 1, tile_dim * 1); free(h_iA); // free(h_iB); free(h_oC1); free(h_oC2); free(h_oC3); free(h_oC4); cudaFree(d_iA); // cudaFree(d_iB); cudaFree(d_oC1); cudaFree(d_oC2); cudaFree(d_oC3); cudaFree(d_oC4); cudaEventDestroy(start); cudaEventDestroy(stop); cudaDeviceReset(); printf("Test passed\n"); exit(EXIT_SUCCESS); }
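The .hip/.cu pair above is the same memory-bound benchmark; the substantive differences hipify introduces are the launch site (hipLaunchKernelGGL versus the <<<>>> syntax) and the runtime prefixes. The timing pattern itself, recording an event before and after a loop of launches, synchronizing on the stop event, then converting bytes moved per launch into GB/s, is worth isolating. A hedged, self-contained CUDA sketch of just that pattern; someKernel, the sizes, and the 2x read-plus-write accounting are placeholders, not the benchmark's own numbers.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void someKernel(float* p, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] *= 2.0f;                 // one load + one store per element
}

int main() {
  const int n = 1 << 24;
  const size_t bytes = n * sizeof(float);
  float* d = nullptr;
  cudaMalloc(&d, bytes);

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  const int reps = 10;
  cudaEventRecord(start, 0);
  for (int r = 0; r < reps; ++r)
    someKernel<<<(n + 255) / 256, 256>>>(d, n);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);              // elapsed time is only valid after this

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  float msPerLaunch = ms / reps;
  // 2x: each element is read once and written once by this placeholder kernel
  float gbps = 2.0f * bytes / (1024.0f * 1024.0f * 1024.0f) / (msPerLaunch / 1000.0f);
  printf("%.3f ms/launch, %.2f GB/s\n", msPerLaunch, gbps);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d);
  return 0;
}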
simpleGLES_screen.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // Copyright 2014 - 2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* This example demonstrates how to use the CUDA C bindings to OpenGL ES to dynamically modify a vertex buffer using a CUDA C kernel. The steps are: 1. Create an empty vertex buffer object (VBO) 2. Register the VBO with CUDA C 3. Map the VBO for writing from CUDA C 4. Run CUDA C kernel to modify the vertex positions 5. Unmap the VBO 6. Render the results using OpenGL ES Host code */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <stdarg.h> #include <unistd.h> #include <screen/screen.h> #include "graphics_interface.c" // includes, cuda #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h #include <timer.h> // timing functions // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check #include <hip/hip_vector_types.h> void checkCUDAError() { hipError_t res = hipGetLastError(); if (res != hipSuccess) { fprintf(stderr, "Line %d: CUDA Error (%d): %s\n", __LINE__, res, hipGetErrorString(res)); hipDeviceReset(); exit(1); } } #define MAX_EPSILON_ERROR 0.0f #define THRESHOLD 0.0f #define REFRESH_DELAY 1 //ms #define GUI_IDLE 0x100 #define GUI_ROTATE 0x101 #define GUI_TRANSLATE 0x102 int gui_mode; //////////////////////////////////////////////////////////////////////////////// // Default configuration unsigned int window_width = 512; unsigned int window_height = 512; unsigned int dispno = 0; // constants const unsigned int mesh_width = 256; const unsigned int mesh_height = 256; // OpenGL ES variables and interop with CUDA C GLuint mesh_vao, mesh_vbo; struct cudaGraphicsResource *cuda_vbo_resource; void *d_vbo_buffer = NULL; float g_fAnim = 0.0; // UI / mouse controls int mouse_old_x, mouse_old_y; int mouse_buttons = 0; float rotate_x = 0.0, rotate_y = 0.0; float translate_z = -3.0; StopWatchInterface *timer = NULL; // Frame statistics int frame; int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling int g_Index = 0; float avgFPS = 0.0f; unsigned int frameCount = 0; unsigned int g_TotalErrors = 0; // The default number of seconds after which the test will end. #define TIME_LIMIT 10.0 // 10 secs // Flag indicating it is time to shut down static GLboolean shutdown = GL_FALSE; // Callback to close window static void closeCB_app(void) { shutdown = GL_TRUE; } // Callback to handle key presses static void keyCB_app(char key, int state) { // Ignoring releases if (!state) return; if ((key == 'q') || (key == 'Q') || (key == NvGlDemoKeyCode_Escape)) shutdown = GL_TRUE; } // Auto-Verification Code bool g_bQAReadback = false; int *pArgc = NULL; char **pArgv = NULL; #define MAX(a,b) ((a > b) ? 
a : b) //////////////////////////////////////////////////////////////////////////////// // declaration, forward // CUDA functionality void runCuda(struct cudaGraphicsResource **vbo_resource); void runAutoTest(int devID, char **argv, char *ref_file); void checkResultCuda(int argc, char **argv, const GLuint &vbo); const char *sSDKsample = "simpleGLES on Screen (VBO)"; void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; fpsLimit = (int)MAX(avgFPS, 1.f); sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "Cuda/OpenGL ES Interop (VBO): %3.1f fps (Max 1000 fps)", avgFPS); graphics_set_windowtitle(fps); } /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! @param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; // write output vertex pos[y*width+x] = make_float4(u, w, v, 1.0f); } void launch_kernel(float4 *pos, unsigned int mesh_width, unsigned int mesh_height, float time) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); hipLaunchKernelGGL(( simple_vbo_kernel), dim3(grid), dim3(block), 0, 0, pos, mesh_width, mesh_height, time); } //////////////////////////////////////////////////////////////////////////////// //! Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runCuda(struct cudaGraphicsResource **vbo_resource) { // map OpenGL buffer object for writing from CUDA float4 *dptr; hipGraphicsMapResources(1, vbo_resource, 0); size_t num_bytes; hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource); launch_kernel(dptr, mesh_width, mesh_height, g_fAnim); // unmap buffer object hipGraphicsUnmapResources(1, vbo_resource, 0); } #ifndef FOPEN #define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode)) #endif void sdkDumpBin2(void *data, unsigned int bytes, const char *filename) { printf("sdkDumpBin: <%s>\n", filename); FILE *fp; FOPEN(fp, filename, "wb"); fwrite(data, bytes, 1, fp); fflush(fp); fclose(fp); } //////////////////////////////////////////////////////////////////////////////// //! 
Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runAutoTest(int devID, char **argv, char *ref_file) { char *reference_file = NULL; void *imageData = malloc(mesh_width*mesh_height*sizeof(float)); // execute the kernel launch_kernel((float4 *)d_vbo_buffer, mesh_width, mesh_height, g_fAnim); hipDeviceSynchronize(); getLastCudaError("launch_kernel failed"); hipMemcpy(imageData, d_vbo_buffer, mesh_width*mesh_height*sizeof(float), hipMemcpyDeviceToHost); sdkDumpBin2(imageData, mesh_width*mesh_height*sizeof(float), "simpleGLES_screen.bin"); reference_file = sdkFindFilePath(ref_file, argv[0]); if (reference_file && !sdkCompareBin2BinFloat("simpleGLES_screen.bin", reference_file, mesh_width*mesh_height*sizeof(float), MAX_EPSILON_ERROR, THRESHOLD, pArgv[0])) { g_TotalErrors++; } } //////////////////////////////////////////////////////////////////////////////// //! Display callback //////////////////////////////////////////////////////////////////////////////// void display_thisframe(float time_delta) { sdkStartTimer(&timer); // run CUDA kernel to generate vertex positions runCuda(&cuda_vbo_resource); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height); glFinish(); g_fAnim += time_delta; sdkStopTimer(&timer); computeFPS(); } //////////////////////////////////////////////////////////////////////////////// //! Check if the result is correct or write data to file for external //! regression testing //////////////////////////////////////////////////////////////////////////////// void checkResultCuda(int argc, char **argv, const GLuint &vbo) { if (!d_vbo_buffer) { printf("%s: Mapping result buffer from OpenGL ES\n", __FUNCTION__); hipGraphicsUnregisterResource(cuda_vbo_resource); // map buffer object glBindBuffer(GL_ARRAY_BUFFER, vbo); float *data = (float *) glMapBufferRange(GL_ARRAY_BUFFER, 0, mesh_width * mesh_height * 4 * sizeof(float), GL_READ_ONLY); // check result if (checkCmdLineFlag(argc, (const char **) argv, "regression")) { // write file for regression test sdkWriteFile<float>("./data/regression.dat", data, mesh_width * mesh_height * 3, 0.0, false); } // unmap GL buffer object if (!glUnmapBuffer(GL_ARRAY_BUFFER)) { fprintf(stderr, "Unmap buffer failed.\n"); fflush(stderr); } checkCudaErrors(hipGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo, hipGraphicsMapFlagsWriteDiscard)); CHECK_GLERROR(); } } GLuint mesh_shader = 0; void readAndCompileShaderFromGLSLFile(GLuint new_shaderprogram, const char *filename, GLenum shaderType) { FILE *file = fopen(filename,"rb"); // open shader text file if (!file) { error_exit("Filename %s does not exist\n", filename); } // get the size of the file and read it fseek(file,0,SEEK_END); GLint size = ftell(file); char *data = (char*)malloc(sizeof(char)*(size + 1)); memset(data, 0, sizeof(char)*(size + 1)); fseek(file,0,SEEK_SET); size_t res = fread(data,1,size,file); fclose(file); GLuint shader = glCreateShader(shaderType); glShaderSource(shader, 1, (const GLchar**)&data, &size); glCompileShader(shader); CHECK_GLERROR(); GLint compile_success = 0; glGetShaderiv(shader, GL_COMPILE_STATUS, &compile_success); CHECK_GLERROR(); if (compile_success == GL_FALSE) { printf("Compilation of %s failed!\n Reason:\n", filename); GLint maxLength = 0; glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &maxLength); char errorLog[maxLength]; glGetShaderInfoLog(shader, maxLength, &maxLength, &errorLog[0]); printf("%s", errorLog); glDeleteShader(shader); exit(1); } 
glAttachShader(new_shaderprogram, shader); glDeleteShader(shader); free(data); } GLuint ShaderCreate(const char *vshader_filename, const char *fshader_filename) { printf("Loading GLSL shaders %s %s\n", vshader_filename, fshader_filename); GLuint new_shaderprogram = glCreateProgram(); CHECK_GLERROR(); if (vshader_filename) { readAndCompileShaderFromGLSLFile(new_shaderprogram, vshader_filename, GL_VERTEX_SHADER); } CHECK_GLERROR(); if (fshader_filename) { readAndCompileShaderFromGLSLFile(new_shaderprogram, fshader_filename, GL_FRAGMENT_SHADER); } CHECK_GLERROR(); glLinkProgram(new_shaderprogram); CHECK_GLERROR(); GLint link_success; glGetProgramiv(new_shaderprogram, GL_LINK_STATUS, &link_success); if (link_success == GL_FALSE) { printf("Linking of %s with %s failed!\n Reason:\n", vshader_filename, fshader_filename); GLint maxLength = 0; glGetShaderiv(new_shaderprogram, GL_INFO_LOG_LENGTH, &maxLength); char errorLog[maxLength]; glGetShaderInfoLog(new_shaderprogram, maxLength, &maxLength, &errorLog[0]); printf("%s", errorLog); exit(EXIT_FAILURE); } return new_shaderprogram; } //=========================================================================== // InitGraphicsState() - initialize OpenGL //=========================================================================== static void InitGraphicsState(void) { char *GL_version=(char *)glGetString(GL_VERSION); char *GL_vendor=(char *)glGetString(GL_VENDOR); char *GL_renderer=(char *)glGetString(GL_RENDERER); printf("Version: %s\n", GL_version); printf("Vendor: %s\n", GL_vendor); printf("Renderer: %s\n", GL_renderer); // RENDERING SETUP (OpenGL ES or OpenGL Core Profile!) glGenVertexArrays(1, &mesh_vao); // Features' Vertex Array Object allocation glBindVertexArray(mesh_vao); // bind VAO // initialize buffer object glGenBuffers(1, &mesh_vbo); glBindBuffer(GL_ARRAY_BUFFER, mesh_vbo); unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW); glVertexAttribPointer((GLuint)0, 4, GL_FLOAT, GL_FALSE, 0, 0); glEnableVertexAttribArray(0); hipGraphicsGLRegisterBuffer(&cuda_vbo_resource, mesh_vbo, hipGraphicsMapFlagsNone); checkCUDAError(); // GLSL stuff char *vertex_shader_path = sdkFindFilePath("mesh.vert.glsl", pArgv[0]); char *fragment_shader_path = sdkFindFilePath("mesh.frag.glsl", pArgv[0]); if (vertex_shader_path == NULL || fragment_shader_path == NULL) { printf("Error finding shader file\n"); exit(EXIT_FAILURE); } mesh_shader = ShaderCreate(vertex_shader_path, fragment_shader_path); CHECK_GLERROR(); free(vertex_shader_path); free(fragment_shader_path); glUseProgram(mesh_shader); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// bool runTest(int argc, char **argv, char *ref_file) { // command line mode only if (ref_file != NULL) { // This will pick the best possible CUDA capable device int devID = findCudaDevice(argc, (const char **)argv); // create VBO checkCudaErrors(hipMalloc((void **)&d_vbo_buffer, mesh_width*mesh_height*4*sizeof(float))); // run the cuda part runAutoTest(devID, argv, ref_file); // check result of Cuda step checkResultCuda(argc, argv, mesh_vbo); hipFree(d_vbo_buffer); d_vbo_buffer = NULL; } else { double endTime = TIME_LIMIT; // this would use command-line specified CUDA device, note that CUDA defaults to highest Gflops/s device if (checkCmdLineFlag(argc, (const char **)argv, "device")) { error_exit("Device setting not yet implemented!\n"); } // display selection if (checkCmdLineFlag(argc, (const char **)argv, "dispno")) { dispno = getCmdLineArgumentInt(argc, (const char **)argv, "dispno"); } // Window width if (checkCmdLineFlag(argc, (const char **)argv, "width")) { window_width = getCmdLineArgumentInt(argc, (const char **)argv, "width"); } // Window Height if (checkCmdLineFlag(argc, (const char **)argv, "height")) { window_height = getCmdLineArgumentInt(argc, (const char **)argv, "height"); } // Determine how long to run for in secs: default is 10s if (checkCmdLineFlag(argc, (const char **)argv, "runtime")) { endTime = getCmdLineArgumentInt(argc, (const char **)argv, "runtime"); } SetCloseCB(closeCB_app); SetKeyCB(keyCB_app); // create QNX screen window and set up associated OpenGL ES context graphics_setup_window(0,0, window_width, window_height, sSDKsample, dispno); InitGraphicsState(); // set up GLES stuff glClearColor( 0, 0.5, 1, 1 ); // blue-ish background glClear( GL_COLOR_BUFFER_BIT ); graphics_swap_buffers(); int frame = 0; struct timeval begin, end; gettimeofday(&begin, NULL); // Print runtime if (endTime < 0.0) { endTime = TIME_LIMIT; printf(" running forever...\n"); } else { printf(" running for %f seconds...\n", endTime); } while (!shutdown) { frame++; display_thisframe(0.010); usleep(1000); graphics_swap_buffers(); CheckEvents(); gettimeofday(&end, 0); double elapsed = (end.tv_sec - begin.tv_sec) + ((end.tv_usec - begin.tv_usec)/1000000.0); // Check whether time limit has been exceeded if (!shutdown) shutdown = (endTime <= elapsed); } // NOTE: Before destroying OpenGL ES context, must unregister all shared resources from CUDA ! checkCudaErrors(hipGraphicsUnregisterResource(cuda_vbo_resource)); graphics_close_window(); // close window and destroy OpenGL ES context } return true; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { char *ref_file = NULL; pArgc = &argc; pArgv = argv; #if defined(__linux__) setenv ("DISPLAY", ":0", 0); #endif printf("%s starting...\n", sSDKsample); if (argc > 1) { if (checkCmdLineFlag(argc, (const char **)argv, "file")) { // In this mode, we run without OpenGL and see if VBO is generated correctly getCmdLineArgumentString(argc, (const char **)argv, "file", (char **)&ref_file); } } printf("\n"); runTest(argc, argv, ref_file); printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!"); exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE); }
simpleGLES_screen.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 2014 - 2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* This example demonstrates how to use the CUDA C bindings to OpenGL ES to dynamically modify a vertex buffer using a CUDA C kernel. The steps are: 1. Create an empty vertex buffer object (VBO) 2. Register the VBO with CUDA C 3. Map the VBO for writing from CUDA C 4. Run CUDA C kernel to modify the vertex positions 5. Unmap the VBO 6. Render the results using OpenGL ES Host code */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <stdarg.h> #include <unistd.h> #include <screen/screen.h> #include "graphics_interface.c" // includes, cuda #include <cuda_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h #include <timer.h> // timing functions // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check #include <vector_types.h> void checkCUDAError() { cudaError_t res = cudaGetLastError(); if (res != cudaSuccess) { fprintf(stderr, "Line %d: CUDA Error (%d): %s\n", __LINE__, res, cudaGetErrorString(res)); cudaThreadExit(); exit(1); } } #define MAX_EPSILON_ERROR 0.0f #define THRESHOLD 0.0f #define REFRESH_DELAY 1 //ms #define GUI_IDLE 0x100 #define GUI_ROTATE 0x101 #define GUI_TRANSLATE 0x102 int gui_mode; //////////////////////////////////////////////////////////////////////////////// // Default configuration unsigned int window_width = 512; unsigned int window_height = 512; unsigned int dispno = 0; // constants const unsigned int mesh_width = 256; const unsigned int mesh_height = 256; // OpenGL ES variables and interop with CUDA C GLuint mesh_vao, mesh_vbo; struct cudaGraphicsResource *cuda_vbo_resource; void *d_vbo_buffer = NULL; float g_fAnim = 0.0; // UI / mouse controls int mouse_old_x, mouse_old_y; int mouse_buttons = 0; float rotate_x = 0.0, rotate_y = 0.0; float translate_z = -3.0; StopWatchInterface *timer = NULL; // Frame statistics int frame; int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling int g_Index = 0; float avgFPS = 0.0f; unsigned int frameCount = 0; unsigned int g_TotalErrors = 0; // The default number of seconds after which the test will end. #define TIME_LIMIT 10.0 // 10 secs // Flag indicating it is time to shut down static GLboolean shutdown = GL_FALSE; // Callback to close window static void closeCB_app(void) { shutdown = GL_TRUE; } // Callback to handle key presses static void keyCB_app(char key, int state) { // Ignoring releases if (!state) return; if ((key == 'q') || (key == 'Q') || (key == NvGlDemoKeyCode_Escape)) shutdown = GL_TRUE; } // Auto-Verification Code bool g_bQAReadback = false; int *pArgc = NULL; char **pArgv = NULL; #define MAX(a,b) ((a > b) ? 
a : b) //////////////////////////////////////////////////////////////////////////////// // declaration, forward // CUDA functionality void runCuda(struct cudaGraphicsResource **vbo_resource); void runAutoTest(int devID, char **argv, char *ref_file); void checkResultCuda(int argc, char **argv, const GLuint &vbo); const char *sSDKsample = "simpleGLES on Screen (VBO)"; void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; fpsLimit = (int)MAX(avgFPS, 1.f); sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "Cuda/OpenGL ES Interop (VBO): %3.1f fps (Max 1000 fps)", avgFPS); graphics_set_windowtitle(fps); } /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! @param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; // write output vertex pos[y*width+x] = make_float4(u, w, v, 1.0f); } void launch_kernel(float4 *pos, unsigned int mesh_width, unsigned int mesh_height, float time) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); simple_vbo_kernel<<< grid, block>>>(pos, mesh_width, mesh_height, time); } //////////////////////////////////////////////////////////////////////////////// //! Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runCuda(struct cudaGraphicsResource **vbo_resource) { // map OpenGL buffer object for writing from CUDA float4 *dptr; cudaGraphicsMapResources(1, vbo_resource, 0); size_t num_bytes; cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource); launch_kernel(dptr, mesh_width, mesh_height, g_fAnim); // unmap buffer object cudaGraphicsUnmapResources(1, vbo_resource, 0); } #ifndef FOPEN #define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode)) #endif void sdkDumpBin2(void *data, unsigned int bytes, const char *filename) { printf("sdkDumpBin: <%s>\n", filename); FILE *fp; FOPEN(fp, filename, "wb"); fwrite(data, bytes, 1, fp); fflush(fp); fclose(fp); } //////////////////////////////////////////////////////////////////////////////// //! 
Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runAutoTest(int devID, char **argv, char *ref_file) { char *reference_file = NULL; void *imageData = malloc(mesh_width*mesh_height*sizeof(float)); // execute the kernel launch_kernel((float4 *)d_vbo_buffer, mesh_width, mesh_height, g_fAnim); cudaDeviceSynchronize(); getLastCudaError("launch_kernel failed"); cudaMemcpy(imageData, d_vbo_buffer, mesh_width*mesh_height*sizeof(float), cudaMemcpyDeviceToHost); sdkDumpBin2(imageData, mesh_width*mesh_height*sizeof(float), "simpleGLES_screen.bin"); reference_file = sdkFindFilePath(ref_file, argv[0]); if (reference_file && !sdkCompareBin2BinFloat("simpleGLES_screen.bin", reference_file, mesh_width*mesh_height*sizeof(float), MAX_EPSILON_ERROR, THRESHOLD, pArgv[0])) { g_TotalErrors++; } } //////////////////////////////////////////////////////////////////////////////// //! Display callback //////////////////////////////////////////////////////////////////////////////// void display_thisframe(float time_delta) { sdkStartTimer(&timer); // run CUDA kernel to generate vertex positions runCuda(&cuda_vbo_resource); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height); glFinish(); g_fAnim += time_delta; sdkStopTimer(&timer); computeFPS(); } //////////////////////////////////////////////////////////////////////////////// //! Check if the result is correct or write data to file for external //! regression testing //////////////////////////////////////////////////////////////////////////////// void checkResultCuda(int argc, char **argv, const GLuint &vbo) { if (!d_vbo_buffer) { printf("%s: Mapping result buffer from OpenGL ES\n", __FUNCTION__); cudaGraphicsUnregisterResource(cuda_vbo_resource); // map buffer object glBindBuffer(GL_ARRAY_BUFFER, vbo); float *data = (float *) glMapBufferRange(GL_ARRAY_BUFFER, 0, mesh_width * mesh_height * 4 * sizeof(float), GL_READ_ONLY); // check result if (checkCmdLineFlag(argc, (const char **) argv, "regression")) { // write file for regression test sdkWriteFile<float>("./data/regression.dat", data, mesh_width * mesh_height * 3, 0.0, false); } // unmap GL buffer object if (!glUnmapBuffer(GL_ARRAY_BUFFER)) { fprintf(stderr, "Unmap buffer failed.\n"); fflush(stderr); } checkCudaErrors(cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo, cudaGraphicsMapFlagsWriteDiscard)); CHECK_GLERROR(); } } GLuint mesh_shader = 0; void readAndCompileShaderFromGLSLFile(GLuint new_shaderprogram, const char *filename, GLenum shaderType) { FILE *file = fopen(filename,"rb"); // open shader text file if (!file) { error_exit("Filename %s does not exist\n", filename); } // get the size of the file and read it fseek(file,0,SEEK_END); GLint size = ftell(file); char *data = (char*)malloc(sizeof(char)*(size + 1)); memset(data, 0, sizeof(char)*(size + 1)); fseek(file,0,SEEK_SET); size_t res = fread(data,1,size,file); fclose(file); GLuint shader = glCreateShader(shaderType); glShaderSource(shader, 1, (const GLchar**)&data, &size); glCompileShader(shader); CHECK_GLERROR(); GLint compile_success = 0; glGetShaderiv(shader, GL_COMPILE_STATUS, &compile_success); CHECK_GLERROR(); if (compile_success == GL_FALSE) { printf("Compilation of %s failed!\n Reason:\n", filename); GLint maxLength = 0; glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &maxLength); char errorLog[maxLength]; glGetShaderInfoLog(shader, maxLength, &maxLength, &errorLog[0]); printf("%s", errorLog); glDeleteShader(shader); exit(1); 
} glAttachShader(new_shaderprogram, shader); glDeleteShader(shader); free(data); } GLuint ShaderCreate(const char *vshader_filename, const char *fshader_filename) { printf("Loading GLSL shaders %s %s\n", vshader_filename, fshader_filename); GLuint new_shaderprogram = glCreateProgram(); CHECK_GLERROR(); if (vshader_filename) { readAndCompileShaderFromGLSLFile(new_shaderprogram, vshader_filename, GL_VERTEX_SHADER); } CHECK_GLERROR(); if (fshader_filename) { readAndCompileShaderFromGLSLFile(new_shaderprogram, fshader_filename, GL_FRAGMENT_SHADER); } CHECK_GLERROR(); glLinkProgram(new_shaderprogram); CHECK_GLERROR(); GLint link_success; glGetProgramiv(new_shaderprogram, GL_LINK_STATUS, &link_success); if (link_success == GL_FALSE) { printf("Linking of %s with %s failed!\n Reason:\n", vshader_filename, fshader_filename); GLint maxLength = 0; glGetShaderiv(new_shaderprogram, GL_INFO_LOG_LENGTH, &maxLength); char errorLog[maxLength]; glGetShaderInfoLog(new_shaderprogram, maxLength, &maxLength, &errorLog[0]); printf("%s", errorLog); exit(EXIT_FAILURE); } return new_shaderprogram; } //=========================================================================== // InitGraphicsState() - initialize OpenGL //=========================================================================== static void InitGraphicsState(void) { char *GL_version=(char *)glGetString(GL_VERSION); char *GL_vendor=(char *)glGetString(GL_VENDOR); char *GL_renderer=(char *)glGetString(GL_RENDERER); printf("Version: %s\n", GL_version); printf("Vendor: %s\n", GL_vendor); printf("Renderer: %s\n", GL_renderer); // RENDERING SETUP (OpenGL ES or OpenGL Core Profile!) glGenVertexArrays(1, &mesh_vao); // Features' Vertex Array Object allocation glBindVertexArray(mesh_vao); // bind VAO // initialize buffer object glGenBuffers(1, &mesh_vbo); glBindBuffer(GL_ARRAY_BUFFER, mesh_vbo); unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW); glVertexAttribPointer((GLuint)0, 4, GL_FLOAT, GL_FALSE, 0, 0); glEnableVertexAttribArray(0); cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, mesh_vbo, cudaGraphicsMapFlagsNone); checkCUDAError(); // GLSL stuff char *vertex_shader_path = sdkFindFilePath("mesh.vert.glsl", pArgv[0]); char *fragment_shader_path = sdkFindFilePath("mesh.frag.glsl", pArgv[0]); if (vertex_shader_path == NULL || fragment_shader_path == NULL) { printf("Error finding shader file\n"); exit(EXIT_FAILURE); } mesh_shader = ShaderCreate(vertex_shader_path, fragment_shader_path); CHECK_GLERROR(); free(vertex_shader_path); free(fragment_shader_path); glUseProgram(mesh_shader); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// bool runTest(int argc, char **argv, char *ref_file) { // command line mode only if (ref_file != NULL) { // This will pick the best possible CUDA capable device int devID = findCudaDevice(argc, (const char **)argv); // create VBO checkCudaErrors(cudaMalloc((void **)&d_vbo_buffer, mesh_width*mesh_height*4*sizeof(float))); // run the cuda part runAutoTest(devID, argv, ref_file); // check result of Cuda step checkResultCuda(argc, argv, mesh_vbo); cudaFree(d_vbo_buffer); d_vbo_buffer = NULL; } else { double endTime = TIME_LIMIT; // this would use command-line specified CUDA device, note that CUDA defaults to highest Gflops/s device if (checkCmdLineFlag(argc, (const char **)argv, "device")) { error_exit("Device setting not yet implemented!\n"); } // display selection if (checkCmdLineFlag(argc, (const char **)argv, "dispno")) { dispno = getCmdLineArgumentInt(argc, (const char **)argv, "dispno"); } // Window width if (checkCmdLineFlag(argc, (const char **)argv, "width")) { window_width = getCmdLineArgumentInt(argc, (const char **)argv, "width"); } // Window Height if (checkCmdLineFlag(argc, (const char **)argv, "height")) { window_height = getCmdLineArgumentInt(argc, (const char **)argv, "height"); } // Determine how long to run for in secs: default is 10s if (checkCmdLineFlag(argc, (const char **)argv, "runtime")) { endTime = getCmdLineArgumentInt(argc, (const char **)argv, "runtime"); } SetCloseCB(closeCB_app); SetKeyCB(keyCB_app); // create QNX screen window and set up associated OpenGL ES context graphics_setup_window(0,0, window_width, window_height, sSDKsample, dispno); InitGraphicsState(); // set up GLES stuff glClearColor( 0, 0.5, 1, 1 ); // blue-ish background glClear( GL_COLOR_BUFFER_BIT ); graphics_swap_buffers(); int frame = 0; struct timeval begin, end; gettimeofday(&begin, NULL); // Print runtime if (endTime < 0.0) { endTime = TIME_LIMIT; printf(" running forever...\n"); } else { printf(" running for %f seconds...\n", endTime); } while (!shutdown) { frame++; display_thisframe(0.010); usleep(1000); graphics_swap_buffers(); CheckEvents(); gettimeofday(&end, 0); double elapsed = (end.tv_sec - begin.tv_sec) + ((end.tv_usec - begin.tv_usec)/1000000.0); // Check whether time limit has been exceeded if (!shutdown) shutdown = (endTime <= elapsed); } // NOTE: Before destroying OpenGL ES context, must unregister all shared resources from CUDA ! checkCudaErrors(cudaGraphicsUnregisterResource(cuda_vbo_resource)); graphics_close_window(); // close window and destroy OpenGL ES context } return true; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { char *ref_file = NULL; pArgc = &argc; pArgv = argv; #if defined(__linux__) setenv ("DISPLAY", ":0", 0); #endif printf("%s starting...\n", sSDKsample); if (argc > 1) { if (checkCmdLineFlag(argc, (const char **)argv, "file")) { // In this mode, we run without OpenGL and see if VBO is generated correctly getCmdLineArgumentString(argc, (const char **)argv, "file", (char **)&ref_file); } } printf("\n"); runTest(argc, argv, ref_file); printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!"); exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE); }
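Both variants of simpleGLES_screen follow the same interop cycle: register the VBO with the CUDA runtime once, then every frame map the resource, fetch the device pointer, launch the kernel that rewrites the vertices, and unmap before OpenGL ES draws. A trimmed sketch of that cycle, assuming a current GL context and an already-created buffer object; register_vbo, frame_update and fill_kernel are illustrative names and error checking is omitted.

#include <cuda_runtime.h>
#include <cuda_gl_interop.h>

__global__ void fill_kernel(float4* pos, unsigned int w, unsigned int h, float t) {
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < w && y < h)
    pos[y * w + x] = make_float4(0.f, t, 0.f, 1.f);   // placeholder payload
}

cudaGraphicsResource_t register_vbo(unsigned int /*GLuint*/ vbo) {
  cudaGraphicsResource_t res = nullptr;
  // One-time step: make the GL buffer visible to CUDA. WriteDiscard tells the
  // driver CUDA overwrites the whole buffer, so old contents need not be kept.
  cudaGraphicsGLRegisterBuffer(&res, vbo, cudaGraphicsMapFlagsWriteDiscard);
  return res;
}

void frame_update(cudaGraphicsResource_t res, unsigned int w, unsigned int h, float t) {
  // Per frame: map -> get device pointer -> launch -> unmap. GL must not touch
  // the buffer while it is mapped to CUDA.
  float4* dptr = nullptr;
  size_t bytes = 0;
  cudaGraphicsMapResources(1, &res, 0);
  cudaGraphicsResourceGetMappedPointer((void**)&dptr, &bytes, res);
  dim3 block(8, 8), grid(w / block.x, h / block.y);
  fill_kernel<<<grid, block>>>(dptr, w, h, t);
  cudaGraphicsUnmapResources(1, &res, 0);
}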
5d5f7be73c42990832e5fbceb59a44b867e3d6da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> hipError_t forwardPass(double *x1, double *y1, double *W1, double *x2, double *y2, double *W2, int row, int column); __global__ void vectorMultiplicationKernel(double *x, double *y, double *W, int row, int column) { int tid = blockIdx.x; if (tid >= row) { return; } double result = 0; for (int j = 0; j < column; j++) { result += W[tid * column + j] * x[j]; } for (int j = 0; j < 10000; j++) { for (int k = 0; k < 10000; k++) { result++; result--; } } y[tid] = result; } int main(int argc, char *argv[]) { int row = atoi(argv[1]); int column = atoi(argv[2]); double *W1 = (double*)malloc(row * column * sizeof(double)); double *x1 = (double*)malloc(column * sizeof(double)); double *y1 = (double*)malloc(row * sizeof(double)); double *W2 = (double*)malloc(row * column * sizeof(double)); double *x2 = (double*)malloc(column * sizeof(double)); double *y2 = (double*)malloc(row * sizeof(double)); for (int i = 0; i < column; i++) { x1[i] = 10; } for (int i = 0; i < row * column; i++) { W1[i] = 10; } for (int i = 0; i < column; i++) { x2[i] = 10; } for (int i = 0; i < row * column; i++) { W2[i] = 10; } hipError_t cudaStatus = forwardPass(x1, y1, W1, x2, y2, W2, row, column); if (cudaStatus != hipSuccess) { fprintf(stderr, "vectorMultiplicationWithCuda failed!"); return 1; } for (int i = 0; i < row; i++) { printf("%.2f ", y1[i]); } printf("\n"); for (int i = 0; i < row; i++) { printf("%.2f ", y2[i]); } printf("\n"); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } hipError_t forwardPass(double *x1, double *y1, double *W1, double *x2, double *y2, double *W2, int row, int column) { double *dev_x1 = 0; double *dev_y1 = 0; double *dev_W1 = 0; double *dev_x2 = 0; double *dev_y2 = 0; double *dev_W2 = 0; hipError_t cudaStatus; // Allocate GPU buffers for three vectors (two input, one output) . hipMalloc((void**)&dev_x1, column * sizeof(double)); hipMalloc((void**)&dev_y1, row * sizeof(double)); hipMalloc((void**)&dev_W1, row * column * sizeof(double)); hipMemcpy(dev_x1, x1, column * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_W1, W1, row * column * sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**)&dev_x2, column * sizeof(double)); hipMalloc((void**)&dev_y2, row * sizeof(double)); hipMalloc((void**)&dev_W2, row * column * sizeof(double)); hipMemcpy(dev_x2, x2, column * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_W2, W2, row * column * sizeof(double), hipMemcpyHostToDevice); //Launch a kernel on the GPU with one thread for each element. hipStream_t s1; hipStream_t s2; hipStreamCreate(&s1); hipLaunchKernelGGL(( vectorMultiplicationKernel), dim3(row), dim3(1), 0, s1, dev_x1, dev_y1, dev_W1, row, column); hipStreamCreate(&s2); hipLaunchKernelGGL(( vectorMultiplicationKernel), dim3(row), dim3(1), 0, s2, dev_x2, dev_y2, dev_W2, row, column); // vectorMultiplicationKernel<<<row, 1>>>(dev_x1, dev_y1, dev_W1, row, column); // vectorMultiplicationKernel<<<row, 1>>>(dev_x2, dev_y2, dev_W2, row, column); // Copy output vector from GPU buffer to host memory. 
cudaStatus = hipMemcpy(y1, dev_y1, row * sizeof(double), hipMemcpyDeviceToHost); cudaStatus = hipMemcpy(y2, dev_y2, row * sizeof(double), hipMemcpyDeviceToHost); hipFree(dev_x1); hipFree(dev_y1); hipFree(dev_W1); hipFree(dev_x2); hipFree(dev_y2); hipFree(dev_W2); return cudaStatus; }
5d5f7be73c42990832e5fbceb59a44b867e3d6da.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> cudaError_t forwardPass(double *x1, double *y1, double *W1, double *x2, double *y2, double *W2, int row, int column); __global__ void vectorMultiplicationKernel(double *x, double *y, double *W, int row, int column) { int tid = blockIdx.x; if (tid >= row) { return; } double result = 0; for (int j = 0; j < column; j++) { result += W[tid * column + j] * x[j]; } for (int j = 0; j < 10000; j++) { for (int k = 0; k < 10000; k++) { result++; result--; } } y[tid] = result; } int main(int argc, char *argv[]) { int row = atoi(argv[1]); int column = atoi(argv[2]); double *W1 = (double*)malloc(row * column * sizeof(double)); double *x1 = (double*)malloc(column * sizeof(double)); double *y1 = (double*)malloc(row * sizeof(double)); double *W2 = (double*)malloc(row * column * sizeof(double)); double *x2 = (double*)malloc(column * sizeof(double)); double *y2 = (double*)malloc(row * sizeof(double)); for (int i = 0; i < column; i++) { x1[i] = 10; } for (int i = 0; i < row * column; i++) { W1[i] = 10; } for (int i = 0; i < column; i++) { x2[i] = 10; } for (int i = 0; i < row * column; i++) { W2[i] = 10; } cudaError_t cudaStatus = forwardPass(x1, y1, W1, x2, y2, W2, row, column); if (cudaStatus != cudaSuccess) { fprintf(stderr, "vectorMultiplicationWithCuda failed!"); return 1; } for (int i = 0; i < row; i++) { printf("%.2f ", y1[i]); } printf("\n"); for (int i = 0; i < row; i++) { printf("%.2f ", y2[i]); } printf("\n"); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } cudaError_t forwardPass(double *x1, double *y1, double *W1, double *x2, double *y2, double *W2, int row, int column) { double *dev_x1 = 0; double *dev_y1 = 0; double *dev_W1 = 0; double *dev_x2 = 0; double *dev_y2 = 0; double *dev_W2 = 0; cudaError_t cudaStatus; // Allocate GPU buffers for three vectors (two input, one output) . cudaMalloc((void**)&dev_x1, column * sizeof(double)); cudaMalloc((void**)&dev_y1, row * sizeof(double)); cudaMalloc((void**)&dev_W1, row * column * sizeof(double)); cudaMemcpy(dev_x1, x1, column * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_W1, W1, row * column * sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**)&dev_x2, column * sizeof(double)); cudaMalloc((void**)&dev_y2, row * sizeof(double)); cudaMalloc((void**)&dev_W2, row * column * sizeof(double)); cudaMemcpy(dev_x2, x2, column * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_W2, W2, row * column * sizeof(double), cudaMemcpyHostToDevice); //Launch a kernel on the GPU with one thread for each element. cudaStream_t s1; cudaStream_t s2; cudaStreamCreate(&s1); vectorMultiplicationKernel<<<row, 1, 0, s1>>>(dev_x1, dev_y1, dev_W1, row, column); cudaStreamCreate(&s2); vectorMultiplicationKernel<<<row, 1, 0, s2>>>(dev_x2, dev_y2, dev_W2, row, column); // vectorMultiplicationKernel<<<row, 1>>>(dev_x1, dev_y1, dev_W1, row, column); // vectorMultiplicationKernel<<<row, 1>>>(dev_x2, dev_y2, dev_W2, row, column); // Copy output vector from GPU buffer to host memory. 
cudaStatus = cudaMemcpy(y1, dev_y1, row * sizeof(double), cudaMemcpyDeviceToHost); cudaStatus = cudaMemcpy(y2, dev_y2, row * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(dev_x1); cudaFree(dev_y1); cudaFree(dev_W1); cudaFree(dev_x2); cudaFree(dev_y2); cudaFree(dev_W2); return cudaStatus; }
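The point of this pair is the two-stream launch: the two matrix-vector kernels are independent, so issuing them on separate non-default streams lets them execute concurrently. A minimal sketch of that pattern, adding the per-stream synchronization and stream destruction that the example above leaves implicit; work and the launch geometry are placeholders.

#include <cuda_runtime.h>

__global__ void work(double* y, const double* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] = x[i] * 2.0;
}

void run_two_streams(double* d_y1, const double* d_x1,
                     double* d_y2, const double* d_x2, int n) {
  cudaStream_t s1, s2;
  cudaStreamCreate(&s1);
  cudaStreamCreate(&s2);

  dim3 block(256), grid((n + 255) / 256);
  work<<<grid, block, 0, s1>>>(d_y1, d_x1, n);   // queued on stream 1
  work<<<grid, block, 0, s2>>>(d_y2, d_x2, n);   // queued on stream 2, may overlap

  cudaStreamSynchronize(s1);                     // wait for each stream's work
  cudaStreamSynchronize(s2);                     // before the host reads results

  cudaStreamDestroy(s1);
  cudaStreamDestroy(s2);
}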
516e0cac12d5a7f50a6f2bf25228ce5a7d255e3b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ProjectionFan_2D.h" #include <cstdlib> // 2018/04/16 //__device__ const double PI = 3.141592653589793; __device__ const double EPS = 1e-15; __device__ const double ERR = 1e-5; __global__ void ProjectionFan(const float *dev_Pic, double *dev_Projection, const double *dev_Pdomain, const double *dev_BetaScanRange, const double Center_t, const double Center_s, const double *dev_resolution, const int t_length, const int s_length, const int Pstart, const int Betastart, const double Distance, const int LP, const int LBeta, const double *dev_RandomErr) { const unsigned short Pindex = threadIdx.x + Pstart; const unsigned short Betaindex = blockIdx.x + Betastart; const unsigned long threadid = Betaindex * LP + Pindex; dev_Projection[threadid] = 0; double P = dev_Pdomain[Pindex]; double Beta = dev_BetaScanRange[Betaindex]; double resolution_1 = dev_resolution[0]; double resolution_2 = dev_resolution[1]; // according to euler equation double source_t = Center_t + Distance * sin(-Beta); // define the source in matlab coordinate //double source_t = Center_t - Distance * sin(Beta) + dev_RandomErr[threadid]; double source_s = Center_s + Distance * cos(-Beta); double Theta = atan(P / Distance); // radian angle in s'-t coordinate plane double Smax = 2 * Distance; // define end detect point in matlab coordinate, Note that : 0 is the start double DetectPoint_tend = Center_t + Smax * sin(Theta) * cos(-Beta) + (Distance - Smax * cos(Theta)) * sin(-Beta); double DetectPoint_send = Center_s - Smax * sin(Theta) * sin(-Beta) + (Distance - Smax * cos(Theta)) * cos(-Beta); double T2S, S2T; if ((DetectPoint_tend - source_t) == 0) T2S = (DetectPoint_send - source_s) / (DetectPoint_tend - source_t + EPS); else T2S = (DetectPoint_send - source_s) / (DetectPoint_tend - source_t); if ((DetectPoint_send - source_s) == 0) S2T = (DetectPoint_tend - source_t) / (DetectPoint_send - source_s + EPS); else S2T = (DetectPoint_tend - source_t) / (DetectPoint_send - source_s); // limit the range of slope T2S = Maxlim(T2S); T2S = Minlim(T2S); S2T = Maxlim(S2T); S2T = Minlim(S2T); // to determine the range of t short t_signal = 0; if (DetectPoint_tend >= source_t) t_signal = 1; else t_signal = -1; // to determine the range of s short s_signal = 0; if (DetectPoint_send >= source_s) s_signal = 1; else s_signal = -1; // actual Size double tlow = 0, thigh = t_length*resolution_1, slow = 0, shigh = s_length*resolution_2; //compute the first and last point in the ROI // using DetectPoint_end set up projection equation double tlow_s = source_s + (tlow - source_t) * T2S; double thigh_s = source_s + (thigh - source_t) * T2S; double slow_t = source_t + (slow - source_s) * S2T; double shigh_t = source_t + (shigh - source_s) * S2T; //double *Range = new double [6]; // XYXY small-big(number) double T1 = 0, S1 = 0, T2 = 0, S2 = 0; if (tlow_s >= 0 && tlow_s <= shigh ) { T1 = tlow; S1 = tlow_s; if (thigh_s >= 0 && thigh_s <= shigh) { T2 = thigh; S2 = thigh_s; } else if (slow_t >= 0 && slow_t <= thigh) { T2 = slow_t; S2 = slow; } else if (shigh_t >= 0 && shigh_t <= thigh) { T2 = shigh_t; S2 = shigh; } } else if (thigh_s >= 0 && thigh_s <= shigh ) { T1 = thigh; S1 = thigh_s; if (slow_t >= 0 && slow_t <= thigh) { T2 = slow_t; S2 = slow; } else if (shigh_t >= 0 && shigh_t <= thigh) { T2 = shigh_t; S2 = shigh; } } else if (slow_t >= 0 && slow_t <= thigh) { T1 = slow_t; S1 = slow; if (shigh_t >= 0 && shigh_t <= thigh) { T2 = shigh_t; S2 = shigh; } } else { 
//dev_Projection[threadid] = threadid; return; } // set the start point double TStart = 0, SStart = 0; if (Distancesq(T1, S1, source_t, source_s) >= Distancesq(T2, S2, source_t, source_s)) { TStart = T2; SStart = S2; } else { TStart = T1; SStart = S1; } // adjust the order if (T2 < T1) { double c = T1; T1 = T2; T2 = c; } if (S2 < S1) { double c = S1; S1 = S2; S2 = c; } //// enter the ROI double weight = 0, Ray = 0; short GridT = 0, GridS = 0; // candidate crosspoint index in matlab(0~t_length) double GridT_s = 0, GridS_t = 0; // candidate crosspoint index in matlab(0~256) short DetectPoint_t = 0, DetectPoint_s = 0; // current pixel index in matlab pixel index in matlab(0~255) long Pointid = 0; double TCross = TStart / resolution_1, SCross = SStart / resolution_2; // current crosspoint index in matlab(0~256) //while (((XCross * dev_resolution[1]) >= Range[0]) && ((XCross * dev_resolution[1]) <= Range[2]) // && ((YCross * dev_resolution[0]) >= Range[1]) && ((YCross * dev_resolution[0]) <= Range[3])) for (short i = 0;i<(t_length + s_length - 1);i++) { // judge whether XCross/YCross is integer if (abs(TCross - round(TCross)) < EPS) { GridT = round(TCross) + t_signal; } else { GridT = floor(TCross) + flag1to1or_1to0(t_signal); } GridT_s = (source_s + (GridT * resolution_1 - source_t) * T2S) / resolution_2; if (abs(SCross - round(SCross)) < EPS) { GridS = round(SCross) + s_signal; } else { GridS = floor(SCross) + flag1to1or_1to0(s_signal); } GridS_t = (source_t + (GridS * resolution_2 - source_s) * S2T) / resolution_1; //judge which crosspoint is the nearest, means the smallest distance if (Distancesq(GridT, GridT_s, TCross, SCross) <= Distancesq(GridS_t, GridS, TCross, SCross)) { weight = sqrt(Distancesq(GridT * resolution_1, GridT_s * resolution_2, TCross * resolution_1, SCross * resolution_2)); DetectPoint_t = floor(MID(GridT, TCross)); // the midpoint locates the pixel DetectPoint_s = floor(MID(GridT_s, SCross)); TCross = GridT; // update SCross = GridT_s; } else { weight = sqrt(Distancesq(GridS_t * resolution_1, GridS * resolution_2, TCross * resolution_1, SCross * resolution_2)); DetectPoint_t = floor(MID(GridS_t, TCross)); // the midpoint locates the pixel DetectPoint_s = floor(MID(GridS, SCross)); TCross = GridS_t; // update SCross = GridS; } //judge whether the point is in the ROI if ((DetectPoint_t >= 0) && (DetectPoint_t < t_length) && (DetectPoint_s >= 0) && (DetectPoint_s < s_length)) { Pointid = DetectPoint_s * t_length + DetectPoint_t; Ray += weight * dev_Pic[Pointid]; } else { //dev_Projection[threadid] = 9000; break; } } __syncthreads(); dev_Projection[threadid] = Ray; } // Helser function for using CUDA to add vectors in parallel. 
hipError_t ProjectionFan_2D(const float *Pic, double *Projection, const double *BetaScanRange, const double *Pdomain, const int t_length, const int s_length, const double Center_t, const double Center_s, const int LBeta, const int LP, const double Distance, const double *resolution) { mexPrintf("Hello GenMatParalell!\n"); float *dev_Pic = 0; double *dev_Pdomain = 0, *dev_BetaScanRange = 0, *dev_Projection = 0, *dev_RandomErr = 0; double *dev_resolution = 0; int threadcubic_x = MIN(threadX, LP); int blockcubic_x = MIN(blockX, LBeta); int LPResidual = LP % threadX; int LBetaResidual = LBeta % blockX; int PTime = LP / threadX; int BetaTime = LBeta / blockX; int Pstart = 0; int Betastart = 0; double Beta = 0; const dim3 thread_cubic(threadcubic_x, 1, 1); const dim3 block_cubic(blockcubic_x, 1, 1); dim3 thread_cubic_residual(1, 1, 1); // initial dim3 block_cubic_residual(1, 1, 1); // initial mexPrintf("threadcubic_x: %d blockcubic_x: %d LPResidual: %d LBetaResidual: %d\n", threadcubic_x, blockcubic_x, LPResidual, LBetaResidual); if (LPResidual != 0) { thread_cubic_residual.x = LPResidual; } if (LBetaResidual != 0) { block_cubic_residual.x = LBetaResidual; } hipError_t cudaStatus; double *RandomErr =new double[LBeta * LP]; for (int beta = 0; beta < LBeta; beta++) { for (int P = 0; P < LP; P++) { RandomErr[beta * LP + P] = 0.01 * (rand()/double(RAND_MAX)*2-1); } } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); mexPrintf("hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n"); goto Error; } mexPrintf("Call for GPU space.\n"); // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&dev_Pic, t_length * s_length * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "dev_Pic hipMalloc failed!"); mexPrintf("dev_Pic hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&dev_Projection, LBeta * LP * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "dev_Projection hipMalloc failed!"); mexPrintf("dev_Projection hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&dev_RandomErr, LBeta * LP * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "dev_Projection hipMalloc failed!"); mexPrintf("dev_Projection hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&dev_Pdomain, LP * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "dev_t_Range hipMalloc failed!"); mexPrintf("dev_t_Range hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&dev_BetaScanRange, LBeta * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "dev_thetaRange hipMalloc failed!"); mexPrintf("dev_thetaRange hipMalloc failed!\n"); goto Error; } cudaStatus = hipMalloc((void**)&dev_resolution, 2 * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "dev_resolution hipMalloc failed!"); mexPrintf("dev_resolution hipMalloc failed!\n"); goto Error; } mexPrintf("Copy data from CPU to GPU.\n"); // Copy input vectors from host memory to GPU buffers. 
cudaStatus = hipMemcpy(dev_Pic, Pic, t_length * s_length * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "thetaRange hipMemcpy failed!"); mexPrintf("thetaRange hipMemcpy failed!\n"); goto Error; } cudaStatus = hipMemcpy(dev_Pdomain, Pdomain, LP * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "t_Range hipMemcpy failed!"); mexPrintf("t_Range hipMemcpy failed!\n"); goto Error; } cudaStatus = hipMemcpy(dev_BetaScanRange, BetaScanRange, LBeta * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "thetaRange hipMemcpy failed!"); mexPrintf("thetaRange hipMemcpy failed!\n"); goto Error; } cudaStatus = hipMemcpy(dev_resolution, resolution, 2 * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "dev_resolution hipMemcpy failed!"); mexPrintf("dev_resolution hipMemcpy failed!\n"); goto Error; } cudaStatus = hipMemcpy(dev_RandomErr, RandomErr, LBeta * LP * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "dev_RandomErr hipMemcpy failed!"); mexPrintf("dev_RandomErr hipMemcpy failed!\n"); goto Error; } mexPrintf("Launch computation projection of each lines.\n"); // Launch a kernel on the GPU with one thread for each element. for (int numP = 0; numP < PTime; numP++) { for (int numB = 0; numB < BetaTime; numB++) { Pstart = numP * threadX; Betastart = numB * blockX; //mexPrintf("%d %d\n", Pstart, Betastart); ProjectionFan << <block_cubic, thread_cubic >> >(dev_Pic, dev_Projection, dev_Pdomain, dev_BetaScanRange, Center_t, Center_s, dev_resolution, t_length, s_length, Pstart, Betastart, Distance, LP, LBeta, dev_RandomErr); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "ProjectionFan launch failed: %s\n", hipGetErrorString(cudaStatus)); mexPrintf("ProjectionFan launch failed %s\n", hipGetErrorString(cudaStatus)); mexPrintf("Error happens at Pstart: %d Betastart: %d \n", Pstart, Betastart); goto Error; } } } if (LPResidual != 0) { Pstart = LP - LPResidual; if (LBetaResidual != 0) { Betastart = LBeta - LBetaResidual; //("%d %d\n", Pstart, Betastart); ProjectionFan << <block_cubic_residual, thread_cubic_residual >> >(dev_Pic, dev_Projection, dev_Pdomain, dev_BetaScanRange, Center_t, Center_s, dev_resolution, t_length, s_length, Pstart, Betastart, Distance, LP, LBeta, dev_RandomErr); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "ProjectionFan launch failed: %s\n", hipGetErrorString(cudaStatus)); mexPrintf("ProjectionFan launch failed %s\n", hipGetErrorString(cudaStatus)); mexPrintf("Error happens at Pstart: %d Betastart: %d \n", Pstart, Betastart); goto Error; } } for (int numB = 0; numB < BetaTime; numB++) { Betastart = numB * blockX; //("%d %d\n", Pstart, Betastart); ProjectionFan << <block_cubic, thread_cubic_residual >> >(dev_Pic, dev_Projection, dev_Pdomain, dev_BetaScanRange, Center_t, Center_s, dev_resolution, t_length, s_length, Pstart, Betastart, Distance, LP, LBeta, dev_RandomErr); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "ProjectionFan launch failed: %s\n", hipGetErrorString(cudaStatus)); mexPrintf("ProjectionFan launch failed %s\n", hipGetErrorString(cudaStatus)); mexPrintf("Error happens at Pstart: %d Betastart: %d \n", Pstart, Betastart); goto Error; } } } if 
(LBetaResidual != 0) { Betastart = LBeta - LBetaResidual; for (int numP = 0; numP < PTime; numP++) { Pstart = numP * threadX; //mexPrintf("%d %d\n", Pstart, Betastart); ProjectionFan << <block_cubic_residual, thread_cubic >> >(dev_Pic, dev_Projection, dev_Pdomain, dev_BetaScanRange, Center_t, Center_s, dev_resolution, t_length, s_length, Pstart, Betastart, Distance, LP, LBeta, dev_RandomErr); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "ProjectionFan launch failed: %s\n", hipGetErrorString(cudaStatus)); mexPrintf("ProjectionFan launch failed %s\n", hipGetErrorString(cudaStatus)); mexPrintf("Error happens at Pstart: %d Betastart: %d \n", Pstart, Betastart); goto Error; } } } // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "Projection launch failed: %s\n", hipGetErrorString(cudaStatus)); mexPrintf("Projection launch failed\n"); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); mexPrintf("hipDeviceSynchronize returned error %s\n", hipGetErrorString(cudaStatus)); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(Projection, dev_Projection, LP * LBeta * sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!\n"); mexPrintf("hipMemcpy failed! %s\n", hipGetErrorString(cudaStatus)); goto Error; } Error: hipFree(dev_Pdomain); hipFree(dev_BetaScanRange); hipFree(dev_Projection); hipFree(dev_Pic); hipFree(dev_resolution); hipFree(dev_RandomErr); return cudaStatus; }
516e0cac12d5a7f50a6f2bf25228ce5a7d255e3b.cu
#include "ProjectionFan_2D.h" #include <cstdlib> // 2018/04/16 //__device__ const double PI = 3.141592653589793; __device__ const double EPS = 1e-15; __device__ const double ERR = 1e-5; __global__ void ProjectionFan(const float *dev_Pic, double *dev_Projection, const double *dev_Pdomain, const double *dev_BetaScanRange, const double Center_t, const double Center_s, const double *dev_resolution, const int t_length, const int s_length, const int Pstart, const int Betastart, const double Distance, const int LP, const int LBeta, const double *dev_RandomErr) { const unsigned short Pindex = threadIdx.x + Pstart; const unsigned short Betaindex = blockIdx.x + Betastart; const unsigned long threadid = Betaindex * LP + Pindex; dev_Projection[threadid] = 0; double P = dev_Pdomain[Pindex]; double Beta = dev_BetaScanRange[Betaindex]; double resolution_1 = dev_resolution[0]; double resolution_2 = dev_resolution[1]; // according to euler equation double source_t = Center_t + Distance * sin(-Beta); // define the source in matlab coordinate //double source_t = Center_t - Distance * sin(Beta) + dev_RandomErr[threadid]; double source_s = Center_s + Distance * cos(-Beta); double Theta = atan(P / Distance); // radian angle in s'-t coordinate plane double Smax = 2 * Distance; // define end detect point in matlab coordinate, Note that : 0 is the start double DetectPoint_tend = Center_t + Smax * sin(Theta) * cos(-Beta) + (Distance - Smax * cos(Theta)) * sin(-Beta); double DetectPoint_send = Center_s - Smax * sin(Theta) * sin(-Beta) + (Distance - Smax * cos(Theta)) * cos(-Beta); double T2S, S2T; if ((DetectPoint_tend - source_t) == 0) T2S = (DetectPoint_send - source_s) / (DetectPoint_tend - source_t + EPS); else T2S = (DetectPoint_send - source_s) / (DetectPoint_tend - source_t); if ((DetectPoint_send - source_s) == 0) S2T = (DetectPoint_tend - source_t) / (DetectPoint_send - source_s + EPS); else S2T = (DetectPoint_tend - source_t) / (DetectPoint_send - source_s); // limit the range of slope T2S = Maxlim(T2S); T2S = Minlim(T2S); S2T = Maxlim(S2T); S2T = Minlim(S2T); // to determine the range of t short t_signal = 0; if (DetectPoint_tend >= source_t) t_signal = 1; else t_signal = -1; // to determine the range of s short s_signal = 0; if (DetectPoint_send >= source_s) s_signal = 1; else s_signal = -1; // actual Size double tlow = 0, thigh = t_length*resolution_1, slow = 0, shigh = s_length*resolution_2; //compute the first and last point in the ROI // using DetectPoint_end set up projection equation double tlow_s = source_s + (tlow - source_t) * T2S; double thigh_s = source_s + (thigh - source_t) * T2S; double slow_t = source_t + (slow - source_s) * S2T; double shigh_t = source_t + (shigh - source_s) * S2T; //double *Range = new double [6]; // XYXY small-big(number) double T1 = 0, S1 = 0, T2 = 0, S2 = 0; if (tlow_s >= 0 && tlow_s <= shigh ) { T1 = tlow; S1 = tlow_s; if (thigh_s >= 0 && thigh_s <= shigh) { T2 = thigh; S2 = thigh_s; } else if (slow_t >= 0 && slow_t <= thigh) { T2 = slow_t; S2 = slow; } else if (shigh_t >= 0 && shigh_t <= thigh) { T2 = shigh_t; S2 = shigh; } } else if (thigh_s >= 0 && thigh_s <= shigh ) { T1 = thigh; S1 = thigh_s; if (slow_t >= 0 && slow_t <= thigh) { T2 = slow_t; S2 = slow; } else if (shigh_t >= 0 && shigh_t <= thigh) { T2 = shigh_t; S2 = shigh; } } else if (slow_t >= 0 && slow_t <= thigh) { T1 = slow_t; S1 = slow; if (shigh_t >= 0 && shigh_t <= thigh) { T2 = shigh_t; S2 = shigh; } } else { //dev_Projection[threadid] = threadid; return; } // set the start point double TStart = 0, 
SStart = 0; if (Distancesq(T1, S1, source_t, source_s) >= Distancesq(T2, S2, source_t, source_s)) { TStart = T2; SStart = S2; } else { TStart = T1; SStart = S1; } // adjust the order if (T2 < T1) { double c = T1; T1 = T2; T2 = c; } if (S2 < S1) { double c = S1; S1 = S2; S2 = c; } //// enter the ROI double weight = 0, Ray = 0; short GridT = 0, GridS = 0; // candidate crosspoint index in matlab(0~t_length) double GridT_s = 0, GridS_t = 0; // candidate crosspoint index in matlab(0~256) short DetectPoint_t = 0, DetectPoint_s = 0; // current pixel index in matlab pixel index in matlab(0~255) long Pointid = 0; double TCross = TStart / resolution_1, SCross = SStart / resolution_2; // current crosspoint index in matlab(0~256) //while (((XCross * dev_resolution[1]) >= Range[0]) && ((XCross * dev_resolution[1]) <= Range[2]) // && ((YCross * dev_resolution[0]) >= Range[1]) && ((YCross * dev_resolution[0]) <= Range[3])) for (short i = 0;i<(t_length + s_length - 1);i++) { // judge whether XCross/YCross is integer if (abs(TCross - round(TCross)) < EPS) { GridT = round(TCross) + t_signal; } else { GridT = floor(TCross) + flag1to1or_1to0(t_signal); } GridT_s = (source_s + (GridT * resolution_1 - source_t) * T2S) / resolution_2; if (abs(SCross - round(SCross)) < EPS) { GridS = round(SCross) + s_signal; } else { GridS = floor(SCross) + flag1to1or_1to0(s_signal); } GridS_t = (source_t + (GridS * resolution_2 - source_s) * S2T) / resolution_1; //judge which crosspoint is the nearest, means the smallest distance if (Distancesq(GridT, GridT_s, TCross, SCross) <= Distancesq(GridS_t, GridS, TCross, SCross)) { weight = sqrt(Distancesq(GridT * resolution_1, GridT_s * resolution_2, TCross * resolution_1, SCross * resolution_2)); DetectPoint_t = floor(MID(GridT, TCross)); // the midpoint locates the pixel DetectPoint_s = floor(MID(GridT_s, SCross)); TCross = GridT; // update SCross = GridT_s; } else { weight = sqrt(Distancesq(GridS_t * resolution_1, GridS * resolution_2, TCross * resolution_1, SCross * resolution_2)); DetectPoint_t = floor(MID(GridS_t, TCross)); // the midpoint locates the pixel DetectPoint_s = floor(MID(GridS, SCross)); TCross = GridS_t; // update SCross = GridS; } //judge whether the point is in the ROI if ((DetectPoint_t >= 0) && (DetectPoint_t < t_length) && (DetectPoint_s >= 0) && (DetectPoint_s < s_length)) { Pointid = DetectPoint_s * t_length + DetectPoint_t; Ray += weight * dev_Pic[Pointid]; } else { //dev_Projection[threadid] = 9000; break; } } __syncthreads(); dev_Projection[threadid] = Ray; } // Helser function for using CUDA to add vectors in parallel. 
cudaError_t ProjectionFan_2D(const float *Pic, double *Projection, const double *BetaScanRange, const double *Pdomain, const int t_length, const int s_length, const double Center_t, const double Center_s, const int LBeta, const int LP, const double Distance, const double *resolution) { mexPrintf("Hello GenMatParalell!\n"); float *dev_Pic = 0; double *dev_Pdomain = 0, *dev_BetaScanRange = 0, *dev_Projection = 0, *dev_RandomErr = 0; double *dev_resolution = 0; int threadcubic_x = MIN(threadX, LP); int blockcubic_x = MIN(blockX, LBeta); int LPResidual = LP % threadX; int LBetaResidual = LBeta % blockX; int PTime = LP / threadX; int BetaTime = LBeta / blockX; int Pstart = 0; int Betastart = 0; double Beta = 0; const dim3 thread_cubic(threadcubic_x, 1, 1); const dim3 block_cubic(blockcubic_x, 1, 1); dim3 thread_cubic_residual(1, 1, 1); // initial dim3 block_cubic_residual(1, 1, 1); // initial mexPrintf("threadcubic_x: %d blockcubic_x: %d LPResidual: %d LBetaResidual: %d\n", threadcubic_x, blockcubic_x, LPResidual, LBetaResidual); if (LPResidual != 0) { thread_cubic_residual.x = LPResidual; } if (LBetaResidual != 0) { block_cubic_residual.x = LBetaResidual; } cudaError_t cudaStatus; double *RandomErr =new double[LBeta * LP]; for (int beta = 0; beta < LBeta; beta++) { for (int P = 0; P < LP; P++) { RandomErr[beta * LP + P] = 0.01 * (rand()/double(RAND_MAX)*2-1); } } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); mexPrintf("cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n"); goto Error; } mexPrintf("Call for GPU space.\n"); // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = cudaMalloc((void**)&dev_Pic, t_length * s_length * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "dev_Pic cudaMalloc failed!"); mexPrintf("dev_Pic cudaMalloc failed!\n"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_Projection, LBeta * LP * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "dev_Projection cudaMalloc failed!"); mexPrintf("dev_Projection cudaMalloc failed!\n"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_RandomErr, LBeta * LP * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "dev_Projection cudaMalloc failed!"); mexPrintf("dev_Projection cudaMalloc failed!\n"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_Pdomain, LP * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "dev_t_Range cudaMalloc failed!"); mexPrintf("dev_t_Range cudaMalloc failed!\n"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_BetaScanRange, LBeta * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "dev_thetaRange cudaMalloc failed!"); mexPrintf("dev_thetaRange cudaMalloc failed!\n"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_resolution, 2 * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "dev_resolution cudaMalloc failed!"); mexPrintf("dev_resolution cudaMalloc failed!\n"); goto Error; } mexPrintf("Copy data from CPU to GPU.\n"); // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(dev_Pic, Pic, t_length * s_length * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "thetaRange cudaMemcpy failed!"); mexPrintf("thetaRange cudaMemcpy failed!\n"); goto Error; } cudaStatus = cudaMemcpy(dev_Pdomain, Pdomain, LP * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "t_Range cudaMemcpy failed!"); mexPrintf("t_Range cudaMemcpy failed!\n"); goto Error; } cudaStatus = cudaMemcpy(dev_BetaScanRange, BetaScanRange, LBeta * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "thetaRange cudaMemcpy failed!"); mexPrintf("thetaRange cudaMemcpy failed!\n"); goto Error; } cudaStatus = cudaMemcpy(dev_resolution, resolution, 2 * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "dev_resolution cudaMemcpy failed!"); mexPrintf("dev_resolution cudaMemcpy failed!\n"); goto Error; } cudaStatus = cudaMemcpy(dev_RandomErr, RandomErr, LBeta * LP * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "dev_RandomErr cudaMemcpy failed!"); mexPrintf("dev_RandomErr cudaMemcpy failed!\n"); goto Error; } mexPrintf("Launch computation projection of each lines.\n"); // Launch a kernel on the GPU with one thread for each element. for (int numP = 0; numP < PTime; numP++) { for (int numB = 0; numB < BetaTime; numB++) { Pstart = numP * threadX; Betastart = numB * blockX; //mexPrintf("%d %d\n", Pstart, Betastart); ProjectionFan << <block_cubic, thread_cubic >> >(dev_Pic, dev_Projection, dev_Pdomain, dev_BetaScanRange, Center_t, Center_s, dev_resolution, t_length, s_length, Pstart, Betastart, Distance, LP, LBeta, dev_RandomErr); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "ProjectionFan launch failed: %s\n", cudaGetErrorString(cudaStatus)); mexPrintf("ProjectionFan launch failed %s\n", cudaGetErrorString(cudaStatus)); mexPrintf("Error happens at Pstart: %d Betastart: %d \n", Pstart, Betastart); goto Error; } } } if (LPResidual != 0) { Pstart = LP - LPResidual; if (LBetaResidual != 0) { Betastart = LBeta - LBetaResidual; //("%d %d\n", Pstart, Betastart); ProjectionFan << <block_cubic_residual, thread_cubic_residual >> >(dev_Pic, dev_Projection, dev_Pdomain, dev_BetaScanRange, Center_t, Center_s, dev_resolution, t_length, s_length, Pstart, Betastart, Distance, LP, LBeta, dev_RandomErr); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "ProjectionFan launch failed: %s\n", cudaGetErrorString(cudaStatus)); mexPrintf("ProjectionFan launch failed %s\n", cudaGetErrorString(cudaStatus)); mexPrintf("Error happens at Pstart: %d Betastart: %d \n", Pstart, Betastart); goto Error; } } for (int numB = 0; numB < BetaTime; numB++) { Betastart = numB * blockX; //("%d %d\n", Pstart, Betastart); ProjectionFan << <block_cubic, thread_cubic_residual >> >(dev_Pic, dev_Projection, dev_Pdomain, dev_BetaScanRange, Center_t, Center_s, dev_resolution, t_length, s_length, Pstart, Betastart, Distance, LP, LBeta, dev_RandomErr); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "ProjectionFan launch failed: %s\n", cudaGetErrorString(cudaStatus)); mexPrintf("ProjectionFan launch failed %s\n", cudaGetErrorString(cudaStatus)); mexPrintf("Error happens at Pstart: %d Betastart: %d \n", Pstart, 
Betastart); goto Error; } } } if (LBetaResidual != 0) { Betastart = LBeta - LBetaResidual; for (int numP = 0; numP < PTime; numP++) { Pstart = numP * threadX; //mexPrintf("%d %d\n", Pstart, Betastart); ProjectionFan << <block_cubic_residual, thread_cubic >> >(dev_Pic, dev_Projection, dev_Pdomain, dev_BetaScanRange, Center_t, Center_s, dev_resolution, t_length, s_length, Pstart, Betastart, Distance, LP, LBeta, dev_RandomErr); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "ProjectionFan launch failed: %s\n", cudaGetErrorString(cudaStatus)); mexPrintf("ProjectionFan launch failed %s\n", cudaGetErrorString(cudaStatus)); mexPrintf("Error happens at Pstart: %d Betastart: %d \n", Pstart, Betastart); goto Error; } } } // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Projection launch failed: %s\n", cudaGetErrorString(cudaStatus)); mexPrintf("Projection launch failed\n"); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); mexPrintf("cudaDeviceSynchronize returned error %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(Projection, dev_Projection, LP * LBeta * sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!\n"); mexPrintf("cudaMemcpy failed! %s\n", cudaGetErrorString(cudaStatus)); goto Error; } Error: cudaFree(dev_Pdomain); cudaFree(dev_BetaScanRange); cudaFree(dev_Projection); cudaFree(dev_Pic); cudaFree(dev_resolution); cudaFree(dev_RandomErr); return cudaStatus; }
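// Illustrative sketch, not part of the original 516e0c... .cu file: the host
// wrapper above repeats the same "if (cudaStatus != cudaSuccess) { fprintf;
// mexPrintf; goto Error; }" block after every runtime call. Assuming a plain
// CUDA translation unit (the helper name checkCuda, the standalone main, and
// the 256x256 buffer size are illustrative assumptions), that boilerplate
// could be condensed as follows.
#include <cstdio>
#include <cuda_runtime.h>

// Returns true on success; otherwise prints which call failed and why.
static bool checkCuda(cudaError_t status, const char* what)
{
    if (status != cudaSuccess) {
        std::fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(status));
        return false;
    }
    return true;
}

int main()
{
    float* dev_Pic = nullptr;
    // Same allocation pattern used for dev_Pic in ProjectionFan_2D, condensed:
    if (!checkCuda(cudaMalloc(reinterpret_cast<void**>(&dev_Pic), 256 * 256 * sizeof(float)),
                   "cudaMalloc(dev_Pic)")) {
        return 1;
    }
    checkCuda(cudaFree(dev_Pic), "cudaFree(dev_Pic)");
    return 0;
}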
0df72ac895d2d07a34cde4565d5322b61496925c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ATen/ATen.h" #include "ATen/hip/HIPContext.h" #include "ATen/hip/HIPApplyUtils.cuh" #include <vector> #include "common.h" #include "device_tensor.h" namespace { template <typename DType, typename Acctype, typename DeviceTensor3> struct GradOp { __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) : mean(m), input(i), gradOutput(g) {} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = gradOutput[batch][plane][n]; DType c = ScalarConverter<Acctype, DType>::to(input[batch][plane][n] - mean); return Float2<DType, Acctype>(g, g * c); } const Acctype mean; const DeviceTensor3 input; const DeviceTensor3 gradOutput; }; template <typename DType, typename Acctype> struct SumOp { __device__ SumOp(DeviceTensor<DType, 3> i) : input(i){} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = input[batch][plane][n]; return Float2<DType, Acctype>(g, g * g); } DType mean; DeviceTensor<DType, 3> input; }; // Sum across (batch, x/y/z) applying Op() pointwise template<typename T, typename Op, typename DeviceTensor3> __device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { T sum = (T)0; for (int batch = 0; batch < tensor.getSize(0); ++batch) { for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } template <typename DType> __global__ void BatchNorm_Forward_kernel ( DeviceTensor<DType, 3> output, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta) { int c = blockIdx.x; /* main operation */ for (int b = 0; b < input.getSize(0); ++b) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { DType inp = input[b][c][x]; output[b][c][x] = gamma[c] * (inp - mean[c]) / std[c] + beta[c]; } } } template <typename DType> __global__ void BatchNorm_Backward_kernel ( DeviceTensor<DType, 3> gradoutput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 3> gradinput, DeviceTensor<DType, 1> gradgamma, DeviceTensor<DType, 1> gradbeta, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta, DeviceTensor<DType, 1> gradMean, DeviceTensor<DType, 1> gradStd, bool train) { /* declarations of the variables */ /* Get the index and channels */ int c = blockIdx.x; /* main operation */ GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput); Float2<DType, DType> res = reduce<Float2<DType, DType>, GradOp<DType, DType, DeviceTensor<DType, 3>>, DeviceTensor<DType, 3>>(g, gradoutput, c); DType gradOutputSum = res.v1; DType dotP = res.v2; DType invstd = DType(1.0) / std[c]; DType gradScale = invstd * gamma[c]; if (train && threadIdx.x == 0) { gradMean[c] = - gradOutputSum * gamma[c] * invstd; gradStd[c] = - 
dotP * gamma[c] * invstd * invstd; } if (gradinput.numElements() > 0) { for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; } } } if (gradgamma.numElements() > 0) { if (threadIdx.x == 0) { gradgamma[c] += dotP * invstd; } } if (gradbeta.numElements() > 0) { if (threadIdx.x == 0) { gradbeta[c] += gradOutputSum; } } } template <typename DType> __global__ void Sum_Square_Forward_kernel ( DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> sum, DeviceTensor<DType, 1> square) { int c = blockIdx.x; /* main operation */ SumOp<DType, DType> g(input); Float2<DType, DType> res = reduce<Float2<DType, DType>, SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c); DType xsum = res.v1; DType xsquare = res.v2; if (threadIdx.x == 0) { sum[c] = xsum; square[c] = xsquare; } } template <typename DType> __global__ void Sum_Square_Backward_kernel ( DeviceTensor<DType, 3> gradInput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> gradSum, DeviceTensor<DType, 1> gradSquare) { int c = blockIdx.x; /* main operation */ for (int batch = 0; batch < gradInput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { gradInput[batch][c][x] = gradSum[c] + 2 * gradSquare[c] * input[batch][c][x]; } } } } // namespcae at::Tensor BatchNorm_Forward_CUDA( const at::Tensor input_, const at::Tensor mean_, const at::Tensor std_, const at::Tensor gamma_, const at::Tensor beta_) { auto output_ = at::zeros_like(input_); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); /* kernel function */ hipLaunchKernelGGL(( BatchNorm_Forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream, output, input, mean, std, gamma, beta); })); AT_ASSERT(hipGetLastError() == hipSuccess); return output_; } std::vector<at::Tensor> BatchNorm_Backward_CUDA( const at::Tensor gradoutput_, const at::Tensor input_, const at::Tensor mean_, const at::Tensor std_, const at::Tensor gamma_, const at::Tensor beta_, bool train) { /* outputs*/ at::Tensor gradinput_ = at::zeros_like(input_); at::Tensor gradgamma_ = at::zeros_like(gamma_); at::Tensor gradbeta_ = at::zeros_like(beta_); at::Tensor gradMean_ = at::zeros_like(mean_); at::Tensor gradStd_ = at::zeros_like(std_); /* cuda utils*/ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_); DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_); DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 
1>(gradbeta_); DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); DeviceTensor<scalar_t, 1> gradMean = devicetensor<scalar_t, 1>(gradMean_); DeviceTensor<scalar_t, 1> gradStd = devicetensor<scalar_t, 1>(gradStd_); /* kernel function */ hipLaunchKernelGGL(( BatchNorm_Backward_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, stream, gradoutput, input, gradinput, gradgamma, gradbeta, mean, std, gamma, beta, gradMean, gradStd, train); })); AT_ASSERT(hipGetLastError() == hipSuccess); return {gradinput_, gradMean_, gradStd_, gradgamma_, gradbeta_}; } std::vector<at::Tensor> Sum_Square_Forward_CUDA( const at::Tensor input_) { /* outputs */ at::Tensor sum_ = input_.type().tensor({input_.size(1)}).zero_(); at::Tensor square_ = input_.type().tensor({input_.size(1)}).zero_(); /* cuda utils*/ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> sum = devicetensor<scalar_t, 1>(sum_); DeviceTensor<scalar_t, 1> square = devicetensor<scalar_t, 1>(square_); /* kernel function */ hipLaunchKernelGGL(( Sum_Square_Forward_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, stream, input, sum, square); })); AT_ASSERT(hipGetLastError() == hipSuccess); return {sum_, square_}; } at::Tensor Sum_Square_Backward_CUDA( const at::Tensor input_, const at::Tensor gradSum_, const at::Tensor gradSquare_) { /* outputs */ at::Tensor gradInput_ = at::zeros_like(input_); /* cuda utils*/ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> gradSum = devicetensor<scalar_t, 1>(gradSum_); DeviceTensor<scalar_t, 1> gradSquare =devicetensor<scalar_t, 1>(gradSquare_); /* kernel function */ hipLaunchKernelGGL(( Sum_Square_Backward_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, stream, gradInput, input, gradSum, gradSquare); })); AT_ASSERT(hipGetLastError() == hipSuccess); return gradInput_; }
0df72ac895d2d07a34cde4565d5322b61496925c.cu
#include "ATen/ATen.h" #include "ATen/cuda/CUDAContext.h" #include "ATen/cuda/CUDAApplyUtils.cuh" #include <vector> #include "common.h" #include "device_tensor.h" namespace { template <typename DType, typename Acctype, typename DeviceTensor3> struct GradOp { __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) : mean(m), input(i), gradOutput(g) {} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = gradOutput[batch][plane][n]; DType c = ScalarConverter<Acctype, DType>::to(input[batch][plane][n] - mean); return Float2<DType, Acctype>(g, g * c); } const Acctype mean; const DeviceTensor3 input; const DeviceTensor3 gradOutput; }; template <typename DType, typename Acctype> struct SumOp { __device__ SumOp(DeviceTensor<DType, 3> i) : input(i){} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = input[batch][plane][n]; return Float2<DType, Acctype>(g, g * g); } DType mean; DeviceTensor<DType, 3> input; }; // Sum across (batch, x/y/z) applying Op() pointwise template<typename T, typename Op, typename DeviceTensor3> __device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { T sum = (T)0; for (int batch = 0; batch < tensor.getSize(0); ++batch) { for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } template <typename DType> __global__ void BatchNorm_Forward_kernel ( DeviceTensor<DType, 3> output, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta) { int c = blockIdx.x; /* main operation */ for (int b = 0; b < input.getSize(0); ++b) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { DType inp = input[b][c][x]; output[b][c][x] = gamma[c] * (inp - mean[c]) / std[c] + beta[c]; } } } template <typename DType> __global__ void BatchNorm_Backward_kernel ( DeviceTensor<DType, 3> gradoutput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 3> gradinput, DeviceTensor<DType, 1> gradgamma, DeviceTensor<DType, 1> gradbeta, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta, DeviceTensor<DType, 1> gradMean, DeviceTensor<DType, 1> gradStd, bool train) { /* declarations of the variables */ /* Get the index and channels */ int c = blockIdx.x; /* main operation */ GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput); Float2<DType, DType> res = reduce<Float2<DType, DType>, GradOp<DType, DType, DeviceTensor<DType, 3>>, DeviceTensor<DType, 3>>(g, gradoutput, c); DType gradOutputSum = res.v1; DType dotP = res.v2; DType invstd = DType(1.0) / std[c]; DType gradScale = invstd * gamma[c]; if (train && threadIdx.x == 0) { gradMean[c] = - gradOutputSum * gamma[c] * invstd; gradStd[c] = - dotP * gamma[c] * invstd * invstd; } if (gradinput.numElements() > 0) { for (int 
batch = 0; batch < gradoutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; } } } if (gradgamma.numElements() > 0) { if (threadIdx.x == 0) { gradgamma[c] += dotP * invstd; } } if (gradbeta.numElements() > 0) { if (threadIdx.x == 0) { gradbeta[c] += gradOutputSum; } } } template <typename DType> __global__ void Sum_Square_Forward_kernel ( DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> sum, DeviceTensor<DType, 1> square) { int c = blockIdx.x; /* main operation */ SumOp<DType, DType> g(input); Float2<DType, DType> res = reduce<Float2<DType, DType>, SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c); DType xsum = res.v1; DType xsquare = res.v2; if (threadIdx.x == 0) { sum[c] = xsum; square[c] = xsquare; } } template <typename DType> __global__ void Sum_Square_Backward_kernel ( DeviceTensor<DType, 3> gradInput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> gradSum, DeviceTensor<DType, 1> gradSquare) { int c = blockIdx.x; /* main operation */ for (int batch = 0; batch < gradInput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { gradInput[batch][c][x] = gradSum[c] + 2 * gradSquare[c] * input[batch][c][x]; } } } } // namespcae at::Tensor BatchNorm_Forward_CUDA( const at::Tensor input_, const at::Tensor mean_, const at::Tensor std_, const at::Tensor gamma_, const at::Tensor beta_) { auto output_ = at::zeros_like(input_); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); /* kernel function */ BatchNorm_Forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>( output, input, mean, std, gamma, beta); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return output_; } std::vector<at::Tensor> BatchNorm_Backward_CUDA( const at::Tensor gradoutput_, const at::Tensor input_, const at::Tensor mean_, const at::Tensor std_, const at::Tensor gamma_, const at::Tensor beta_, bool train) { /* outputs*/ at::Tensor gradinput_ = at::zeros_like(input_); at::Tensor gradgamma_ = at::zeros_like(gamma_); at::Tensor gradbeta_ = at::zeros_like(beta_); at::Tensor gradMean_ = at::zeros_like(mean_); at::Tensor gradStd_ = at::zeros_like(std_); /* cuda utils*/ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_); DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_); DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_); DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); 
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); DeviceTensor<scalar_t, 1> gradMean = devicetensor<scalar_t, 1>(gradMean_); DeviceTensor<scalar_t, 1> gradStd = devicetensor<scalar_t, 1>(gradStd_); /* kernel function */ BatchNorm_Backward_kernel<scalar_t> <<<blocks, threads, 0, stream>>>( gradoutput, input, gradinput, gradgamma, gradbeta, mean, std, gamma, beta, gradMean, gradStd, train); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return {gradinput_, gradMean_, gradStd_, gradgamma_, gradbeta_}; } std::vector<at::Tensor> Sum_Square_Forward_CUDA( const at::Tensor input_) { /* outputs */ at::Tensor sum_ = input_.type().tensor({input_.size(1)}).zero_(); at::Tensor square_ = input_.type().tensor({input_.size(1)}).zero_(); /* cuda utils*/ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> sum = devicetensor<scalar_t, 1>(sum_); DeviceTensor<scalar_t, 1> square = devicetensor<scalar_t, 1>(square_); /* kernel function */ Sum_Square_Forward_kernel<scalar_t> <<<blocks, threads, 0, stream>>>(input, sum, square); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return {sum_, square_}; } at::Tensor Sum_Square_Backward_CUDA( const at::Tensor input_, const at::Tensor gradSum_, const at::Tensor gradSquare_) { /* outputs */ at::Tensor gradInput_ = at::zeros_like(input_); /* cuda utils*/ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> gradSum = devicetensor<scalar_t, 1>(gradSum_); DeviceTensor<scalar_t, 1> gradSquare =devicetensor<scalar_t, 1>(gradSquare_); /* kernel function */ Sum_Square_Backward_kernel<scalar_t> <<<blocks, threads, 0, stream>>>(gradInput, input, gradSum, gradSquare); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return gradInput_; }
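// Illustrative sketch, not part of the original 0df72a... .cu file:
// Sum_Square_Forward_CUDA above returns per-channel sum(x) and sum(x^2),
// while BatchNorm_Forward_CUDA consumes per-channel mean and std. A caller
// could derive the latter from the former on the host as sketched below; the
// function name, the eps argument and the plain CPU float vectors are
// assumptions for illustration (the real project performs this step outside
// this file).
#include <cmath>
#include <cstddef>
#include <vector>

// count: number of elements reduced per channel (batch * spatial size).
static void sums_to_mean_std(const std::vector<float>& sum,
                             const std::vector<float>& square,
                             float count, float eps,
                             std::vector<float>& mean,
                             std::vector<float>& std_dev)
{
    mean.resize(sum.size());
    std_dev.resize(sum.size());
    for (std::size_t c = 0; c < sum.size(); ++c) {
        mean[c] = sum[c] / count;                                  // E[x]
        const float var = square[c] / count - mean[c] * mean[c];   // E[x^2] - E[x]^2
        std_dev[c] = std::sqrt(var + eps);                         // forward kernel divides by std[c]
    }
}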
e15c7dbe6c37495a2135a80131ff9dd803bf2863.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <fstream> #include <vector> #include <string> using namespace std; #pragma comment(lib, "cuda.lib") #pragma comment(lib, "cudart.lib") #include <hip/hip_runtime.h> #include <math.h> #include "device_launch_parameters.h" #include <rocblas.h> #define TILE_WIDTH 32 void ler_Matlab(double *L, int ordem, char nome[50]) { FILE *arq; double valor; int i = 0; arq = fopen(nome, "r"); if (arq == NULL) { printf("\nERRO AO ABRIR ARQUIVO!!!\n"); return; } while (!feof(arq)) { fscanf(arq, "%lf", &valor); L[i] = valor; i++; } fclose(arq); } void escrever_Matriz(double *L, int ordem, char nome[20]) { FILE *arq; int i, j; arq = fopen(nome, "w"); if (arq == NULL) { printf("\nERRO AO CRIAR ARQUIVO!!!\n"); return; } for (i = 0; i < ordem; i++) { for (j = 0; j < ordem; j++) { fprintf(arq, "%.2lf,", L[i*ordem + j]); } fprintf(arq, "\n"); } fclose(arq); } __global__ void mult_Matriz(double *A, double *B, double *C, int ordem) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; double value = 0; __shared__ double a_sub[TILE_WIDTH][TILE_WIDTH]; //Matrizes Compartilhadas __shared__ double b_sub[TILE_WIDTH][TILE_WIDTH]; //Matrizes Compartilhadas a_sub[ty][tx] = 0.0; b_sub[ty][tx] = 0.0; for (int m = 0; m < (ordem - 1) / TILE_WIDTH + 1; ++m){ if (Row < ordem && m*TILE_WIDTH + tx < ordem) a_sub[ty][tx] = A[Row*ordem + (m*TILE_WIDTH + tx)]; else a_sub[ty][tx] = 0; if (Col < ordem && m*TILE_WIDTH + ty < ordem) b_sub[ty][tx] = B[(m*TILE_WIDTH + ty)*ordem + Col]; else b_sub[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; k++) value += a_sub[ty][k] * b_sub[k][tx]; __syncthreads(); } if (Row < ordem && Col < ordem) C[Row * ordem + Col] = value; } void multiplica_Matriz_Cuda(double *matA, double *matB, double *matC, int ordem) { double *dev_A; double *dev_B; double *dev_C; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipMalloc((void**)&dev_A, ordem*ordem * sizeof(double)); hipMalloc((void**)&dev_B, ordem*ordem * sizeof(double)); hipMalloc((void**)&dev_C, ordem*ordem * sizeof(double)); hipMemcpy(dev_A, matA, ordem*ordem * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_B, matB, ordem*ordem * sizeof(double), hipMemcpyHostToDevice); hipEventRecord(start, 0); dim3 dimGrid((ordem - 1) / TILE_WIDTH + 1, (ordem - 1) / TILE_WIDTH + 1, 1); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); mult_Matriz << <dimGrid, dimBlock >> >(dev_A, dev_B, dev_C, ordem); hipDeviceSynchronize(); hipMemcpy(matC, dev_C, ordem*ordem * sizeof(double), hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); float milisegundos = 0; hipEventElapsedTime(&milisegundos, start, stop); hipFree(dev_A); hipFree(dev_B); hipFree(dev_C); printf("\nMultiplicacao realizada em: %.2f milliseconds", milisegundos); int device; hipGetDevice(&device); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, device); printf("\nDevice name: %s\n", prop.name); } int main(int argc, char** argv) { double *matA; double *matB; double *matC; int i, j; int ordem = atoi(argv[3]); matA = (double*)malloc(ordem*ordem * sizeof(double)); matB = (double*)malloc(ordem*ordem * sizeof(double)); matC = (double*)malloc(ordem*ordem * sizeof(double)); for (i = 0; i < ordem; i++) { for (j = 0; j < ordem; j++) { matA[i*ordem + j] = 0; matB[i*ordem + j] = 0; matC[i*ordem + j] = 0; } } ler_Matlab(matA, ordem, argv[1]); ler_Matlab(matB, ordem, 
argv[2]); multiplica_Matriz_Cuda(matA, matB, matC, ordem); escrever_Matriz(matC, ordem, "Mult_em_CUDA.txt"); printf("\nDimensao: %d", ordem); printf("\nFINALIZADO\n\n"); free(matA); free(matB); free(matC); return 0; }
e15c7dbe6c37495a2135a80131ff9dd803bf2863.cu
#include <stdio.h> #include <iostream> #include <fstream> #include <vector> #include <string> using namespace std; #pragma comment(lib, "cuda.lib") #pragma comment(lib, "cudart.lib") #include <cuda.h> #include <math.h> #include "device_launch_parameters.h" #include <cublas_v2.h> #define TILE_WIDTH 32 void ler_Matlab(double *L, int ordem, char nome[50]) { FILE *arq; double valor; int i = 0; arq = fopen(nome, "r"); if (arq == NULL) { printf("\nERRO AO ABRIR ARQUIVO!!!\n"); return; } while (!feof(arq)) { fscanf(arq, "%lf", &valor); L[i] = valor; i++; } fclose(arq); } void escrever_Matriz(double *L, int ordem, char nome[20]) { FILE *arq; int i, j; arq = fopen(nome, "w"); if (arq == NULL) { printf("\nERRO AO CRIAR ARQUIVO!!!\n"); return; } for (i = 0; i < ordem; i++) { for (j = 0; j < ordem; j++) { fprintf(arq, "%.2lf,", L[i*ordem + j]); } fprintf(arq, "\n"); } fclose(arq); } __global__ void mult_Matriz(double *A, double *B, double *C, int ordem) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; double value = 0; __shared__ double a_sub[TILE_WIDTH][TILE_WIDTH]; //Matrizes Compartilhadas __shared__ double b_sub[TILE_WIDTH][TILE_WIDTH]; //Matrizes Compartilhadas a_sub[ty][tx] = 0.0; b_sub[ty][tx] = 0.0; for (int m = 0; m < (ordem - 1) / TILE_WIDTH + 1; ++m){ if (Row < ordem && m*TILE_WIDTH + tx < ordem) a_sub[ty][tx] = A[Row*ordem + (m*TILE_WIDTH + tx)]; else a_sub[ty][tx] = 0; if (Col < ordem && m*TILE_WIDTH + ty < ordem) b_sub[ty][tx] = B[(m*TILE_WIDTH + ty)*ordem + Col]; else b_sub[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; k++) value += a_sub[ty][k] * b_sub[k][tx]; __syncthreads(); } if (Row < ordem && Col < ordem) C[Row * ordem + Col] = value; } void multiplica_Matriz_Cuda(double *matA, double *matB, double *matC, int ordem) { double *dev_A; double *dev_B; double *dev_C; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMalloc((void**)&dev_A, ordem*ordem * sizeof(double)); cudaMalloc((void**)&dev_B, ordem*ordem * sizeof(double)); cudaMalloc((void**)&dev_C, ordem*ordem * sizeof(double)); cudaMemcpy(dev_A, matA, ordem*ordem * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_B, matB, ordem*ordem * sizeof(double), cudaMemcpyHostToDevice); cudaEventRecord(start, 0); dim3 dimGrid((ordem - 1) / TILE_WIDTH + 1, (ordem - 1) / TILE_WIDTH + 1, 1); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); mult_Matriz << <dimGrid, dimBlock >> >(dev_A, dev_B, dev_C, ordem); cudaDeviceSynchronize(); cudaMemcpy(matC, dev_C, ordem*ordem * sizeof(double), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float milisegundos = 0; cudaEventElapsedTime(&milisegundos, start, stop); cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C); printf("\nMultiplicacao realizada em: %.2f milliseconds", milisegundos); int device; cudaGetDevice(&device); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, device); printf("\nDevice name: %s\n", prop.name); } int main(int argc, char** argv) { double *matA; double *matB; double *matC; int i, j; int ordem = atoi(argv[3]); matA = (double*)malloc(ordem*ordem * sizeof(double)); matB = (double*)malloc(ordem*ordem * sizeof(double)); matC = (double*)malloc(ordem*ordem * sizeof(double)); for (i = 0; i < ordem; i++) { for (j = 0; j < ordem; j++) { matA[i*ordem + j] = 0; matB[i*ordem + j] = 0; matC[i*ordem + j] = 0; } } ler_Matlab(matA, ordem, argv[1]); ler_Matlab(matB, ordem, argv[2]); multiplica_Matriz_Cuda(matA, matB, matC, 
ordem); escrever_Matriz(matC, ordem, "Mult_em_CUDA.txt"); printf("\nDimensao: %d", ordem); printf("\nFINALIZADO\n\n"); free(matA); free(matB); free(matC); return 0; }
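// Illustrative sketch, not part of the original e15c7d... .cu file: a CPU
// reference for the row-major square product computed by mult_Matriz above,
// handy for spot-checking the tiled GPU result. The function name and the
// relative tolerance are assumptions for illustration.
#include <cmath>
#include <cstdio>

static bool verifica_Mult(const double* A, const double* B, const double* C_gpu, int ordem)
{
    for (int i = 0; i < ordem; ++i) {
        for (int j = 0; j < ordem; ++j) {
            double ref = 0.0;
            for (int k = 0; k < ordem; ++k) {
                ref += A[i * ordem + k] * B[k * ordem + j];
            }
            const double tol = 1e-6 * (1.0 + std::fabs(ref));
            if (std::fabs(ref - C_gpu[i * ordem + j]) > tol) {
                std::printf("Mismatch at (%d,%d): cpu=%f gpu=%f\n",
                            i, j, ref, C_gpu[i * ordem + j]);
                return false;
            }
        }
    }
    return true;
}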
b14c120247190143136a562be22a39604a0d9f7b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/eltwise.cuh> #include <raft/linalg/map.cuh> #include <raft/mr/device/buffer.hpp> #include <raft/random/rng.cuh> #include "../test_utils.h" namespace raft { namespace linalg { template <typename InType, typename IdxType, typename OutType> void mapLaunch(OutType *out, const InType *in1, const InType *in2, const InType *in3, InType scalar, IdxType len, hipStream_t stream) { map( out, len, [=] __device__(InType a, InType b, InType c) { return a + b + c + scalar; }, stream, in1, in2, in3); } template <typename InType, typename IdxType = int, typename OutType = InType> struct MapInputs { InType tolerance; IdxType len; unsigned long long int seed; InType scalar; }; template <typename InType, typename IdxType, typename OutType = InType> void create_ref(OutType *out_ref, const InType *in1, const InType *in2, const InType *in3, InType scalar, IdxType len, hipStream_t stream) { InType *tmp; allocate(tmp, len); eltwiseAdd(tmp, in1, in2, len, stream); eltwiseAdd(out_ref, tmp, in3, len, stream); scalarAdd(out_ref, out_ref, (OutType)scalar, len, stream); CUDA_CHECK(hipFree(tmp)); } template <typename InType, typename IdxType, typename OutType = InType> class MapTest : public ::testing::TestWithParam<MapInputs<InType, IdxType, OutType>> { protected: void SetUp() override { params = ::testing::TestWithParam<MapInputs<InType, IdxType, OutType>>::GetParam(); raft::random::Rng r(params.seed); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); IdxType len = params.len; allocate(in1, len); allocate(in2, len); allocate(in3, len); allocate(out_ref, len); allocate(out, len); r.uniform(in1, len, InType(-1.0), InType(1.0), stream); r.uniform(in2, len, InType(-1.0), InType(1.0), stream); r.uniform(in3, len, InType(-1.0), InType(1.0), stream); create_ref(out_ref, in1, in2, in3, params.scalar, len, stream); mapLaunch(out, in1, in2, in3, params.scalar, len, stream); CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(in1)); CUDA_CHECK(hipFree(in2)); CUDA_CHECK(hipFree(in3)); CUDA_CHECK(hipFree(out_ref)); CUDA_CHECK(hipFree(out)); } protected: MapInputs<InType, IdxType, OutType> params; InType *in1, *in2, *in3; OutType *out_ref, *out; }; const std::vector<MapInputs<float, int>> inputsf_i32 = { {0.000001f, 1024 * 1024, 1234ULL, 3.2}}; typedef MapTest<float, int> MapTestF_i32; TEST_P(MapTestF_i32, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(MapTests, MapTestF_i32, ::testing::ValuesIn(inputsf_i32)); const std::vector<MapInputs<float, size_t>> inputsf_i64 = { {0.000001f, 1024 * 1024, 1234ULL, 9.4}}; typedef MapTest<float, size_t> MapTestF_i64; TEST_P(MapTestF_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } 
INSTANTIATE_TEST_SUITE_P(MapTests, MapTestF_i64, ::testing::ValuesIn(inputsf_i64)); const std::vector<MapInputs<float, int, double>> inputsf_i32_d = { {0.000001f, 1024 * 1024, 1234ULL, 5.9}}; typedef MapTest<float, int, double> MapTestF_i32_D; TEST_P(MapTestF_i32_D, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(MapTests, MapTestF_i32_D, ::testing::ValuesIn(inputsf_i32_d)); const std::vector<MapInputs<double, int>> inputsd_i32 = { {0.00000001, 1024 * 1024, 1234ULL, 7.5}}; typedef MapTest<double, int> MapTestD_i32; TEST_P(MapTestD_i32, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(MapTests, MapTestD_i32, ::testing::ValuesIn(inputsd_i32)); const std::vector<MapInputs<double, size_t>> inputsd_i64 = { {0.00000001, 1024 * 1024, 1234ULL, 5.2}}; typedef MapTest<double, size_t> MapTestD_i64; TEST_P(MapTestD_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(MapTests, MapTestD_i64, ::testing::ValuesIn(inputsd_i64)); } // namespace linalg } // namespace raft
b14c120247190143136a562be22a39604a0d9f7b.cu
/* * Copyright (c) 2018-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/eltwise.cuh> #include <raft/linalg/map.cuh> #include <raft/mr/device/buffer.hpp> #include <raft/random/rng.cuh> #include "../test_utils.h" namespace raft { namespace linalg { template <typename InType, typename IdxType, typename OutType> void mapLaunch(OutType *out, const InType *in1, const InType *in2, const InType *in3, InType scalar, IdxType len, cudaStream_t stream) { map( out, len, [=] __device__(InType a, InType b, InType c) { return a + b + c + scalar; }, stream, in1, in2, in3); } template <typename InType, typename IdxType = int, typename OutType = InType> struct MapInputs { InType tolerance; IdxType len; unsigned long long int seed; InType scalar; }; template <typename InType, typename IdxType, typename OutType = InType> void create_ref(OutType *out_ref, const InType *in1, const InType *in2, const InType *in3, InType scalar, IdxType len, cudaStream_t stream) { InType *tmp; allocate(tmp, len); eltwiseAdd(tmp, in1, in2, len, stream); eltwiseAdd(out_ref, tmp, in3, len, stream); scalarAdd(out_ref, out_ref, (OutType)scalar, len, stream); CUDA_CHECK(cudaFree(tmp)); } template <typename InType, typename IdxType, typename OutType = InType> class MapTest : public ::testing::TestWithParam<MapInputs<InType, IdxType, OutType>> { protected: void SetUp() override { params = ::testing::TestWithParam<MapInputs<InType, IdxType, OutType>>::GetParam(); raft::random::Rng r(params.seed); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); IdxType len = params.len; allocate(in1, len); allocate(in2, len); allocate(in3, len); allocate(out_ref, len); allocate(out, len); r.uniform(in1, len, InType(-1.0), InType(1.0), stream); r.uniform(in2, len, InType(-1.0), InType(1.0), stream); r.uniform(in3, len, InType(-1.0), InType(1.0), stream); create_ref(out_ref, in1, in2, in3, params.scalar, len, stream); mapLaunch(out, in1, in2, in3, params.scalar, len, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(in1)); CUDA_CHECK(cudaFree(in2)); CUDA_CHECK(cudaFree(in3)); CUDA_CHECK(cudaFree(out_ref)); CUDA_CHECK(cudaFree(out)); } protected: MapInputs<InType, IdxType, OutType> params; InType *in1, *in2, *in3; OutType *out_ref, *out; }; const std::vector<MapInputs<float, int>> inputsf_i32 = { {0.000001f, 1024 * 1024, 1234ULL, 3.2}}; typedef MapTest<float, int> MapTestF_i32; TEST_P(MapTestF_i32, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(MapTests, MapTestF_i32, ::testing::ValuesIn(inputsf_i32)); const std::vector<MapInputs<float, size_t>> inputsf_i64 = { {0.000001f, 1024 * 1024, 1234ULL, 9.4}}; typedef MapTest<float, size_t> MapTestF_i64; TEST_P(MapTestF_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(MapTests, MapTestF_i64, 
::testing::ValuesIn(inputsf_i64)); const std::vector<MapInputs<float, int, double>> inputsf_i32_d = { {0.000001f, 1024 * 1024, 1234ULL, 5.9}}; typedef MapTest<float, int, double> MapTestF_i32_D; TEST_P(MapTestF_i32_D, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(MapTests, MapTestF_i32_D, ::testing::ValuesIn(inputsf_i32_d)); const std::vector<MapInputs<double, int>> inputsd_i32 = { {0.00000001, 1024 * 1024, 1234ULL, 7.5}}; typedef MapTest<double, int> MapTestD_i32; TEST_P(MapTestD_i32, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(MapTests, MapTestD_i32, ::testing::ValuesIn(inputsd_i32)); const std::vector<MapInputs<double, size_t>> inputsd_i64 = { {0.00000001, 1024 * 1024, 1234ULL, 5.2}}; typedef MapTest<double, size_t> MapTestD_i64; TEST_P(MapTestD_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(MapTests, MapTestD_i64, ::testing::ValuesIn(inputsd_i64)); } // namespace linalg } // namespace raft
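// Illustrative sketch, not part of the original b14c12... .cu file: the test
// above exercises raft::linalg::map with the ternary functor
// (a, b, c) -> a + b + c + scalar. For reference, an equivalent hand-written
// elementwise kernel is sketched below; the kernel name and launch geometry
// are illustrative assumptions, not raft API.
#include <cuda_runtime.h>

template <typename InType, typename OutType, typename IdxType>
__global__ void add3_plus_scalar_kernel(OutType* out, const InType* in1, const InType* in2,
                                        const InType* in3, InType scalar, IdxType len)
{
    const IdxType i = blockIdx.x * static_cast<IdxType>(blockDim.x) + threadIdx.x;
    if (i < len) {
        out[i] = static_cast<OutType>(in1[i] + in2[i] + in3[i] + scalar);
    }
}

template <typename InType, typename OutType, typename IdxType>
void add3_plus_scalar(OutType* out, const InType* in1, const InType* in2, const InType* in3,
                      InType scalar, IdxType len, cudaStream_t stream)
{
    constexpr int block = 256;
    const unsigned int grid = static_cast<unsigned int>((len + block - 1) / block);
    add3_plus_scalar_kernel<<<grid, block, 0, stream>>>(out, in1, in2, in3, scalar, len);
}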
7bef5020b9842e5813ad611adfc92d08bb9a1c8c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file writer_impl.cu * @brief cuDF-IO CSV writer class implementation */ #include "writer_impl.hpp" #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/null_mask.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/detail/combine.hpp> #include <cudf/strings/detail/converters.hpp> #include <cudf/strings/detail/replace.hpp> #include <cudf/strings/detail/utilities.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <thrust/execution_policy.h> #include <thrust/logical.h> #include <thrust/scan.h> #include <algorithm> #include <sstream> namespace cudf { namespace io { namespace detail { namespace csv { namespace { /** * @brief Functor to modify a string column for CSV format. * * If a row contains specific characters, the entire row must be * output in double-quotes. Also, if a double-quote appears it * must be escaped using a 2nd double-quote. */ struct escape_strings_fn { column_device_view const d_column; string_view const d_delimiter; // check for column delimiter offset_type* d_offsets{}; char* d_chars{}; __device__ void write_char(char_utf8 chr, char*& d_buffer, offset_type& bytes) { if (d_buffer) d_buffer += cudf::strings::detail::from_char_utf8(chr, d_buffer); else bytes += cudf::strings::detail::bytes_in_char_utf8(chr); } __device__ void operator()(size_type idx) { if (d_column.is_null(idx)) { if (!d_chars) d_offsets[idx] = 0; return; } constexpr char_utf8 const quote = '\"'; // check for quote constexpr char_utf8 const new_line = '\n'; // and for new-line auto const d_str = d_column.element<string_view>(idx); // if quote, new-line or a column delimiter appear in the string // the entire string must be double-quoted. bool const quote_row = thrust::any_of( thrust::seq, d_str.begin(), d_str.end(), [d_delimiter = d_delimiter](auto chr) { return chr == quote || chr == new_line || chr == d_delimiter[0]; }); char* d_buffer = d_chars ? 
d_chars + d_offsets[idx] : nullptr; offset_type bytes = 0; if (quote_row) write_char(quote, d_buffer, bytes); for (auto chr : d_str) { if (chr == quote) write_char(quote, d_buffer, bytes); write_char(chr, d_buffer, bytes); } if (quote_row) write_char(quote, d_buffer, bytes); if (!d_chars) d_offsets[idx] = bytes; } }; struct column_to_strings_fn { // compile-time predicate that defines unsupported column types; // based on the conditions used for instantiations of individual // converters in strings/convert/convert_*.hpp; //(this should have been a `variable template`, // instead of a static function, but nvcc (10.0) // fails to compile var-templs); // template <typename column_type> constexpr static bool is_not_handled(void) { // Note: the case (not std::is_same<column_type, bool>::value) // is already covered by is_integral) // return not((std::is_same<column_type, cudf::string_view>::value) || (std::is_integral<column_type>::value) || (std::is_floating_point<column_type>::value) || (cudf::is_fixed_point<column_type>()) || (cudf::is_timestamp<column_type>()) || (cudf::is_duration<column_type>())); } explicit column_to_strings_fn( csv_writer_options const& options, rmm::cuda_stream_view stream = rmm::cuda_stream_default, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) : options_(options), stream_(stream), mr_(mr) { } // Note: `null` replacement with `na_rep` deferred to `concatenate()` // instead of column-wise; might be faster // // Note: Cannot pass `stream` to detail::<fname> version of <fname> calls below, because they are // not exposed in header (see, for example, detail::concatenate(tbl_view, separator, na_rep, mr, // stream) is declared and defined in combine.cu); Possible solution: declare `extern`, or just // declare a prototype inside `namespace cudf::strings::detail`; // bools: // template <typename column_type> std::enable_if_t<std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()( column_view const& column) const { return cudf::strings::detail::from_booleans( column, options_.get_true_value(), options_.get_false_value(), stream_, mr_); } // strings: // template <typename column_type> std::enable_if_t<std::is_same<column_type, cudf::string_view>::value, std::unique_ptr<column>> operator()(column_view const& column_v) const { // handle special characters: {delimiter, '\n', "} in row: string_scalar delimiter{std::string{options_.get_inter_column_delimiter()}, true, stream_}; auto d_column = column_device_view::create(column_v, stream_); escape_strings_fn fn{*d_column, delimiter.value(stream_)}; auto children = cudf::strings::detail::make_strings_children(fn, column_v.size(), stream_, mr_); return make_strings_column(column_v.size(), std::move(children.first), std::move(children.second), column_v.null_count(), cudf::detail::copy_bitmask(column_v, stream_, mr_), stream_, mr_); } // ints: // template <typename column_type> std::enable_if_t<std::is_integral<column_type>::value && !std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()(column_view const& column) const { return cudf::strings::detail::from_integers(column, stream_, mr_); } // floats: // template <typename column_type> std::enable_if_t<std::is_floating_point<column_type>::value, std::unique_ptr<column>> operator()( column_view const& column) const { return cudf::strings::detail::from_floats(column, stream_, mr_); } // fixed point: // template <typename column_type> std::enable_if_t<cudf::is_fixed_point<column_type>(), std::unique_ptr<column>> 
operator()( column_view const& column) const { return cudf::strings::detail::from_fixed_point(column, stream_, mr_); } // timestamps: // template <typename column_type> std::enable_if_t<cudf::is_timestamp<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { std::string format = [&]() { if (std::is_same<cudf::timestamp_s, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%SZ"}; } else if (std::is_same<cudf::timestamp_ms, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%3fZ"}; } else if (std::is_same<cudf::timestamp_us, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%6fZ"}; } else if (std::is_same<cudf::timestamp_ns, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%9fZ"}; } else { return std::string{"%Y-%m-%d"}; } }(); // handle the cases where delimiter / line-terminator can be // "-" or ":", in which case we need to add quotes to the format // std::string delimiter{options_.get_inter_column_delimiter()}; std::string newline{options_.get_line_terminator()}; constexpr char const* dash{"-"}; constexpr char const* colon{":"}; if (delimiter == dash || newline == dash || delimiter == colon || newline == colon) { format = "\"" + format + "\""; } return cudf::strings::detail::from_timestamps(column, format, stream_, mr_); } template <typename column_type> std::enable_if_t<cudf::is_duration<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { return cudf::io::detail::csv::pandas_format_durations(column, stream_, mr_); } // unsupported type of column: // template <typename column_type> std::enable_if_t<is_not_handled<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { CUDF_FAIL("Unsupported column type."); } private: csv_writer_options const& options_; rmm::cuda_stream_view stream_; rmm::mr::device_memory_resource* mr_; }; } // unnamed namespace // Forward to implementation writer::writer(std::unique_ptr<data_sink> sink, csv_writer_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _impl(std::make_unique<impl>(std::move(sink), options, mr)) { } // Destructor within this translation unit writer::~writer() = default; writer::impl::impl(std::unique_ptr<data_sink> sink, csv_writer_options const& options, rmm::mr::device_memory_resource* mr) : out_sink_(std::move(sink)), mr_(mr), options_(options) { } // write the header: column names: // void writer::impl::write_chunked_begin(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { if ((metadata != nullptr) && (options_.is_enabled_include_header())) { CUDF_EXPECTS(metadata->column_names.size() == static_cast<size_t>(table.num_columns()), "Mismatch between number of column headers and table columns."); std::string delimiter_str{options_.get_inter_column_delimiter()}; // avoid delimiter after last element: // std::stringstream ss; std::copy(metadata->column_names.begin(), metadata->column_names.end() - 1, std::ostream_iterator<std::string>(ss, delimiter_str.c_str())); ss << metadata->column_names.back() << options_.get_line_terminator(); out_sink_->host_write(ss.str().data(), ss.str().size()); } } void writer::impl::write_chunked(strings_column_view const& str_column_view, const table_metadata* metadata, rmm::cuda_stream_view stream) { // algorithm outline: // // for_each(strings_column.begin(), strings_column.end(), // [sink = out_sink_](auto str_row) mutable { // auto host_buffer = str_row.host_buffer(); // 
sink->host_write(host_buffer_.data(), host_buffer_.size()); // });//or...sink->device_write(device_buffer,...); // // added line_terminator functionality // CUDF_EXPECTS(str_column_view.size() > 0, "Unexpected empty strings column."); cudf::string_scalar newline{options_.get_line_terminator()}; auto p_str_col_w_nl = cudf::strings::detail::join_strings(str_column_view, newline, string_scalar("", false), stream); strings_column_view strings_column{p_str_col_w_nl->view()}; auto total_num_bytes = strings_column.chars_size(); char const* ptr_all_bytes = strings_column.chars().data<char>(); if (out_sink_->is_device_write_preferred(total_num_bytes)) { // Direct write from device memory out_sink_->device_write(ptr_all_bytes, total_num_bytes, stream); } else { // copy the bytes to host to write them out thrust::host_vector<char> h_bytes(total_num_bytes); CUDA_TRY(hipMemcpyAsync(h_bytes.data(), ptr_all_bytes, total_num_bytes * sizeof(char), hipMemcpyDeviceToHost, stream.value())); stream.synchronize(); out_sink_->host_write(h_bytes.data(), total_num_bytes); } // Needs newline at the end, to separate from next chunk if (out_sink_->is_device_write_preferred(newline.size())) { out_sink_->device_write(newline.data(), newline.size(), stream); } else { out_sink_->host_write(options_.get_line_terminator().data(), options_.get_line_terminator().size()); } } void writer::impl::write(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { CUDF_EXPECTS(table.num_columns() > 0, "Empty table."); // write header: column names separated by delimiter: // (even for tables with no rows) // write_chunked_begin(table, metadata, stream); if (table.num_rows() > 0) { // no need to check same-size columns constraint; auto-enforced by table_view auto n_rows_per_chunk = options_.get_rows_per_chunk(); // // This outputs the CSV in row chunks to save memory. // Maybe we can use the total_rows*count calculation and a memory threshold // instead of an arbitrary chunk count. // The entire CSV chunk must fit in CPU memory before writing it out. 
// if (n_rows_per_chunk % 8) // must be divisible by 8 n_rows_per_chunk += 8 - (n_rows_per_chunk % 8); CUDF_EXPECTS(n_rows_per_chunk >= 8, "write_csv: invalid chunk_rows; must be at least 8"); auto num_rows = table.num_rows(); std::vector<table_view> vector_views; if (num_rows <= n_rows_per_chunk) { vector_views.push_back(table); } else { auto const n_chunks = num_rows / n_rows_per_chunk; std::vector<size_type> splits(n_chunks); thrust::tabulate(splits.begin(), splits.end(), [n_rows_per_chunk](auto idx) { return (idx + 1) * n_rows_per_chunk; }); // split table_view into chunks: vector_views = cudf::split(table, splits); } // convert each chunk to CSV: // column_to_strings_fn converter{options_, stream, rmm::mr::get_current_device_resource()}; for (auto&& sub_view : vector_views) { // Skip if the table has no rows if (sub_view.num_rows() == 0) continue; std::vector<std::unique_ptr<column>> str_column_vec; // populate vector of string-converted columns: // std::transform(sub_view.begin(), sub_view.end(), std::back_inserter(str_column_vec), [converter](auto const& current_col) { return cudf::type_dispatcher(current_col.type(), converter, current_col); }); // create string table view from str_column_vec: // auto str_table_ptr = std::make_unique<cudf::table>(std::move(str_column_vec)); auto str_table_view = str_table_ptr->view(); // concatenate columns in each row into one big string column // (using null representation and delimiter): // std::string delimiter_str{options_.get_inter_column_delimiter()}; auto str_concat_col = [&] { if (str_table_view.num_columns() > 1) return cudf::strings::detail::concatenate(str_table_view, delimiter_str, options_.get_na_rep(), strings::separator_on_nulls::YES, stream); cudf::string_scalar narep{options_.get_na_rep()}; return cudf::strings::detail::replace_nulls(str_table_view.column(0), narep, stream); }(); write_chunked(str_concat_col->view(), metadata, stream); } } // finalize (no-op, for now, but offers a hook for future extensions): // write_chunked_end(table, metadata, stream); } void writer::write(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { _impl->write(table, metadata, stream); } } // namespace csv } // namespace detail } // namespace io } // namespace cudf
7bef5020b9842e5813ad611adfc92d08bb9a1c8c.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file writer_impl.cu * @brief cuDF-IO CSV writer class implementation */ #include "writer_impl.hpp" #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/null_mask.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/detail/combine.hpp> #include <cudf/strings/detail/converters.hpp> #include <cudf/strings/detail/replace.hpp> #include <cudf/strings/detail/utilities.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <thrust/execution_policy.h> #include <thrust/logical.h> #include <thrust/scan.h> #include <algorithm> #include <sstream> namespace cudf { namespace io { namespace detail { namespace csv { namespace { /** * @brief Functor to modify a string column for CSV format. * * If a row contains specific characters, the entire row must be * output in double-quotes. Also, if a double-quote appears it * must be escaped using a 2nd double-quote. */ struct escape_strings_fn { column_device_view const d_column; string_view const d_delimiter; // check for column delimiter offset_type* d_offsets{}; char* d_chars{}; __device__ void write_char(char_utf8 chr, char*& d_buffer, offset_type& bytes) { if (d_buffer) d_buffer += cudf::strings::detail::from_char_utf8(chr, d_buffer); else bytes += cudf::strings::detail::bytes_in_char_utf8(chr); } __device__ void operator()(size_type idx) { if (d_column.is_null(idx)) { if (!d_chars) d_offsets[idx] = 0; return; } constexpr char_utf8 const quote = '\"'; // check for quote constexpr char_utf8 const new_line = '\n'; // and for new-line auto const d_str = d_column.element<string_view>(idx); // if quote, new-line or a column delimiter appear in the string // the entire string must be double-quoted. bool const quote_row = thrust::any_of( thrust::seq, d_str.begin(), d_str.end(), [d_delimiter = d_delimiter](auto chr) { return chr == quote || chr == new_line || chr == d_delimiter[0]; }); char* d_buffer = d_chars ? 
d_chars + d_offsets[idx] : nullptr; offset_type bytes = 0; if (quote_row) write_char(quote, d_buffer, bytes); for (auto chr : d_str) { if (chr == quote) write_char(quote, d_buffer, bytes); write_char(chr, d_buffer, bytes); } if (quote_row) write_char(quote, d_buffer, bytes); if (!d_chars) d_offsets[idx] = bytes; } }; struct column_to_strings_fn { // compile-time predicate that defines unsupported column types; // based on the conditions used for instantiations of individual // converters in strings/convert/convert_*.hpp; //(this should have been a `variable template`, // instead of a static function, but nvcc (10.0) // fails to compile var-templs); // template <typename column_type> constexpr static bool is_not_handled(void) { // Note: the case (not std::is_same<column_type, bool>::value) // is already covered by is_integral) // return not((std::is_same<column_type, cudf::string_view>::value) || (std::is_integral<column_type>::value) || (std::is_floating_point<column_type>::value) || (cudf::is_fixed_point<column_type>()) || (cudf::is_timestamp<column_type>()) || (cudf::is_duration<column_type>())); } explicit column_to_strings_fn( csv_writer_options const& options, rmm::cuda_stream_view stream = rmm::cuda_stream_default, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) : options_(options), stream_(stream), mr_(mr) { } // Note: `null` replacement with `na_rep` deferred to `concatenate()` // instead of column-wise; might be faster // // Note: Cannot pass `stream` to detail::<fname> version of <fname> calls below, because they are // not exposed in header (see, for example, detail::concatenate(tbl_view, separator, na_rep, mr, // stream) is declared and defined in combine.cu); Possible solution: declare `extern`, or just // declare a prototype inside `namespace cudf::strings::detail`; // bools: // template <typename column_type> std::enable_if_t<std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()( column_view const& column) const { return cudf::strings::detail::from_booleans( column, options_.get_true_value(), options_.get_false_value(), stream_, mr_); } // strings: // template <typename column_type> std::enable_if_t<std::is_same<column_type, cudf::string_view>::value, std::unique_ptr<column>> operator()(column_view const& column_v) const { // handle special characters: {delimiter, '\n', "} in row: string_scalar delimiter{std::string{options_.get_inter_column_delimiter()}, true, stream_}; auto d_column = column_device_view::create(column_v, stream_); escape_strings_fn fn{*d_column, delimiter.value(stream_)}; auto children = cudf::strings::detail::make_strings_children(fn, column_v.size(), stream_, mr_); return make_strings_column(column_v.size(), std::move(children.first), std::move(children.second), column_v.null_count(), cudf::detail::copy_bitmask(column_v, stream_, mr_), stream_, mr_); } // ints: // template <typename column_type> std::enable_if_t<std::is_integral<column_type>::value && !std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()(column_view const& column) const { return cudf::strings::detail::from_integers(column, stream_, mr_); } // floats: // template <typename column_type> std::enable_if_t<std::is_floating_point<column_type>::value, std::unique_ptr<column>> operator()( column_view const& column) const { return cudf::strings::detail::from_floats(column, stream_, mr_); } // fixed point: // template <typename column_type> std::enable_if_t<cudf::is_fixed_point<column_type>(), std::unique_ptr<column>> 
operator()( column_view const& column) const { return cudf::strings::detail::from_fixed_point(column, stream_, mr_); } // timestamps: // template <typename column_type> std::enable_if_t<cudf::is_timestamp<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { std::string format = [&]() { if (std::is_same<cudf::timestamp_s, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%SZ"}; } else if (std::is_same<cudf::timestamp_ms, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%3fZ"}; } else if (std::is_same<cudf::timestamp_us, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%6fZ"}; } else if (std::is_same<cudf::timestamp_ns, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%9fZ"}; } else { return std::string{"%Y-%m-%d"}; } }(); // handle the cases where delimiter / line-terminator can be // "-" or ":", in which case we need to add quotes to the format // std::string delimiter{options_.get_inter_column_delimiter()}; std::string newline{options_.get_line_terminator()}; constexpr char const* dash{"-"}; constexpr char const* colon{":"}; if (delimiter == dash || newline == dash || delimiter == colon || newline == colon) { format = "\"" + format + "\""; } return cudf::strings::detail::from_timestamps(column, format, stream_, mr_); } template <typename column_type> std::enable_if_t<cudf::is_duration<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { return cudf::io::detail::csv::pandas_format_durations(column, stream_, mr_); } // unsupported type of column: // template <typename column_type> std::enable_if_t<is_not_handled<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { CUDF_FAIL("Unsupported column type."); } private: csv_writer_options const& options_; rmm::cuda_stream_view stream_; rmm::mr::device_memory_resource* mr_; }; } // unnamed namespace // Forward to implementation writer::writer(std::unique_ptr<data_sink> sink, csv_writer_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _impl(std::make_unique<impl>(std::move(sink), options, mr)) { } // Destructor within this translation unit writer::~writer() = default; writer::impl::impl(std::unique_ptr<data_sink> sink, csv_writer_options const& options, rmm::mr::device_memory_resource* mr) : out_sink_(std::move(sink)), mr_(mr), options_(options) { } // write the header: column names: // void writer::impl::write_chunked_begin(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { if ((metadata != nullptr) && (options_.is_enabled_include_header())) { CUDF_EXPECTS(metadata->column_names.size() == static_cast<size_t>(table.num_columns()), "Mismatch between number of column headers and table columns."); std::string delimiter_str{options_.get_inter_column_delimiter()}; // avoid delimiter after last element: // std::stringstream ss; std::copy(metadata->column_names.begin(), metadata->column_names.end() - 1, std::ostream_iterator<std::string>(ss, delimiter_str.c_str())); ss << metadata->column_names.back() << options_.get_line_terminator(); out_sink_->host_write(ss.str().data(), ss.str().size()); } } void writer::impl::write_chunked(strings_column_view const& str_column_view, const table_metadata* metadata, rmm::cuda_stream_view stream) { // algorithm outline: // // for_each(strings_column.begin(), strings_column.end(), // [sink = out_sink_](auto str_row) mutable { // auto host_buffer = str_row.host_buffer(); // 
sink->host_write(host_buffer_.data(), host_buffer_.size()); // });//or...sink->device_write(device_buffer,...); // // added line_terminator functionality // CUDF_EXPECTS(str_column_view.size() > 0, "Unexpected empty strings column."); cudf::string_scalar newline{options_.get_line_terminator()}; auto p_str_col_w_nl = cudf::strings::detail::join_strings(str_column_view, newline, string_scalar("", false), stream); strings_column_view strings_column{p_str_col_w_nl->view()}; auto total_num_bytes = strings_column.chars_size(); char const* ptr_all_bytes = strings_column.chars().data<char>(); if (out_sink_->is_device_write_preferred(total_num_bytes)) { // Direct write from device memory out_sink_->device_write(ptr_all_bytes, total_num_bytes, stream); } else { // copy the bytes to host to write them out thrust::host_vector<char> h_bytes(total_num_bytes); CUDA_TRY(cudaMemcpyAsync(h_bytes.data(), ptr_all_bytes, total_num_bytes * sizeof(char), cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); out_sink_->host_write(h_bytes.data(), total_num_bytes); } // Needs newline at the end, to separate from next chunk if (out_sink_->is_device_write_preferred(newline.size())) { out_sink_->device_write(newline.data(), newline.size(), stream); } else { out_sink_->host_write(options_.get_line_terminator().data(), options_.get_line_terminator().size()); } } void writer::impl::write(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { CUDF_EXPECTS(table.num_columns() > 0, "Empty table."); // write header: column names separated by delimiter: // (even for tables with no rows) // write_chunked_begin(table, metadata, stream); if (table.num_rows() > 0) { // no need to check same-size columns constraint; auto-enforced by table_view auto n_rows_per_chunk = options_.get_rows_per_chunk(); // // This outputs the CSV in row chunks to save memory. // Maybe we can use the total_rows*count calculation and a memory threshold // instead of an arbitrary chunk count. // The entire CSV chunk must fit in CPU memory before writing it out. 
// if (n_rows_per_chunk % 8) // must be divisible by 8 n_rows_per_chunk += 8 - (n_rows_per_chunk % 8); CUDF_EXPECTS(n_rows_per_chunk >= 8, "write_csv: invalid chunk_rows; must be at least 8"); auto num_rows = table.num_rows(); std::vector<table_view> vector_views; if (num_rows <= n_rows_per_chunk) { vector_views.push_back(table); } else { auto const n_chunks = num_rows / n_rows_per_chunk; std::vector<size_type> splits(n_chunks); thrust::tabulate(splits.begin(), splits.end(), [n_rows_per_chunk](auto idx) { return (idx + 1) * n_rows_per_chunk; }); // split table_view into chunks: vector_views = cudf::split(table, splits); } // convert each chunk to CSV: // column_to_strings_fn converter{options_, stream, rmm::mr::get_current_device_resource()}; for (auto&& sub_view : vector_views) { // Skip if the table has no rows if (sub_view.num_rows() == 0) continue; std::vector<std::unique_ptr<column>> str_column_vec; // populate vector of string-converted columns: // std::transform(sub_view.begin(), sub_view.end(), std::back_inserter(str_column_vec), [converter](auto const& current_col) { return cudf::type_dispatcher(current_col.type(), converter, current_col); }); // create string table view from str_column_vec: // auto str_table_ptr = std::make_unique<cudf::table>(std::move(str_column_vec)); auto str_table_view = str_table_ptr->view(); // concatenate columns in each row into one big string column // (using null representation and delimiter): // std::string delimiter_str{options_.get_inter_column_delimiter()}; auto str_concat_col = [&] { if (str_table_view.num_columns() > 1) return cudf::strings::detail::concatenate(str_table_view, delimiter_str, options_.get_na_rep(), strings::separator_on_nulls::YES, stream); cudf::string_scalar narep{options_.get_na_rep()}; return cudf::strings::detail::replace_nulls(str_table_view.column(0), narep, stream); }(); write_chunked(str_concat_col->view(), metadata, stream); } } // finalize (no-op, for now, but offers a hook for future extensions): // write_chunked_end(table, metadata, stream); } void writer::write(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { _impl->write(table, metadata, stream); } } // namespace csv } // namespace detail } // namespace io } // namespace cudf
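The escape_strings_fn functor in the writer above follows the usual two-pass strings pattern: a sizing pass with d_chars == nullptr that only records per-row byte counts, then a fill pass that writes the quoted output. As a plain host-side illustration of the same quoting rule (quote the field when it contains the delimiter, a newline or a quote, and double every embedded quote), a minimal sketch follows; escape_csv_field is a hypothetical helper for illustration only, not part of cuDF.

#include <string>

// Hypothetical host-side reference of the quoting rule used by escape_strings_fn.
std::string escape_csv_field(std::string const& field, char delimiter)
{
  bool const quote_row =
    field.find_first_of(std::string("\"\n") + delimiter) != std::string::npos;
  std::string out;
  if (quote_row) out += '"';
  for (char c : field) {
    if (c == '"') out += '"';  // escape an embedded quote with a second quote
    out += c;
  }
  if (quote_row) out += '"';
  return out;
}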
5cbbe2a45e4696964dcfa79bd7eeeed104038ba6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "GPU/fft.h" #include "SERIAL/fft.h" float GPU(const char *filename, const char *output){ // MEMORY ALLOCATION t_image *image = read_ppm_image(filename); t_gray_image *gray_image = image_2_gray_image(image); t_complex_image *complex_image = gray_image_2_complex_image(gray_image); int bits; float *DEV_REAL, *DEV_IMAG; int *DEV_NBITS, *DEV_W; // SHIFT FREQUENCY shift_frequency_domain(complex_image); // NUMBER OF BITS TO REPRESENT A SINGLE ROW bits = (int)log2((float)complex_image->y); // CUDA EVENTS hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // START TIME GPU hipEventRecord(start); // MEMORY ALLOCATION ON GPU hipMalloc((void**)&DEV_REAL, sizeof(float)*(complex_image->x * complex_image->y)); hipMalloc((void**)&DEV_IMAG, sizeof(float)*(complex_image->x * complex_image->y)); hipMalloc((void**)&DEV_NBITS, sizeof(int)); hipMalloc((void**)&DEV_W, sizeof(int)); // COPY DATA TO GPU hipMemcpy(DEV_REAL, complex_image->real, sizeof(float)*(complex_image->x * complex_image->y), hipMemcpyHostToDevice); hipMemcpy(DEV_IMAG, complex_image->imag, sizeof(float)*(complex_image->x * complex_image->y), hipMemcpyHostToDevice); hipMemcpy(DEV_NBITS, &bits, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(DEV_W, &complex_image->y, sizeof(int), hipMemcpyHostToDevice); // BLOCKS dim3 blocks_row(complex_image->x, 1, 1); // THREADS PER BLOCK dim3 threads_row(complex_image->y, 1, 1); // KERNEL TO COMPUTE ALL ROWS ON GPU hipLaunchKernelGGL(( fft_by_row_GPU), dim3(blocks_row), dim3(threads_row), 0, 0, DEV_REAL,DEV_IMAG, DEV_NBITS); // NUMBER OF BITS TO REPRESENT A SINGLE COLUMN bits = (int)log2((float)complex_image->x); // BLOCKS dim3 blocks_col(1,complex_image->y, 1); // THREADS PER BLOCK dim3 threads_col(1, complex_image->x, 1); // COPY DATA TO GPU hipMemcpy(DEV_NBITS, &bits, sizeof(int), hipMemcpyHostToDevice); // KERNEL TO COMPUTE ALL COLUMNS ON GPU hipLaunchKernelGGL(( fft_by_col_GPU), dim3(blocks_col), dim3(threads_col), 0, 0, DEV_REAL,DEV_IMAG, DEV_NBITS, DEV_W); // COPY RESULT TO CPU hipMemcpy(complex_image->real, DEV_REAL,sizeof(float)*(complex_image->x * complex_image->y), hipMemcpyDeviceToHost); hipMemcpy(complex_image->imag, DEV_IMAG,sizeof(float)*(complex_image->x * complex_image->y), hipMemcpyDeviceToHost); // END TIME GPU hipEventRecord(stop); hipEventSynchronize(stop); // COMPUTE TOTAL TIME GPU float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); // OUTPUT DATA t_gray_image *spectre = image_spectre(complex_image); write_ppm_gray_image(output, spectre); // FREE MEMORY free_image(image); free_gray_image(gray_image); free_complex_image(complex_image); free_gray_image(spectre); // FREE MEMORY ON GPU hipFree(DEV_REAL); hipFree(DEV_IMAG); hipFree(DEV_NBITS); hipFree(DEV_W); return milliseconds; } float CPU(const char *filename, const char *output){ // MEMORY ALLOCATION t_image *image = read_ppm_image(filename); t_gray_image *gray_image = image_2_gray_image(image); t_complex_image *complex_image = gray_image_2_complex_image(gray_image); // SHIFT FREQUENCY shift_frequency_domain(complex_image); // CUDA EVENTS hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // START TIME SERIAL hipEventRecord(start); // SERIAL FFT, ALL ROWS AND COLUMNS fft_by_row_CPU(complex_image->real, complex_image->imag, complex_image->x, complex_image->y); fft_by_col_CPU(complex_image->real, complex_image->imag, complex_image->x, 
complex_image->y); // END TIME GPU hipEventRecord(stop); hipEventSynchronize(stop); // COMPUTE TOTAL SERIAL float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); // OUTPUT DATA t_gray_image *spectre = image_spectre(complex_image); write_ppm_gray_image(output, spectre); // FREE MEMORY free_image(image); free_gray_image(gray_image); free_complex_image(complex_image); free_gray_image(spectre); return milliseconds; } void execute(){ float cpu, gpu; gpu = GPU("INPUT/input01.ppm", "OUTPUT/gpu-input01.ppm"); cpu = CPU("INPUT/input01.ppm", "OUTPUT/cpu-input01.ppm"); printf("\nINPUT/input01.ppm-----------------\n\nSpeedup: %.3f\n", (cpu / gpu)); gpu = GPU("INPUT/input02.ppm", "OUTPUT/gpu-input02.ppm"); cpu = CPU("INPUT/input02.ppm", "OUTPUT/cpu-input02.ppm"); printf("\nINPUT/input02.ppm-----------------\n\nSpeedup: %.3f\n", (cpu / gpu)); gpu = GPU("INPUT/input03.ppm", "OUTPUT/gpu-input03.ppm"); cpu = CPU("INPUT/input03.ppm", "OUTPUT/cpu-input03.ppm"); printf("\nINPUT/input03.ppm-----------------\n\nSpeedup: %.3f\n", (cpu / gpu)); } int main(int argc, char *argv[]) { execute(); return 0; }
5cbbe2a45e4696964dcfa79bd7eeeed104038ba6.cu
#include <stdio.h> #include <stdlib.h> #include "GPU/fft.h" #include "SERIAL/fft.h" float GPU(const char *filename, const char *output){ // MEMORY ALLOCATION t_image *image = read_ppm_image(filename); t_gray_image *gray_image = image_2_gray_image(image); t_complex_image *complex_image = gray_image_2_complex_image(gray_image); int bits; float *DEV_REAL, *DEV_IMAG; int *DEV_NBITS, *DEV_W; // SHIFT FREQUENCY shift_frequency_domain(complex_image); // NUMBER OF BITS TO REPRESENT A SINGLE ROW bits = (int)log2((float)complex_image->y); // CUDA EVENTS cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // START TIME GPU cudaEventRecord(start); // MEMORY ALLOCATION ON GPU cudaMalloc((void**)&DEV_REAL, sizeof(float)*(complex_image->x * complex_image->y)); cudaMalloc((void**)&DEV_IMAG, sizeof(float)*(complex_image->x * complex_image->y)); cudaMalloc((void**)&DEV_NBITS, sizeof(int)); cudaMalloc((void**)&DEV_W, sizeof(int)); // COPY DATA TO GPU cudaMemcpy(DEV_REAL, complex_image->real, sizeof(float)*(complex_image->x * complex_image->y), cudaMemcpyHostToDevice); cudaMemcpy(DEV_IMAG, complex_image->imag, sizeof(float)*(complex_image->x * complex_image->y), cudaMemcpyHostToDevice); cudaMemcpy(DEV_NBITS, &bits, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(DEV_W, &complex_image->y, sizeof(int), cudaMemcpyHostToDevice); // BLOCKS dim3 blocks_row(complex_image->x, 1, 1); // THREADS PER BLOCK dim3 threads_row(complex_image->y, 1, 1); // KERNEL TO COMPUTE ALL ROWS ON GPU fft_by_row_GPU<<<blocks_row, threads_row>>>(DEV_REAL,DEV_IMAG, DEV_NBITS); // NUMBER OF BITS TO REPRESENT A SINGLE COLUMN bits = (int)log2((float)complex_image->x); // BLOCKS dim3 blocks_col(1,complex_image->y, 1); // THREADS PER BLOCK dim3 threads_col(1, complex_image->x, 1); // COPY DATA TO GPU cudaMemcpy(DEV_NBITS, &bits, sizeof(int), cudaMemcpyHostToDevice); // KERNEL TO COMPUTE ALL COLUMNS ON GPU fft_by_col_GPU<<<blocks_col, threads_col>>>(DEV_REAL,DEV_IMAG, DEV_NBITS, DEV_W); // COPY RESULT TO CPU cudaMemcpy(complex_image->real, DEV_REAL,sizeof(float)*(complex_image->x * complex_image->y), cudaMemcpyDeviceToHost); cudaMemcpy(complex_image->imag, DEV_IMAG,sizeof(float)*(complex_image->x * complex_image->y), cudaMemcpyDeviceToHost); // END TIME GPU cudaEventRecord(stop); cudaEventSynchronize(stop); // COMPUTE TOTAL TIME GPU float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); // OUTPUT DATA t_gray_image *spectre = image_spectre(complex_image); write_ppm_gray_image(output, spectre); // FREE MEMORY free_image(image); free_gray_image(gray_image); free_complex_image(complex_image); free_gray_image(spectre); // FREE MEMORY ON GPU cudaFree(DEV_REAL); cudaFree(DEV_IMAG); cudaFree(DEV_NBITS); cudaFree(DEV_W); return milliseconds; } float CPU(const char *filename, const char *output){ // MEMORY ALLOCATION t_image *image = read_ppm_image(filename); t_gray_image *gray_image = image_2_gray_image(image); t_complex_image *complex_image = gray_image_2_complex_image(gray_image); // SHIFT FREQUENCY shift_frequency_domain(complex_image); // CUDA EVENTS cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // START TIME SERIAL cudaEventRecord(start); // SERIAL FFT, ALL ROWS AND COLUMNS fft_by_row_CPU(complex_image->real, complex_image->imag, complex_image->x, complex_image->y); fft_by_col_CPU(complex_image->real, complex_image->imag, complex_image->x, complex_image->y); // END TIME GPU cudaEventRecord(stop); cudaEventSynchronize(stop); // COMPUTE TOTAL SERIAL float milliseconds = 0; 
cudaEventElapsedTime(&milliseconds, start, stop); // OUTPUT DATA t_gray_image *spectre = image_spectre(complex_image); write_ppm_gray_image(output, spectre); // FREE MEMORY free_image(image); free_gray_image(gray_image); free_complex_image(complex_image); free_gray_image(spectre); return milliseconds; } void execute(){ float cpu, gpu; gpu = GPU("INPUT/input01.ppm", "OUTPUT/gpu-input01.ppm"); cpu = CPU("INPUT/input01.ppm", "OUTPUT/cpu-input01.ppm"); printf("\nINPUT/input01.ppm-----------------\n\nSpeedup: %.3f\n", (cpu / gpu)); gpu = GPU("INPUT/input02.ppm", "OUTPUT/gpu-input02.ppm"); cpu = CPU("INPUT/input02.ppm", "OUTPUT/cpu-input02.ppm"); printf("\nINPUT/input02.ppm-----------------\n\nSpeedup: %.3f\n", (cpu / gpu)); gpu = GPU("INPUT/input03.ppm", "OUTPUT/gpu-input03.ppm"); cpu = CPU("INPUT/input03.ppm", "OUTPUT/cpu-input03.ppm"); printf("\nINPUT/input03.ppm-----------------\n\nSpeedup: %.3f\n", (cpu / gpu)); } int main(int argc, char *argv[]) { execute(); return 0; }
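The CPU() path in the benchmark above times host-only FFT work with GPU events; an alternative that does not involve the GPU runtime at all is a std::chrono wall-clock timer. A minimal sketch under that assumption; time_host_ms is a hypothetical helper, not part of the benchmark.

#include <chrono>

// Hypothetical helper: run a host-side callable and return the elapsed wall time
// in milliseconds, comparable to the value reported for the GPU path.
template <typename Work>
float time_host_ms(Work&& work)
{
  auto const t0 = std::chrono::steady_clock::now();
  work();  // e.g. the fft_by_row_CPU(...) / fft_by_col_CPU(...) calls above
  auto const t1 = std::chrono::steady_clock::now();
  return std::chrono::duration<float, std::milli>(t1 - t0).count();
}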
9aecfcc17552bdf41c6ccf2d0197ec689df6c600.hip
// !!! This is a file automatically generated by hipify!!!
// Code adapted from stack overflow:
// https://stackoverflow.com/a/38749995
#include <stdio.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_texture_types.h>

texture<float, hipTextureType3D, hipReadModeElementType> volumeTexIn;
surface<void, 3> volumeTexOut;

__global__ void surf_write(float *data, hipExtent volumeSize, float offset)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z * blockDim.z + threadIdx.z;
  if (x >= volumeSize.width || y >= volumeSize.height || z >= volumeSize.depth) {
    return;
  }
  float output = data[z * (volumeSize.width * volumeSize.height) + y * (volumeSize.width) + x];
  surf3Dwrite(output + offset, volumeTexOut, x * sizeof(float), y, z);
}

__global__ void tex_read(float x, float y, float z)
{
  printf("texture: x: %f, y: %f, z:%f, val: %f\n", x, y, z, tex3D(volumeTexIn, x, y, z));
}

__global__ void regular_read(int x, int y, int z, float * array)
{
  printf("regular: x: %d, y: %d, z:%d, val: %f\n", x, y, z, array[x+y*8+z*64]);
}

void runtest(float *data, hipExtent vol, float x, float y, float z)
{
  hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
  hipArray_t content;
  checkCudaErrors(hipMalloc3DArray(&content, &channelDesc, vol, hipArraySurfaceLoadStore));

  float *d_data;
  checkCudaErrors(hipMalloc(&d_data, vol.width * vol.height * vol.depth * sizeof(float)));
  checkCudaErrors(hipMemcpy(d_data, data, vol.width * vol.height * vol.depth * sizeof(float), hipMemcpyHostToDevice));

  hipLaunchKernelGGL(( regular_read), dim3(1), dim3(1), 0, 0, (int)x, (int)y, (int)z, d_data);

  // Binding the texture and surface
  checkCudaErrors(hipBindSurfaceToArray(volumeTexOut, content));
  checkCudaErrors(hipBindTextureToArray(volumeTexIn, content));

  dim3 blockSize(8, 8, 8);
  dim3 gridSize((vol.width+7)/8,(vol.height+7)/8, (vol.depth+7)/8);
  hipLaunchKernelGGL(( surf_write), dim3(gridSize), dim3(blockSize), 0, 0, d_data,vol, 0);

  volumeTexIn.filterMode = hipFilterModePoint;   // Non interpolated
  hipLaunchKernelGGL(( tex_read), dim3(1), dim3(1), 0, 0, x, y, z);
  volumeTexIn.filterMode = hipFilterModeLinear;  // Interpolated
  hipLaunchKernelGGL(( tex_read), dim3(1), dim3(1), 0, 0, x, y, z);

  hipLaunchKernelGGL(( surf_write), dim3(gridSize), dim3(blockSize), 0, 0, d_data,vol, 42);
  volumeTexIn.filterMode = hipFilterModePoint;   // Non interpolated
  hipLaunchKernelGGL(( tex_read), dim3(1), dim3(1), 0, 0, x, y, z);
  volumeTexIn.filterMode = hipFilterModeLinear;  // Interpolated
  hipLaunchKernelGGL(( tex_read), dim3(1), dim3(1), 0, 0, x, y, z);

  checkCudaErrors(hipDeviceSynchronize());
  hipFreeArray(content);
  hipFree(d_data);
  return;
}

int main()
{
  const int dim = 8;
  float *data = (float *)malloc(dim * dim * dim * sizeof(float));
  for (int z = 0; z < dim; z++)
    for (int y = 0; y < dim; y++)
      for (int x = 0; x < dim; x++) {
        data[z * dim * dim + y * dim + x] = z * 100 + y * 10 + x;
        // printf("x: %d, y: %d, z:%d, val: %f\n",
        //        x, y, z, data[z * dim * dim + y * dim + x]);
      }

  hipExtent vol = {dim, dim, dim};
  runtest(data, vol, 1.5, 1.5, 1.5);
  runtest(data, vol, 1.9, 1.9, 1.9);
  runtest(data, vol, 5, 5, 5);
  free(data);
  return 0;
}
9aecfcc17552bdf41c6ccf2d0197ec689df6c600.cu
// Code adapted from stack overflow:
// https://stackoverflow.com/a/38749995
#include <stdio.h>
#include <helper_cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cuda_texture_types.h>

texture<float, cudaTextureType3D, cudaReadModeElementType> volumeTexIn;
surface<void, 3> volumeTexOut;

__global__ void surf_write(float *data, cudaExtent volumeSize, float offset)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z * blockDim.z + threadIdx.z;
  if (x >= volumeSize.width || y >= volumeSize.height || z >= volumeSize.depth) {
    return;
  }
  float output = data[z * (volumeSize.width * volumeSize.height) + y * (volumeSize.width) + x];
  surf3Dwrite(output + offset, volumeTexOut, x * sizeof(float), y, z);
}

__global__ void tex_read(float x, float y, float z)
{
  printf("texture: x: %f, y: %f, z:%f, val: %f\n", x, y, z, tex3D(volumeTexIn, x, y, z));
}

__global__ void regular_read(int x, int y, int z, float * array)
{
  printf("regular: x: %d, y: %d, z:%d, val: %f\n", x, y, z, array[x+y*8+z*64]);
}

void runtest(float *data, cudaExtent vol, float x, float y, float z)
{
  cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
  cudaArray_t content;
  checkCudaErrors(cudaMalloc3DArray(&content, &channelDesc, vol, cudaArraySurfaceLoadStore));

  float *d_data;
  checkCudaErrors(cudaMalloc(&d_data, vol.width * vol.height * vol.depth * sizeof(float)));
  checkCudaErrors(cudaMemcpy(d_data, data, vol.width * vol.height * vol.depth * sizeof(float), cudaMemcpyHostToDevice));

  regular_read<<<1, 1>>>((int)x, (int)y, (int)z, d_data);

  // Binding the texture and surface
  checkCudaErrors(cudaBindSurfaceToArray(volumeTexOut, content));
  checkCudaErrors(cudaBindTextureToArray(volumeTexIn, content));

  dim3 blockSize(8, 8, 8);
  dim3 gridSize((vol.width+7)/8,(vol.height+7)/8, (vol.depth+7)/8);
  surf_write<<<gridSize, blockSize>>>(d_data,vol, 0);

  volumeTexIn.filterMode = cudaFilterModePoint;   // Non interpolated
  tex_read<<<1, 1>>>(x, y, z);
  volumeTexIn.filterMode = cudaFilterModeLinear;  // Interpolated
  tex_read<<<1, 1>>>(x, y, z);

  surf_write<<<gridSize, blockSize>>>(d_data,vol, 42);
  volumeTexIn.filterMode = cudaFilterModePoint;   // Non interpolated
  tex_read<<<1, 1>>>(x, y, z);
  volumeTexIn.filterMode = cudaFilterModeLinear;  // Interpolated
  tex_read<<<1, 1>>>(x, y, z);

  checkCudaErrors(cudaDeviceSynchronize());
  cudaFreeArray(content);
  cudaFree(d_data);
  return;
}

int main()
{
  const int dim = 8;
  float *data = (float *)malloc(dim * dim * dim * sizeof(float));
  for (int z = 0; z < dim; z++)
    for (int y = 0; y < dim; y++)
      for (int x = 0; x < dim; x++) {
        data[z * dim * dim + y * dim + x] = z * 100 + y * 10 + x;
        // printf("x: %d, y: %d, z:%d, val: %f\n",
        //        x, y, z, data[z * dim * dim + y * dim + x]);
      }

  cudaExtent vol = {dim, dim, dim};
  runtest(data, vol, 1.5, 1.5, 1.5);
  runtest(data, vol, 1.9, 1.9, 1.9);
  runtest(data, vol, 5, 5, 5);
  free(data);
  return 0;
}
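The texture<...> and surface<...> references used in both versions of this example are deprecated and were removed in CUDA 12; the bindless object API (cudaTextureObject_t / cudaSurfaceObject_t, with matching hipTextureObject_t / hipCreateTextureObject equivalents in HIP) replaces them. A minimal sketch of building a texture object over the same 3D array, assuming the content cudaArray_t from runtest and omitting error checks:

// Describe the resource (the 3D cudaArray allocated in runtest).
cudaResourceDesc resDesc{};
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = content;

// Describe how the texture is sampled.
cudaTextureDesc texDesc{};
texDesc.filterMode       = cudaFilterModePoint;      // cudaFilterModeLinear for interpolation
texDesc.readMode         = cudaReadModeElementType;
texDesc.normalizedCoords = 0;

cudaTextureObject_t texObj = 0;
cudaCreateTextureObject(&texObj, &resDesc, &texDesc, nullptr);

// The kernel then takes the object as an argument instead of using a global reference:
//   __global__ void tex_read(cudaTextureObject_t t, float x, float y, float z) {
//     printf("texture: %f\n", tex3D<float>(t, x, y, z));
//   }

cudaDestroyTextureObject(texObj);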
d22af9312d5f8b68271dcea7f49f7fc5a134fff0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define N 8192
#define THREAD_PER_BLOCK_SIDE_X 8
#define THREAD_PER_BLOCK_SIDE_Y 8
#define THREAD_PER_BLOCK THREAD_PER_BLOCK_SIDE_X*THREAD_PER_BLOCK_SIDE_Y
#define TYPE double
#define TYPE_S "double"

__global__ void transpose(TYPE * in, TYPE * out, int size)
{
  //int temp_side = THREAD_PER_BLOCK;
  __shared__ TYPE temp_matrix[THREAD_PER_BLOCK_SIDE_X][THREAD_PER_BLOCK_SIDE_Y];

  int col = blockIdx.x*blockDim.x + threadIdx.x;
  int row = blockIdx.y*blockDim.y + threadIdx.y;

  // copy submatrix (transposed) in shared memory
  temp_matrix[threadIdx.x][threadIdx.y] = in[row*size + col];

  __syncthreads();

  // copy submatrix in main memory
  out[col*size + row] = temp_matrix[threadIdx.x][threadIdx.y];
}

int correct(TYPE* a, TYPE* b, int side)
{
  int i;
  for(i=0; i<side*side; i++)
    if(a[i]!=b[(i%side)*side + i/side])
      return 0;
  return 1;
}

int main()
{
  TYPE * h_in, * h_out;
  TYPE * d_in, * d_out;
  int size = N*N;
  int size_in_memory = size * sizeof(TYPE);
  int i;

  // timing
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);

  //allocate memory in host and device
  h_in = (TYPE *)malloc(size_in_memory);
  h_out = (TYPE *)malloc(size_in_memory);
  hipMalloc((void**)&d_in, size_in_memory);
  hipMalloc((void**)&d_out, size_in_memory);

  //fill matrix in host
  for(i = 0; i<size; i++)
    h_in[i] = i;

  //transfer matrix from host to device
  hipMemcpy(d_in, h_in, size_in_memory, hipMemcpyHostToDevice);

  //transpose matrix in device
  dim3 grid, block;
  block.x = THREAD_PER_BLOCK_SIDE_X;
  block.y = THREAD_PER_BLOCK_SIDE_Y;
  grid.x = N / block.x;
  grid.y = N / block.y;

  hipEventRecord(start);
  hipLaunchKernelGGL(( transpose), dim3(grid), dim3(block) , 0, 0, d_in, d_out, N);
  hipEventRecord(stop);

  // transfer matrix from device to host
  hipMemcpy(h_out, d_out, size_in_memory, hipMemcpyDeviceToHost);

  // correctness test
  printf("\ncorrectness: %d \n", correct(h_in, h_out, N));

  //free memory
  free(h_in);
  free(h_out);
  hipFree(d_in);
  hipFree(d_out);

  //showing Bandwidth
  hipEventSynchronize(stop);
  float milliseconds = 0;
  hipEventElapsedTime(&milliseconds, start, stop);
  printf("\nmatrix type: %s", TYPE_S);
  printf("\nblock: %d x %d", block.y, block.x);
  printf("\nmilliseconds: %f", milliseconds);
  printf("\nBandwidth: %f GB/s \n", 2*size_in_memory/milliseconds/1e6);
  return 0;
}
d22af9312d5f8b68271dcea7f49f7fc5a134fff0.cu
#include <stdio.h>

#define N 8192
#define THREAD_PER_BLOCK_SIDE_X 8
#define THREAD_PER_BLOCK_SIDE_Y 8
#define THREAD_PER_BLOCK THREAD_PER_BLOCK_SIDE_X*THREAD_PER_BLOCK_SIDE_Y
#define TYPE double
#define TYPE_S "double"

__global__ void transpose(TYPE * in, TYPE * out, int size)
{
  //int temp_side = THREAD_PER_BLOCK;
  __shared__ TYPE temp_matrix[THREAD_PER_BLOCK_SIDE_X][THREAD_PER_BLOCK_SIDE_Y];

  int col = blockIdx.x*blockDim.x + threadIdx.x;
  int row = blockIdx.y*blockDim.y + threadIdx.y;

  // copy submatrix (transposed) in shared memory
  temp_matrix[threadIdx.x][threadIdx.y] = in[row*size + col];

  __syncthreads();

  // copy submatrix in main memory
  out[col*size + row] = temp_matrix[threadIdx.x][threadIdx.y];
}

int correct(TYPE* a, TYPE* b, int side)
{
  int i;
  for(i=0; i<side*side; i++)
    if(a[i]!=b[(i%side)*side + i/side])
      return 0;
  return 1;
}

int main()
{
  TYPE * h_in, * h_out;
  TYPE * d_in, * d_out;
  int size = N*N;
  int size_in_memory = size * sizeof(TYPE);
  int i;

  // timing
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  //allocate memory in host and device
  h_in = (TYPE *)malloc(size_in_memory);
  h_out = (TYPE *)malloc(size_in_memory);
  cudaMalloc((void**)&d_in, size_in_memory);
  cudaMalloc((void**)&d_out, size_in_memory);

  //fill matrix in host
  for(i = 0; i<size; i++)
    h_in[i] = i;

  //transfer matrix from host to device
  cudaMemcpy(d_in, h_in, size_in_memory, cudaMemcpyHostToDevice);

  //transpose matrix in device
  dim3 grid, block;
  block.x = THREAD_PER_BLOCK_SIDE_X;
  block.y = THREAD_PER_BLOCK_SIDE_Y;
  grid.x = N / block.x;
  grid.y = N / block.y;

  cudaEventRecord(start);
  transpose<<< grid, block >>>(d_in, d_out, N);
  cudaEventRecord(stop);

  // transfer matrix from device to host
  cudaMemcpy(h_out, d_out, size_in_memory, cudaMemcpyDeviceToHost);

  // correctness test
  printf("\ncorrectness: %d \n", correct(h_in, h_out, N));

  //free memory
  free(h_in);
  free(h_out);
  cudaFree(d_in);
  cudaFree(d_out);

  //showing Bandwidth
  cudaEventSynchronize(stop);
  float milliseconds = 0;
  cudaEventElapsedTime(&milliseconds, start, stop);
  printf("\nmatrix type: %s", TYPE_S);
  printf("\nblock: %d x %d", block.y, block.x);
  printf("\nmilliseconds: %f", milliseconds);
  printf("\nBandwidth: %f GB/s \n", 2*size_in_memory/milliseconds/1e6);
  return 0;
}
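In the transpose kernel above, each thread writes and then reads back its own shared-memory element, so the store to out remains strided exactly as in a naive transpose. The commonly used tiled formulation swaps the thread indices between load and store so that both global accesses are coalesced, and pads the tile by one column to avoid shared-memory bank conflicts. The sketch below keeps the same TYPE convention; transpose_tiled and TILE_DIM are hypothetical names, not part of the benchmark, and the launch would use dim3 block(TILE_DIM, TILE_DIM) and dim3 grid(N / TILE_DIM, N / TILE_DIM).

#define TILE_DIM 32

// Coalesced tiled transpose: both the load and the store walk consecutive
// global addresses; the transposition happens inside the shared-memory tile.
__global__ void transpose_tiled(const TYPE *in, TYPE *out, int size)
{
  __shared__ TYPE tile[TILE_DIM][TILE_DIM + 1];   // +1 column avoids bank conflicts

  int col = blockIdx.x * TILE_DIM + threadIdx.x;
  int row = blockIdx.y * TILE_DIM + threadIdx.y;
  if (col < size && row < size)
    tile[threadIdx.y][threadIdx.x] = in[row * size + col];

  __syncthreads();

  // write the transposed tile: swap the block coordinates, keep threadIdx.x contiguous
  col = blockIdx.y * TILE_DIM + threadIdx.x;
  row = blockIdx.x * TILE_DIM + threadIdx.y;
  if (col < size && row < size)
    out[row * size + col] = tile[threadIdx.x][threadIdx.y];
}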
ca1b81eb78871fcad5f4b2d7821cd2c4032b20ab.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cdf_max_layer_tester_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "neural_network_cuda_exception.h" #include "../cdf_max_layer.h" #include <memory> template<bool IS_MIN> __global__ void cdf_max_kernel( float * __restrict output, const float * __restrict input, int neuron_count, int entry_subsampling_size, int output_entry_count) { int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_entry_id = blockIdx.y * blockDim.y + threadIdx.y; if ((neuron_id < neuron_count) && (output_entry_id < output_entry_count)) { int input_offset = output_entry_id * neuron_count * entry_subsampling_size + neuron_id; float product = 1.0F; #pragma unroll 4 for(int i = 0; i < entry_subsampling_size; ++i) { float val = input[input_offset]; if (IS_MIN) product *= (1.0F - val); else product *= val; input_offset += neuron_count; } if (IS_MIN) product = 1.0F - product; output[output_entry_id * neuron_count + neuron_id] = product; } } namespace nnforge { namespace cuda { void cdf_max_layer_tester_cuda::enqueue_forward_propagation( hipStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, unsigned int entry_count) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_entry, entry_count, 1); if (is_min) hipLaunchKernelGGL(( cdf_max_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_buffer, *input_buffers[0], output_elem_count_per_entry, entry_subsampling_size, entry_count); else hipLaunchKernelGGL(( cdf_max_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_buffer, *input_buffers[0], output_elem_count_per_entry, entry_subsampling_size, entry_count); } void cdf_max_layer_tester_cuda::tester_configured() { std::shared_ptr<const cdf_max_layer> layer_derived = std::dynamic_pointer_cast<const cdf_max_layer>(layer_schema); entry_subsampling_size = layer_derived->entry_subsampling_size; is_min = layer_derived->is_min; } } }
ca1b81eb78871fcad5f4b2d7821cd2c4032b20ab.cu
/* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cdf_max_layer_tester_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "neural_network_cuda_exception.h" #include "../cdf_max_layer.h" #include <memory> template<bool IS_MIN> __global__ void cdf_max_kernel( float * __restrict output, const float * __restrict input, int neuron_count, int entry_subsampling_size, int output_entry_count) { int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_entry_id = blockIdx.y * blockDim.y + threadIdx.y; if ((neuron_id < neuron_count) && (output_entry_id < output_entry_count)) { int input_offset = output_entry_id * neuron_count * entry_subsampling_size + neuron_id; float product = 1.0F; #pragma unroll 4 for(int i = 0; i < entry_subsampling_size; ++i) { float val = input[input_offset]; if (IS_MIN) product *= (1.0F - val); else product *= val; input_offset += neuron_count; } if (IS_MIN) product = 1.0F - product; output[output_entry_id * neuron_count + neuron_id] = product; } } namespace nnforge { namespace cuda { void cdf_max_layer_tester_cuda::enqueue_forward_propagation( cudaStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, unsigned int entry_count) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_entry, entry_count, 1); if (is_min) cdf_max_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_buffer, *input_buffers[0], output_elem_count_per_entry, entry_subsampling_size, entry_count); else cdf_max_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_buffer, *input_buffers[0], output_elem_count_per_entry, entry_subsampling_size, entry_count); } void cdf_max_layer_tester_cuda::tester_configured() { std::shared_ptr<const cdf_max_layer> layer_derived = std::dynamic_pointer_cast<const cdf_max_layer>(layer_schema); entry_subsampling_size = layer_derived->entry_subsampling_size; is_min = layer_derived->is_min; } } }
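A scalar reference of what one thread of cdf_max_kernel computes makes the IS_MIN branch easy to verify on the host: multiply the (optionally complemented) values across the entry_subsampling_size window and complement the product again in the minimum case. The helper below is hypothetical test code, not part of nnforge.

// Hypothetical host-side reference for one (output entry, neuron) pair,
// mirroring the indexing used by cdf_max_kernel.
float cdf_max_ref(float const* input, int neuron_count, int entry_subsampling_size,
                  int output_entry_id, int neuron_id, bool is_min)
{
  int offset = output_entry_id * neuron_count * entry_subsampling_size + neuron_id;
  float product = 1.0F;
  for (int i = 0; i < entry_subsampling_size; ++i) {
    float const val = input[offset];
    product *= is_min ? (1.0F - val) : val;
    offset += neuron_count;
  }
  return is_min ? (1.0F - product) : product;
}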
8b75b56c60487cee86a5c404642cbcf92cd53445.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

extern "C" {
}

#define TB 256
#define EPS 0.1

#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))

__global__ void blend_kernel(
  float *A, float *BP, float *M, float *AP, float alpha, int c, int h, int w
)
{
  int _id = blockIdx.x * blockDim.x + threadIdx.x;
  int size = h * w;
  if (_id < c * size) {
    // _id = dc * size + id
    int id = _id % size, dc = _id / size;
    // int x = id % w, y = id / w;
    float weight = M[id] < 0.05f ? 0.f : alpha;
    AP[dc * size + id] = A[dc * size + id] * weight + BP[dc * size + id] * (1.f - weight);
  }
  return ;
}
8b75b56c60487cee86a5c404642cbcf92cd53445.cu
#include "includes.h"

extern "C" {
}

#define TB 256
#define EPS 0.1

#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))

__global__ void blend_kernel(
  float *A, float *BP, float *M, float *AP, float alpha, int c, int h, int w
)
{
  int _id = blockIdx.x * blockDim.x + threadIdx.x;
  int size = h * w;
  if (_id < c * size) {
    // _id = dc * size + id
    int id = _id % size, dc = _id / size;
    // int x = id % w, y = id / w;
    float weight = M[id] < 0.05f ? 0.f : alpha;
    AP[dc * size + id] = A[dc * size + id] * weight + BP[dc * size + id] * (1.f - weight);
  }
  return ;
}
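blend_kernel maps one thread to each of the c*h*w elements, so a host-side launch only needs a 1-D grid rounded up to the TB block size defined above. A minimal sketch follows; the device pointers d_A, d_BP, d_M, d_AP and the scalars alpha, c, h, w are assumed to be set up by the caller (the HIP version would use hipLaunchKernelGGL and hipDeviceSynchronize instead).

// Hypothetical launch of blend_kernel using the TB block size defined above.
int const total  = c * h * w;
int const blocks = (total + TB - 1) / TB;   // ceil(total / TB)
blend_kernel<<<blocks, TB>>>(d_A, d_BP, d_M, d_AP, alpha, c, h, w);
cudaDeviceSynchronize();                    // or check cudaGetLastError() after the launch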
4c15fa1930a0140b114a5b5684f35b9d2e26dde8.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "naive.h" #define BLOCKSIZE 512 namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void scan(const int n, const int _d, int * idata, int * odata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } odata[idx] = idx >= _d ? idata[idx - _d] + idata[idx] : idata[idx]; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { timer().startGpuTimer(); int dSize, dLen; int *dev_idata, *dev_odata; dLen = 1 << ilog2ceil(n); dSize = dLen * sizeof(int); dim3 blocksPerGrid((dLen + BLOCKSIZE - 1) / BLOCKSIZE); dim3 threadsPerBlocks(BLOCKSIZE); hipMalloc((void**)&dev_idata, dSize); hipMalloc((void**)&dev_odata, dSize); hipMemcpy(dev_idata, idata, dSize, hipMemcpyHostToDevice); for (int _d = 1; _d < dLen; _d <<= 1) { hipLaunchKernelGGL(( scan) , dim3(blocksPerGrid), dim3(threadsPerBlocks) , 0, 0, n, _d, dev_idata, dev_odata); std::swap(dev_idata, dev_odata); } hipMemcpy(odata + 1, dev_idata, dSize - sizeof(int), hipMemcpyDeviceToHost); odata[0] = 0; hipFree(dev_idata); hipFree(dev_odata); timer().endGpuTimer(); } } }
4c15fa1930a0140b114a5b5684f35b9d2e26dde8.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "naive.h" #define BLOCKSIZE 512 namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void scan(const int n, const int _d, int * idata, int * odata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= n) { return; } odata[idx] = idx >= _d ? idata[idx - _d] + idata[idx] : idata[idx]; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { timer().startGpuTimer(); int dSize, dLen; int *dev_idata, *dev_odata; dLen = 1 << ilog2ceil(n); dSize = dLen * sizeof(int); dim3 blocksPerGrid((dLen + BLOCKSIZE - 1) / BLOCKSIZE); dim3 threadsPerBlocks(BLOCKSIZE); cudaMalloc((void**)&dev_idata, dSize); cudaMalloc((void**)&dev_odata, dSize); cudaMemcpy(dev_idata, idata, dSize, cudaMemcpyHostToDevice); for (int _d = 1; _d < dLen; _d <<= 1) { scan <<<blocksPerGrid, threadsPerBlocks >>>(n, _d, dev_idata, dev_odata); std::swap(dev_idata, dev_odata); } cudaMemcpy(odata + 1, dev_idata, dSize - sizeof(int), cudaMemcpyDeviceToHost); odata[0] = 0; cudaFree(dev_idata); cudaFree(dev_odata); timer().endGpuTimer(); } } }
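The Hillis-Steele loop in the scan above leaves an inclusive scan in dev_idata; the host then shifts it right by one element and writes odata[0] = 0 to obtain the exclusive result. Note that the final copy writes dLen - 1 elements starting at odata[1], so the destination must be sized for the padded power-of-two length whenever n is not a power of two. A hypothetical host-side check of the exclusive-scan contract:

// Hypothetical reference check: odata should hold the exclusive prefix sum of idata.
bool check_exclusive_scan(int const* idata, int const* odata, int n)
{
  int expected = 0;
  for (int i = 0; i < n; ++i) {
    if (odata[i] != expected) return false;
    expected += idata[i];
  }
  return true;
}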
cc66cdd60840257a739de013634ede718092c381.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*********************************************** streamcluster_cuda.cu : parallelized code of streamcluster - original code from PARSEC Benchmark Suite - parallelization with CUDA API has been applied by Shawn Sang-Ha Lee - [email protected] University of Virginia Department of Electrical and Computer Engineering Department of Computer Science ***********************************************/ #include "streamcluster_header.cu" using namespace std; // AUTO-ERROR CHECK FOR ALL CUDA FUNCTIONS #define CUDA_SAFE_CALL( call) do { \ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, hipGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } while (0) #define THREADS_PER_BLOCK 512 #define MAXBLOCKS 65536 //#define CUDATIME // host memory float *work_mem_h; float *coord_h; // device memory float *work_mem_d; float *coord_d; int *center_table_d; bool *switch_membership_d; Point *p; static int iter = 0; // counter for total# of iteration //======================================= // Euclidean Distance //======================================= __device__ float d_dist(int p1, int p2, int num, int dim, float *coord_d) { float retval = 0.0; for(int i = 0; i < dim; i++){ float tmp = coord_d[(i*num)+p1] - coord_d[(i*num)+p2]; retval += tmp * tmp; } return retval; } //======================================= // Kernel - Compute Cost //======================================= __global__ void kernel_compute_cost(int num, int dim, long x, Point *p, int K, int stride, float *coord_d, float *work_mem_d, int *center_table_d, bool *switch_membership_d) { // block ID and global thread ID const int bid = blockIdx.x + gridDim.x * blockIdx.y; const int tid = blockDim.x * bid + threadIdx.x; if(tid < num) { float *lower = &work_mem_d[tid*stride]; // cost between this point and point[x]: euclidean distance multiplied by weight float x_cost = d_dist(tid, x, num, dim, coord_d) * p[tid].weight; // if computed cost is less then original (it saves), mark it as to reassign if ( x_cost < p[tid].cost ) { switch_membership_d[tid] = 1; lower[K] += x_cost - p[tid].cost; } // if computed cost is larger, save the difference else { lower[center_table_d[p[tid].assign]] += p[tid].cost - x_cost; } } } //======================================= // Allocate Device Memory //======================================= void allocDevMem(int num, int dim) { CUDA_SAFE_CALL( hipMalloc((void**) &center_table_d, num * sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc((void**) &switch_membership_d, num * sizeof(bool)) ); CUDA_SAFE_CALL( hipMalloc((void**) &p, num * sizeof(Point)) ); CUDA_SAFE_CALL( hipMalloc((void**) &coord_d, num * dim * sizeof(float)) ); } //======================================= // Allocate Host Memory //======================================= void allocHostMem(int num, int dim) { coord_h = (float*) malloc( num * dim * sizeof(float) ); } //======================================= // Free Device Memory //======================================= void freeDevMem() { CUDA_SAFE_CALL( hipFree(center_table_d) ); CUDA_SAFE_CALL( hipFree(switch_membership_d) ); CUDA_SAFE_CALL( hipFree(p) ); CUDA_SAFE_CALL( hipFree(coord_d) ); } //======================================= // Free Host Memory //======================================= void freeHostMem() { free(coord_h); } //======================================= // pgain Entry - CUDA SETUP + CUDA CALL //======================================= 
float pgain( long x, Points *points, float z, long int *numcenters, int kmax, bool *is_center, int *center_table, bool *switch_membership, bool isCoordChanged, double *serial_t, double *cpu_to_gpu_t, double *gpu_to_cpu_t, double *alloc_t, double *kernel_t, double *free_t) { #ifdef CUDATIME float tmp_t; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); #endif hipError_t error; int stride = *numcenters + 1; // size of each work_mem segment int K = *numcenters ; // number of centers int num = points->num; // number of points int dim = points->dim; // number of dimension int nThread = num; // number of threads == number of data points //========================================= // ALLOCATE HOST MEMORY + DATA PREPARATION //========================================= work_mem_h = (float*) malloc(stride * (nThread + 1) * sizeof(float) ); // Only on the first iteration if(iter == 0) { allocHostMem(num, dim); } // build center-index table int count = 0; for( int i=0; i<num; i++) { if( is_center[i] ) { center_table[i] = count++; } } // Extract 'coord' // Only if first iteration OR coord has changed if(isCoordChanged || iter == 0) { for(int i=0; i<dim; i++) { for(int j=0; j<num; j++) { coord_h[ (num*i)+j ] = points->p[j].coord[i]; } } } #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *serial_t += (double) tmp_t; hipEventRecord(start,0); #endif //======================================= // ALLOCATE GPU MEMORY //======================================= CUDA_SAFE_CALL( hipMalloc((void**) &work_mem_d, stride * (nThread + 1) * sizeof(float)) ); // Only on the first iteration if( iter == 0 ) { allocDevMem(num, dim); } #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *alloc_t += (double) tmp_t; hipEventRecord(start,0); #endif //======================================= // CPU-TO-GPU MEMORY COPY //======================================= // Only if first iteration OR coord has changed if(isCoordChanged || iter == 0) { CUDA_SAFE_CALL( hipMemcpy(coord_d, coord_h, num * dim * sizeof(float), hipMemcpyHostToDevice) ); } CUDA_SAFE_CALL( hipMemcpy(center_table_d, center_table, num * sizeof(int), hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemcpy(p, points->p, num * sizeof(Point), hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemset((void*) switch_membership_d, 0, num * sizeof(bool)) ); CUDA_SAFE_CALL( hipMemset((void*) work_mem_d, 0, stride * (nThread + 1) * sizeof(float)) ); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *cpu_to_gpu_t += (double) tmp_t; hipEventRecord(start,0); #endif //======================================= // KERNEL: CALCULATE COST //======================================= // Determine the number of thread blocks in the x- and y-dimension int num_blocks = (int) ((float) (num + THREADS_PER_BLOCK - 1) / (float) THREADS_PER_BLOCK); int num_blocks_y = (int) ((float) (num_blocks + MAXBLOCKS - 1) / (float) MAXBLOCKS); int num_blocks_x = (int) ((float) (num_blocks+num_blocks_y - 1) / (float) num_blocks_y); dim3 grid_size(num_blocks_x, num_blocks_y, 1); BENCHMARK.start_kernel(); hipLaunchKernelGGL(( kernel_compute_cost), dim3(grid_size), dim3(THREADS_PER_BLOCK), 0, 0, num, // in: # of data dim, // in: dimension of point coordinates x, // in: point to open a center at p, // in: data point array K, // in: number of centers stride, // in: size of each work_mem segment coord_d, // in: 
array of point coordinates work_mem_d, // out: cost and lower field array center_table_d, // in: center index table switch_membership_d // out: changes in membership ); BENCHMARK.end_kernel(); hipDeviceSynchronize(); // error check error = hipGetLastError(); if (error != hipSuccess) { printf("kernel error: %s\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *kernel_t += (double) tmp_t; hipEventRecord(start,0); #endif //======================================= // GPU-TO-CPU MEMORY COPY //======================================= CUDA_SAFE_CALL( hipMemcpy(work_mem_h, work_mem_d, stride * (nThread + 1) * sizeof(float), hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipMemcpy(switch_membership, switch_membership_d, num * sizeof(bool), hipMemcpyDeviceToHost) ); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *gpu_to_cpu_t += (double) tmp_t; hipEventRecord(start,0); #endif //======================================= // CPU (SERIAL) WORK //======================================= int number_of_centers_to_close = 0; float gl_cost_of_opening_x = z; float *gl_lower = &work_mem_h[stride * nThread]; // compute the number of centers to close if we are to open i for(int i=0; i < num; i++) { if( is_center[i] ) { float low = z; for( int j = 0; j < num; j++ ) { low += work_mem_h[ j*stride + center_table[i] ]; } gl_lower[center_table[i]] = low; if ( low > 0 ) { ++number_of_centers_to_close; work_mem_h[i*stride+K] -= low; } } gl_cost_of_opening_x += work_mem_h[i*stride+K]; } //if opening a center at x saves cost (i.e. cost is negative) do so; otherwise, do nothing if ( gl_cost_of_opening_x < 0 ) { for(int i = 0; i < num; i++) { bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ; if ( switch_membership[i] || close_center ) { points->p[i].cost = dist(points->p[i], points->p[x], dim) * points->p[i].weight; points->p[i].assign = x; } } for(int i = 0; i < num; i++) { if( is_center[i] && gl_lower[center_table[i]] > 0 ) { is_center[i] = false; } } if( x >= 0 && x < num) { is_center[x] = true; } *numcenters = *numcenters + 1 - number_of_centers_to_close; } else { gl_cost_of_opening_x = 0; } //======================================= // DEALLOCATE HOST MEMORY //======================================= free(work_mem_h); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *serial_t += (double) tmp_t; hipEventRecord(start,0); #endif //======================================= // DEALLOCATE GPU MEMORY //======================================= CUDA_SAFE_CALL( hipFree(work_mem_d) ); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *free_t += (double) tmp_t; #endif iter++; return -gl_cost_of_opening_x; }
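A minimal stand-alone sketch of the grid-sizing pattern used by pgain() above: the block count is split across the grid's x- and y-dimensions so that neither exceeds the MAXBLOCKS limit defined in this file, and the kernel rebuilds a flat thread id from the 2D grid. The helper name make_grid and the toy kernel below are illustrative only, not part of the original source; the arithmetic uses integer ceiling division in place of the float divisions above.

__global__ void flat_id_kernel(int num, int *out)
{
    // Same flat-id arithmetic as kernel_compute_cost above.
    const int bid = blockIdx.x + gridDim.x * blockIdx.y;
    const int tid = blockDim.x * bid + threadIdx.x;
    if (tid < num) {
        out[tid] = tid;
    }
}

static dim3 make_grid(int num, int threads_per_block, int max_blocks_x)
{
    int num_blocks   = (num + threads_per_block - 1) / threads_per_block;
    int num_blocks_y = (num_blocks + max_blocks_x - 1) / max_blocks_x;
    int num_blocks_x = (num_blocks + num_blocks_y - 1) / num_blocks_y;
    return dim3(num_blocks_x, num_blocks_y, 1);  // covers at least num_blocks blocks
}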
cc66cdd60840257a739de013634ede718092c381.cu
/*********************************************** streamcluster_cuda.cu : parallelized code of streamcluster - original code from PARSEC Benchmark Suite - parallelization with CUDA API has been applied by Shawn Sang-Ha Lee - [email protected] University of Virginia Department of Electrical and Computer Engineering Department of Computer Science ***********************************************/ #include "streamcluster_header.cu" using namespace std; // AUTO-ERROR CHECK FOR ALL CUDA FUNCTIONS #define CUDA_SAFE_CALL( call) do { \ cudaError err = call; \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, cudaGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } while (0) #define THREADS_PER_BLOCK 512 #define MAXBLOCKS 65536 //#define CUDATIME // host memory float *work_mem_h; float *coord_h; // device memory float *work_mem_d; float *coord_d; int *center_table_d; bool *switch_membership_d; Point *p; static int iter = 0; // counter for total# of iteration //======================================= // Euclidean Distance //======================================= __device__ float d_dist(int p1, int p2, int num, int dim, float *coord_d) { float retval = 0.0; for(int i = 0; i < dim; i++){ float tmp = coord_d[(i*num)+p1] - coord_d[(i*num)+p2]; retval += tmp * tmp; } return retval; } //======================================= // Kernel - Compute Cost //======================================= __global__ void kernel_compute_cost(int num, int dim, long x, Point *p, int K, int stride, float *coord_d, float *work_mem_d, int *center_table_d, bool *switch_membership_d) { // block ID and global thread ID const int bid = blockIdx.x + gridDim.x * blockIdx.y; const int tid = blockDim.x * bid + threadIdx.x; if(tid < num) { float *lower = &work_mem_d[tid*stride]; // cost between this point and point[x]: euclidean distance multiplied by weight float x_cost = d_dist(tid, x, num, dim, coord_d) * p[tid].weight; // if computed cost is less then original (it saves), mark it as to reassign if ( x_cost < p[tid].cost ) { switch_membership_d[tid] = 1; lower[K] += x_cost - p[tid].cost; } // if computed cost is larger, save the difference else { lower[center_table_d[p[tid].assign]] += p[tid].cost - x_cost; } } } //======================================= // Allocate Device Memory //======================================= void allocDevMem(int num, int dim) { CUDA_SAFE_CALL( cudaMalloc((void**) &center_table_d, num * sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc((void**) &switch_membership_d, num * sizeof(bool)) ); CUDA_SAFE_CALL( cudaMalloc((void**) &p, num * sizeof(Point)) ); CUDA_SAFE_CALL( cudaMalloc((void**) &coord_d, num * dim * sizeof(float)) ); } //======================================= // Allocate Host Memory //======================================= void allocHostMem(int num, int dim) { coord_h = (float*) malloc( num * dim * sizeof(float) ); } //======================================= // Free Device Memory //======================================= void freeDevMem() { CUDA_SAFE_CALL( cudaFree(center_table_d) ); CUDA_SAFE_CALL( cudaFree(switch_membership_d) ); CUDA_SAFE_CALL( cudaFree(p) ); CUDA_SAFE_CALL( cudaFree(coord_d) ); } //======================================= // Free Host Memory //======================================= void freeHostMem() { free(coord_h); } //======================================= // pgain Entry - CUDA SETUP + CUDA CALL //======================================= float pgain( long x, Points *points, float z, long int *numcenters, int kmax, 
bool *is_center, int *center_table, bool *switch_membership, bool isCoordChanged, double *serial_t, double *cpu_to_gpu_t, double *gpu_to_cpu_t, double *alloc_t, double *kernel_t, double *free_t) { #ifdef CUDATIME float tmp_t; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); #endif cudaError_t error; int stride = *numcenters + 1; // size of each work_mem segment int K = *numcenters ; // number of centers int num = points->num; // number of points int dim = points->dim; // number of dimension int nThread = num; // number of threads == number of data points //========================================= // ALLOCATE HOST MEMORY + DATA PREPARATION //========================================= work_mem_h = (float*) malloc(stride * (nThread + 1) * sizeof(float) ); // Only on the first iteration if(iter == 0) { allocHostMem(num, dim); } // build center-index table int count = 0; for( int i=0; i<num; i++) { if( is_center[i] ) { center_table[i] = count++; } } // Extract 'coord' // Only if first iteration OR coord has changed if(isCoordChanged || iter == 0) { for(int i=0; i<dim; i++) { for(int j=0; j<num; j++) { coord_h[ (num*i)+j ] = points->p[j].coord[i]; } } } #ifdef CUDATIME cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&tmp_t, start, stop); *serial_t += (double) tmp_t; cudaEventRecord(start,0); #endif //======================================= // ALLOCATE GPU MEMORY //======================================= CUDA_SAFE_CALL( cudaMalloc((void**) &work_mem_d, stride * (nThread + 1) * sizeof(float)) ); // Only on the first iteration if( iter == 0 ) { allocDevMem(num, dim); } #ifdef CUDATIME cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&tmp_t, start, stop); *alloc_t += (double) tmp_t; cudaEventRecord(start,0); #endif //======================================= // CPU-TO-GPU MEMORY COPY //======================================= // Only if first iteration OR coord has changed if(isCoordChanged || iter == 0) { CUDA_SAFE_CALL( cudaMemcpy(coord_d, coord_h, num * dim * sizeof(float), cudaMemcpyHostToDevice) ); } CUDA_SAFE_CALL( cudaMemcpy(center_table_d, center_table, num * sizeof(int), cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL( cudaMemcpy(p, points->p, num * sizeof(Point), cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL( cudaMemset((void*) switch_membership_d, 0, num * sizeof(bool)) ); CUDA_SAFE_CALL( cudaMemset((void*) work_mem_d, 0, stride * (nThread + 1) * sizeof(float)) ); #ifdef CUDATIME cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&tmp_t, start, stop); *cpu_to_gpu_t += (double) tmp_t; cudaEventRecord(start,0); #endif //======================================= // KERNEL: CALCULATE COST //======================================= // Determine the number of thread blocks in the x- and y-dimension int num_blocks = (int) ((float) (num + THREADS_PER_BLOCK - 1) / (float) THREADS_PER_BLOCK); int num_blocks_y = (int) ((float) (num_blocks + MAXBLOCKS - 1) / (float) MAXBLOCKS); int num_blocks_x = (int) ((float) (num_blocks+num_blocks_y - 1) / (float) num_blocks_y); dim3 grid_size(num_blocks_x, num_blocks_y, 1); BENCHMARK.start_kernel(); kernel_compute_cost<<<grid_size, THREADS_PER_BLOCK>>>( num, // in: # of data dim, // in: dimension of point coordinates x, // in: point to open a center at p, // in: data point array K, // in: number of centers stride, // in: size of each work_mem segment coord_d, // in: array of point coordinates work_mem_d, // out: cost and lower field array center_table_d, 
// in: center index table switch_membership_d // out: changes in membership ); BENCHMARK.end_kernel(); cudaThreadSynchronize(); // error check error = cudaGetLastError(); if (error != cudaSuccess) { printf("kernel error: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } #ifdef CUDATIME cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&tmp_t, start, stop); *kernel_t += (double) tmp_t; cudaEventRecord(start,0); #endif //======================================= // GPU-TO-CPU MEMORY COPY //======================================= CUDA_SAFE_CALL( cudaMemcpy(work_mem_h, work_mem_d, stride * (nThread + 1) * sizeof(float), cudaMemcpyDeviceToHost) ); CUDA_SAFE_CALL( cudaMemcpy(switch_membership, switch_membership_d, num * sizeof(bool), cudaMemcpyDeviceToHost) ); #ifdef CUDATIME cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&tmp_t, start, stop); *gpu_to_cpu_t += (double) tmp_t; cudaEventRecord(start,0); #endif //======================================= // CPU (SERIAL) WORK //======================================= int number_of_centers_to_close = 0; float gl_cost_of_opening_x = z; float *gl_lower = &work_mem_h[stride * nThread]; // compute the number of centers to close if we are to open i for(int i=0; i < num; i++) { if( is_center[i] ) { float low = z; for( int j = 0; j < num; j++ ) { low += work_mem_h[ j*stride + center_table[i] ]; } gl_lower[center_table[i]] = low; if ( low > 0 ) { ++number_of_centers_to_close; work_mem_h[i*stride+K] -= low; } } gl_cost_of_opening_x += work_mem_h[i*stride+K]; } //if opening a center at x saves cost (i.e. cost is negative) do so; otherwise, do nothing if ( gl_cost_of_opening_x < 0 ) { for(int i = 0; i < num; i++) { bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ; if ( switch_membership[i] || close_center ) { points->p[i].cost = dist(points->p[i], points->p[x], dim) * points->p[i].weight; points->p[i].assign = x; } } for(int i = 0; i < num; i++) { if( is_center[i] && gl_lower[center_table[i]] > 0 ) { is_center[i] = false; } } if( x >= 0 && x < num) { is_center[x] = true; } *numcenters = *numcenters + 1 - number_of_centers_to_close; } else { gl_cost_of_opening_x = 0; } //======================================= // DEALLOCATE HOST MEMORY //======================================= free(work_mem_h); #ifdef CUDATIME cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&tmp_t, start, stop); *serial_t += (double) tmp_t; cudaEventRecord(start,0); #endif //======================================= // DEALLOCATE GPU MEMORY //======================================= CUDA_SAFE_CALL( cudaFree(work_mem_d) ); #ifdef CUDATIME cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&tmp_t, start, stop); *free_t += (double) tmp_t; #endif iter++; return -gl_cost_of_opening_x; }
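For reference, a minimal sketch of the two mechanical changes hipify applied when producing the .hip version of this pair: the triple-chevron launch becomes hipLaunchKernelGGL (whose extra 0, 0 arguments are the dynamic shared-memory size and the stream that <<<...>>> left at their defaults), and the deprecated cudaThreadSynchronize() becomes hipDeviceSynchronize(). The toy kernel and buffer names below are illustrative, not taken from the original files.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(float *v, float s, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= s;
}

int main()
{
    const int n = 1024;
    float *d = nullptr;
    hipMalloc(&d, n * sizeof(float));
    hipMemset(d, 0, n * sizeof(float));
    // CUDA form: scale<<<(n + 255) / 256, 256>>>(d, 2.0f, n);
    hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, d, 2.0f, n);
    hipDeviceSynchronize();  // replaces the deprecated cudaThreadSynchronize()
    printf("launch status: %s\n", hipGetErrorString(hipGetLastError()));
    hipFree(d);
    return 0;
}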
462eae1b687577bd19802f677004ea3db1a95d9b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pointwise_scores.cuh" #include "split_properties_helpers.cuh" #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <cmath> #include <exception> #include <cfloat> namespace NKernel { class TSolarScoreCalcer { public: __host__ __device__ TSolarScoreCalcer(float) { } __forceinline__ __device__ void NextFeature(TCBinFeature) { Score = 0; } __forceinline__ __device__ void AddLeaf(double sum, double weight) { Score += (weight > 1e-20f ? (-sum * sum) * (1 + 2 * log(weight + 1.0)) / weight : 0); } __forceinline__ __device__ double GetScore() { return Score; } private: float Lambda = 0; float Score = 0; }; class TL2ScoreCalcer { public: __host__ __device__ TL2ScoreCalcer(float) { } __forceinline__ __device__ void NextFeature(TCBinFeature) { Score = 0; } __forceinline__ __device__ void AddLeaf(double sum, double weight) { Score += (weight > 1e-20f ? (-sum * sum) / weight : 0); } __forceinline__ __device__ double GetScore() { return Score; } private: float Score = 0; }; class TLOOL2ScoreCalcer { public: __host__ __device__ TLOOL2ScoreCalcer(float) { } __forceinline__ __device__ void NextFeature(TCBinFeature) { Score = 0; } __forceinline__ __device__ void AddLeaf(double sum, double weight) { float adjust = weight > 1 ? weight / (weight - 1) : 0; adjust = adjust * adjust; Score += (weight > 0 ? adjust * (-sum * sum) / weight : 0); } __forceinline__ __device__ double GetScore() { return Score; } private: float Score = 0; }; class TSatL2ScoreCalcer { public: __host__ __device__ TSatL2ScoreCalcer(float) { } __forceinline__ __device__ void NextFeature(TCBinFeature) { Score = 0; } __forceinline__ __device__ void AddLeaf(double sum, double weight) { float adjust = weight > 2 ? weight * (weight - 2)/(weight * weight - 3 * weight + 1) : 0; Score += (weight > 0 ? adjust * ((-sum * sum) / weight) : 0); } __forceinline__ __device__ double GetScore() { return Score; } private: float Score = 0; }; class TCorrelationScoreCalcer { public: __host__ __device__ TCorrelationScoreCalcer(float lambda, bool normalize, float scoreStdDev, ui64 globalSeed ) : Lambda(lambda) , Normalize(normalize) , ScoreStdDev(scoreStdDev) , GlobalSeed(globalSeed) { } __forceinline__ __device__ void NextFeature(TCBinFeature bf) { FeatureId = bf.FeatureId; Score = 0; DenumSqr = 1e-20f; } __forceinline__ __device__ void AddLeaf(double sum, double weight) { double lambda = Normalize ? Lambda * weight : Lambda; const float mu = weight > 0 ? (sum / (weight + lambda)) : 0.0f; Score += sum * mu; DenumSqr += weight * mu * mu; } __forceinline__ __device__ float GetScore() { float score = DenumSqr > 1e-15f ? 
-Score / sqrt(DenumSqr) : FLT_MAX; if (ScoreStdDev) { ui64 seed = GlobalSeed + FeatureId; AdvanceSeed(&seed, 4); score += NextNormal(&seed) * ScoreStdDev; } return score; } private: float Lambda; bool Normalize; float ScoreStdDev; ui64 GlobalSeed; int FeatureId = 0; float Score = 0; float DenumSqr = 0; }; template <int BLOCK_SIZE> __global__ void FindOptimalSplitSolarImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, int foldCount, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPartOffsetsHelper helper(foldCount); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } const float* current = binSums + 2 * (i + tid); float score = 0; for (int leaf = 0; leaf < pCount; leaf++) { float leftTotalWeight = 0; float rightTotalWeight = 0; float leftScore = 0; float rightScore = 0; #pragma unroll 4 for (int fold = 0; fold < foldCount; fold += 2) { TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold)); TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1)); float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2]; float weightEstimateRight = partLearn.Weight - weightEstimateLeft; float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1]; float sumEstimateRight = partLearn.Sum - sumEstimateLeft; float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2]; float weightTestRight = partTest.Weight - weightTestLeft; float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1]; float sumTestRight = partTest.Sum - sumTestLeft; { const float mu = weightEstimateLeft > 0.0f ? (sumEstimateLeft / (weightEstimateLeft + 1e-15f)) : 0; leftScore += -2 * mu * sumTestLeft + weightTestLeft * mu * mu; leftTotalWeight += weightTestLeft; } { const float mu = weightEstimateRight > 0.0f ? (sumEstimateRight / (weightEstimateRight + 1e-15f)) : 0; rightTotalWeight += weightTestRight; rightScore += -2 * mu * sumTestRight + weightTestRight * mu * mu; } } score += leftTotalWeight > 2 ? leftScore * (1 + 2 * log(leftTotalWeight + 1)) : 0; score += rightTotalWeight > 2 ? 
rightScore * (1 + 2 * log(rightTotalWeight + 1)) : 0; } if (score < bestScore) { bestScore = score; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if ( scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { result->FeatureId = bf[indices[0]].FeatureId; result->BinId = bf[indices[0]].BinId; result->Score = scores[0]; } } class TDirectHistLoader { public: __forceinline__ __device__ TDirectHistLoader(const float* binSums, TPartOffsetsHelper& helper, int binFeatureId, int /* leaf count*/, int binFeatureCount) : BinSums(binSums + 2 * binFeatureId) , Helper(helper) , BinFeatureCount(binFeatureCount) { } __forceinline__ __device__ float LoadWeight(int leaf) { return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2]; } __forceinline__ __device__ float LoadSum(int leaf) { return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2 + 1]; } private: const float* BinSums; TPartOffsetsHelper& Helper; int BinFeatureCount; }; class TGatheredByLeavesHistLoader { public: __forceinline__ __device__ TGatheredByLeavesHistLoader(const float* binSums, TPartOffsetsHelper&, int binFeatureId, int leafCount, int /*binFeatureCount*/) : BinSums(binSums) , LeafCount(leafCount) , FeatureId(binFeatureId) { } __forceinline__ __device__ int GetOffset(int leaf) { return 2 * (FeatureId * LeafCount + leaf); } __forceinline__ __device__ float LoadWeight(int leaf) { return BinSums[GetOffset(leaf)]; } __forceinline__ __device__ float LoadSum(int leaf) { return BinSums[GetOffset(leaf) + 1]; } private: const float* BinSums; int LeafCount; int FeatureId; }; template <int BLOCK_SIZE, class THistLoader, class TScoreCalcer> __global__ void FindOptimalSplitSingleFoldImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPartOffsetsHelper helper(1); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } calcer.NextFeature(bf[i + tid]); THistLoader histLoader(binSums, helper, i + tid, pCount, binFeatureCount); for (int leaf = 0; leaf < pCount; leaf++) { TPartitionStatistics part = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, 0)); float weightLeft = histLoader.LoadWeight(leaf); float weightRight = max(part.Weight - weightLeft, 0.0f); float sumLeft = histLoader.LoadSum(leaf); float sumRight = static_cast<float>(part.Sum - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); } const float score = calcer.GetScore(); if (score < bestScore) { bestScore = score; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if ( scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { result->FeatureId = bf[indices[0]].FeatureId; 
result->BinId = bf[indices[0]].BinId; result->Score = scores[0]; } } template <int BLOCK_SIZE> __global__ void FindOptimalSplitCorrelationImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, int foldCount, double l2, bool normalize, double scoreStdDev, ui64 globalSeed, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPartOffsetsHelper helper(foldCount); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } float score = 0; float denumSqr = 1e-20f; const float* current = binSums + 2 * (i + tid); for (int leaf = 0; leaf < pCount; leaf++) { #pragma unroll 4 for (int fold = 0; fold < foldCount; fold += 2) { TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold)); TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1)); float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2]; float weightEstimateRight = max(partLearn.Weight - weightEstimateLeft, 0.0f); float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1]; float sumEstimateRight = partLearn.Sum - sumEstimateLeft; float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2]; float weightTestRight = max(partTest.Weight - weightTestLeft, 0.0f); float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1]; float sumTestRight = partTest.Sum - sumTestLeft; { double lambda = normalize ? l2 * weightEstimateLeft : l2; const float mu = weightEstimateLeft > 0 ? (sumEstimateLeft / (weightEstimateLeft + lambda)) : 0; score += sumTestLeft * mu; denumSqr += weightTestLeft * mu * mu; } { double lambda = normalize ? l2 * weightEstimateRight : l2; const float mu = weightEstimateRight > 0 ? (sumEstimateRight / (weightEstimateRight + lambda)) : 0; score += sumTestRight * mu; denumSqr += weightTestRight * mu * mu; } } } score = denumSqr > 1e-15f ? 
-score / sqrt(denumSqr) : FLT_MAX; float tmp = score; if (scoreStdDev) { ui64 seed = globalSeed + bf[i + tid].FeatureId; AdvanceSeed(&seed, 4); tmp += NextNormal(&seed) * scoreStdDev; } if (tmp < bestScore) { bestScore = tmp; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { result->FeatureId = bf[indices[0]].FeatureId; result->BinId = bf[indices[0]].BinId; result->Score = scores[0]; } } void FindOptimalSplitDynamic(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; switch (scoreFunction) { case EScoreFunction::SolarL2: { FindOptimalSplitSolarImpl<blockSize> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result); break; } case EScoreFunction::Correlation: case EScoreFunction::NewtonCorrelation: { FindOptimalSplitCorrelationImpl<blockSize> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, l2, normalize, scoreStdDev, seed, result); break; } default: { throw std::exception(); } } } template <class TLoader> void FindOptimalSplitPlain(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; #define RUN() \ FindOptimalSplitSingleFoldImpl<blockSize, TLoader, TScoreCalcer> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Correlation: case EScoreFunction::NewtonCorrelation: { using TScoreCalcer = TCorrelationScoreCalcer; TCorrelationScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } void FindOptimalSplit(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, bool gatheredByLeaves, TCudaStream stream) { if (binaryFeatureCount > 0) { if (foldCount == 1) { if 
(gatheredByLeaves) { using THistLoader = TGatheredByLeavesHistLoader; FindOptimalSplitPlain<THistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } else { using THistLoader = TDirectHistLoader; FindOptimalSplitPlain<THistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } } else { FindOptimalSplitDynamic(binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } } } template <int BLOCK_SIZE, int HIST_COUNT> __global__ void GatherHistogramsByLeavesImpl(const int binFeatureCount, const float* histogram, const int histCount, const int leafCount, const int foldCount, float* result) { const int featuresPerBlock = BLOCK_SIZE / leafCount; const int featureId = blockIdx.x * featuresPerBlock + threadIdx.x / leafCount; const int leafId = threadIdx.x & (leafCount - 1); const int foldId = blockIdx.y; TPartOffsetsHelper helper(gridDim.y); if (featureId < binFeatureCount) { float leafVals[HIST_COUNT]; #pragma unroll for (int histId = 0; histId < HIST_COUNT; ++histId) { leafVals[histId] = LdgWithFallback(histogram, (featureId + (size_t)binFeatureCount * helper.GetHistogramOffset(leafId, foldId)) * HIST_COUNT + histId); } #pragma unroll for (int histId = 0; histId < HIST_COUNT; ++histId) { const ui64 idx = ((size_t)featureId * leafCount * foldCount + leafId * foldCount + foldId) * HIST_COUNT + histId; result[idx] = leafVals[histId]; } } } bool GatherHistogramByLeaves(const float* histogram, const ui32 binFeatureCount, const ui32 histCount, const ui32 leafCount, const ui32 foldCount, float* result, TCudaStream stream ) { const int blockSize = 1024; dim3 numBlocks; numBlocks.x = (binFeatureCount + (blockSize / leafCount) - 1) / (blockSize / leafCount); numBlocks.y = foldCount; numBlocks.z = 1; switch (histCount) { case 1: { hipLaunchKernelGGL(( GatherHistogramsByLeavesImpl<blockSize, 1>) , dim3(numBlocks), dim3(blockSize), 0, stream, binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } case 2: { hipLaunchKernelGGL(( GatherHistogramsByLeavesImpl<blockSize, 2>) , dim3(numBlocks), dim3(blockSize), 0, stream, binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } case 4: { hipLaunchKernelGGL(( GatherHistogramsByLeavesImpl<blockSize, 4>) , dim3(numBlocks), dim3(blockSize), 0, stream, binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } default: { return false; } } } template <int BLOCK_SIZE> __global__ void PartitionUpdateImpl(const float* target, const float* weights, const float* counts, const struct TDataPartition* parts, struct TPartitionStatistics* partStats) { const int tid = threadIdx.x; parts += blockIdx.x; partStats += blockIdx.x; const int size = parts->Size; __shared__ volatile double localBuffer[BLOCK_SIZE]; double tmp = 0; if (weights != 0) { localBuffer[tid] = ComputeSum<BLOCK_SIZE>(weights + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } if (tid == 0) { partStats->Weight = tmp; } tmp = 0; __syncthreads(); if (target != 0) { localBuffer[tid] = ComputeSum<BLOCK_SIZE>(target + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } if (tid == 0) { partStats->Sum = tmp; } tmp = 0; __syncthreads(); if (counts != 0) { localBuffer[tid] = 
ComputeSum<BLOCK_SIZE>(counts + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } else { tmp = size; } if (tid == 0) { partStats->Count = tmp; } } void UpdatePartitionProps(const float* target, const float* weights, const float* counts, const struct TDataPartition* parts, struct TPartitionStatistics* partStats, int partsCount, TCudaStream stream ) { const int blockSize = 1024; if (partsCount) { PartitionUpdateImpl<blockSize> << < partsCount, blockSize, 0, stream >> > (target, weights, counts, parts, partStats); } } }
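The FindOptimalSplitSolarImpl, FindOptimalSplitSingleFoldImpl and FindOptimalSplitCorrelationImpl kernels above all end with the same shared-memory reduction: keep the smallest score and, on ties, the smallest bin-feature index. A stand-alone sketch of just that step follows; the kernel name and output pointers are illustrative, and the originals write a TBestSplitProperties per block instead.

#include <cfloat>

template <int BLOCK_SIZE>
__global__ void block_best_score(const float *candidateScores, int n,
                                 float *bestScoreOut, int *bestIndexOut)
{
    const int tid = threadIdx.x;
    float bestScore = FLT_MAX;
    int bestIndex = 0;
    for (int i = tid; i < n; i += BLOCK_SIZE) {
        if (candidateScores[i] < bestScore) {
            bestScore = candidateScores[i];
            bestIndex = i;
        }
    }
    __shared__ float scores[BLOCK_SIZE];
    __shared__ int indices[BLOCK_SIZE];
    scores[tid] = bestScore;
    indices[tid] = bestIndex;
    __syncthreads();
    // Tree reduction over a power-of-two block, preferring the lower index on ties.
    for (unsigned int s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
        if (tid < s) {
            if (scores[tid] > scores[tid + s] ||
                (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s])) {
                scores[tid] = scores[tid + s];
                indices[tid] = indices[tid + s];
            }
        }
        __syncthreads();
    }
    if (tid == 0) {
        *bestScoreOut = scores[0];
        *bestIndexOut = indices[0];
    }
}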
462eae1b687577bd19802f677004ea3db1a95d9b.cu
#include "pointwise_scores.cuh" #include "split_properties_helpers.cuh" #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <cmath> #include <exception> #include <cfloat> namespace NKernel { class TSolarScoreCalcer { public: __host__ __device__ TSolarScoreCalcer(float) { } __forceinline__ __device__ void NextFeature(TCBinFeature) { Score = 0; } __forceinline__ __device__ void AddLeaf(double sum, double weight) { Score += (weight > 1e-20f ? (-sum * sum) * (1 + 2 * log(weight + 1.0)) / weight : 0); } __forceinline__ __device__ double GetScore() { return Score; } private: float Lambda = 0; float Score = 0; }; class TL2ScoreCalcer { public: __host__ __device__ TL2ScoreCalcer(float) { } __forceinline__ __device__ void NextFeature(TCBinFeature) { Score = 0; } __forceinline__ __device__ void AddLeaf(double sum, double weight) { Score += (weight > 1e-20f ? (-sum * sum) / weight : 0); } __forceinline__ __device__ double GetScore() { return Score; } private: float Score = 0; }; class TLOOL2ScoreCalcer { public: __host__ __device__ TLOOL2ScoreCalcer(float) { } __forceinline__ __device__ void NextFeature(TCBinFeature) { Score = 0; } __forceinline__ __device__ void AddLeaf(double sum, double weight) { float adjust = weight > 1 ? weight / (weight - 1) : 0; adjust = adjust * adjust; Score += (weight > 0 ? adjust * (-sum * sum) / weight : 0); } __forceinline__ __device__ double GetScore() { return Score; } private: float Score = 0; }; class TSatL2ScoreCalcer { public: __host__ __device__ TSatL2ScoreCalcer(float) { } __forceinline__ __device__ void NextFeature(TCBinFeature) { Score = 0; } __forceinline__ __device__ void AddLeaf(double sum, double weight) { float adjust = weight > 2 ? weight * (weight - 2)/(weight * weight - 3 * weight + 1) : 0; Score += (weight > 0 ? adjust * ((-sum * sum) / weight) : 0); } __forceinline__ __device__ double GetScore() { return Score; } private: float Score = 0; }; class TCorrelationScoreCalcer { public: __host__ __device__ TCorrelationScoreCalcer(float lambda, bool normalize, float scoreStdDev, ui64 globalSeed ) : Lambda(lambda) , Normalize(normalize) , ScoreStdDev(scoreStdDev) , GlobalSeed(globalSeed) { } __forceinline__ __device__ void NextFeature(TCBinFeature bf) { FeatureId = bf.FeatureId; Score = 0; DenumSqr = 1e-20f; } __forceinline__ __device__ void AddLeaf(double sum, double weight) { double lambda = Normalize ? Lambda * weight : Lambda; const float mu = weight > 0 ? (sum / (weight + lambda)) : 0.0f; Score += sum * mu; DenumSqr += weight * mu * mu; } __forceinline__ __device__ float GetScore() { float score = DenumSqr > 1e-15f ? 
-Score / sqrt(DenumSqr) : FLT_MAX; if (ScoreStdDev) { ui64 seed = GlobalSeed + FeatureId; AdvanceSeed(&seed, 4); score += NextNormal(&seed) * ScoreStdDev; } return score; } private: float Lambda; bool Normalize; float ScoreStdDev; ui64 GlobalSeed; int FeatureId = 0; float Score = 0; float DenumSqr = 0; }; template <int BLOCK_SIZE> __global__ void FindOptimalSplitSolarImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, int foldCount, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPartOffsetsHelper helper(foldCount); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } const float* current = binSums + 2 * (i + tid); float score = 0; for (int leaf = 0; leaf < pCount; leaf++) { float leftTotalWeight = 0; float rightTotalWeight = 0; float leftScore = 0; float rightScore = 0; #pragma unroll 4 for (int fold = 0; fold < foldCount; fold += 2) { TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold)); TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1)); float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2]; float weightEstimateRight = partLearn.Weight - weightEstimateLeft; float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1]; float sumEstimateRight = partLearn.Sum - sumEstimateLeft; float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2]; float weightTestRight = partTest.Weight - weightTestLeft; float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1]; float sumTestRight = partTest.Sum - sumTestLeft; { const float mu = weightEstimateLeft > 0.0f ? (sumEstimateLeft / (weightEstimateLeft + 1e-15f)) : 0; leftScore += -2 * mu * sumTestLeft + weightTestLeft * mu * mu; leftTotalWeight += weightTestLeft; } { const float mu = weightEstimateRight > 0.0f ? (sumEstimateRight / (weightEstimateRight + 1e-15f)) : 0; rightTotalWeight += weightTestRight; rightScore += -2 * mu * sumTestRight + weightTestRight * mu * mu; } } score += leftTotalWeight > 2 ? leftScore * (1 + 2 * log(leftTotalWeight + 1)) : 0; score += rightTotalWeight > 2 ? 
rightScore * (1 + 2 * log(rightTotalWeight + 1)) : 0; } if (score < bestScore) { bestScore = score; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if ( scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { result->FeatureId = bf[indices[0]].FeatureId; result->BinId = bf[indices[0]].BinId; result->Score = scores[0]; } } class TDirectHistLoader { public: __forceinline__ __device__ TDirectHistLoader(const float* binSums, TPartOffsetsHelper& helper, int binFeatureId, int /* leaf count*/, int binFeatureCount) : BinSums(binSums + 2 * binFeatureId) , Helper(helper) , BinFeatureCount(binFeatureCount) { } __forceinline__ __device__ float LoadWeight(int leaf) { return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2]; } __forceinline__ __device__ float LoadSum(int leaf) { return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2 + 1]; } private: const float* BinSums; TPartOffsetsHelper& Helper; int BinFeatureCount; }; class TGatheredByLeavesHistLoader { public: __forceinline__ __device__ TGatheredByLeavesHistLoader(const float* binSums, TPartOffsetsHelper&, int binFeatureId, int leafCount, int /*binFeatureCount*/) : BinSums(binSums) , LeafCount(leafCount) , FeatureId(binFeatureId) { } __forceinline__ __device__ int GetOffset(int leaf) { return 2 * (FeatureId * LeafCount + leaf); } __forceinline__ __device__ float LoadWeight(int leaf) { return BinSums[GetOffset(leaf)]; } __forceinline__ __device__ float LoadSum(int leaf) { return BinSums[GetOffset(leaf) + 1]; } private: const float* BinSums; int LeafCount; int FeatureId; }; template <int BLOCK_SIZE, class THistLoader, class TScoreCalcer> __global__ void FindOptimalSplitSingleFoldImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPartOffsetsHelper helper(1); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } calcer.NextFeature(bf[i + tid]); THistLoader histLoader(binSums, helper, i + tid, pCount, binFeatureCount); for (int leaf = 0; leaf < pCount; leaf++) { TPartitionStatistics part = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, 0)); float weightLeft = histLoader.LoadWeight(leaf); float weightRight = max(part.Weight - weightLeft, 0.0f); float sumLeft = histLoader.LoadSum(leaf); float sumRight = static_cast<float>(part.Sum - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); } const float score = calcer.GetScore(); if (score < bestScore) { bestScore = score; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if ( scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { result->FeatureId = bf[indices[0]].FeatureId; 
result->BinId = bf[indices[0]].BinId; result->Score = scores[0]; } } template <int BLOCK_SIZE> __global__ void FindOptimalSplitCorrelationImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, int foldCount, double l2, bool normalize, double scoreStdDev, ui64 globalSeed, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPartOffsetsHelper helper(foldCount); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } float score = 0; float denumSqr = 1e-20f; const float* current = binSums + 2 * (i + tid); for (int leaf = 0; leaf < pCount; leaf++) { #pragma unroll 4 for (int fold = 0; fold < foldCount; fold += 2) { TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold)); TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1)); float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2]; float weightEstimateRight = max(partLearn.Weight - weightEstimateLeft, 0.0f); float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1]; float sumEstimateRight = partLearn.Sum - sumEstimateLeft; float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2]; float weightTestRight = max(partTest.Weight - weightTestLeft, 0.0f); float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1]; float sumTestRight = partTest.Sum - sumTestLeft; { double lambda = normalize ? l2 * weightEstimateLeft : l2; const float mu = weightEstimateLeft > 0 ? (sumEstimateLeft / (weightEstimateLeft + lambda)) : 0; score += sumTestLeft * mu; denumSqr += weightTestLeft * mu * mu; } { double lambda = normalize ? l2 * weightEstimateRight : l2; const float mu = weightEstimateRight > 0 ? (sumEstimateRight / (weightEstimateRight + lambda)) : 0; score += sumTestRight * mu; denumSqr += weightTestRight * mu * mu; } } } score = denumSqr > 1e-15f ? 
-score / sqrt(denumSqr) : FLT_MAX; float tmp = score; if (scoreStdDev) { ui64 seed = globalSeed + bf[i + tid].FeatureId; AdvanceSeed(&seed, 4); tmp += NextNormal(&seed) * scoreStdDev; } if (tmp < bestScore) { bestScore = tmp; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { result->FeatureId = bf[indices[0]].FeatureId; result->BinId = bf[indices[0]].BinId; result->Score = scores[0]; } } void FindOptimalSplitDynamic(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; switch (scoreFunction) { case EScoreFunction::SolarL2: { FindOptimalSplitSolarImpl<blockSize> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result); break; } case EScoreFunction::Correlation: case EScoreFunction::NewtonCorrelation: { FindOptimalSplitCorrelationImpl<blockSize> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, l2, normalize, scoreStdDev, seed, result); break; } default: { throw std::exception(); } } } template <class TLoader> void FindOptimalSplitPlain(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; #define RUN() \ FindOptimalSplitSingleFoldImpl<blockSize, TLoader, TScoreCalcer> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Correlation: case EScoreFunction::NewtonCorrelation: { using TScoreCalcer = TCorrelationScoreCalcer; TCorrelationScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } void FindOptimalSplit(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, bool gatheredByLeaves, TCudaStream stream) { if (binaryFeatureCount > 0) { if (foldCount == 1) { if 
(gatheredByLeaves) { using THistLoader = TGatheredByLeavesHistLoader; FindOptimalSplitPlain<THistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } else { using THistLoader = TDirectHistLoader; FindOptimalSplitPlain<THistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } } else { FindOptimalSplitDynamic(binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } } } template <int BLOCK_SIZE, int HIST_COUNT> __global__ void GatherHistogramsByLeavesImpl(const int binFeatureCount, const float* histogram, const int histCount, const int leafCount, const int foldCount, float* result) { const int featuresPerBlock = BLOCK_SIZE / leafCount; const int featureId = blockIdx.x * featuresPerBlock + threadIdx.x / leafCount; const int leafId = threadIdx.x & (leafCount - 1); const int foldId = blockIdx.y; TPartOffsetsHelper helper(gridDim.y); if (featureId < binFeatureCount) { float leafVals[HIST_COUNT]; #pragma unroll for (int histId = 0; histId < HIST_COUNT; ++histId) { leafVals[histId] = LdgWithFallback(histogram, (featureId + (size_t)binFeatureCount * helper.GetHistogramOffset(leafId, foldId)) * HIST_COUNT + histId); } #pragma unroll for (int histId = 0; histId < HIST_COUNT; ++histId) { const ui64 idx = ((size_t)featureId * leafCount * foldCount + leafId * foldCount + foldId) * HIST_COUNT + histId; result[idx] = leafVals[histId]; } } } bool GatherHistogramByLeaves(const float* histogram, const ui32 binFeatureCount, const ui32 histCount, const ui32 leafCount, const ui32 foldCount, float* result, TCudaStream stream ) { const int blockSize = 1024; dim3 numBlocks; numBlocks.x = (binFeatureCount + (blockSize / leafCount) - 1) / (blockSize / leafCount); numBlocks.y = foldCount; numBlocks.z = 1; switch (histCount) { case 1: { GatherHistogramsByLeavesImpl<blockSize, 1> <<<numBlocks, blockSize, 0, stream>>>(binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } case 2: { GatherHistogramsByLeavesImpl<blockSize, 2> <<<numBlocks, blockSize, 0, stream>>>(binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } case 4: { GatherHistogramsByLeavesImpl<blockSize, 4> <<<numBlocks, blockSize, 0, stream>>>(binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } default: { return false; } } } template <int BLOCK_SIZE> __global__ void PartitionUpdateImpl(const float* target, const float* weights, const float* counts, const struct TDataPartition* parts, struct TPartitionStatistics* partStats) { const int tid = threadIdx.x; parts += blockIdx.x; partStats += blockIdx.x; const int size = parts->Size; __shared__ volatile double localBuffer[BLOCK_SIZE]; double tmp = 0; if (weights != 0) { localBuffer[tid] = ComputeSum<BLOCK_SIZE>(weights + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } if (tid == 0) { partStats->Weight = tmp; } tmp = 0; __syncthreads(); if (target != 0) { localBuffer[tid] = ComputeSum<BLOCK_SIZE>(target + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } if (tid == 0) { partStats->Sum = tmp; } tmp = 0; __syncthreads(); if (counts != 0) { localBuffer[tid] = ComputeSum<BLOCK_SIZE>(counts + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } 
else { tmp = size; } if (tid == 0) { partStats->Count = tmp; } } void UpdatePartitionProps(const float* target, const float* weights, const float* counts, const struct TDataPartition* parts, struct TPartitionStatistics* partStats, int partsCount, TCudaStream stream ) { const int blockSize = 1024; if (partsCount) { PartitionUpdateImpl<blockSize> << < partsCount, blockSize, 0, stream >> > (target, weights, counts, parts, partStats); } } }
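The single-fold path above treats the score function as a compile-time policy: FindOptimalSplitSingleFoldImpl only ever calls NextFeature(), AddLeaf() and GetScore(), and FindOptimalSplitPlain instantiates it once per calcer type through the RUN() macro. A minimal sketch of that contract, with hypothetical names (TToyBinFeature, TPlainL2Calcer, ScoreOneFeature) standing in for the real types:

struct TToyBinFeature {
    int FeatureId;
    int BinId;
};

class TPlainL2Calcer {  // same shape as TL2ScoreCalcer above
public:
    __host__ __device__ explicit TPlainL2Calcer(float /*lambda*/) {}
    __forceinline__ __device__ void NextFeature(TToyBinFeature) { Score = 0; }
    __forceinline__ __device__ void AddLeaf(double sum, double weight) {
        Score += (weight > 1e-20 ? (-sum * sum) / weight : 0.0);
    }
    __forceinline__ __device__ double GetScore() const { return Score; }
private:
    double Score = 0;
};

// Generic consumer: the original kernel calls AddLeaf twice per leaf,
// once for the left side of the candidate split and once for the right.
template <class TScoreCalcer>
__device__ double ScoreOneFeature(TScoreCalcer calcer, TToyBinFeature bf,
                                  const double *leftSums, const double *leftWeights,
                                  const double *rightSums, const double *rightWeights,
                                  int leafCount)
{
    calcer.NextFeature(bf);
    for (int leaf = 0; leaf < leafCount; ++leaf) {
        calcer.AddLeaf(leftSums[leaf], leftWeights[leaf]);
        calcer.AddLeaf(rightSums[leaf], rightWeights[leaf]);
    }
    return calcer.GetScore();
}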
52a929a6e4ba85098b897b4fb6a5df7489c75119.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void constant_mul_kernel(float *data_l, float constant, float *result)
{
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    int threadId = 2 * (blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x);
    result[threadId] = data_l[threadId] * constant;
    result[threadId + 1] = data_l[threadId + 1] * constant;
}
52a929a6e4ba85098b897b4fb6a5df7489c75119.cu
#include "includes.h" __global__ void constant_mul_kernel(float *data_l, float constant, float *result) { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = 2 * (blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x); result[threadId] = data_l[threadId] * constant; result[threadId + 1] = data_l[threadId + 1] * constant; }
871508357053458a710597033efdd2fd6f78798d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <float/float32.h> #include <Rinternals.h> #include "../nelder-mead/nelder_mead.hpp" #include "../Rmpi.h" #include "blas.hh" #include "cu_utils.hh" #include "mpicxx.hh" #include "nm_opts.hh" #include "restrict.h" template <typename REAL> struct lm_param_t { hipblasHandle_t handle; int m; int n; const REAL *restrict x; const int *restrict y; REAL *restrict w; // beta REAL *restrict work; REAL *restrict s; MPI_Comm comm; }; template <typename REAL> __global__ static void hinge_loss_sum(REAL *s, const int m, const int *const restrict y, const REAL *const restrict work) { int tid = threadIdx.x; int i = tid + blockIdx.x*blockDim.x; if (i >= m) return; __shared__ REAL temp[TPB]; temp[tid] = work[i] - y[i]; __syncthreads(); if (tid == 0) { REAL sum = 0.0; for (int i=0; i<TPB; i++) sum += temp[i]; sum *= sum; atomicAdd(s, sum); } } template <typename REAL> static inline REAL lm_cost(const lm_param_t<REAL> *restrict args) { const int m = args->m; const int n = args->n; const REAL *const restrict x = args->x; const int *const restrict y = args->y; const REAL *const restrict w = args->w; REAL *const restrict s = args->s; REAL *const restrict work = args->work; int check; REAL J; REAL s_cpu; int nb = get_num_blocks(m); // J_local = 1/(2*m) * sum( (x%*%w - y)^2 ) mvm(args->handle, m, n, x, w, work); hipMemset(s, 0, sizeof(*s)); hipLaunchKernelGGL(( hinge_loss_sum), dim3(nb), dim3(TPB), 0, 0, s, m, y, work); hipMemcpy(&s_cpu, s, sizeof(*s), hipMemcpyDeviceToHost); J = ((REAL) 0.5/m) * s_cpu; check = allreduce1(&J, args->comm); MPI_CHECK(args->comm, check); return J; } template <typename REAL> static inline void lm_nmwrap(int n, point_t<REAL> *point, const void *arg) { const lm_param_t<REAL> *restrict args = (const lm_param_t<REAL>*) arg; hipMemcpy(args->w, point->x, n*sizeof(REAL), hipMemcpyHostToDevice); point->fx = lm_cost(args); hipMemcpy(point->x, args->w, n*sizeof(REAL), hipMemcpyDeviceToHost); } template <typename REAL> static inline void lm(const int m, const int n, const REAL *const restrict x, const int *const restrict y, REAL *const restrict w, MPI_Comm comm, optimset_t<REAL> *const restrict optimset) { lm_param_t<REAL> args; point_t<REAL> start, solution; hipblasHandle_t handle; hipblasStatus_t st = hipblasCreate(&handle); if (st != HIPBLAS_STATUS_SUCCESS) error("hipblasCreate() failed\n"); hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST); REAL *x_gpu; int *y_gpu; REAL *w_gpu; REAL *work_gpu; REAL *s_gpu; hipMalloc(&x_gpu, m*n*sizeof(*x_gpu)); hipMalloc(&y_gpu, m*sizeof(*y_gpu)); hipMalloc(&w_gpu, n*sizeof(*w_gpu)); hipMalloc(&work_gpu, m*sizeof(*work_gpu)); hipMalloc(&s_gpu, sizeof(*s_gpu)); if (x_gpu == NULL || y_gpu == NULL || w_gpu == NULL || work_gpu == NULL || s_gpu == NULL) { CUFREE(x_gpu); CUFREE(y_gpu); CUFREE(w_gpu); CUFREE(work_gpu); CUFREE(s_gpu); error("Unable to allocate device memory"); } hipMemcpy(x_gpu, x, m*n*sizeof(*x), hipMemcpyHostToDevice); hipMemcpy(y_gpu, y, m*sizeof(*y), hipMemcpyHostToDevice); start.x = w; memset(w, 0, n*sizeof(*w)); args.handle = handle; args.m = m; args.n = n; args.x = x_gpu; args.y = y_gpu; args.w = w_gpu; args.s = s_gpu; args.work = work_gpu; args.comm = comm; nelder_mead(n, &start, &solution, &lm_nmwrap, &args, optimset); for (int i=0; i<n; i++) w[i] = solution.x[i]; hipblasDestroy(handle); hipFree(x_gpu); hipFree(y_gpu); hipFree(w_gpu); hipFree(work_gpu); free(solution.x); } extern "C" SEXP R_lm(SEXP x, SEXP y, SEXP 
maxiter, SEXP comm_) { SEXP ret, ret_names, w, niters; MPI_Comm comm = get_mpi_comm_from_Robj(comm_); const int m = nrows(x); const int n = ncols(x); PROTECT(ret = allocVector(VECSXP, 2)); PROTECT(ret_names = allocVector(STRSXP, 2)); PROTECT(niters = allocVector(INTSXP, 1)); SET_STRING_ELT(ret_names, 0, mkChar("w")); SET_STRING_ELT(ret_names, 1, mkChar("niters")); if (TYPEOF(x) == REALSXP) { PROTECT(w = allocVector(REALSXP, n)); optimset_t<double> opts; set_nm_opts(INTEGER(maxiter)[0], &opts); lm<double>(m, n, REAL(x), INTEGER(y), REAL(w), comm, &opts); } else if (TYPEOF(x) == INTSXP) { PROTECT(w = allocVector(INTSXP, n)); optimset_t<float> opts; set_nm_opts(INTEGER(maxiter)[0], &opts); lm<float>(m, n, FLOAT(x), INTEGER(y), FLOAT(w), comm, &opts); } SET_VECTOR_ELT(ret, 0, w); SET_VECTOR_ELT(ret, 1, niters); setAttrib(ret, R_NamesSymbol, ret_names); UNPROTECT(4); return ret; }
871508357053458a710597033efdd2fd6f78798d.cu
#include <cstdlib> #include <float/float32.h> #include <Rinternals.h> #include "../nelder-mead/nelder_mead.hpp" #include "../Rmpi.h" #include "blas.hh" #include "cu_utils.hh" #include "mpicxx.hh" #include "nm_opts.hh" #include "restrict.h" template <typename REAL> struct lm_param_t { cublasHandle_t handle; int m; int n; const REAL *restrict x; const int *restrict y; REAL *restrict w; // beta REAL *restrict work; REAL *restrict s; MPI_Comm comm; }; template <typename REAL> __global__ static void hinge_loss_sum(REAL *s, const int m, const int *const restrict y, const REAL *const restrict work) { int tid = threadIdx.x; int i = tid + blockIdx.x*blockDim.x; if (i >= m) return; __shared__ REAL temp[TPB]; temp[tid] = work[i] - y[i]; __syncthreads(); if (tid == 0) { REAL sum = 0.0; for (int i=0; i<TPB; i++) sum += temp[i]; sum *= sum; atomicAdd(s, sum); } } template <typename REAL> static inline REAL lm_cost(const lm_param_t<REAL> *restrict args) { const int m = args->m; const int n = args->n; const REAL *const restrict x = args->x; const int *const restrict y = args->y; const REAL *const restrict w = args->w; REAL *const restrict s = args->s; REAL *const restrict work = args->work; int check; REAL J; REAL s_cpu; int nb = get_num_blocks(m); // J_local = 1/(2*m) * sum( (x%*%w - y)^2 ) mvm(args->handle, m, n, x, w, work); cudaMemset(s, 0, sizeof(*s)); hinge_loss_sum<<<nb, TPB>>>(s, m, y, work); cudaMemcpy(&s_cpu, s, sizeof(*s), cudaMemcpyDeviceToHost); J = ((REAL) 0.5/m) * s_cpu; check = allreduce1(&J, args->comm); MPI_CHECK(args->comm, check); return J; } template <typename REAL> static inline void lm_nmwrap(int n, point_t<REAL> *point, const void *arg) { const lm_param_t<REAL> *restrict args = (const lm_param_t<REAL>*) arg; cudaMemcpy(args->w, point->x, n*sizeof(REAL), cudaMemcpyHostToDevice); point->fx = lm_cost(args); cudaMemcpy(point->x, args->w, n*sizeof(REAL), cudaMemcpyDeviceToHost); } template <typename REAL> static inline void lm(const int m, const int n, const REAL *const restrict x, const int *const restrict y, REAL *const restrict w, MPI_Comm comm, optimset_t<REAL> *const restrict optimset) { lm_param_t<REAL> args; point_t<REAL> start, solution; cublasHandle_t handle; cublasStatus_t st = cublasCreate_v2(&handle); if (st != CUBLAS_STATUS_SUCCESS) error("cublasCreate() failed\n"); cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST); REAL *x_gpu; int *y_gpu; REAL *w_gpu; REAL *work_gpu; REAL *s_gpu; cudaMalloc(&x_gpu, m*n*sizeof(*x_gpu)); cudaMalloc(&y_gpu, m*sizeof(*y_gpu)); cudaMalloc(&w_gpu, n*sizeof(*w_gpu)); cudaMalloc(&work_gpu, m*sizeof(*work_gpu)); cudaMalloc(&s_gpu, sizeof(*s_gpu)); if (x_gpu == NULL || y_gpu == NULL || w_gpu == NULL || work_gpu == NULL || s_gpu == NULL) { CUFREE(x_gpu); CUFREE(y_gpu); CUFREE(w_gpu); CUFREE(work_gpu); CUFREE(s_gpu); error("Unable to allocate device memory"); } cudaMemcpy(x_gpu, x, m*n*sizeof(*x), cudaMemcpyHostToDevice); cudaMemcpy(y_gpu, y, m*sizeof(*y), cudaMemcpyHostToDevice); start.x = w; memset(w, 0, n*sizeof(*w)); args.handle = handle; args.m = m; args.n = n; args.x = x_gpu; args.y = y_gpu; args.w = w_gpu; args.s = s_gpu; args.work = work_gpu; args.comm = comm; nelder_mead(n, &start, &solution, &lm_nmwrap, &args, optimset); for (int i=0; i<n; i++) w[i] = solution.x[i]; cublasDestroy_v2(handle); cudaFree(x_gpu); cudaFree(y_gpu); cudaFree(w_gpu); cudaFree(work_gpu); free(solution.x); } extern "C" SEXP R_lm(SEXP x, SEXP y, SEXP maxiter, SEXP comm_) { SEXP ret, ret_names, w, niters; MPI_Comm comm = get_mpi_comm_from_Robj(comm_); const int m = 
nrows(x); const int n = ncols(x); PROTECT(ret = allocVector(VECSXP, 2)); PROTECT(ret_names = allocVector(STRSXP, 2)); PROTECT(niters = allocVector(INTSXP, 1)); SET_STRING_ELT(ret_names, 0, mkChar("w")); SET_STRING_ELT(ret_names, 1, mkChar("niters")); if (TYPEOF(x) == REALSXP) { PROTECT(w = allocVector(REALSXP, n)); optimset_t<double> opts; set_nm_opts(INTEGER(maxiter)[0], &opts); lm<double>(m, n, REAL(x), INTEGER(y), REAL(w), comm, &opts); } else if (TYPEOF(x) == INTSXP) { PROTECT(w = allocVector(INTSXP, n)); optimset_t<float> opts; set_nm_opts(INTEGER(maxiter)[0], &opts); lm<float>(m, n, FLOAT(x), INTEGER(y), FLOAT(w), comm, &opts); } SET_VECTOR_ELT(ret, 0, w); SET_VECTOR_ELT(ret, 1, niters); setAttrib(ret, R_NamesSymbol, ret_names); UNPROTECT(4); return ret; }
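A minimal sketch, not part of the original sources: the hinge_loss_sum kernel above stores the raw residuals work[i] - y[i] in shared memory and then squares the per-block total (sum *= sum), while the comment in lm_cost asks for sum((x %*% w - y)^2); in addition, threads with i >= m return before writing temp[tid] or reaching __syncthreads(), so the last, partially filled block can read uninitialized shared memory. Assuming the same TPB constant and the same atomicAdd support for the chosen REAL that the original file relies on, a corrected reduction could look like the following; lm_cost would launch it with the same <<<nb, TPB>>> geometry it already uses for hinge_loss_sum.

#include <cuda_runtime.h>

#ifndef TPB
#define TPB 256  // stand-in for the threads-per-block constant the original file uses
#endif

// Each thread squares its own residual; out-of-range threads contribute 0,
// so every slot of the shared buffer is written before the block-level sum
// and every thread reaches the barrier.
template <typename REAL>
__global__ static void squared_residual_sum(REAL *s, const int m,
                                            const int *const y,
                                            const REAL *const work)
{
  __shared__ REAL temp[TPB];
  const int tid = threadIdx.x;
  const int i = tid + blockIdx.x * blockDim.x;

  const REAL diff = (i < m) ? (work[i] - (REAL)y[i]) : (REAL)0;
  temp[tid] = diff * diff;
  __syncthreads();

  if (tid == 0) {
    REAL sum = 0;
    for (int k = 0; k < TPB; k++)
      sum += temp[k];
    atomicAdd(s, sum);  // same device-side accumulation as the original kernel
  }
}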
4b701738ce19b329e54c4eabc4545c5e4a5bca04.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file image_random.cu * \brief GPU Implementation of image transformation operators */ #include <hip/hip_runtime_api.h> #include "./image_random-inl.h" #include "../elemwise_op_common.h" namespace mxnet { namespace op { namespace image { using namespace mshadow; // ToTensor Kernel for 3D input template<typename xpu, typename Dtype> __global__ void ToTensorCudaKernel(const Tensor<xpu, 3, Dtype> input, const Tensor<xpu, 3, float> output, const int req, const int N, const int H, const int W, const int C, const float normalize_factor) { // We process one image per thread block. // In 3D case, we have only 1 block i.e., blockIdx.x // We do not use it. for (int c = 0; c < C; ++c) { for (int h = threadIdx.y; h < H; h += blockDim.y) { for (int w = threadIdx.x; w < W; w += blockDim.x) { KERNEL_ASSIGN(output[c][h][w], req, input[h][w][c] / normalize_factor); } } } } // ToTensor Kernel for 4D input template<typename xpu, typename Dtype> __global__ void ToTensorCudaKernel(const Tensor<xpu, 4, Dtype> input, const Tensor<xpu, 4, float> output, const int req, const int N, const int H, const int W, const int C, const float normalize_factor) { // We process one image per thread block. const int n = blockIdx.x; for (int c = 0; c < C; ++c) { for (int h = threadIdx.y; h < H; h += blockDim.y) { for (int w = threadIdx.x; w < W; w += blockDim.x) { KERNEL_ASSIGN(output[n][c][h][w], req, input[n][h][w][c] / normalize_factor); } } } } template<typename DType, typename T1, typename T2> void ToTensorImplCUDA(mshadow::Stream<gpu> *s, const T1 input, const T2 output, const int req, const float normalize_factor) { int blocks, H, W, C, N; hipStream_t stream = mshadow::Stream<gpu>::GetStream(s); if (std::is_same<T1, Tensor<gpu, 3, DType>>::value) { // 3D Input - (H, W, C) N = 0; H = input.size(0); W = input.size(1); C = input.size(2); blocks = 1; } else { // 4D Input - (N, H, W, C) N = input.size(0); H = input.size(1); W = input.size(2); C = input.size(3); blocks = N > 0 ? N : 1; blocks = N; } // One block per image. // Number of threads = (32, 32) is optimal, because, // computation is minimal and overhead of CUDA preparing // all threads is minimal. 
hipLaunchKernelGGL(( ToTensorCudaKernel<gpu, DType>) , dim3(blocks), dim3(dim3(32, 32)), 0, stream, input, output, req, N, H, W, C, normalize_factor); MSHADOW_CUDA_POST_KERNEL_CHECK(ToTensorCudaKernel); } NNVM_REGISTER_OP(_image_to_tensor) .set_attr<FCompute>("FCompute<gpu>", ToTensorOpForward<gpu>); NNVM_REGISTER_OP(_image_normalize) .set_attr<FCompute>("FCompute<gpu>", NormalizeOpForward<gpu>); NNVM_REGISTER_OP(_backward_image_normalize) .set_attr<FCompute>("FCompute<gpu>", NormalizeOpBackward<gpu>); } // namespace image } // namespace op } // namespace mxnet
4b701738ce19b329e54c4eabc4545c5e4a5bca04.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file image_random.cu * \brief GPU Implementation of image transformation operators */ #include <cuda_runtime_api.h> #include "./image_random-inl.h" #include "../elemwise_op_common.h" namespace mxnet { namespace op { namespace image { using namespace mshadow; // ToTensor Kernel for 3D input template<typename xpu, typename Dtype> __global__ void ToTensorCudaKernel(const Tensor<xpu, 3, Dtype> input, const Tensor<xpu, 3, float> output, const int req, const int N, const int H, const int W, const int C, const float normalize_factor) { // We process one image per thread block. // In 3D case, we have only 1 block i.e., blockIdx.x // We do not use it. for (int c = 0; c < C; ++c) { for (int h = threadIdx.y; h < H; h += blockDim.y) { for (int w = threadIdx.x; w < W; w += blockDim.x) { KERNEL_ASSIGN(output[c][h][w], req, input[h][w][c] / normalize_factor); } } } } // ToTensor Kernel for 4D input template<typename xpu, typename Dtype> __global__ void ToTensorCudaKernel(const Tensor<xpu, 4, Dtype> input, const Tensor<xpu, 4, float> output, const int req, const int N, const int H, const int W, const int C, const float normalize_factor) { // We process one image per thread block. const int n = blockIdx.x; for (int c = 0; c < C; ++c) { for (int h = threadIdx.y; h < H; h += blockDim.y) { for (int w = threadIdx.x; w < W; w += blockDim.x) { KERNEL_ASSIGN(output[n][c][h][w], req, input[n][h][w][c] / normalize_factor); } } } } template<typename DType, typename T1, typename T2> void ToTensorImplCUDA(mshadow::Stream<gpu> *s, const T1 input, const T2 output, const int req, const float normalize_factor) { int blocks, H, W, C, N; cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s); if (std::is_same<T1, Tensor<gpu, 3, DType>>::value) { // 3D Input - (H, W, C) N = 0; H = input.size(0); W = input.size(1); C = input.size(2); blocks = 1; } else { // 4D Input - (N, H, W, C) N = input.size(0); H = input.size(1); W = input.size(2); C = input.size(3); blocks = N > 0 ? N : 1; blocks = N; } // One block per image. // Number of threads = (32, 32) is optimal, because, // computation is minimal and overhead of CUDA preparing // all threads is minimal. ToTensorCudaKernel<gpu, DType> <<<blocks, dim3(32, 32), 0, stream>>>(input, output, req, N, H, W, C, normalize_factor); MSHADOW_CUDA_POST_KERNEL_CHECK(ToTensorCudaKernel); } NNVM_REGISTER_OP(_image_to_tensor) .set_attr<FCompute>("FCompute<gpu>", ToTensorOpForward<gpu>); NNVM_REGISTER_OP(_image_normalize) .set_attr<FCompute>("FCompute<gpu>", NormalizeOpForward<gpu>); NNVM_REGISTER_OP(_backward_image_normalize) .set_attr<FCompute>("FCompute<gpu>", NormalizeOpBackward<gpu>); } // namespace image } // namespace op } // namespace mxnet
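One detail worth flagging in both variants of ToTensorImplCUDA above: the 4D branch first computes blocks = N > 0 ? N : 1; and then immediately overwrites it with blocks = N;, so the guard against an empty batch never takes effect and an N of zero would produce a zero-sized grid at launch time. A minimal sketch of the intended guard, using a hypothetical helper name that is not in the original file:

#include <algorithm>

// Hypothetical helper: one block per image, but never a zero-sized grid
// when the batch happens to be empty.
static inline int to_tensor_grid_blocks(int batch_size) {
  return std::max(batch_size, 1);
}

The 4D branch would then read blocks = to_tensor_grid_blocks(N); and the redundant second assignment would be dropped.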
3087cef793d94fb7afa0adaa2e860bec259efd7f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define MODE_MANDEL 1 #define MODE_MANDEL_DISTANCE 2 #define MODE_JULIA 3 #define WIDTH gridDim.x*blockDim.x #define HEIGHT gridDim.y*blockDim.y #define X ((blockIdx.x * blockDim.x) + threadIdx.x) #define Y ((blockIdx.y * blockDim.y) + threadIdx.y) extern "C" __global__ void compute( int *iters, double *lastValuesR, double *lastValuesI, double *distancesR, double *distancesI, const int mode, const int4 tile, const double4 area, const double2 julia, const int maxIterations, const double sqrEscapeRadius ) { if (X >= tile.z || Y >= tile.w) { // tile.z is width of tile, tile.w is height of tile return; } const float x = (float)area.x + X * (float)area.z; const float y = (float)area.y + Y * (float)area.w; const float cr = mode == MODE_JULIA ? julia.x : x; const float ci = mode == MODE_JULIA ? julia.y : y; const float escape = sqrEscapeRadius; float zr = x; float zi = y; float new_zr = 0.0f; // distance float dr = 1; float di = 0; float new_dr; int count = 0; for (; count < maxIterations; count++) { const float zrsqr = zr * zr; const float zisqr = zi * zi; if ((zrsqr + zisqr) >= escape) { break; } if (mode == MODE_MANDEL_DISTANCE) { new_dr = 2.0f * (zr * dr - zi * di) + 1.0f; di = 2.0f * (zr * di + zi * dr); dr = new_dr; } new_zr = (zrsqr - zisqr) + cr; zi = ((2.0f * zr) * zi) + ci; zr = new_zr; //If in a periodic orbit, assume it is trapped if (zr == 0.0 && zi == 0.0) { count = maxIterations; break; } } const int tIndex = X + Y * tile.z; // tile.z is width of tile iters[tIndex] = count; lastValuesR[tIndex] = (double) zr; lastValuesI[tIndex] = (double) zi; if (mode == MODE_MANDEL_DISTANCE) { distancesR[tIndex] = (double) dr; distancesI[tIndex] = (double) di; } }
3087cef793d94fb7afa0adaa2e860bec259efd7f.cu
#include <stdio.h> #define MODE_MANDEL 1 #define MODE_MANDEL_DISTANCE 2 #define MODE_JULIA 3 #define WIDTH gridDim.x*blockDim.x #define HEIGHT gridDim.y*blockDim.y #define X ((blockIdx.x * blockDim.x) + threadIdx.x) #define Y ((blockIdx.y * blockDim.y) + threadIdx.y) extern "C" __global__ void compute( int *iters, double *lastValuesR, double *lastValuesI, double *distancesR, double *distancesI, const int mode, const int4 tile, const double4 area, const double2 julia, const int maxIterations, const double sqrEscapeRadius ) { if (X >= tile.z || Y >= tile.w) { // tile.z is width of tile, tile.w is height of tile return; } const float x = (float)area.x + X * (float)area.z; const float y = (float)area.y + Y * (float)area.w; const float cr = mode == MODE_JULIA ? julia.x : x; const float ci = mode == MODE_JULIA ? julia.y : y; const float escape = sqrEscapeRadius; float zr = x; float zi = y; float new_zr = 0.0f; // distance float dr = 1; float di = 0; float new_dr; int count = 0; for (; count < maxIterations; count++) { const float zrsqr = zr * zr; const float zisqr = zi * zi; if ((zrsqr + zisqr) >= escape) { break; } if (mode == MODE_MANDEL_DISTANCE) { new_dr = 2.0f * (zr * dr - zi * di) + 1.0f; di = 2.0f * (zr * di + zi * dr); dr = new_dr; } new_zr = (zrsqr - zisqr) + cr; zi = ((2.0f * zr) * zi) + ci; zr = new_zr; //If in a periodic orbit, assume it is trapped if (zr == 0.0 && zi == 0.0) { count = maxIterations; break; } } const int tIndex = X + Y * tile.z; // tile.z is width of tile iters[tIndex] = count; lastValuesR[tIndex] = (double) zr; lastValuesI[tIndex] = (double) zi; if (mode == MODE_MANDEL_DISTANCE) { distancesR[tIndex] = (double) dr; distancesI[tIndex] = (double) di; } }
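The compute kernel in the pair above takes its geometry from tile (the inline comments note that tile.z and tile.w are the tile's width and height) and its coordinate mapping from area (origin in .x/.y, per-pixel step in .z/.w). A hypothetical host-side launch for the CUDA variant, sketched under the assumption of pre-allocated device buffers and a 16x16 block shape, neither of which is taken from the original file:

#include <cuda_runtime.h>

// Declaration matching the kernel defined in the .cu file above.
extern "C" __global__ void compute(int *iters, double *lastValuesR, double *lastValuesI,
                                   double *distancesR, double *distancesI,
                                   const int mode, const int4 tile, const double4 area,
                                   const double2 julia, const int maxIterations,
                                   const double sqrEscapeRadius);

// Sketch: render one tileW x tileH tile of the Mandelbrot set (MODE_MANDEL == 1).
void launch_compute_tile(int *d_iters, double *d_lastR, double *d_lastI,
                         double *d_distR, double *d_distI,
                         int tileX, int tileY, int tileW, int tileH,
                         double originX, double originY, double stepX, double stepY,
                         int maxIterations, double sqrEscapeRadius)
{
  const int4 tile = make_int4(tileX, tileY, tileW, tileH);     // .z = width, .w = height
  const double4 area = make_double4(originX, originY, stepX, stepY);
  const double2 julia = make_double2(0.0, 0.0);                // ignored unless MODE_JULIA

  dim3 block(16, 16);
  dim3 grid((tileW + block.x - 1) / block.x,                   // the kernel's own bound
            (tileH + block.y - 1) / block.y);                  // check absorbs the overrun
  compute<<<grid, block>>>(d_iters, d_lastR, d_lastI, d_distR, d_distI,
                           /* mode = */ 1, tile, area, julia,
                           maxIterations, sqrEscapeRadius);
}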
10e5f1a3b39c1e7ce2e53078d0f8b899c0ee5b92.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "CImg.h" using namespace cimg_library; #define MIN(a,b) ((a < b) ? a : b) #define MAX(a,b) ((a > b) ? a : b) hipError_t redFilterCUDA(const char *fname); hipError_t greenFilterCUDA(const char *fname); hipError_t blueFilterCUDA(const char *fname); hipError_t yellowFilterCUDA(const char *fname); hipError_t magentaFilterCUDA(const char *fname); hipError_t cyanFilterCUDA(const char *fname); hipError_t grayscaleLightnessCUDA(const char *fname); hipError_t grayscaleAverageCUDA(const char *fname); hipError_t grayscaleLuminosityRCUDA(const char *fname); hipError_t grayscaleLuminosityGCUDA(const char *fname); hipError_t grayscaleLuminosityBCUDA(const char *fname); /* Color filters should be applied to grayscaled images. It is possible to create a grayscale image and then apply the filter. */ __global__ void redFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[(y + h) * w + x] = 0; img[(y + h + h) * w + x] = 0; } __global__ void greenFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[y * w + x] = 0; img[(y + h + h) * w + x] = 0; } __global__ void blueFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[y * w + x] = 0; img[(y + h) * w + x] = 0; } __global__ void yellowFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[(y + h + h) * w + x] = 0; } __global__ void magentaFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[(y + h) * w + x] = 0; } __global__ void cyanFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[y * w + x] = 0; } __global__ void grayscaleLightnessKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; int R = img[y * w + x]; int G = img[(y + h) * w + x]; int B = img[(y + h + h) * w + x]; int gray = (MAX(R, MAX(G, B)) + MIN(R, MIN(G, B))) / 2; img[y * w + x] = img[(y + h) * w + x] = img[(y + h + h) * w + x] = gray; } __global__ void grayscaleAverageKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; // R, G, B values for pixel are assigned average of said R, G, B values; img[y * w + x] = img[(y + h) * w + x] = img[(y + h + h) * w + x] = (img[y * w + x] + img[(y + h) * w + x] + img[(y + h + h) * w + x]) / 3; } /* General Luminosity: (0.21 R + 0.72 G + 0.07 B) / 3 (people are more sensitive to green) */ __global__ void grayscaleLuminosityRKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; // R, G, B values for pixel are assigned 
scaled average of said R, G, B values; img[y * w + x] = img[(y + h) * w + x] = img[(y + h + h) * w + x] = (img[y * w + x] * 0.72 + img[(y + h) * w + x] * 0.21 + img[(y + h + h) * w + x] * 0.07) / 3; } __global__ void grayscaleLuminosityGKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; // R, G, B values for pixel are assigned scaled average of said R, G, B values; img[y * w + x] = img[(y + h) * w + x] = img[(y + h + h) * w + x] = (img[y * w + x] * 0.21 + img[(y + h) * w + x] * 0.72 + img[(y + h + h) * w + x] * 0.07) / 3; } __global__ void grayscaleLuminosityBKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; // R, G, B values for pixel are assigned scaled average of said R, G, B values; img[y * w + x] = img[(y + h) * w + x] = img[(y + h + h) * w + x] = (img[y * w + x] * 0.07 + img[(y + h) * w + x] * 0.21 + img[(y + h + h) * w + x] * 0.72) / 3; } int checkFunctionCudaSuccess(hipError_t status, char *func) { int rc = 0; if (status != hipSuccess) { fprintf(stderr, "%s failed!", func); rc = 1; } return rc; } int main(int argc, char *argv[]) { hipError_t cudaStatus; // Adds red filter in parallel cudaStatus = redFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "redFilterCUDA")) { return 1; } // Adds green filter in parallel cudaStatus = greenFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "greenFilterCUDA")) { return 1; } // Adds blue filter in parallel cudaStatus = blueFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "blueFilterCUDA")) { return 1; } // Adds yellow filter in parallel cudaStatus = yellowFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "yellowFilterCUDA")) { return 1; } // Adds magenta filter in parallel cudaStatus = magentaFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "magentaFilterCUDA")) { return 1; } // Adds cyan filter in parallel cudaStatus = cyanFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "cyanFilterCUDA")) { return 1; } // Turn image grayscale using Lightness method in parallel cudaStatus = grayscaleLightnessCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "grayscaleLightnessCUDA")) { return 1; } // Turn image grayscale using Average method in parallel cudaStatus = grayscaleAverageCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "grayscaleAverageCUDA")) { return 1; } // Turn image grayscale using Luminosity R method in parallel cudaStatus = grayscaleLuminosityRCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "grayscaleLuminosityRCUDA")) { return 1; } // Turn image grayscale using Luminosity G method in parallel cudaStatus = grayscaleLuminosityGCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "grayscaleLuminosityGCUDA")) { return 1; } // Turn image grayscale using Luminosity B method in parallel cudaStatus = grayscaleLuminosityBCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "grayscaleLuminosityBCUDA")) { return 1; } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. 
cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } hipError_t redFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&src_kptr, s); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(src_kptr, src_ptr, s, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); hipLaunchKernelGGL(( redFilterKernel), dim3(grdDim), dim3(blkDim), 0, 0, src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "redFilterKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching redFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(src_ptr, src_kptr, s, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } src.save("./images/redFilter.bmp"); Error: hipFree(src_kptr); return cudaStatus; } hipError_t greenFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&src_kptr, s); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(src_kptr, src_ptr, s, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); hipLaunchKernelGGL(( greenFilterKernel), dim3(grdDim), dim3(blkDim), 0, 0, src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "greenFilterKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching redFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(src_ptr, src_kptr, s, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } src.save("./images/greenFilter.bmp"); Error: hipFree(src_kptr); return cudaStatus; } hipError_t blueFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&src_kptr, s); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(src_kptr, src_ptr, s, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); hipLaunchKernelGGL(( blueFilterKernel), dim3(grdDim), dim3(blkDim), 0, 0, src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "blueFilterKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching blueFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(src_ptr, src_kptr, s, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } src.save("./images/blueFilter.bmp"); Error: hipFree(src_kptr); return cudaStatus; } hipError_t yellowFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&src_kptr, s); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(src_kptr, src_ptr, s, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. 
dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); hipLaunchKernelGGL(( yellowFilterKernel), dim3(grdDim), dim3(blkDim), 0, 0, src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "yellowFilterKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching yellowFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(src_ptr, src_kptr, s, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } src.save("./images/yellowFilter.bmp"); Error: hipFree(src_kptr); return cudaStatus; } hipError_t magentaFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&src_kptr, s); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(src_kptr, src_ptr, s, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); hipLaunchKernelGGL(( magentaFilterKernel), dim3(grdDim), dim3(blkDim), 0, 0, src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "magentaFilterKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching magentaFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(src_ptr, src_kptr, s, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } src.save("./images/magentaFilter.bmp"); Error: hipFree(src_kptr); return cudaStatus; } hipError_t cyanFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). 
cudaStatus = hipMalloc((void**)&src_kptr, s); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(src_kptr, src_ptr, s, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); hipLaunchKernelGGL(( cyanFilterKernel), dim3(grdDim), dim3(blkDim), 0, 0, src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "cyanFilterKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching cyanFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(src_ptr, src_kptr, s, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } src.save("./images/cyanFilter.bmp"); Error: hipFree(src_kptr); return cudaStatus; } // Helper function for using CUDA to turn an image grayscale using Lightness method in parallel. hipError_t grayscaleLightnessCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&src_kptr, s); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(src_kptr, src_ptr, s, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); hipLaunchKernelGGL(( grayscaleLightnessKernel), dim3(grdDim), dim3(blkDim), 0, 0, src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "grayscaleLightnessKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching removeRKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(src_ptr, src_kptr, s, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } src.save("./images/grayscaleLightness.bmp"); Error: hipFree(src_kptr); return cudaStatus; } // Helper function for using CUDA to turn an image grayscale using Average method in parallel. 
hipError_t grayscaleAverageCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&src_kptr, s); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(src_kptr, src_ptr, s, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); hipLaunchKernelGGL(( grayscaleAverageKernel), dim3(grdDim), dim3(blkDim), 0, 0, src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "grayscaleAverageKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching removeRKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(src_ptr, src_kptr, s, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } src.save("./images/grayscaleAverage.bmp"); Error: hipFree(src_kptr); return cudaStatus; } // Helper function for using CUDA to turn an image grayscale using Luminosity R method in parallel. hipError_t grayscaleLuminosityRCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&src_kptr, s); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(src_kptr, src_ptr, s, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); hipLaunchKernelGGL(( grayscaleLuminosityRKernel), dim3(grdDim), dim3(blkDim), 0, 0, src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "grayscaleLuminosityRKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching removeRKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(src_ptr, src_kptr, s, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } src.save("./images/grayscaleLuminosityR.bmp"); Error: hipFree(src_kptr); return cudaStatus; } // Helper function for using CUDA to turn an image grayscale using Luminosity G method in parallel. hipError_t grayscaleLuminosityGCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&src_kptr, s); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(src_kptr, src_ptr, s, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); hipLaunchKernelGGL(( grayscaleLuminosityGKernel), dim3(grdDim), dim3(blkDim), 0, 0, src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "grayscaleLuminosityGKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching removeRKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(src_ptr, src_kptr, s, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } src.save("./images/grayscaleLuminosityG.bmp"); Error: hipFree(src_kptr); return cudaStatus; } // Helper function for using CUDA to turn an image grayscale using Luminosity B method in parallel. hipError_t grayscaleLuminosityBCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = hipMalloc((void**)&src_kptr, s); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = hipMemcpy(src_kptr, src_ptr, s, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); hipLaunchKernelGGL(( grayscaleLuminosityBKernel), dim3(grdDim), dim3(blkDim), 0, 0, src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "grayscaleLuminosityBKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching removeRKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(src_ptr, src_kptr, s, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } src.save("./images/grayscaleLuminosityB.bmp"); Error: hipFree(src_kptr); return cudaStatus; }
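Two observations on the color-filter file that ends here. First, the kernels rely on CImg's planar layout: for a w x h image the red, green and blue planes sit back to back, so img[y*w + x], img[(y + h)*w + x] and img[(y + h + h)*w + x] address the R, G and B samples of one pixel. Second, every host wrapper repeats the same allocate / copy-in / launch / synchronize / copy-out / free scaffold, and several of the copied error messages still name removeRKernel or redFilterKernel rather than the kernel actually launched. A hypothetical consolidation, sketched for the CUDA variant only (the helper name and its template are assumptions, not part of the original file), could keep each wrapper down to the kernel choice and the output path:

#include <cuda_runtime.h>

// Hypothetical helper: factors out the repeated allocate / copy-in / launch /
// synchronize / copy-out / free scaffold. KernelT is a pointer to one of the
// __global__ filter kernels defined above, all of which share the same signature.
template <typename KernelT>
static cudaError_t runFilterKernel(KernelT kernel, unsigned char *host_img,
                                   int w, int h, unsigned long bytes)
{
  unsigned char *dev_img = NULL;
  cudaError_t st = cudaMalloc((void**)&dev_img, bytes);
  if (st != cudaSuccess) return st;

  st = cudaMemcpy(dev_img, host_img, bytes, cudaMemcpyHostToDevice);
  if (st == cudaSuccess) {
    dim3 blkDim(16, 16, 1);                        // same launch geometry as the original
    dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1);
    kernel<<<grdDim, blkDim>>>(dev_img, w, h);
    st = cudaGetLastError();
  }
  if (st == cudaSuccess) st = cudaDeviceSynchronize();
  if (st == cudaSuccess)
    st = cudaMemcpy(host_img, dev_img, bytes, cudaMemcpyDeviceToHost);

  cudaFree(dev_img);
  return st;
}

A call such as runFilterKernel(redFilterKernel, src.data(), w, h, src.size()) would then replace everything in redFilterCUDA between cudaSetDevice(0) and the src.save call.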
10e5f1a3b39c1e7ce2e53078d0f8b899c0ee5b92.cu
#include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "CImg.h" using namespace cimg_library; #define MIN(a,b) ((a < b) ? a : b) #define MAX(a,b) ((a > b) ? a : b) cudaError_t redFilterCUDA(const char *fname); cudaError_t greenFilterCUDA(const char *fname); cudaError_t blueFilterCUDA(const char *fname); cudaError_t yellowFilterCUDA(const char *fname); cudaError_t magentaFilterCUDA(const char *fname); cudaError_t cyanFilterCUDA(const char *fname); cudaError_t grayscaleLightnessCUDA(const char *fname); cudaError_t grayscaleAverageCUDA(const char *fname); cudaError_t grayscaleLuminosityRCUDA(const char *fname); cudaError_t grayscaleLuminosityGCUDA(const char *fname); cudaError_t grayscaleLuminosityBCUDA(const char *fname); /* Color filters should be applied to grayscaled images. It is possible to create a grayscale image and then apply the filter. */ __global__ void redFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[(y + h) * w + x] = 0; img[(y + h + h) * w + x] = 0; } __global__ void greenFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[y * w + x] = 0; img[(y + h + h) * w + x] = 0; } __global__ void blueFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[y * w + x] = 0; img[(y + h) * w + x] = 0; } __global__ void yellowFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[(y + h + h) * w + x] = 0; } __global__ void magentaFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[(y + h) * w + x] = 0; } __global__ void cyanFilterKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; img[y * w + x] = 0; } __global__ void grayscaleLightnessKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; int R = img[y * w + x]; int G = img[(y + h) * w + x]; int B = img[(y + h + h) * w + x]; int gray = (MAX(R, MAX(G, B)) + MIN(R, MIN(G, B))) / 2; img[y * w + x] = img[(y + h) * w + x] = img[(y + h + h) * w + x] = gray; } __global__ void grayscaleAverageKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; // R, G, B values for pixel are assigned average of said R, G, B values; img[y * w + x] = img[(y + h) * w + x] = img[(y + h + h) * w + x] = (img[y * w + x] + img[(y + h) * w + x] + img[(y + h + h) * w + x]) / 3; } /* General Luminosity: (0.21 R + 0.72 G + 0.07 B) / 3 (people are more sensitive to green) */ __global__ void grayscaleLuminosityRKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; // R, G, B values for pixel are assigned scaled average of said R, G, B values; img[y * w + x] = img[(y + 
h) * w + x] = img[(y + h + h) * w + x] = (img[y * w + x] * 0.72 + img[(y + h) * w + x] * 0.21 + img[(y + h + h) * w + x] * 0.07) / 3; } __global__ void grayscaleLuminosityGKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; // R, G, B values for pixel are assigned scaled average of said R, G, B values; img[y * w + x] = img[(y + h) * w + x] = img[(y + h + h) * w + x] = (img[y * w + x] * 0.21 + img[(y + h) * w + x] * 0.72 + img[(y + h + h) * w + x] * 0.07) / 3; } __global__ void grayscaleLuminosityBKernel(unsigned char *img, int w, int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; // R, G, B values for pixel are assigned scaled average of said R, G, B values; img[y * w + x] = img[(y + h) * w + x] = img[(y + h + h) * w + x] = (img[y * w + x] * 0.07 + img[(y + h) * w + x] * 0.21 + img[(y + h + h) * w + x] * 0.72) / 3; } int checkFunctionCudaSuccess(cudaError_t status, char *func) { int rc = 0; if (status != cudaSuccess) { fprintf(stderr, "%s failed!", func); rc = 1; } return rc; } int main(int argc, char *argv[]) { cudaError_t cudaStatus; // Adds red filter in parallel cudaStatus = redFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "redFilterCUDA")) { return 1; } // Adds green filter in parallel cudaStatus = greenFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "greenFilterCUDA")) { return 1; } // Adds blue filter in parallel cudaStatus = blueFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "blueFilterCUDA")) { return 1; } // Adds yellow filter in parallel cudaStatus = yellowFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "yellowFilterCUDA")) { return 1; } // Adds magenta filter in parallel cudaStatus = magentaFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "magentaFilterCUDA")) { return 1; } // Adds cyan filter in parallel cudaStatus = cyanFilterCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "cyanFilterCUDA")) { return 1; } // Turn image grayscale using Lightness method in parallel cudaStatus = grayscaleLightnessCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "grayscaleLightnessCUDA")) { return 1; } // Turn image grayscale using Average method in parallel cudaStatus = grayscaleAverageCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "grayscaleAverageCUDA")) { return 1; } // Turn image grayscale using Luminosity R method in parallel cudaStatus = grayscaleLuminosityRCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "grayscaleLuminosityRCUDA")) { return 1; } // Turn image grayscale using Luminosity G method in parallel cudaStatus = grayscaleLuminosityGCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "grayscaleLuminosityGCUDA")) { return 1; } // Turn image grayscale using Luminosity B method in parallel cudaStatus = grayscaleLuminosityBCUDA(argv[1]); if (checkFunctionCudaSuccess(cudaStatus, "grayscaleLuminosityBCUDA")) { return 1; } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. 
cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } cudaError_t redFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = cudaMalloc((void**)&src_kptr, s); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(src_kptr, src_ptr, s, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); redFilterKernel<<<grdDim, blkDim>>>(src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "redFilterKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching redFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(src_ptr, src_kptr, s, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } src.save("./images/redFilter.bmp"); Error: cudaFree(src_kptr); return cudaStatus; } cudaError_t greenFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = cudaMalloc((void**)&src_kptr, s); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(src_kptr, src_ptr, s, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); greenFilterKernel<<<grdDim, blkDim>>>(src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "greenFilterKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching redFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(src_ptr, src_kptr, s, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } src.save("./images/greenFilter.bmp"); Error: cudaFree(src_kptr); return cudaStatus; } cudaError_t blueFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = cudaMalloc((void**)&src_kptr, s); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(src_kptr, src_ptr, s, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); blueFilterKernel<<<grdDim, blkDim>>>(src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "blueFilterKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching blueFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(src_ptr, src_kptr, s, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } src.save("./images/blueFilter.bmp"); Error: cudaFree(src_kptr); return cudaStatus; } cudaError_t yellowFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = cudaMalloc((void**)&src_kptr, s); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(src_kptr, src_ptr, s, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. 
dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); yellowFilterKernel<<<grdDim, blkDim>>>(src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "yellowFilterKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching yellowFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(src_ptr, src_kptr, s, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } src.save("./images/yellowFilter.bmp"); Error: cudaFree(src_kptr); return cudaStatus; } cudaError_t magentaFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = cudaMalloc((void**)&src_kptr, s); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(src_kptr, src_ptr, s, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); magentaFilterKernel<<<grdDim, blkDim>>>(src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "magentaFilterKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching magentaFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(src_ptr, src_kptr, s, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } src.save("./images/magentaFilter.bmp"); Error: cudaFree(src_kptr); return cudaStatus; } cudaError_t cyanFilterCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). 
cudaStatus = cudaMalloc((void**)&src_kptr, s); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(src_kptr, src_ptr, s, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); cyanFilterKernel<<<grdDim, blkDim>>>(src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cyanFilterKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching cyanFilterKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(src_ptr, src_kptr, s, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } src.save("./images/cyanFilter.bmp"); Error: cudaFree(src_kptr); return cudaStatus; } // Helper function for using CUDA to turn an image grayscale using Lightness method in parallel. cudaError_t grayscaleLightnessCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = cudaMalloc((void**)&src_kptr, s); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(src_kptr, src_ptr, s, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); grayscaleLightnessKernel<<<grdDim, blkDim>>>(src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "grayscaleLightnessKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching removeRKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(src_ptr, src_kptr, s, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } src.save("./images/grayscaleLightness.bmp"); Error: cudaFree(src_kptr); return cudaStatus; } // Helper function for using CUDA to turn an image grayscale using Average method in parallel. 
cudaError_t grayscaleAverageCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = cudaMalloc((void**)&src_kptr, s); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(src_kptr, src_ptr, s, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); grayscaleAverageKernel<<<grdDim, blkDim>>>(src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "grayscaleAverageKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching removeRKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(src_ptr, src_kptr, s, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } src.save("./images/grayscaleAverage.bmp"); Error: cudaFree(src_kptr); return cudaStatus; } // Helper function for using CUDA to turn an image grayscale using Luminosity R method in parallel. cudaError_t grayscaleLuminosityRCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output). cudaStatus = cudaMalloc((void**)&src_kptr, s); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(src_kptr, src_ptr, s, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch kernel for 256 blocks and a multitude of threads based on image size. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); grayscaleLuminosityRKernel<<<grdDim, blkDim>>>(src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "grayscaleLuminosityRKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching grayscaleLuminosityRKernel!\n", cudaStatus); goto Error; } // Copy the processed image from the GPU buffer back to host memory. cudaStatus = cudaMemcpy(src_ptr, src_kptr, s, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } src.save("./images/grayscaleLuminosityR.bmp"); Error: cudaFree(src_kptr); return cudaStatus; } // Helper function for using CUDA to turn an image grayscale using Luminosity G method in parallel. cudaError_t grayscaleLuminosityGCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate a GPU buffer for the image data. cudaStatus = cudaMalloc((void**)&src_kptr, s); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy the input image from host memory to the GPU buffer. cudaStatus = cudaMemcpy(src_kptr, src_ptr, s, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch the kernel with 16x16 thread blocks and enough blocks to cover the image. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); grayscaleLuminosityGKernel<<<grdDim, blkDim>>>(src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "grayscaleLuminosityGKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching grayscaleLuminosityGKernel!\n", cudaStatus); goto Error; } // Copy the processed image from the GPU buffer back to host memory. cudaStatus = cudaMemcpy(src_ptr, src_kptr, s, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } src.save("./images/grayscaleLuminosityG.bmp"); Error: cudaFree(src_kptr); return cudaStatus; } // Helper function for using CUDA to turn an image grayscale using Luminosity B method in parallel. cudaError_t grayscaleLuminosityBCUDA(const char *fname) { CImg<unsigned char> src(fname); int w = src.width(); int h = src.height(); unsigned long s = src.size(); unsigned char *src_ptr = src.data(); unsigned char *src_kptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate a GPU buffer for the image data. cudaStatus = cudaMalloc((void**)&src_kptr, s); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy the input image from host memory to the GPU buffer.
cudaStatus = cudaMemcpy(src_kptr, src_ptr, s, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch the kernel with 16x16 thread blocks and enough blocks to cover the image. dim3 blkDim(16, 16, 1); dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1); grayscaleLuminosityBKernel<<<grdDim, blkDim>>>(src_kptr, w, h); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "grayscaleLuminosityBKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching grayscaleLuminosityBKernel!\n", cudaStatus); goto Error; } // Copy the processed image from the GPU buffer back to host memory. cudaStatus = cudaMemcpy(src_ptr, src_kptr, s, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } src.save("./images/grayscaleLuminosityB.bmp"); Error: cudaFree(src_kptr); return cudaStatus; }
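Each host wrapper in the file above repeats the same allocate / copy-in / launch / synchronize / copy-out / free sequence, differing only in the kernel and the output path. A minimal sketch of how that boilerplate could be factored into one templated helper follows; the helper name runFilter and the CImg include are assumptions (the file header is not shown here), and the kernel signature void(unsigned char*, int, int) is inferred from the launches above.

// Sketch only: one templated host wrapper that could replace the duplicated
// filter wrappers above. Assumes CImg is available as in the original file and
// that every filter kernel has the signature void(unsigned char*, int, int).
#include <cstdio>
#include <cuda_runtime.h>
#include "CImg.h"
using cimg_library::CImg;

template <typename KernelT>
cudaError_t runFilter(KernelT kernel, const char *fname, const char *outName)
{
    CImg<unsigned char> src(fname);
    const int w = src.width();
    const int h = src.height();
    const unsigned long s = src.size();
    unsigned char *src_kptr = nullptr;

    cudaError_t st = cudaSetDevice(0);
    if (st != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed: %s\n", cudaGetErrorString(st)); return st; }
    st = cudaMalloc((void**)&src_kptr, s);
    if (st != cudaSuccess) { fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(st)); return st; }

    // Upload, run the per-pixel kernel over a grid covering the image, download.
    st = cudaMemcpy(src_kptr, src.data(), s, cudaMemcpyHostToDevice);
    if (st == cudaSuccess) {
        dim3 blkDim(16, 16, 1);
        dim3 grdDim((w + 15) / 16, (h + 15) / 16, 1);
        kernel<<<grdDim, blkDim>>>(src_kptr, w, h);
        st = cudaGetLastError();
    }
    if (st == cudaSuccess) st = cudaDeviceSynchronize();
    if (st == cudaSuccess) st = cudaMemcpy(src.data(), src_kptr, s, cudaMemcpyDeviceToHost);

    if (st == cudaSuccess) src.save(outName);
    else fprintf(stderr, "runFilter(%s) failed: %s\n", outName, cudaGetErrorString(st));

    cudaFree(src_kptr);
    return st;
}

// Possible usage, mirroring the wrappers above:
//   runFilter(grayscaleLuminosityBKernel, fname, "./images/grayscaleLuminosityB.bmp");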
c307d6faec068597eaff494061348e3b1af6437c.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "common.h" #include "THHHalf.h" #include "THHHalfAutoNumerics.cuh" #include "THHApply.cuh" #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> template <typename Dtype, typename Acctype> struct abs_functor { __host__ __device__ Acctype operator()(const Dtype& x, const Dtype& y) const { Dtype z = x-y; return ScalarConvert<Dtype, Acctype>::to(z >= 0 ? z : -z); } }; template <typename Dtype> struct abs_updateOutput_no_reduce_functor { __host__ __device__ void operator()(const Dtype* x, const Dtype* y, Dtype *out) { Dtype z = *x - *y; *out = z >= 0 ? z : -z; } }; template <typename Dtype> struct abs_updateGradInput_no_reduce_functor { __forceinline__ __host__ __device__ void operator()( const Dtype *x, const Dtype *y, Dtype *gradInput) { *gradInput = ScalarConvert<int, Dtype>::to(*x >= *y ? 1 : -1); } }; template <typename Dtype> struct abs_updateGradInput_functor { const Dtype norm; const Dtype gradOutput; abs_updateGradInput_functor(Dtype norm_, Dtype gradOutput_) : norm(norm_), gradOutput(gradOutput_) {} __host__ __device__ Dtype operator()(const Dtype& x, const Dtype& y) const { return ((x - y) >= 0 ? norm : -norm) * gradOutput; } }; #include "generic/AbsCriterion.cu" #include "THHGenerateFloatTypes.h"
c307d6faec068597eaff494061348e3b1af6437c.cu
#include "THCUNN.h" #include "common.h" #include "THCHalf.h" #include "THCHalfAutoNumerics.cuh" #include "THCApply.cuh" #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> template <typename Dtype, typename Acctype> struct abs_functor { __host__ __device__ Acctype operator()(const Dtype& x, const Dtype& y) const { Dtype z = x-y; return ScalarConvert<Dtype, Acctype>::to(z >= 0 ? z : -z); } }; template <typename Dtype> struct abs_updateOutput_no_reduce_functor { __host__ __device__ void operator()(const Dtype* x, const Dtype* y, Dtype *out) { Dtype z = *x - *y; *out = z >= 0 ? z : -z; } }; template <typename Dtype> struct abs_updateGradInput_no_reduce_functor { __forceinline__ __host__ __device__ void operator()( const Dtype *x, const Dtype *y, Dtype *gradInput) { *gradInput = ScalarConvert<int, Dtype>::to(*x >= *y ? 1 : -1); } }; template <typename Dtype> struct abs_updateGradInput_functor { const Dtype norm; const Dtype gradOutput; abs_updateGradInput_functor(Dtype norm_, Dtype gradOutput_) : norm(norm_), gradOutput(gradOutput_) {} __host__ __device__ Dtype operator()(const Dtype& x, const Dtype& y) const { return ((x - y) >= 0 ? norm : -norm) * gradOutput; } }; #include "generic/AbsCriterion.cu" #include "THCGenerateFloatTypes.h"
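For context, the functors defined in this file are consumed by the code in generic/AbsCriterion.cu, which is not part of this listing. The fragment below is only an illustration, under that assumption, of how a functor like abs_functor feeds thrust::inner_product to reduce |x_i - y_i| over two device arrays; abs_diff here stands in for abs_functor<float, float>.

// Illustrative only: summed (or mean) absolute difference via thrust::inner_product.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/inner_product.h>

struct abs_diff {
  __host__ __device__ float operator()(const float& x, const float& y) const {
    float z = x - y;
    return z >= 0 ? z : -z;
  }
};

int main() {
  thrust::device_vector<float> input(4, 1.0f);   // x = {1, 1, 1, 1}
  thrust::device_vector<float> target(4, 3.5f);  // y = {3.5, 3.5, 3.5, 3.5}

  // sum_i |x_i - y_i|; dividing by n gives the size-averaged criterion.
  float sum = thrust::inner_product(input.begin(), input.end(), target.begin(),
                                    0.0f, thrust::plus<float>(), abs_diff());
  printf("L1 sum = %f, mean = %f\n", sum, sum / input.size());
  return 0;
}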
258d4ce709d7d8e48964f068aa1e0f862b541606.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/UpSample.cuh> #include <THH/THHAtomics.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_linear1d_out_frame( const int n, const accscalar_t rwidth, const bool align_corners, const PackedTensorAccessor64<scalar_t, 3> idata, PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][w1]; odata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const accscalar_t val = w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p]; odata[n][c][w2] = static_cast<scalar_t>(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_linear1d_out_frame_backward( const int n, const accscalar_t rwidth, const bool align_corners, PackedTensorAccessor64<scalar_t, 3> idata, const PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][w1]; idata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][w2]; gpuAtomicAddNoReturn(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val)); gpuAtomicAddNoReturn( &idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val)); } } } } static void upsample_linear1d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_linear1d_out_cuda", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); int output_width = output_size[0]; int nbatch = input.size(0); int channels = input.size(1); int input_width = input.size(2); upsample_1d_shape_check( input, Tensor(), nbatch, channels, input_width, output_width); output.resize_({input.size(0), input.size(1), output_width}); output.zero_(); AT_ASSERT(input_width > 0 && output_width > 0); const int num_kernels = output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_linear1d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor64<scalar_t, 3>(); auto odata = output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); hipLaunchKernelGGL(( upsample_linear1d_out_frame<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(hipGetLastError()); } static void upsample_linear1d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { TensorArg grad_output_arg{grad_output_, "grad_output_", 1}, grad_input_arg{grad_input, "grad_input", 2}; checkAllSameGPU( "upsample_linear1d_backward_out_cuda", {grad_output_arg, grad_input_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); TORCH_CHECK( input_size.size() == 3, "It is expected input_size equals to 3, but got size ", input_size.size()); int output_width = output_size[0]; int nbatch = input_size[0]; int channels = input_size[1]; int input_width = input_size[2]; upsample_1d_shape_check( Tensor(), grad_output_, nbatch, channels, input_width, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_({nbatch, channels, input_width}); grad_input.zero_(); const int num_kernels = output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor64<scalar_t, 3>(); auto odata = grad_output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); 
hipLaunchKernelGGL(( upsample_linear1d_out_frame_backward<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(hipGetLastError()); } } // namespace Tensor& upsample_linear1d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { upsample_linear1d_out_cuda_template( output, input, output_size, align_corners, scales); return output; } Tensor upsample_linear1d_cuda( const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_linear1d_out_cuda_template( output, input, output_size, align_corners, scales); return output; } Tensor& upsample_linear1d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("upsample_linear1d_backward_out_cuda"); upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales); return grad_input; } Tensor upsample_linear1d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("upsample_linear1d_backward_cuda"); Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales); return grad_input; } using at::native::upsample::compute_output_size; using at::native::upsample_cuda::get_scale_value; Tensor upsample_linear1d_cuda( const Tensor& input, c10::optional<IntArrayRef> output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors) { auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_w = get_scale_value(scale_factors, 0); upsample_linear1d_out_cuda_template(output, input, osize, align_corners, scale_w); return output; } Tensor upsample_linear1d_backward_cuda( const Tensor& grad_output, c10::optional<IntArrayRef> output_size, IntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("upsample_linear1d_backward_cuda"); auto osize = compute_output_size(input_size, output_size, scale_factors); auto scale_w = get_scale_value(scale_factors, 0); auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, osize, input_size, align_corners, scale_w); return grad_input; } } // namespace native } // namespace at
258d4ce709d7d8e48964f068aa1e0f862b541606.cu
// Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/UpSample.cuh> #include <THC/THCAtomics.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_linear1d_out_frame( const int n, const accscalar_t rwidth, const bool align_corners, const PackedTensorAccessor64<scalar_t, 3> idata, PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][w1]; odata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const accscalar_t val = w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p]; odata[n][c][w2] = static_cast<scalar_t>(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void upsample_linear1d_out_frame_backward( const int n, const accscalar_t rwidth, const bool align_corners, PackedTensorAccessor64<scalar_t, 3> idata, const PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][w1]; idata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][w2]; gpuAtomicAddNoReturn(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val)); gpuAtomicAddNoReturn( &idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val)); } } } } static void upsample_linear1d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_linear1d_out_cuda", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); int output_width = output_size[0]; int nbatch = input.size(0); int channels = input.size(1); int input_width = input.size(2); upsample_1d_shape_check( input, Tensor(), nbatch, channels, input_width, output_width); output.resize_({input.size(0), input.size(1), output_width}); output.zero_(); AT_ASSERT(input_width > 0 && output_width > 0); const int num_kernels = output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_linear1d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor64<scalar_t, 3>(); auto odata = output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); upsample_linear1d_out_frame<scalar_t, accscalar_t> <<<cuda::ATenCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(cudaGetLastError()); } static void upsample_linear1d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { TensorArg grad_output_arg{grad_output_, "grad_output_", 1}, grad_input_arg{grad_input, "grad_input", 2}; checkAllSameGPU( "upsample_linear1d_backward_out_cuda", {grad_output_arg, grad_input_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); TORCH_CHECK( input_size.size() == 3, "It is expected input_size equals to 3, but got size ", input_size.size()); int output_width = output_size[0]; int nbatch = input_size[0]; int channels = input_size[1]; int input_width = input_size[2]; upsample_1d_shape_check( Tensor(), grad_output_, nbatch, channels, input_width, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_({nbatch, channels, input_width}); grad_input.zero_(); const int num_kernels = output_width; const int num_threads = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor64<scalar_t, 3>(); auto odata = grad_output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); upsample_linear1d_out_frame_backward<scalar_t, accscalar_t> 
<<<cuda::ATenCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rwidth, align_corners, idata, odata); }); AT_CUDA_CHECK(cudaGetLastError()); } } // namespace Tensor& upsample_linear1d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { upsample_linear1d_out_cuda_template( output, input, output_size, align_corners, scales); return output; } Tensor upsample_linear1d_cuda( const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_linear1d_out_cuda_template( output, input, output_size, align_corners, scales); return output; } Tensor& upsample_linear1d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("upsample_linear1d_backward_out_cuda"); upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales); return grad_input; } Tensor upsample_linear1d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("upsample_linear1d_backward_cuda"); Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales); return grad_input; } using at::native::upsample::compute_output_size; using at::native::upsample_cuda::get_scale_value; Tensor upsample_linear1d_cuda( const Tensor& input, c10::optional<IntArrayRef> output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors) { auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_w = get_scale_value(scale_factors, 0); upsample_linear1d_out_cuda_template(output, input, osize, align_corners, scale_w); return output; } Tensor upsample_linear1d_backward_cuda( const Tensor& grad_output, c10::optional<IntArrayRef> output_size, IntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("upsample_linear1d_backward_cuda"); auto osize = compute_output_size(input_size, output_size, scale_factors); auto scale_w = get_scale_value(scale_factors, 0); auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, osize, input_size, align_corners, scale_w); return grad_input; } } // namespace native } // namespace at
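Both the forward and backward kernels above derive their two taps (w1 and w1 + w1p) and weights (w0lambda, w1lambda) from a fractional source coordinate w1r. The host-side sketch below reproduces that index math for a single axis, following area_pixel_compute_scale / area_pixel_compute_source_index from UpSample.cuh as I read them (align_corners toggles between endpoint-matching and pixel-area conventions); treat it as an illustration rather than the library implementation, and note it ignores the optional explicit scales override.

// Sketch of the 1-D linear upsampling index/weight computation used above.
#include <cstdio>

static float compute_scale(int in, int out, bool align_corners) {
    if (align_corners)
        return out > 1 ? static_cast<float>(in - 1) / (out - 1) : 0.0f;
    return static_cast<float>(in) / out;
}

static float source_index(float scale, int dst, bool align_corners) {
    if (align_corners)
        return scale * dst;
    float src = scale * (dst + 0.5f) - 0.5f;   // pixel-area model
    return src < 0.0f ? 0.0f : src;            // clamp the leading columns
}

int main() {
    const int width1 = 4, width2 = 9;          // input and output widths
    const bool align_corners = false;
    const float rwidth = compute_scale(width1, width2, align_corners);
    for (int w2 = 0; w2 < width2; ++w2) {
        const float w1r = source_index(rwidth, w2, align_corners);
        const int w1 = static_cast<int>(w1r);          // left tap
        const int w1p = (w1 < width1 - 1) ? 1 : 0;     // right tap offset, clamped at the border
        const float w1lambda = w1r - w1;               // weight of the right tap
        const float w0lambda = 1.0f - w1lambda;        // weight of the left tap
        printf("w2=%d -> taps (%d,%d), weights (%.3f, %.3f)\n",
               w2, w1, w1 + w1p, w0lambda, w1lambda);
    }
    return 0;
}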
7ba2fbe745fb4a23512bf2cb7c67341784c22c84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> //#define rows 1000 //#define cols 1000 using namespace std; // CUDA kernel. Each thread takes care of one element of c __global__ void matricesMul(double *m1, double *m2, double *m3){ // Get our global thread ID int ti = blockIdx.y*blockDim.y+threadIdx.y; int tj = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if(ti < rows && tj < cols){ double data= 0.0; for(int k=0;k<rows;k++) data += m1[ti*rows+k] * m2[k*cols+tj]; m3[ti*rows+tj] = data; } } FILE * openFile(char const *fileName){ /* try to open a file */ FILE *f=NULL; f = fopen(fileName,"r"); if(f == NULL){ printf("File '%s' doesn't exist!\n",fileName); exit(1); } return f; } double * buildMatrix(FILE *f, size_t &rows, size_t &columns){ /* build a matrix M (get memory) */ fscanf(f,"%i",&rows); /* %zu zx is size_t */ fscanf(f,"%i",&columns); //fgetc(f); /* skipping nasty character, on this case new line */ double *M = (double *)malloc(rows*columns*sizeof(double)); /* reserved memory */ if(rows <= 0 || columns <= 0){ printf("should be size positive and upper than cero \n"); exit(1); } return M; } void getData(FILE *f, double *M, size_t len){ /* Capture data from plain text file to system memory Note: the data files need one end line to get last number format of data files ... A B #.#,#.# #.#,#.# ... A -> Size row B -> Size column */ //sizeof(char)==1 char data[50]="", ch = ' '; size_t posData = 0, Mindex = 0; while(len>Mindex){ ch = fgetc(f); /*get char and char in file f */ if(Mindex==0 && ch == '\n'){//skip nasty chracter continue; } if(ch == ',' || ch == '\n'){ data[posData] = '\0'; /* char end */ M[Mindex] = stod(data); /*convert string to double */ posData = 0; strcpy(data, ""); /* take memory for the next data */ Mindex++; }else{ data[posData] = ch; posData++; } } } void writeResult(){ /* Wite the result on output.txt file M -> Matrix, Mrow -> Matrix rows, Mcol -> Matrix columns */ FILE *f = fopen("output.txt","w+");//clean file and set result for(size_t i=0;i<this->M1row;i++){ for(size_t j=0;j<this->M2col;j++){ if(j+1 == this->M2col) {//last chracter fprintf(f,"%f\n",this->MResult[i*this->M2col + j]); } else { fprintf(f,"%f,",this->MResult[i*this->M2col + j]); } } } fclose(f); } bool checkMul(){ if(this->M1col != this->M2row){ printf("ERROR - Matrices cannot be multiply!"); return 0;//FALSE } return 1;//TRUE } int main( int argc, char* argv[] ){ if(argc != 2){ printf("There should be 2 arguments!\n"); exit(1); } // Host (CPU) input matrices double *h_m1; size_t rows_m1, cols_m1; double *h_m2; size_t rows_m2, cols_m2; //Host (CPU) output matrix double *h_m3; // Device (GPU-Nvidia) input matrices double *d_m1; double *d_m2; //Device (GPU-Nvidia) output matrix double *d_m3; FILE *f1=NULL; /* file pointers */ f1=openFile(argv[1]); // Allocate memory for each matrix on host h_m1=buildMatrix(f1, rows_m1, cols_m1); getData(f1, h_m1, rows_m1*cols_m1); h_m2=buildMatrix(f1, rows_m2, cols_m2); getData(f1, h_m2, rows_m2*cols_m2); h_m3=(double *)malloc(rows_m1*cols_m2*sizeof(double)); fclose(f1); // Size of matrices n size_t n = rows_m1*cols_m2; size_t bytes = n*sizeof(double); // Allocate memory for each matrix on GPU hipMalloc((void **)&d_m1, bytes); hipMalloc((void **)&d_m2, bytes); hipMalloc((void **)&d_m3, bytes); // Copy host matrices to device hipMemcpy( d_m1, h_m1, bytes, hipMemcpyHostToDevice); hipMemcpy( d_m2, h_m2, bytes, 
hipMemcpyHostToDevice); // Number of threads in each thread matrix block double x = sqrt(1024); size_t threadsInX= floor(x); size_t threadsInY= threadsInX; dim3 dimBlock(threadsInX,threadsInY,1); // Number of thread blocks in matrix grid size_t gridNum = ceil((double)n/1024); // needed grid numbers to our problem size_t gridR = ceil(sqrt(gridNum)); // grid rows size_t gridC = gridR; // grid cols dim3 dimGrid(gridR,gridC,1); // Execute the kernel hipLaunchKernelGGL(( matricesMul), dim3(dimGrid),dim3(dimBlock), 0, 0, d_m1, d_m2, d_m3); // Copy result m3 matrix back to host hipMemcpy(h_m3, d_m3, bytes, hipMemcpyDeviceToHost); // print every item into m3 matrix for(int i=0; i<n; i++){ double val = h_m3[i]; printf("final result: %f\n", val); } // Release device memory hipFree(d_m1); hipFree(d_m2); hipFree(d_m3); // Release host memory free(h_m1); free(h_m2); free(h_m3); return 0; }
7ba2fbe745fb4a23512bf2cb7c67341784c22c84.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> //#define rows 1000 //#define cols 1000 using namespace std; // CUDA kernel. Each thread takes care of one element of c __global__ void matricesMul(double *m1, double *m2, double *m3){ // Get our global thread ID int ti = blockIdx.y*blockDim.y+threadIdx.y; int tj = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if(ti < rows && tj < cols){ double data= 0.0; for(int k=0;k<rows;k++) data += m1[ti*rows+k] * m2[k*cols+tj]; m3[ti*rows+tj] = data; } } FILE * openFile(char const *fileName){ /* try to open a file */ FILE *f=NULL; f = fopen(fileName,"r"); if(f == NULL){ printf("File '%s' doesn't exist!\n",fileName); exit(1); } return f; } double * buildMatrix(FILE *f, size_t &rows, size_t &columns){ /* build a matrix M (get memory) */ fscanf(f,"%i",&rows); /* %zu zx is size_t */ fscanf(f,"%i",&columns); //fgetc(f); /* skipping nasty character, on this case new line */ double *M = (double *)malloc(rows*columns*sizeof(double)); /* reserved memory */ if(rows <= 0 || columns <= 0){ printf("should be size positive and upper than cero \n"); exit(1); } return M; } void getData(FILE *f, double *M, size_t len){ /* Capture data from plain text file to system memory Note: the data files need one end line to get last number format of data files ... A B #.#,#.# #.#,#.# ... A -> Size row B -> Size column */ //sizeof(char)==1 char data[50]="", ch = ' '; size_t posData = 0, Mindex = 0; while(len>Mindex){ ch = fgetc(f); /*get char and char in file f */ if(Mindex==0 && ch == '\n'){//skip nasty chracter continue; } if(ch == ',' || ch == '\n'){ data[posData] = '\0'; /* char end */ M[Mindex] = stod(data); /*convert string to double */ posData = 0; strcpy(data, ""); /* take memory for the next data */ Mindex++; }else{ data[posData] = ch; posData++; } } } void writeResult(){ /* Wite the result on output.txt file M -> Matrix, Mrow -> Matrix rows, Mcol -> Matrix columns */ FILE *f = fopen("output.txt","w+");//clean file and set result for(size_t i=0;i<this->M1row;i++){ for(size_t j=0;j<this->M2col;j++){ if(j+1 == this->M2col) {//last chracter fprintf(f,"%f\n",this->MResult[i*this->M2col + j]); } else { fprintf(f,"%f,",this->MResult[i*this->M2col + j]); } } } fclose(f); } bool checkMul(){ if(this->M1col != this->M2row){ printf("ERROR - Matrices cannot be multiply!"); return 0;//FALSE } return 1;//TRUE } int main( int argc, char* argv[] ){ if(argc != 2){ printf("There should be 2 arguments!\n"); exit(1); } // Host (CPU) input matrices double *h_m1; size_t rows_m1, cols_m1; double *h_m2; size_t rows_m2, cols_m2; //Host (CPU) output matrix double *h_m3; // Device (GPU-Nvidia) input matrices double *d_m1; double *d_m2; //Device (GPU-Nvidia) output matrix double *d_m3; FILE *f1=NULL; /* file pointers */ f1=openFile(argv[1]); // Allocate memory for each matrix on host h_m1=buildMatrix(f1, rows_m1, cols_m1); getData(f1, h_m1, rows_m1*cols_m1); h_m2=buildMatrix(f1, rows_m2, cols_m2); getData(f1, h_m2, rows_m2*cols_m2); h_m3=(double *)malloc(rows_m1*cols_m2*sizeof(double)); fclose(f1); // Size of matrices n² size_t n = rows_m1*cols_m2; size_t bytes = n*sizeof(double); // Allocate memory for each matrix on GPU cudaMalloc((void **)&d_m1, bytes); cudaMalloc((void **)&d_m2, bytes); cudaMalloc((void **)&d_m3, bytes); // Copy host matrices to device cudaMemcpy( d_m1, h_m1, bytes, cudaMemcpyHostToDevice); cudaMemcpy( d_m2, h_m2, bytes, cudaMemcpyHostToDevice); // Number of threads in each thread matrix block double x = sqrt(1024); size_t 
threadsInX= floor(x); size_t threadsInY= threadsInX; dim3 dimBlock(threadsInX,threadsInY,1); // Number of thread blocks in matrix grid size_t gridNum = ceil((double)n/1024); // needed grid numbers to our problem size_t gridR = ceil(sqrt(gridNum)); // grid rows size_t gridC = gridR; // grid cols dim3 dimGrid(gridR,gridC,1); // Execute the kernel matricesMul<<<dimGrid,dimBlock>>>(d_m1, d_m2, d_m3); // Copy result m3 matrix back to host cudaMemcpy(h_m3, d_m3, bytes, cudaMemcpyDeviceToHost); // print every item into m3 matrix for(int i=0; i<n; i++){ double val = h_m3[i]; printf("final result: %f\n", val); } // Release device memory cudaFree(d_m1); cudaFree(d_m2); cudaFree(d_m3); // Release host memory free(h_m1); free(h_m2); free(h_m3); return 0; }
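Note that matricesMul in both listings indexes with rows and cols, which only exist as commented-out macros, so the kernel does not compile as shown, and its m1[ti*rows+k] indexing only holds for square matrices. A hedged sketch of an equivalent kernel that takes the dimensions as arguments is given below; it assumes row-major storage with A of size M x K and B of size K x N, and the names matMulKernel, M, K, N are illustrative rather than part of the original program.

// Sketch only: the same naive multiply with dimensions passed as arguments
// instead of the commented-out rows/cols macros. C = A * B, row-major,
// A is (M x K), B is (K x N), C is (M x N).
__global__ void matMulKernel(const double *A, const double *B, double *C,
                             int M, int K, int N)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < M && col < N) {
        double acc = 0.0;
        for (int k = 0; k < K; ++k)
            acc += A[row * K + k] * B[k * N + col];
        C[row * N + col] = acc;
    }
}

// Possible launch, keeping the 16x16 block shape used in main():
//   dim3 blk(16, 16, 1);
//   dim3 grd((N + 15) / 16, (M + 15) / 16, 1);
//   matMulKernel<<<grd, blk>>>(d_m1, d_m2, d_m3, M, K, N);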
f3b6f8b159bfea6f87498447c9351230e94eb0f0.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorFactories.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <ATen/NumericUtils.h> #include <c10/hip/HIPMathCompat.h> #include <ATen/NumericUtils.h> #include <c10/util/complex.h> namespace at { namespace native { void bitwise_not_kernel_cuda(TensorIteratorBase& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a) { return !a; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ~a; }); }); } } void exp_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "exp_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::exp(a); }); }); } void expm1_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "expm1_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::expm1(a); }); }); } // We manually overload rsqrt because std::rsqrt does not work with complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) { return ::rsqrt(v); } template<typename T> __host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) { const c10::complex<T> one = c10::complex<T>(1.0, 0); // std::sqrt for c10::complex is overloaded in c10/util/complex_math.h return one / ::sqrt(v); } void rsqrt_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "rsqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { // In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float. 
return rsqrt_wrapper(a); }); }); } void sqrt_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sqrt(a); }); }); } void clamp_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() { auto lower = min_value.to<scalar_t>(); auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_min_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() { auto lower = min_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_max_kernel_cuda(TensorIteratorBase& iter, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() { auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, upper); } }); }); } void nan_to_num_kernel_cuda( TensorIteratorBase& iter, c10::optional<double> nan, c10::optional<double> pos_inf, c10::optional<double> neg_inf) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "nan_to_num_cuda", [&]() { scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.)); scalar_t pos_inf_replacement = pos_inf.has_value() ? static_cast<scalar_t>(pos_inf.value()) : std::numeric_limits<scalar_t>::max(); scalar_t neg_inf_replacement = neg_inf.has_value() ? static_cast<scalar_t>(neg_inf.value()) : std::numeric_limits<scalar_t>::lowest(); gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t { return ( at::_isnan(a) ? nan_replacement : (a == std::numeric_limits<scalar_t>::infinity() ? pos_inf_replacement : (a == -std::numeric_limits<scalar_t>::infinity() ? neg_inf_replacement : a))); }); }); } void frexp_kernel_cuda(TensorIteratorBase& iter) { #ifdef __HIP_PLATFORM_HCC__ // Reference: https://rocmdocs.amd.com/en/latest/ROCm_API_References/HIP-MATH.html // https://github.com/ROCm-Developer-Tools/HIP/issues/2169 // ROCm does not support frexp function yet TORCH_CHECK(false, "torch.frexp() is not implemented on ROCm platform."); #else AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, // The iter.dtype() here is the dtype of mantissa output. // It's a floating point type and must be the same as the input's dtype. 
iter.dtype(), "frexp_cuda", [&]() { gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t a) -> thrust::tuple<scalar_t, int32_t> { int32_t exponent; scalar_t mantissa = ::frexp(a, &exponent); return {mantissa, exponent}; }); }); #endif } REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda); REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda); REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda); REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda); REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda); REGISTER_DISPATCH(clamp_stub, &clamp_kernel_cuda); REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_cuda); REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_cuda); REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda); REGISTER_DISPATCH(frexp_stub, &frexp_kernel_cuda); } // namespace native } // namespace at
f3b6f8b159bfea6f87498447c9351230e94eb0f0.cu
#include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorFactories.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/NumericUtils.h> #include <c10/cuda/CUDAMathCompat.h> #include <ATen/NumericUtils.h> #include <c10/util/complex.h> namespace at { namespace native { void bitwise_not_kernel_cuda(TensorIteratorBase& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a) { return !a; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ~a; }); }); } } void exp_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "exp_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::exp(a); }); }); } void expm1_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "expm1_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::expm1(a); }); }); } // We manually overload rsqrt because std::rsqrt does not work with complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) { return ::rsqrt(v); } template<typename T> __host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) { const c10::complex<T> one = c10::complex<T>(1.0, 0); // std::sqrt for c10::complex is overloaded in c10/util/complex_math.h return one / ::sqrt(v); } void rsqrt_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "rsqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { // In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float. 
return rsqrt_wrapper(a); }); }); } void sqrt_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sqrt(a); }); }); } void clamp_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() { auto lower = min_value.to<scalar_t>(); auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_min_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() { auto lower = min_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_max_kernel_cuda(TensorIteratorBase& iter, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() { auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, upper); } }); }); } void nan_to_num_kernel_cuda( TensorIteratorBase& iter, c10::optional<double> nan, c10::optional<double> pos_inf, c10::optional<double> neg_inf) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "nan_to_num_cuda", [&]() { scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.)); scalar_t pos_inf_replacement = pos_inf.has_value() ? static_cast<scalar_t>(pos_inf.value()) : std::numeric_limits<scalar_t>::max(); scalar_t neg_inf_replacement = neg_inf.has_value() ? static_cast<scalar_t>(neg_inf.value()) : std::numeric_limits<scalar_t>::lowest(); gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t { return ( at::_isnan(a) ? nan_replacement : (a == std::numeric_limits<scalar_t>::infinity() ? pos_inf_replacement : (a == -std::numeric_limits<scalar_t>::infinity() ? neg_inf_replacement : a))); }); }); } void frexp_kernel_cuda(TensorIteratorBase& iter) { #ifdef __HIP_PLATFORM_HCC__ // Reference: https://rocmdocs.amd.com/en/latest/ROCm_API_References/HIP-MATH.html // https://github.com/ROCm-Developer-Tools/HIP/issues/2169 // ROCm does not support frexp function yet TORCH_CHECK(false, "torch.frexp() is not implemented on ROCm platform."); #else AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, // The iter.dtype() here is the dtype of mantissa output. // It's a floating point type and must be the same as the input's dtype. 
iter.dtype(), "frexp_cuda", [&]() { gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t a) -> thrust::tuple<scalar_t, int32_t> { int32_t exponent; scalar_t mantissa = std::frexp(a, &exponent); return {mantissa, exponent}; }); }); #endif } REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda); REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda); REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda); REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda); REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda); REGISTER_DISPATCH(clamp_stub, &clamp_kernel_cuda); REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_cuda); REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_cuda); REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda); REGISTER_DISPATCH(frexp_stub, &frexp_kernel_cuda); } // namespace native } // namespace at
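As a concrete reference for the dispatch lambdas above, the snippet below spells out the element-wise rule applied by nan_to_num_kernel_cuda for a single float, using the same default replacements (0 for NaN, the type's max/lowest for positive/negative infinity). It is a plain host-side illustration, not part of the ATen sources.

// Standalone illustration of the nan_to_num element rule used above.
#include <cmath>
#include <cstdio>
#include <limits>

static float nan_to_num_elem(float a,
                             float nan_repl     = 0.0f,
                             float pos_inf_repl = std::numeric_limits<float>::max(),
                             float neg_inf_repl = std::numeric_limits<float>::lowest())
{
    if (std::isnan(a)) return nan_repl;
    if (a ==  std::numeric_limits<float>::infinity()) return pos_inf_repl;
    if (a == -std::numeric_limits<float>::infinity()) return neg_inf_repl;
    return a;  // finite values pass through unchanged
}

int main()
{
    const float vals[] = { 1.5f, NAN, INFINITY, -INFINITY };
    for (float v : vals)
        printf("%g -> %g\n", v, nan_to_num_elem(v));
    return 0;
}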
4f29b40125f01405fa85371b5a3ae033a847b9b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/sequence.h> #include <thrust/count.h> //#define MAXN 24 #define MAXN 96 #define MAXP 300000 #define MAXS 3000000 #define PI 3.14159f #define THREADS 256 struct pair { int key; int value; }; struct grid { float oX, oY, oZ; float size; int nX, nY, nZ; }; struct simulation { float minX, maxX; float minY, maxY; float minZ, maxZ; float dt; int tsn; int ssi; int nsi; }; struct load { float minX, maxX; float minY, maxY; float minZ, maxZ; float gx; float gy; float gz; float w; }; struct fix { float minX, maxX; float minY, maxY; float minZ, maxZ; float velX, velY, velZ; }; struct outlet { float oX, oY, oZ; float nX, nY, nZ; }; struct inlet { int Material; float Mass, Smooth; float oX, oY, oZ; float uX, uY, uZ; float vX, vY, vZ; float nX, nY, nZ; int nu, nv; float Velocity; float Density, Energy; float Distance; }; struct model { int pn; int* Material; float* Mass; float* Smooth; float* PosX; float* PosY; float* PosZ; float* VelX; float* VelY; float* VelZ; float* Density; float* Energy; float* Pressure; float* Sound; float* VelDotX; float* VelDotY; float* VelDotZ; float* DensityDot; float* EnergyDot; float* PosX0; float* PosY0; float* PosZ0; float* VelX0; float* VelY0; float* VelZ0; float* Density0; float* Energy0; int* List; int* Hash; int* Index; int* SetStart; int* SetStop; int* IntDummy; float* FloatDummy; }; // Host Variables int hMatType[10]; float hMatProp[10][10]; struct simulation hRun; struct grid hGrid; struct load hLoad[10]; struct fix hFix[10]; struct outlet hOut[10]; struct inlet hIn[10]; // Device Variables __device__ __constant__ int dMatType[10]; __device__ __constant__ float dMatProp[10][10]; __device__ __constant__ struct simulation dRun; __device__ struct grid dGrid; __device__ __constant__ struct load dLoad[10]; __device__ __constant__ struct fix dFix[10]; __device__ __constant__ struct outlet dOut[10]; __device__ struct inlet dIn[10]; void initBox(struct model *hm) { int i, j, k, m, b, q, ip; double rho, c0, pmin; double dr; q = 2; m = 1; b = 2; rho = 1000.f; c0 = 20.0f; pmin = -1.e4; hMatType[m] = 3; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 3; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.02f / q; // x4 ip = 0; for (k = 0; k < 2 * q ; k++) { for (j = 0; j < 2 * q ; j++) { for (i = 0; i < 2 * q; i++) { hm->PosX[ip] = i * dr; hm->PosY[ip] = j * dr; hm->PosZ[ip] = k * dr; hm->Material[ip] = m; hm->VelX[ip] = 0.0f; ip++; } } } for (k = 0; k < 2 * q ; k++) { for (j = -2; j < -1; j++) { for (i = -0; i < 2 * q; i++) { hm->PosX[ip] = i * dr; hm->PosY[ip] = j * dr; hm->PosZ[ip] = k * dr; hm->Material[ip] = b; hm->VelX[ip] = 0.0f; ip++; } } } hm->pn = ip; thrust::fill(hm->Mass, hm->Mass + hm->pn, rho * dr * dr * dr); thrust::fill(hm->Smooth, hm->Smooth + hm->pn, 1.2f * dr); thrust::fill(hm->VelY, hm->VelY + hm->pn, 0.0f); thrust::fill(hm->VelZ, hm->VelZ + hm->pn, 0.0f); thrust::fill(hm->Density, hm->Density + hm->pn, rho); thrust::fill(hm->Energy, hm->Energy + hm->pn, 0.0f); thrust::fill(hm->Pressure, hm->Pressure + hm->pn, 0.0f); thrust::fill(hm->Sound, hm->Sound + hm->pn, c0); hRun.minX = -0.2f; hRun.maxX = 1.0f; hRun.minY = -0.2f; hRun.maxY = 1.0f; hRun.minZ = -0.2f; hRun.maxZ = 1.0f; hRun.dt = dr / hMatProp[m][1]; hRun.tsn = 1000 * q; //1000; 
hRun.ssi = 100 * q; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; printf("Box \n"); printf("Particles: %i \n", hm->pn); printf("Grid: %i \n", hGrid.nX * hGrid.nY * hGrid.nZ); } int copyHostToDevice(struct model *hm, struct model *dm) { dm->pn = hm->pn; hipMemcpy(dm->Material, hm->Material, (MAXP * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->Mass, hm->Mass, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Smooth, hm->Smooth, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosX, hm->PosX, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosY, hm->PosY, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosZ, hm->PosZ, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelX, hm->VelX, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelY, hm->VelY, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelZ, hm->VelZ, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Density, hm->Density, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Energy, hm->Energy, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Pressure, hm->Pressure, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Sound, hm->Sound, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelDotX, hm->VelDotX, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelDotY, hm->VelDotY, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelDotZ, hm->VelDotZ, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->DensityDot, hm->DensityDot, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->EnergyDot, hm->EnergyDot, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosX0, hm->PosX0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosY0, hm->PosY0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosZ0, hm->PosZ0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelX0, hm->VelX0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelY0, hm->VelY0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelZ0, hm->VelZ0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Density0, hm->Density0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Energy0, hm->Energy0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->List, hm->List, (MAXP * MAXN * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->Hash, hm->Hash, (MAXP * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->Index, hm->Index, (MAXP * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->IntDummy, hm->IntDummy, (MAXP * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->FloatDummy, hm->FloatDummy, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->SetStart, hm->SetStart, (MAXS * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->SetStop, hm->SetStop, (MAXS * sizeof(int)), hipMemcpyHostToDevice); dGrid.oX = hGrid.oX; dGrid.oY = hGrid.oY; dGrid.oZ = hGrid.oZ; dGrid.nX = hGrid.nX; dGrid.nY = hGrid.nY; dGrid.nZ = hGrid.nZ; dGrid.size = hGrid.size; for (int i = 0; i < 10; i++) 
        dIn[i] = hIn[i];  /* NOTE: dIn is a __device__ global; writing it directly from host code
                             relies on compiler-specific behaviour. hipMemcpyToSymbol(HIP_SYMBOL(dIn), ...)
                             would be the portable route (the same applies to dGrid above and to the
                             hIn[i] = dIn[i] read in copyDeviceToHost below). */

    // HIP expects the symbol itself here (wrapped in HIP_SYMBOL), not its name as a string
    hipMemcpyToSymbol(HIP_SYMBOL(dMatType), hMatType, 10 * sizeof(int));
    hipMemcpyToSymbol(HIP_SYMBOL(dMatProp), hMatProp, 100 * sizeof(float));
    hipMemcpyToSymbol(HIP_SYMBOL(dRun), &hRun, sizeof(struct simulation));
    hipMemcpyToSymbol(HIP_SYMBOL(dLoad), &hLoad, 10 * sizeof(struct load));
    hipMemcpyToSymbol(HIP_SYMBOL(dFix), &hFix, 10 * sizeof(struct fix));
    hipMemcpyToSymbol(HIP_SYMBOL(dOut), &hOut, 10 * sizeof(struct outlet));

    return 0;
}

int copyDeviceToHost(struct model *dm, struct model *hm) {

    hm->pn = dm->pn;

    hipMemcpy(hm->Material, dm->Material, (MAXP * sizeof(int)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->Mass, dm->Mass, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->Smooth, dm->Smooth, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->PosX, dm->PosX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->PosY, dm->PosY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->PosZ, dm->PosZ, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->VelX, dm->VelX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->VelY, dm->VelY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->VelZ, dm->VelZ, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->Density, dm->Density, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->Energy, dm->Energy, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->Pressure, dm->Pressure, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->Sound, dm->Sound, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->VelDotX, dm->VelDotX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->VelDotY, dm->VelDotY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->VelDotZ, dm->VelDotZ, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->DensityDot, dm->DensityDot, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->EnergyDot, dm->EnergyDot, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->PosX0, dm->PosX0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->PosY0, dm->PosY0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->PosZ0, dm->PosZ0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->VelX0, dm->VelX0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->VelY0, dm->VelY0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->VelZ0, dm->VelZ0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->Density0, dm->Density0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->Energy0, dm->Energy0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->List, dm->List, (MAXP * MAXN * sizeof(int)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->Hash, dm->Hash, (MAXP * sizeof(int)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->Index, dm->Index, (MAXP * sizeof(int)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->IntDummy, dm->IntDummy, (MAXP * sizeof(int)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->FloatDummy, dm->FloatDummy, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->SetStart, dm->SetStart, (MAXS * sizeof(int)), hipMemcpyDeviceToHost);
    hipMemcpy(hm->SetStop, dm->SetStop, (MAXS * sizeof(int)), hipMemcpyDeviceToHost);

    hGrid.oX = dGrid.oX;
    hGrid.oY = dGrid.oY;
    hGrid.oZ = dGrid.oZ;
    hGrid.nX = dGrid.nX;
    hGrid.nY = dGrid.nY;
    hGrid.nZ = dGrid.nZ;
    hGrid.size = dGrid.size;

    for (int i = 0; i < 10; i++) hIn[i] = dIn[i];

    return 0;
}

int initHost(struct model *hm) {

    hm->Material = (int *) malloc(MAXP * sizeof(int));
    hm->Mass = (float *) malloc(MAXP * sizeof(float));
    hm->Smooth = (float *) malloc(MAXP *
sizeof(float)); hm->PosX = (float *) malloc(MAXP * sizeof(float)); hm->PosY = (float *) malloc(MAXP * sizeof(float)); hm->PosZ = (float *) malloc(MAXP * sizeof(float)); hm->VelX = (float *) malloc(MAXP * sizeof(float)); hm->VelY = (float *) malloc(MAXP * sizeof(float)); hm->VelZ = (float *) malloc(MAXP * sizeof(float)); hm->Density = (float *) malloc(MAXP * sizeof(float)); hm->Energy = (float *) malloc(MAXP * sizeof(float)); hm->Pressure = (float *) malloc(MAXP * sizeof(float)); hm->Sound = (float *) malloc(MAXP * sizeof(float)); hm->VelDotX = (float *) malloc(MAXP * sizeof(float)); hm->VelDotY = (float *) malloc(MAXP * sizeof(float)); hm->VelDotZ = (float *) malloc(MAXP * sizeof(float)); hm->DensityDot = (float *) malloc(MAXP * sizeof(float)); hm->EnergyDot = (float *) malloc(MAXP * sizeof(float)); hm->PosX0 = (float *) malloc(MAXP * sizeof(float)); hm->PosY0 = (float *) malloc(MAXP * sizeof(float)); hm->PosZ0 = (float *) malloc(MAXP * sizeof(float)); hm->VelX0 = (float *) malloc(MAXP * sizeof(float)); hm->VelY0 = (float *) malloc(MAXP * sizeof(float)); hm->VelZ0 = (float *) malloc(MAXP * sizeof(float)); hm->Density0 = (float *) malloc(MAXP * sizeof(float)); hm->Energy0 = (float *) malloc(MAXP * sizeof(float)); hm->Hash = (int *) malloc(MAXP * sizeof(int)); hm->Index = (int *) malloc(MAXP * sizeof(int)); hm->List = (int *) malloc(MAXP * MAXN * sizeof(int)); hm->IntDummy = (int *) malloc(MAXP * sizeof(int)); hm->FloatDummy = (float *) malloc(MAXP * sizeof(float)); hm->SetStart = (int *) malloc(MAXS * sizeof(int)); hm->SetStop = (int *) malloc(MAXS * sizeof(int)); return 0; } int initDevice(struct model *dm) { hipMalloc((void**) &(dm->Material), (MAXP * sizeof(int))); hipMalloc((void**) &(dm->Mass), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Smooth), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosX), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosY), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosZ), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelX), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelY), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelZ), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Density), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Energy), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Pressure), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Sound), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelDotX), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelDotY), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelDotZ), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->DensityDot), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->EnergyDot), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosX0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosY0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosZ0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelX0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelY0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelZ0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Density0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Energy0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Hash), (MAXP * sizeof(int))); hipMalloc((void**) &(dm->Index), (MAXP * sizeof(int))); hipMalloc((void**) &(dm->List), (MAXP * MAXN * sizeof(int))); hipMalloc((void**) &(dm->IntDummy), (MAXP * sizeof(int))); hipMalloc((void**) &(dm->FloatDummy), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->SetStart), (MAXS * sizeof(int))); hipMalloc((void**) &(dm->SetStop), 
(MAXS * sizeof(int))); return 0; } int printData(struct model *hm) { /** * \brief Particle data file output * * Saves particle data on a disk file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; int i; // Stream file position stream = fopen("new_pos.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fclose(stream); // Stream file velocity stream = fopen("new_vel.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]); fclose(stream); // Stream file info stream = fopen("new_info.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i %+14.8e %+14.8e \n", hm->Material[i], hm->Mass[i], hm->Smooth[i]); fclose(stream); // Stream file field stream = fopen("new_field.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->Density[i], hm->Pressure[i], hm->Energy[i]); fclose(stream); /* // Stream file add1 stream = fopen("new_debug.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%d %d %d %f %f %f\n", i, hm->Index[i], hm->Hash[i], hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fclose(stream); */ // Stream file add1 stream = fopen("new_debug.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%d %f %f %f %f %f %f\n", i, hm->VelX[i], hm->VelY[i], hm->VelZ[i], hm->Density[i], hm->Energy[i], hm->Pressure[i]); fclose(stream); /* for (i = 0; i < hm->pn; i++) { printf("%d - ", i); for (int j = 0; j < MAXN; j++) printf("%d ", hm->List[i * MAXN +j]); printf("\n"); } */ return 0; } int outputVTK(struct model *hm, int ss) { /** * \brief Output Data file * * Saves vtk data file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; char filename[80]; int i; // Stream position file sprintf(filename, "out%05d.vtk", ss); stream = fopen(filename, "w"); fprintf(stream, "# vtk DataFile Version 2.0\n"); fprintf(stream, "Unstructured Grid Example\n"); fprintf(stream, "ASCII\n"); fprintf(stream, "DATASET UNSTRUCTURED_GRID\n"); fprintf(stream, "POINTS %i float\n", hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e %+e %+e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fprintf(stream, "CELLS %i %i \n", hm->pn, 2*hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i %i \n", 1, i); fprintf(stream, "CELL_TYPES %i \n", hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i \n", 1); fprintf(stream, "POINT_DATA %i \n", hm->pn); fprintf(stream, "SCALARS material int 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+d \n", hm->Material[i]); fprintf(stream, "SCALARS density float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Density[i]); fprintf(stream, "SCALARS pressure float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Pressure[i]); fprintf(stream, "SCALARS energy float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Energy[i]); fprintf(stream, "VECTORS velocity float\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e %+e %+e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]); fclose(stream); /* for (i = 0; i < hm->pn; i++) printf("%d %d \n", i, hm->Hash[i]); printf("\n\n\n"); for (i = 0; i < hm->SetStart.size(); i++) printf("%d %d %d \n", i, hm->SetStart[i], hm->SetStop[i]); for (i = 0; i < hm->pn; i++) { printf("%d - ", i); for (j = 
0; j < MAXN; j++) printf("%d ", hm->List[i*MAXN +j]); printf("\n"); } */ return 0; } __host__ void addInletHost(const int pn, const int* Index, const float* Smooth, float* PosX, float* PosY, float* PosZ, float* PosX0, float* PosY0, float* PosZ0) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; float dx, dy, dz, d; for (ip = 0; ip < pn; ip++) if (Index[ip] == 1) { dx = PosX[ip] - PosX0[ip]; dy = PosY[ip] - PosY0[ip]; dz = PosZ[ip] - PosZ0[ip]; d = sqrtf(dx*dx + dy*dy + dz*dz); dx *= Smooth[ip] / (1.2f * d); dy *= Smooth[ip] / (1.2f * d); dz *= Smooth[ip] / (1.2f * d); PosX[ip] -= dx; PosY[ip] -= dy; PosZ[ip] -= dz; PosX0[ip] = PosX[ip]; PosY0[ip] = PosY[ip]; PosZ0[ip] = PosZ[ip]; } } __host__ void updateHashHost(const int pn, const struct grid Grid, const float* PosX, const float* PosY, const float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, ix, iy, iz, ic; for (ip = 0; ip < pn; ip++) { ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size); iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size); iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; Hash[ip] = ic; } } __global__ void updateHashDevice(const int pn, const struct grid Grid, const float* PosX, const float* PosY, const float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, ix, iy, iz, ic; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size); iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size); iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; Hash[ip] = ic; } } __host__ void checkBoundariesHost(const int pn, const struct grid Grid, const float* PosX, const float* PosY, const float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; float minX, maxX; float minY, maxY; float minZ, maxZ; minX = Grid.oX; maxX = Grid.oX + Grid.size * Grid.nX; minY = Grid.oY; maxY = Grid.oY + Grid.size * Grid.nY; minZ = Grid.oZ; maxZ = Grid.oZ + Grid.size * Grid.nZ; for (ip = 0; ip < pn; ip++) { if ((PosX[ip] < minX) || (PosX[ip] > maxX) || (PosY[ip] < minY) || (PosY[ip] > maxY) || (PosZ[ip] < minZ) || (PosZ[ip] > maxZ)) { Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } } __global__ void checkBoundariesDevice(const int pn, const struct grid Grid, float* PosX, float* PosY, float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; float minX, maxX; float minY, maxY; float minZ, maxZ; ip = threadIdx.x + blockDim.x * blockIdx.x; minX = Grid.oX; maxX = Grid.oX + Grid.size * Grid.nX; minY = Grid.oY; maxY = Grid.oY + Grid.size * Grid.nY; minZ = Grid.oZ; maxZ = Grid.oZ + Grid.size * Grid.nZ; if (ip < pn) { if ((PosX[ip] < minX) || (PosX[ip] > maxX) || (PosY[ip] < minY) || (PosY[ip] > maxY) || (PosZ[ip] < minZ) || (PosZ[ip] > maxZ)) { PosX[ip] = 0.5f*(minX + maxX); PosY[ip] = 0.5f*(minY + maxY); PosZ[ip] = 0.5f*(minZ + maxZ); //Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } /* if (PosX[ip] < minX) PosX[ip] = minX; if (PosY[ip] < minY) PosY[ip] = minY; if (PosZ[ip] < minZ) PosZ[ip] = minZ; if (PosX[ip] > maxX) PosX[ip] = maxX; if (PosY[ip] > maxY) PosY[ip] = maxY; if (PosZ[ip] > maxZ) PosZ[ip] = maxZ; if (Hash[ip] > Grid.nX * Grid.nY * Grid.nZ) Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; */ } } __host__ void checkOutletHost(const int pn, const struct grid 
Grid, const float* PosX, const float* PosY, const float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ float d; int ip, i; for (ip = 0; ip < pn; ip++) { for (i = 0; i < 10; i++) { d = 0.0f; d += (PosX[ip] - hOut[i].oX) * hOut[i].nX; d += (PosY[ip] - hOut[i].oY) * hOut[i].nY; d += (PosZ[ip] - hOut[i].oZ) * hOut[i].nZ; if (d > 0.0f) { Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } } } __global__ void checkOutletDevice(const int pn, const struct grid Grid, const float* PosX, const float* PosY, const float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ float d; int ip, i; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { for (i = 0; i < 10; i++) { d = 0.0f; d += (PosX[ip] - dOut[i].oX) * dOut[i].nX; d += (PosY[ip] - dOut[i].oY) * dOut[i].nY; d += (PosZ[ip] - dOut[i].oZ) * dOut[i].nZ; if (d > 0.0f) { Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } } } /* __host__ void makeInletHost(struct model *hm, struct pointer_model *pm) { int i, j, iu, iv; for (i = 0; i < 10; i++) if (hIn[i].Material != 0) { hIn[i].Distance += hIn[i].Velocity * hRun.dt; if ((1.2f * hIn[i].Distance) > hIn[i].Smooth) { hIn[i].Distance = 0.0f; for (iv = 0; iv < hIn[i].nv; iv++) { for (iu = 0; iu < hIn[i].nu; iu++) { hm->pn++; pm->pn++; hm->Material.push_back(hIn[i].Material); hm->Mass.push_back(hIn[i].Mass); hm->Smooth.push_back(hIn[i].Smooth); hm->PosX.push_back(hIn[i].oX + iu * hIn[i].uX + iv * hIn[i].vX); hm->PosY.push_back(hIn[i].oY + iu * hIn[i].uY + iv * hIn[i].vY); hm->PosZ.push_back(hIn[i].oZ + iu * hIn[i].uZ + iv * hIn[i].vZ); hm->VelX.push_back(hIn[i].Velocity * hIn[i].nX); hm->VelY.push_back(hIn[i].Velocity * hIn[i].nY); hm->VelZ.push_back(hIn[i].Velocity * hIn[i].nZ); hm->Density.push_back(hIn[i].Density); hm->Energy.push_back(hIn[i].Energy); hm->PosX0.push_back(hIn[i].oX + iu * hIn[i].uX + iv * hIn[i].vX); hm->PosY0.push_back(hIn[i].oY + iu * hIn[i].uY + iv * hIn[i].vY); hm->PosZ0.push_back(hIn[i].oZ + iu * hIn[i].uZ + iv * hIn[i].vZ); hm->VelX0.push_back(hIn[i].Velocity * hIn[i].nX); hm->VelY0.push_back(hIn[i].Velocity * hIn[i].nY); hm->VelZ0.push_back(hIn[i].Velocity * hIn[i].nZ); hm->Density0.push_back(hIn[i].Density); hm->Energy0.push_back(hIn[i].Energy); hm->VelDotX.push_back(0.0f); hm->VelDotY.push_back(0.0f); hm->VelDotZ.push_back(0.0f); hm->DensityDot.push_back(0.0f); hm->EnergyDot.push_back(0.0f); hm->Pressure.push_back(0.0f); hm->Sound.push_back(0.0f); for (j = 0; j < MAXN; j++) hm->List.push_back(0); hm->Hash.push_back(0); hm->Index.push_back(0); hm->IntDummy.push_back(0); hm->FloatDummy.push_back(0.0f); } } } } copyHostToPointer(hm, pm); } void makeInletDevice(struct model *dm) { int i, j, iu, iv; for (i = 0; i < 10; i++) if (dIn[i].Material != 0) { dIn[i].Distance += dIn[i].Velocity * hRun.dt; if ((1.2f * dIn[i].Distance) > dIn[i].Smooth) { dIn[i].Distance = 0.0f; printf("Inlet!\n"); for (iv = 0; iv < dIn[i].nv; iv++) { for (iu = 0; iu < dIn[i].nu; iu++) { dm->pn++; dm->Material.push_back(dIn[i].Material); dm->Mass.push_back(dIn[i].Mass); dm->Smooth.push_back(dIn[i].Smooth); dm->PosX.push_back(dIn[i].oX + iu * dIn[i].uX + iv * dIn[i].vX); dm->PosY.push_back(dIn[i].oY + iu * dIn[i].uY + iv * dIn[i].vY); dm->PosZ.push_back(dIn[i].oZ + iu * dIn[i].uZ + iv * dIn[i].vZ); dm->VelX.push_back(dIn[i].Velocity * dIn[i].nX); dm->VelY.push_back(dIn[i].Velocity * dIn[i].nY); dm->VelZ.push_back(dIn[i].Velocity * dIn[i].nZ); dm->Density.push_back(dIn[i].Density); dm->Energy.push_back(dIn[i].Energy); 
dm->PosX0.push_back(dIn[i].oX + iu * dIn[i].uX + iv * dIn[i].vX); dm->PosY0.push_back(dIn[i].oY + iu * dIn[i].uY + iv * dIn[i].vY); dm->PosZ0.push_back(dIn[i].oZ + iu * dIn[i].uZ + iv * dIn[i].vZ); dm->VelX0.push_back(dIn[i].Velocity * dIn[i].nX); dm->VelY0.push_back(dIn[i].Velocity * dIn[i].nY); dm->VelZ0.push_back(dIn[i].Velocity * dIn[i].nZ); dm->Density0.push_back(dIn[i].Density); dm->Energy0.push_back(dIn[i].Energy); dm->VelDotX.push_back(0.0f); dm->VelDotY.push_back(0.0f); dm->VelDotZ.push_back(0.0f); dm->DensityDot.push_back(0.0f); dm->EnergyDot.push_back(0.0f); dm->Pressure.push_back(0.0f); dm->Sound.push_back(0.0f); for (j = 0; j < MAXN; j++) dm->List.push_back(0); dm->Hash.push_back(0); dm->Index.push_back(0); dm->IntDummy.push_back(0); dm->FloatDummy.push_back(0.0f); } } } } } */ __host__ void updateSetsHost(const int pn, int *SetStart, int *SetStop, const int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; int hash, nextHash, prevHash; for (ip = 0; ip < pn; ip++) { hash = Hash[ip]; if (ip == 0) prevHash = -1; else prevHash = Hash[ip -1]; if (ip == pn -1) nextHash = -1; else nextHash = Hash[ip +1]; if (hash != prevHash) SetStart[hash] = ip; if (hash != nextHash) SetStop[hash] = ip +1; } } __global__ void updateSetsDevice(const int pn, int *SetStart, int *SetStop, const int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ __shared__ int prevHash[THREADS]; __shared__ int nextHash[THREADS]; int ip; int hash; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= pn) return; hash = Hash[ip]; if (threadIdx.x < THREADS -1) prevHash[threadIdx.x +1] = hash; if (threadIdx.x > 0) nextHash[threadIdx.x -1] = hash; if (threadIdx.x == 0) { if (ip == 0) prevHash[threadIdx.x] = -1; else prevHash[threadIdx.x] = Hash[ip -1]; } if (threadIdx.x == THREADS -1) { if (ip == pn -1) nextHash[threadIdx.x] = -1; else nextHash[threadIdx.x] = Hash[ip +1]; } __syncthreads(); if (hash != prevHash[threadIdx.x]) SetStart[hash] = ip; if (hash != nextHash[threadIdx.x]) SetStop[hash] = ip +1; } __host__ void updateListHost(const int pn, int *List, const int* SetStart, const int* SetStop, const struct grid Grid, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ) { int ip, ic, ix, iy, iz, i, j, k, jp, jc, np; float dx, dy, dz, dr; // Particles list is filled for (ip = 0; ip < pn; ip++) { ix = (int) ((PosX[ip] - Grid.oX) / Grid.size); iy = (int) ((PosY[ip] - Grid.oY) / Grid.size); iz = (int) ((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; np = 0; for (k = -1; k <= 1; k++) { for (j = -1; j <= 1; j++) { for (i = -1; i <= 1; i++) { jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY; for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) { dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) { List[ip * MAXN + np] = jp; np++; } } } } } while (np < MAXN) { List[ip * MAXN + np] = ip; np++; } } } __global__ void updateListDevice(const int pn, int *List, const int* SetStart, const int* SetStop, const struct grid Grid, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ) { int ip, ic, ix, iy, iz, i, j, k, jp, jc, np; float dx, dy, dz, dr; // Particles list is filled ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= pn) return; ix = (int) ((PosX[ip] - Grid.oX) / Grid.size); iy = (int) ((PosY[ip] - Grid.oY) / Grid.size); iz = (int) ((PosZ[ip] - 
Grid.oZ) / Grid.size);
    ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY;

    np = 0;

    for (k = -1; k <= 1; k++) {
        for (j = -1; j <= 1; j++) {
            for (i = -1; i <= 1; i++) {
                jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY;

                for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) {
                    dx = PosX[ip] - PosX[jp];
                    dy = PosY[ip] - PosY[jp];
                    dz = PosZ[ip] - PosZ[jp];
                    dr = sqrtf(dx * dx + dy * dy + dz * dz);

                    if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) {
                        List[ip * MAXN + np] = jp;
                        np++;
                    }
                }
            }
        }
    }

    while (np < MAXN) {
        List[ip * MAXN + np] = ip;
        np++;
    }
}

struct is_in {
    __host__ __device__ bool operator()(int x) {
        return x == 1;
    }
};

struct is_out {
    __host__ __device__ bool operator()(int x) {
        return x == -1;
    }
};

int neighbourListDevice(struct model *dm) {
    int blocks, threads;

    blocks = (dm->pn + THREADS - 1) / THREADS;
    threads = THREADS;

    hipLaunchKernelGGL(( updateHashDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dGrid, dm->PosX, dm->PosY, dm->PosZ, dm->Hash);

    hipLaunchKernelGGL(( checkOutletDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dGrid, dm->PosX, dm->PosY, dm->PosZ, dm->Hash);

    hipLaunchKernelGGL(( checkBoundariesDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dGrid, dm->PosX, dm->PosY, dm->PosZ, dm->Hash);

    // wrap raw pointer with a device_ptr
    thrust::device_ptr<int> tIndex(dm->Index);
    thrust::device_ptr<int> tHash(dm->Hash);
    thrust::device_ptr<int> tMaterial(dm->Material);
    thrust::device_ptr<float> tMass(dm->Mass);
    thrust::device_ptr<float> tSmooth(dm->Smooth);
    thrust::device_ptr<float> tPosX(dm->PosX);
    thrust::device_ptr<float> tPosY(dm->PosY);
    thrust::device_ptr<float> tPosZ(dm->PosZ);
    thrust::device_ptr<float> tVelX(dm->VelX);
    thrust::device_ptr<float> tVelY(dm->VelY);
    thrust::device_ptr<float> tVelZ(dm->VelZ);
    thrust::device_ptr<float> tDensity(dm->Density);
    thrust::device_ptr<float> tEnergy(dm->Energy);
    thrust::device_ptr<int> tIntDummy(dm->IntDummy);
    thrust::device_ptr<float> tFloatDummy(dm->FloatDummy);

    // use device_ptr in thrust algorithms:
    // sort particles by cell hash and permute every field accordingly; the index
    // sequence must start at 0 (as in the host path) for the gathers to stay in range
    thrust::sequence(tIndex, tIndex + dm->pn, 0);
    thrust::sort_by_key(tHash, tHash + dm->pn, tIndex);

    thrust::copy(tMaterial, tMaterial + dm->pn, tIntDummy);
    thrust::gather(tIndex, tIndex + dm->pn, tIntDummy, tMaterial);
    thrust::copy(tMass, tMass + dm->pn, tFloatDummy);
    thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tMass);
    thrust::copy(tSmooth, tSmooth + dm->pn, tFloatDummy);
    thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tSmooth);
    thrust::copy(tPosX, tPosX + dm->pn, tFloatDummy);
    thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosX);
    thrust::copy(tPosY, tPosY + dm->pn, tFloatDummy);
    thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosY);
    thrust::copy(tPosZ, tPosZ + dm->pn, tFloatDummy);
    thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosZ);
    thrust::copy(tVelX, tVelX + dm->pn, tFloatDummy);
    thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelX);
    thrust::copy(tVelY, tVelY + dm->pn, tFloatDummy);
    thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelY);
    thrust::copy(tVelZ, tVelZ + dm->pn, tFloatDummy);
    thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelZ);
    thrust::copy(tDensity, tDensity + dm->pn, tFloatDummy);
    thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tDensity);
    thrust::copy(tEnergy, tEnergy + dm->pn, tFloatDummy);
    thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tEnergy);

    thrust::device_ptr<int> tSetStart(dm->SetStart);
    thrust::device_ptr<int> tSetStop(dm->SetStop);

    // clear the start/stop markers for every grid cell, as the host path does;
    // clearing only pn entries would leave stale values in cells holding no particles
    thrust::fill(tSetStart, tSetStart + dGrid.nX * dGrid.nY * dGrid.nZ, 0);
    thrust::fill(tSetStop, tSetStop + dGrid.nX * dGrid.nY * dGrid.nZ, 0);

    hipLaunchKernelGGL((
updateSetsDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->SetStart, dm->SetStop, dm->Hash); hipLaunchKernelGGL(( updateListDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->List, dm->SetStart, dm->SetStop, dGrid, dm->Smooth, dm->PosX, dm->PosY, dm->PosZ); return 0; } int iSort(int *array, int *perm, int *dummy, int n) { int i; for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = dummy[perm[i]]; return 0; } int fSort(float *array, int *perm, int n) { int i; static float* dummy = NULL; if (!dummy) dummy = (float *) malloc(MAXP * sizeof(float)); for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = dummy[perm[i]]; return 0; } int mapCompare(const void *a, const void *b) { int c; struct pair m1, m2; c = 0; m1 = *(struct pair*)a; m2 = *(struct pair*)b; if (m1.key < m2.key) c = -1; if (m1.key > m2.key) c = 1; return c; } int neighbourListHost(struct model *hm) { struct pair map[MAXP]; updateHashHost(hm->pn, hGrid, hm->PosX, hm->PosY, hm->PosZ, hm->Hash); /* checkOutletHost(hm->pn, hGrid, hm->PosX, hm->PosY, hm->PosZ, hm->Hash); checkBoundariesHost(hm->pn, hGrid, hm->PosX, hm->PosY, hm->PosZ, hm->Hash); */ //thrust::sequence(hm->Index, hm->Index + hm->pn); for (int ip = 0; ip < hm->pn; ip++) hm->Index[ip] = ip; //for (int ip = 0; ip < hm->pn; ip++) printf("%d\n", hm->Hash[ip]), for (int ip = 0; ip < hm->pn; ip++) { map[ip].key = hm->Hash[ip]; map[ip].value = hm->Index[ip]; } qsort(map, hm->pn, sizeof(struct pair), mapCompare); for (int ip = 0; ip < hm->pn; ip++) { hm->Hash[ip] = map[ip].key; hm->Index[ip] = map[ip].value; } //for (int ip = 0; ip < hm->pn; ip++) hm->Index[ip] = map[ip].value; //thrust::sort_by_key(hm->Hash, hm->Hash + hm->pn, hm->Index); /* thrust::copy(hm->Material, hm->Material + hm->pn, hm->IntDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->IntDummy, hm->Material); thrust::copy(hm->Mass, hm->Mass + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->Mass); thrust::copy(hm->Smooth, hm->Smooth + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->Smooth); thrust::copy(hm->PosX, hm->PosX + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->PosX); thrust::copy(hm->PosY, hm->PosY + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->PosY); thrust::copy(hm->PosZ, hm->PosZ + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->PosZ); thrust::copy(hm->VelX, hm->VelX + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->VelX); thrust::copy(hm->VelY, hm->VelY + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->VelY); thrust::copy(hm->VelZ, hm->VelZ + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->VelZ); thrust::copy(hm->Density, hm->Density + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->Density); thrust::copy(hm->Energy, hm->Energy + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->Energy); */ //iSort(hm->Hash, hm->Index, hm->pn); iSort(hm->Material, hm->Index, hm->IntDummy, hm->pn); fSort(hm->Mass, hm->Index, hm->pn); fSort(hm->Smooth, hm->Index, hm->pn); fSort(hm->PosX, hm->Index, hm->pn); fSort(hm->PosY, hm->Index, hm->pn); fSort(hm->PosZ, hm->Index, hm->pn); fSort(hm->VelX, hm->Index, hm->pn); 
fSort(hm->VelY, hm->Index, hm->pn); fSort(hm->VelZ, hm->Index, hm->pn); fSort(hm->Density, hm->Index, hm->pn); fSort(hm->Energy, hm->Index, hm->pn); //thrust::fill(hm->SetStart, hm->SetStart + hm->pn, 0); for (int i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStart[i] = 0; //thrust::fill(hm->SetStop, hm->SetStop + hm->pn, 0); for (int i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStop[i] = 0; updateSetsHost(hm->pn, hm->SetStart, hm->SetStop, hm->Hash); updateListHost(hm->pn, hm->List, hm->SetStart, hm->SetStop, hGrid, hm->Smooth, hm->PosX, hm->PosY, hm->PosZ); return 0; } int backupDataHost(struct model *hm) { thrust::copy(hm->PosX, hm->PosX + hm->pn, hm->PosX0); thrust::copy(hm->PosY, hm->PosY + hm->pn, hm->PosY0); thrust::copy(hm->PosZ, hm->PosZ + hm->pn, hm->PosZ0); thrust::copy(hm->VelX, hm->VelX + hm->pn, hm->VelX0); thrust::copy(hm->VelY, hm->VelY + hm->pn, hm->VelY0); thrust::copy(hm->VelZ, hm->VelZ + hm->pn, hm->VelZ0); thrust::copy(hm->Density, hm->Density + hm->pn, hm->Density0); thrust::copy(hm->Energy, hm->Energy + hm->pn, hm->Energy0); return 0; } int backupDataHostOld(struct model *hm) { memcpy(hm->PosX0, hm->PosX, MAXP * sizeof(float)); memcpy(hm->PosY0, hm->PosY, MAXP * sizeof(float)); memcpy(hm->PosZ0, hm->PosZ, MAXP * sizeof(float)); memcpy(hm->VelX0, hm->VelX, MAXP * sizeof(float)); memcpy(hm->VelY0, hm->VelY, MAXP * sizeof(float)); memcpy(hm->VelZ0, hm->VelZ, MAXP * sizeof(float)); memcpy(hm->Density0, hm->Density, MAXP * sizeof(float)); memcpy(hm->Energy0, hm->Energy, MAXP * sizeof(float)); return 0; } int backupDataDevice(struct model *dm) { // wrap raw pointer with a device_ptr thrust::device_ptr<float> tPosX(dm->PosX); thrust::device_ptr<float> tPosY(dm->PosY); thrust::device_ptr<float> tPosZ(dm->PosZ); thrust::device_ptr<float> tVelX(dm->VelX); thrust::device_ptr<float> tVelY(dm->VelY); thrust::device_ptr<float> tVelZ(dm->VelZ); thrust::device_ptr<float> tDensity(dm->Density); thrust::device_ptr<float> tEnergy(dm->Energy); thrust::device_ptr<float> tPosX0(dm->PosX0); thrust::device_ptr<float> tPosY0(dm->PosY0); thrust::device_ptr<float> tPosZ0(dm->PosZ0); thrust::device_ptr<float> tVelX0(dm->VelX0); thrust::device_ptr<float> tVelY0(dm->VelY0); thrust::device_ptr<float> tVelZ0(dm->VelZ0); thrust::device_ptr<float> tDensity0(dm->Density0); thrust::device_ptr<float> tEnergy0(dm->Energy0); // use device_ptr in thrust algorithms thrust::copy(tPosX, tPosX + dm->pn, tPosX0); thrust::copy(tPosY, tPosY + dm->pn, tPosY0); thrust::copy(tPosZ, tPosZ + dm->pn, tPosZ0); thrust::copy(tVelX, tVelX + dm->pn, tVelX0); thrust::copy(tVelY, tVelY + dm->pn, tVelY0); thrust::copy(tVelZ, tVelZ + dm->pn, tVelZ0); thrust::copy(tDensity, tDensity + dm->pn, tDensity0); thrust::copy(tEnergy, tEnergy + dm->pn, tEnergy0); return 0; } __host__ __device__ float pressureGas(float* properties, float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c = (k(k -1) u)^0.5 * * k = properties[1] * pshift = properties[2] * * \date Jun 10, 2010 * \author Luca Massidda */ float p; p = (properties[1] - 1.0f) * rho * u; p += properties[2]; return p; } __host__ __device__ float pressurePoly(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = 
properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p; mu = (rho - properties[0]) / properties[0]; if (mu < 0) p = (properties[6] * mu + properties[7] * mu*mu) + (properties[4] * properties[0] * u); else p = (properties[1] * mu + properties[2] * mu*mu + properties[3] * mu*mu*mu) + ((properties[4] + properties[5] * mu) * properties[0] * u); //if (p < properties[8]) p = properties[8]; return p; } __host__ __device__ float pressureShock(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p, ph; mu = (rho - properties[0]) / properties[0]; ph = (properties[0] * powf(properties[1], 2) * mu*(1.0f +mu)) / powf((1.0f - (properties[3] -1.0f) * mu), 2); p = ph + properties[2] * properties[0] * (u - (0.5f * ph / properties[0] * (mu / (1.0f + mu)))); //if (p < properties[4]) p = properties[4]; return p; } __host__ __device__ float pressureTait(float* properties, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float p; p = properties[0] * powf(properties[1], 2) / 7.0f * (powf((rho / properties[0]), 7) - 1.0f); //if (p < properties[2]) p = properties[2]; return p; } __host__ __device__ float soundGas(float* properties ,float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c = (k(k -1) u)^0.5 * * k = properties[1] * pshift = properties[2] * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = sqrtf(properties[1] * (properties[1] - 1.0f) * u); return c; } __host__ __device__ float soundPoly(float* properties , float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = sqrtf(properties[1] / rho); return c; } __host__ __device__ float soundShock(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = properties[1]; return c; } __host__ __device__ float soundTait(float* properties, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = properties[1]; return c; } __host__ __device__ float densityPoly(float* properties , float rho) { /** * \brief 
Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } __host__ __device__ float densityShock(float* properties, float rho) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } __host__ __device__ float densityTait(float* properties, float rho) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } __host__ void updateParticlesHost(const int pn, const float alpha, const int* Material, const float* VelDotX, const float* VelDotY, const float* VelDotZ, const float* DensityDot, const float* EnergyDot, const float* PosX0, const float* PosY0, const float* PosZ0, const float* VelX0, const float* VelY0, const float* VelZ0, const float* Density0, const float* Energy0, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* Density, float* Energy, float* Pressure, float* Sound) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, i; int iMaterial; for (ip = 0; ip < pn; ip++) if (Material[ip] != 0) { PosX[ip] = PosX0[ip] + alpha * (PosX[ip] + hRun.dt * VelX[ip] - PosX0[ip]); PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + hRun.dt * VelY[ip] - PosY0[ip]); PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + hRun.dt * VelZ[ip] - PosZ0[ip]); VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + hRun.dt * VelDotX[ip] - VelX0[ip]); VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + hRun.dt * VelDotY[ip] - VelY0[ip]); VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + hRun.dt * VelDotZ[ip] - VelZ0[ip]); //VelZ[ip] = 0.0f; Density[ip] = Density0[ip] + alpha * (Density[ip] + hRun.dt * DensityDot[ip] - Density0[ip]); Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + hRun.dt * EnergyDot[ip] - Energy0[ip]); iMaterial = Material[ip]; if (iMaterial <= 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; } for (i = 0; i < 10; i++) if ((PosX[ip] > hFix[i].minX) && (PosX[ip] < hFix[i].maxX) && (PosY[ip] > hFix[i].minY) && (PosY[ip] < hFix[i].maxY) && (PosZ[ip] > hFix[i].minZ) && (PosZ[ip] < hFix[i].maxZ)) { VelX[ip] = hFix[i].velX; VelY[ip] = hFix[i].velY; VelZ[ip] = hFix[i].velZ; } iMaterial = abs(iMaterial); if (hMatType[iMaterial] == 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; } switch (hMatType[iMaterial]) { case (0) : // BOUNDARY Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]); Pressure[ip] = 0.0f*pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], 
Energy[ip]); break; case (1) : // IDEAL GAS EOS Pressure[ip] = pressureGas(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundGas(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS Density[ip] = densityPoly(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressurePoly(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundPoly(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (3) : // MIE-GRUNEISEN SHOCK EOS Density[ip] = densityShock(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureShock(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundShock(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (4) : // TAIT EOS Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], Energy[ip]); break; default : Pressure[ip] = 0.0f; } } } __global__ void updateParticlesDevice(const int pn, const float alpha, const int* Material, const float* VelDotX, const float* VelDotY, const float* VelDotZ, const float* DensityDot, const float* EnergyDot, const float* PosX0, const float* PosY0, const float* PosZ0, const float* VelX0, const float* VelY0, const float* VelZ0, const float* Density0, const float* Energy0, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* Density, float* Energy, float* Pressure, float* Sound) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, i; int iMaterial; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { PosX[ip] = PosX0[ip] + alpha * (PosX[ip] + dRun.dt * VelX[ip] - PosX0[ip]); PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + dRun.dt * VelY[ip] - PosY0[ip]); PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + dRun.dt * VelZ[ip] - PosZ0[ip]); VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + dRun.dt * VelDotX[ip] - VelX0[ip]); VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + dRun.dt * VelDotY[ip] - VelY0[ip]); VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + dRun.dt * VelDotZ[ip] - VelZ0[ip]); //VelZ[ip] = 0.0f; Density[ip] = Density0[ip] + alpha * (Density[ip] + dRun.dt * DensityDot[ip] - Density0[ip]); Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + dRun.dt * EnergyDot[ip] - Energy0[ip]); iMaterial = Material[ip]; for (i = 0; i < 10; i++) if ((PosX[ip] > dFix[i].minX) && (PosX[ip] < dFix[i].maxX) && (PosY[ip] > dFix[i].minY) && (PosY[ip] < dFix[i].maxY) && (PosZ[ip] > dFix[i].minZ) && (PosZ[ip] < dFix[i].maxZ)) { VelX[ip] = dFix[i].velX; VelY[ip] = dFix[i].velY; VelZ[ip] = dFix[i].velZ; } if (dMatType[iMaterial] == 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; } switch (dMatType[iMaterial]) { case (0) : // BOUNDARY //Pressure[ip] = pressureShock(dMatProp[iMaterial], iDensity, iEnergy); //Sound[ip] = soundShock(dMatProp[iMaterial], iDensity, iEnergy); Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]); Pressure[ip] = 0.0f*pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (1) : // IDEAL GAS EOS Pressure[ip] = pressureGas(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundGas(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS Density[ip] = densityPoly(dMatProp[iMaterial], Density[ip]); Pressure[ip] = pressurePoly(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundPoly(dMatProp[iMaterial], Density[ip], 
Energy[ip]); break; case (3) : // MIE-GRUNEISEN SHOCK EOS Density[ip] = densityShock(dMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureShock(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundShock(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (4) : // TAIT EOS Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]); break; default : Pressure[ip] = 0.0f; } } } __host__ __device__ float kernelWendland(float r, float h) { float q, alpha, w; /** * \brief Wendland kernel * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D alpha = 15.0f / (16.0f * PI * h * h * h); // for 2D //alpha = 7.0f / (4.0f * PI * h * h); w = 0.0f; if (q < 2) { w = powf((1.0f - 0.5f*q),4); w *= 1.0f + 2.0f*q; w *= alpha; } return w; } __host__ __device__ float kernelDerivWendland(float r, float h) { float q, alpha, dwdr; /** * \brief Wendland kernel derivative * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D alpha = 15.0f / (16.0f * PI * h * h * h); // for 2D //alpha = 7.0f / (4.0f * PI * h * h); dwdr = 0.0f; if (q < 2) { dwdr = 5.0f / 8.0f * q * powf((q - 2.0f), 3) ; dwdr *= alpha / h; } return dwdr; } __host__ __device__ float kernelGauss(float r, float h) { float r2, q2, h2, alpha, w;//, dwdr; /** * \brief Gauss kernel * * \date Dec 21, 2010 * \author Luca Massidda */ r2 = r * r ; h2 = h * h; q2 = r2 / h2; alpha = 1.0 / (pow(h, 1) * pow(3.14, 0.5)); //alpha = 1.0f / (3.14f * h2); w = 0.0f; //dwdr = 0.0; if (q2 < 4.0f) { w = alpha * expf(-q2); //dwdr = w * (-2.0 * r / h2); } return w; } __host__ __device__ float kernelDerivGauss(float r, float h) { float r2, q2, h2, alpha, w, dwdr; /** * \brief Gauss kernel * * \date Dec 21, 2010 * \author Luca Massidda */ r2 = r * r ; h2 = h * h; q2 = r2 / h2; alpha = 1.0f / (h * powf(3.14f, 0.5f)); //alpha = 1.0f / (3.14f * h2); w = 0.0f; dwdr = 0.0f; if (q2 < 4.0f) { w = alpha * expf(-q2); dwdr = w * (-2.0f * r / h2); } return dwdr; } __host__ __device__ float kernelSpiky(float r, float h) { float q, alpha, w; /** * \brief Spiky kernel * * \date Dec 21, 2010 * \author Luca Massidda */ q = r / h; alpha = 15.0f / (64.0f * 3.14f * pow(h, 3)); w = 0.0f; if (q < 2.0f) { w = alpha * powf(2.0f - q, 3); } return w; } __host__ __device__ float kernelDerivSpiky(float r, float h) { float q, alpha, dwdr; /** * \brief Gauss kernel * * \date Dec 21, 2010 * \author Luca Massidda */ q = r / h; alpha = -45.0f / (64.0f * 3.14f * pow(h, 4)); dwdr = 0.0; if (q < 2.0f) { dwdr = alpha * powf(2.0f - q, 2); } return dwdr; } __host__ void updateLoadsHost(const int pn, const int* Material, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* VelDotX, float* VelDotY, float* VelDotZ, float* EnergyDot) { int ip, i; for (ip = 0; ip < pn; ip++) { if (Material[ip] > 0) { for (i = 0; i < 10; i++) { if ((PosX[ip] > hLoad[i].minX) && (PosX[ip] < hLoad[i].maxX) && (PosZ[ip] < hLoad[i].maxZ) && (PosY[ip] > hLoad[i].minY) && (PosY[ip] < hLoad[i].maxY) && (PosZ[ip] > hLoad[i].minZ) && (PosZ[ip] < hLoad[i].maxZ)) { VelDotX[ip] += hLoad[i].gx; VelDotY[ip] += hLoad[i].gy; VelDotZ[ip] += hLoad[i].gz; EnergyDot[ip] += hLoad[i].w; } } } } } __global__ void updateLoadsDevice(const int pn, const int* Material, const float* PosX, const float* PosY, const float* PosZ, float* VelDotX, float* VelDotY, float* VelDotZ, float* EnergyDot) { int ip, i; ip = threadIdx.x + blockDim.x * 
blockIdx.x; if ((ip < pn) && (Material[ip] > 0)) { for (i = 0; i < 10; i++) { if ((PosX[ip] > dLoad[i].minX) && (PosX[ip] < dLoad[i].maxX) && (PosZ[ip] < dLoad[i].maxZ) && (PosY[ip] > dLoad[i].minY) && (PosY[ip] < dLoad[i].maxY) && (PosZ[ip] > dLoad[i].minZ) && (PosZ[ip] < dLoad[i].maxZ)) { VelDotX[ip] += dLoad[i].gx; VelDotY[ip] += dLoad[i].gy; VelDotZ[ip] += dLoad[i].gz; EnergyDot[ip] += dLoad[i].w; } } } } __host__ void balanceMassMomentumHost(const int pn, const int* List, const int* Material, const float* Mass, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const float* VelX, const float* VelY, const float* VelZ, const float* Density, const float* Pressure, const float* Sound, float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; float iSmooth, jMass; float dx, dy, dz, dr, dvr, dwdr, f, w, w0; for (ip = 0; ip < pn; ip++) { iDensityDot = 0.0f; iVelDotX = 0.0f; iVelDotY = 0.0f; iVelDotZ = 0.0f; iSmooth = Smooth[ip]; for (il = 0; il < MAXN; il++) { jp = List[ip * MAXN + il]; // for (jp = 0; jp < pn; jp++) { jMass = Mass[jp]; dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth; w = kernelWendland(dr, iSmooth); w0 = kernelWendland(0.0f, iSmooth); dwdr = kernelDerivWendland(dr, iSmooth); dvr = 0.0f; dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]); dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]); dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]); iDensityDot += jMass * dvr * dwdr / dr; // Calculate interparticle pressure action //f = -(Pressure[ip] + Pressure[jp]) // / (Density[ip] * Density[jp]); f = -(Pressure[ip] / powf(Density[ip], 2) + Pressure[jp] / powf(Density[jp], 2)); iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr; iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr; // Calculate shock correction for mass f = Density[ip] - Density[jp]; f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); iDensityDot += jMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0.0f) f = dvr; else f = 0.0f; f *= iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth); f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); f *= 0.03f; iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr; iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr; // Calculate boundary repulsion if (Material[ip] != Material[jp]) { f = 0.02f * w / w0 * Sound[ip] * Sound[jp] / dr; iVelDotX += jMass / (Mass[ip] + jMass) * f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += jMass / (Mass[ip] + jMass) * f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass / (Mass[ip] + jMass) * f * (PosZ[ip] - PosZ[jp]) / dr; } } DensityDot[ip] += iDensityDot; VelDotX[ip] += iVelDotX; VelDotY[ip] += iVelDotY; VelDotZ[ip] += iVelDotZ; } } __global__ void balanceMassMomentumDevice(const int pn, const int* List, const int* Material, const float* Mass, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const float* VelX, const float* VelY, const float* VelZ, const float* Density, const float* Pressure, const float* Sound, float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda 
*/ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; float iSmooth, jMass; volatile float dx, dy, dz, dr, dvr, dwdr, f, w, w0, q; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { iDensityDot = 0.0f; iVelDotX = 0.0f; iVelDotY = 0.0f; iVelDotZ = 0.0f; iSmooth = Smooth[ip]; for (il = 0; il < MAXN; il++) { jp = List[ip * MAXN + il]; jMass = Mass[jp]; dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth; w = kernelWendland(dr, iSmooth); dwdr = kernelDerivWendland(dr, iSmooth); if (Material[ip] == Material[jp]) { dvr = 0.0f; dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]); dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]); dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]); iDensityDot += jMass * dvr * dwdr / dr; // Calculate interparticle pressure action f = -(Pressure[ip] / powf(Density[ip], 2) + Pressure[jp] / powf(Density[jp], 2)); f *= jMass * dwdr; iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr; // Calculate shock correction for mass f = Density[ip] - Density[jp]; f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); iDensityDot += jMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0.0f) f = dvr; else f = 0.0f; f *= iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth); f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); f *= 0.03f; f *= jMass * dwdr; iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr; } // Calculate boundary repulsion if (Material[ip] != Material[jp]) { f = 0.25f * w * Mass[jp] / Density[jp] / Smooth[jp] * powf(Sound[jp], 2); iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr; } } DensityDot[ip] += iDensityDot; VelDotX[ip] += iVelDotX; VelDotY[ip] += iVelDotY; VelDotZ[ip] += iVelDotZ; } } __host__ void balanceEnergyHost(const int pn, const float* Pressure, const float* Density, const float* DensityDot, float* EnergyDot) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ int ip; float iPressure, iDensity, iDensityDot; float iEnergyDot; for (ip = 0; ip < pn; ip++) { iPressure = Pressure[ip]; iDensity = Density[ip]; iDensityDot = DensityDot[ip]; iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity); EnergyDot[ip] += iEnergyDot; } } __global__ void balanceEnergyDevice(const int pn, const float* Pressure, const float* Density, const float* DensityDot, float* EnergyDot) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ volatile int ip; float iPressure, iDensity, iDensityDot; float iEnergyDot; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { iPressure = Pressure[ip]; iDensity = Density[ip]; iDensityDot = DensityDot[ip]; iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity); EnergyDot[ip] += iEnergyDot; } } int RKstepHost(struct model *hm, float alpha) { /* thrust::fill(hm->VelDotX, hm->VelDotX + hm->pn, 0.0f); thrust::fill(hm->VelDotY, hm->VelDotY + hm->pn, 0.0f); thrust::fill(hm->VelDotZ, hm->VelDotZ + hm->pn, 0.0f); thrust::fill(hm->DensityDot, hm->DensityDot + hm->pn, 0.0f); thrust::fill(hm->EnergyDot, hm->EnergyDot + hm->pn, 0.0f); */ for (int ip = 0; ip < hm->pn; ip++) { hm->VelDotX[ip] = 0.0f; hm->VelDotY[ip] = 0.0f; hm->VelDotZ[ip] = 0.0f; hm->DensityDot[ip] = 0.0f; 
hm->EnergyDot[ip] = 0.0f; } // External loads updateLoadsHost(hm->pn, hm->Material, hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ, hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->EnergyDot); // Calculate particle interactions balanceMassMomentumHost(hm->pn, hm->List, hm->Material, hm->Mass, hm->Smooth, hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ, hm->Density, hm->Pressure, hm->Sound, hm->DensityDot, hm->VelDotX, hm->VelDotY, hm->VelDotZ); balanceEnergyHost(hm->pn, hm->Pressure, hm->Density, hm->DensityDot, hm->EnergyDot); // Update particles updateParticlesHost(hm->pn, alpha, hm->Material, hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->DensityDot, hm->EnergyDot, hm->PosX0, hm->PosY0, hm->PosZ0, hm->VelX0, hm->VelY0, hm->VelZ0, hm->Density0, hm->Energy0, hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ, hm->Density, hm->Energy, hm->Pressure, hm->Sound); for (int ip = 0; ip < hm->pn; ip++) { printf("%d %d %f %f %f %f %f \n", hm->Index[ip], hm->Hash[ip], hm->VelX[ip], hm->VelY[ip], hm->VelZ[ip], hm->Density[ip], hm->Pressure[ip]); } return 0; } int RKstepDevice(struct model *dm, float alpha) { int blocks, threads; blocks = (dm->pn + THREADS - 1) / THREADS; threads = THREADS; // wrap raw pointer with a device_ptr thrust::device_ptr<float> tVelDotX(dm->VelDotX); thrust::device_ptr<float> tVelDotY(dm->VelDotY); thrust::device_ptr<float> tVelDotZ(dm->VelDotZ); thrust::device_ptr<float> tDensityDot(dm->DensityDot); thrust::device_ptr<float> tEnergyDot(dm->EnergyDot); // use device_ptr in thrust algorithms thrust::fill(tVelDotX, tVelDotX + dm->pn, 0.0f); thrust::fill(tVelDotY, tVelDotY + dm->pn, 0.0f); thrust::fill(tVelDotZ, tVelDotZ + dm->pn, 0.0f); thrust::fill(tDensityDot, tDensityDot + dm->pn, 0.0f); thrust::fill(tEnergyDot, tEnergyDot + dm->pn, 0.0f); // External loads hipLaunchKernelGGL(( updateLoadsDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->Material, dm->PosX, dm->PosY, dm->PosZ, dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->EnergyDot); // Calculate particle interactions hipLaunchKernelGGL(( balanceMassMomentumDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->List, dm->Material, dm->Mass, dm->Smooth, dm->PosX, dm->PosY, dm->PosZ, dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Pressure, dm->Sound, dm->DensityDot, dm->VelDotX, dm->VelDotY, dm->VelDotZ); hipLaunchKernelGGL(( balanceEnergyDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->Pressure, dm->Density, dm->DensityDot, dm->EnergyDot); // Update particles hipLaunchKernelGGL(( updateParticlesDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, alpha, dm->Material, dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->DensityDot, dm->EnergyDot, dm->PosX0, dm->PosY0, dm->PosZ0, dm->VelX0, dm->VelY0, dm->VelZ0, dm->Density0, dm->Energy0, dm->PosX, dm->PosY, dm->PosZ, dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Energy, dm->Pressure, dm->Sound); return 0; } int RKintegrateHost(struct model *hm) { /** * \brief Runge Kutta 3rd order time integration * * Integrate the Navier Stokes equations in time with the * Total Variation Diminishing Runge-Kutta algorithm of the 3rd order * * \date Dec 20, 2010 * \author Luca Massidda */ int ts; // TIME CYCLE for (ts = 0; ts <= hRun.tsn; ts++) { // Output data if ((ts % hRun.ssi) == 0) { printf("Saving time: %g \n", ts * hRun.dt); printData(hm); outputVTK(hm, ts / hRun.ssi); } // Calculate neighbour list neighbourListHost(hm); // Save initial condition backupDataHost(hm); // Step 1 RKstepHost(hm, 1.0f); /* // Step 2 RKstepHost(hm, 1.0f / 4.0f); // Step 3 
RKstepHost(hm, 2.0f / 3.0f); */ } return 0; } int RKintegrateDevice(struct model *hm, struct model *dm) { /** * \brief Runge Kutta 3rd order time integration * * Integrate the Navier Stokes equations in time with the * Total Variation Diminishing Runge-Kutta algorithm of the 3rd order * * \date Dec 20, 2010 * \author Luca Massidda */ int ts; size_t available, total; // TIME CYCLE // for (ts = 0; ts <= hRun.tsn; ts++) { for (ts = 0; ts < 1; ts++) { // Calculate neighbour list neighbourListDevice(dm); // Save initial condition backupDataDevice(dm); // Step 1 RKstepDevice(dm, 1.0f); /* // Step 2 RKstepDevice(dm, 1.0f / 4.0f); // Step 3 RKstepDevice(dm, 2.0f / 3.0f); */ // Output data if ((ts % hRun.ssi) == 0) { printf("Saving time: %g \n", ts * hRun.dt); copyDeviceToHost(dm, hm); printf("Particles: %d \n", hm->pn); hipMemGetInfo(&available, &total); printf("Available memory %d MB\n", available/1024/1024); printData(hm); outputVTK(hm, ts / hRun.ssi); } } return 0; } int main() { /** * \brief armando2D v2.0 * * An SPH code for non stationary fluid dynamics. * This is the reviewed and improved C version of Armando v1.0 * developed at CERN in 2008 * * \date May 2, 2012 * \author Luca Massidda */ struct model hModel, dModel; size_t available, total; hipMemGetInfo(&available, &total); printf("Occupied memory %d of %dMB\n", available/1024/1024, total/1024/1024); for (int i = 0; i < 10; i++) { hLoad[i].gx = 0.0f; hLoad[i].gy = 0.0f; hLoad[i].gz = 0.0f; hLoad[i].w = 0.0f; hOut[i].nX = 0.0f; hOut[i].nY = 0.0f; hOut[i].nZ = 0.0f; } initHost(&hModel); //initSingle(&hModel); //initPump(&hModel); //initBlock(&hModel); //initDamBreak(&hModel); //initChannel(&hModel); initBox(&hModel); /* initDevice(&dModel); hipMemGetInfo(&available, &total); printf("Available memory %d MB\n", available/1024/1024); //copyHostToDevice(&hModel, &dModel); hipMemGetInfo(&available, &total); printf("Available memory %d MB\n", available/1024/1024); thrust::device_ptr<int> tIndex((&dModel)->Index); thrust::sequence(tIndex, tIndex + (&dModel)->pn, 1); neighbourListDevice(&dModel); copyDeviceToHost(&dModel, &hModel); printData(&hModel); */ //RKintegrateDevice(&hModel, &dModel); RKintegrateHost(&hModel); return 0; }
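/*
 * NOTE: the buffers allocated with malloc() in initHost() -- and the device
 * buffers from initDevice() when that path is enabled -- are never released
 * before the program exits. If this solver were embedded in a longer-running
 * host application, matching free() (and hipFree() for the device arrays)
 * calls would be needed before returning from main().
 */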
4f29b40125f01405fa85371b5a3ae033a847b9b9.cu
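/*
 * Minimal error-checking sketch (assumed helper, not used by the original
 * routines): the cudaMalloc / cudaMemcpy calls and kernel launches in this
 * file never inspect their return status, so failures only show up as wrong
 * results. A wrapper macro along these lines could be applied where desired,
 * for example:
 *     CUDA_CHECK(cudaMalloc((void**) &(dm->Material), MAXP * sizeof(int)));
 * and, for kernel launches, CUDA_CHECK(cudaGetLastError()) right after the
 * launch serves the same purpose. The CUDA_CHECK name is arbitrary; the macro
 * only expands where it is used.
 */
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t cuda_check_err_ = (call);                                 \
        if (cuda_check_err_ != cudaSuccess) {                                 \
            fprintf(stderr, "CUDA error: %s at %s:%d\n",                      \
                    cudaGetErrorString(cuda_check_err_), __FILE__, __LINE__); \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)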
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/sequence.h> #include <thrust/count.h> //#define MAXN 24 #define MAXN 96 #define MAXP 300000 #define MAXS 3000000 #define PI 3.14159f #define THREADS 256 struct pair { int key; int value; }; struct grid { float oX, oY, oZ; float size; int nX, nY, nZ; }; struct simulation { float minX, maxX; float minY, maxY; float minZ, maxZ; float dt; int tsn; int ssi; int nsi; }; struct load { float minX, maxX; float minY, maxY; float minZ, maxZ; float gx; float gy; float gz; float w; }; struct fix { float minX, maxX; float minY, maxY; float minZ, maxZ; float velX, velY, velZ; }; struct outlet { float oX, oY, oZ; float nX, nY, nZ; }; struct inlet { int Material; float Mass, Smooth; float oX, oY, oZ; float uX, uY, uZ; float vX, vY, vZ; float nX, nY, nZ; int nu, nv; float Velocity; float Density, Energy; float Distance; }; struct model { int pn; int* Material; float* Mass; float* Smooth; float* PosX; float* PosY; float* PosZ; float* VelX; float* VelY; float* VelZ; float* Density; float* Energy; float* Pressure; float* Sound; float* VelDotX; float* VelDotY; float* VelDotZ; float* DensityDot; float* EnergyDot; float* PosX0; float* PosY0; float* PosZ0; float* VelX0; float* VelY0; float* VelZ0; float* Density0; float* Energy0; int* List; int* Hash; int* Index; int* SetStart; int* SetStop; int* IntDummy; float* FloatDummy; }; // Host Variables int hMatType[10]; float hMatProp[10][10]; struct simulation hRun; struct grid hGrid; struct load hLoad[10]; struct fix hFix[10]; struct outlet hOut[10]; struct inlet hIn[10]; // Device Variables __device__ __constant__ int dMatType[10]; __device__ __constant__ float dMatProp[10][10]; __device__ __constant__ struct simulation dRun; __device__ struct grid dGrid; __device__ __constant__ struct load dLoad[10]; __device__ __constant__ struct fix dFix[10]; __device__ __constant__ struct outlet dOut[10]; __device__ struct inlet dIn[10]; void initBox(struct model *hm) { int i, j, k, m, b, q, ip; double rho, c0, pmin; double dr; q = 2; m = 1; b = 2; rho = 1000.f; c0 = 20.0f; pmin = -1.e4; hMatType[m] = 3; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 3; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.02f / q; // x4 ip = 0; for (k = 0; k < 2 * q ; k++) { for (j = 0; j < 2 * q ; j++) { for (i = 0; i < 2 * q; i++) { hm->PosX[ip] = i * dr; hm->PosY[ip] = j * dr; hm->PosZ[ip] = k * dr; hm->Material[ip] = m; hm->VelX[ip] = 0.0f; ip++; } } } for (k = 0; k < 2 * q ; k++) { for (j = -2; j < -1; j++) { for (i = -0; i < 2 * q; i++) { hm->PosX[ip] = i * dr; hm->PosY[ip] = j * dr; hm->PosZ[ip] = k * dr; hm->Material[ip] = b; hm->VelX[ip] = 0.0f; ip++; } } } hm->pn = ip; thrust::fill(hm->Mass, hm->Mass + hm->pn, rho * dr * dr * dr); thrust::fill(hm->Smooth, hm->Smooth + hm->pn, 1.2f * dr); thrust::fill(hm->VelY, hm->VelY + hm->pn, 0.0f); thrust::fill(hm->VelZ, hm->VelZ + hm->pn, 0.0f); thrust::fill(hm->Density, hm->Density + hm->pn, rho); thrust::fill(hm->Energy, hm->Energy + hm->pn, 0.0f); thrust::fill(hm->Pressure, hm->Pressure + hm->pn, 0.0f); thrust::fill(hm->Sound, hm->Sound + hm->pn, c0); hRun.minX = -0.2f; hRun.maxX = 1.0f; hRun.minY = -0.2f; hRun.maxY = 1.0f; hRun.minZ = -0.2f; hRun.maxZ = 1.0f; hRun.dt = dr / hMatProp[m][1]; hRun.tsn = 1000 * q; //1000; hRun.ssi = 100 * q; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; 
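    // The search-grid cell edge is set below to twice the smoothing length
    // (Smooth = 1.2 * dr, and the Wendland kernel support radius is 2h), so
    // every interacting neighbour of a particle lies inside the 3x3x3 block
    // of cells scanned in updateListHost / updateListDevice.
    // For example, with q = 2: dr = 0.01 m, h = 0.012 m, cell edge = 0.024 m.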
hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; printf("Box \n"); printf("Particles: %i \n", hm->pn); printf("Grid: %i \n", hGrid.nX * hGrid.nY * hGrid.nZ); } int copyHostToDevice(struct model *hm, struct model *dm) { dm->pn = hm->pn; cudaMemcpy(dm->Material, hm->Material, (MAXP * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Mass, hm->Mass, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Smooth, hm->Smooth, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosX, hm->PosX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosY, hm->PosY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosZ, hm->PosZ, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelX, hm->VelX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelY, hm->VelY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelZ, hm->VelZ, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Density, hm->Density, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Energy, hm->Energy, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Pressure, hm->Pressure, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Sound, hm->Sound, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelDotX, hm->VelDotX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelDotY, hm->VelDotY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelDotZ, hm->VelDotZ, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->DensityDot, hm->DensityDot, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->EnergyDot, hm->EnergyDot, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosX0, hm->PosX0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosY0, hm->PosY0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosZ0, hm->PosZ0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelX0, hm->VelX0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelY0, hm->VelY0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelZ0, hm->VelZ0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Density0, hm->Density0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Energy0, hm->Energy0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->List, hm->List, (MAXP * MAXN * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Hash, hm->Hash, (MAXP * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Index, hm->Index, (MAXP * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->IntDummy, hm->IntDummy, (MAXP * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->FloatDummy, hm->FloatDummy, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->SetStart, hm->SetStart, (MAXS * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->SetStop, hm->SetStop, (MAXS * sizeof(int)), cudaMemcpyHostToDevice); dGrid.oX = hGrid.oX; dGrid.oY = hGrid.oY; dGrid.oZ = hGrid.oZ; dGrid.nX = hGrid.nX; dGrid.nY = hGrid.nY; dGrid.nZ = hGrid.nZ; dGrid.size = hGrid.size; for (int i = 0; i < 10; i++) dIn[i] = hIn[i]; 
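    // NOTE: dGrid and dIn are __device__ globals; assigning to them from host
    // code, as above, does not reliably update the copies on the GPU. The
    // portable form would be
    //     cudaMemcpyToSymbol(dGrid, &hGrid, sizeof(struct grid));
    //     cudaMemcpyToSymbol(dIn, hIn, 10 * sizeof(struct inlet));
    // Similarly, the string-name overload of cudaMemcpyToSymbol used below was
    // removed in CUDA 5.0; newer toolkits expect the symbol itself, e.g.
    //     cudaMemcpyToSymbol(dMatType, hMatType, 10 * sizeof(int));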
cudaMemcpyToSymbol("dMatType", hMatType, 10 * sizeof(int)); cudaMemcpyToSymbol("dMatProp", hMatProp, 100 * sizeof(float)); cudaMemcpyToSymbol("dRun", &hRun, sizeof(struct simulation)); cudaMemcpyToSymbol("dLoad", &hLoad, 10 * sizeof(struct load)); cudaMemcpyToSymbol("dFix", &hFix, 10 * sizeof(struct fix)); cudaMemcpyToSymbol("dOut", &hOut, 10 * sizeof(struct outlet)); return 0; } int copyDeviceToHost(struct model *dm, struct model *hm) { hm->pn = dm->pn; cudaMemcpy(hm->Material, dm->Material, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Mass, dm->Mass, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Smooth, dm->Smooth, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosX, dm->PosX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosY, dm->PosY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosZ, dm->PosZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelX, dm->VelX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelY, dm->VelY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelZ, dm->VelZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Density, dm->Density, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Energy, dm->Energy, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Pressure, dm->Pressure, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Sound, dm->Sound, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelDotX, dm->VelDotX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelDotY, dm->VelDotY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelDotZ, dm->VelDotZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->DensityDot, dm->DensityDot, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->EnergyDot, dm->EnergyDot, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosX0, dm->PosX0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosY0, dm->PosY0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosZ0, dm->PosZ0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelX0, dm->VelX0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelY0, dm->VelY0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelZ0, dm->VelZ0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Density0, dm->Density0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Energy0, dm->Energy0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->List, dm->List, (MAXP * MAXN * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Hash, dm->Hash, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Index, dm->Index, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->IntDummy, dm->IntDummy, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->FloatDummy, dm->FloatDummy, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->SetStart, dm->SetStart, (MAXS * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->SetStop, dm->SetStop, (MAXS * sizeof(int)), cudaMemcpyDeviceToHost); hGrid.oX = dGrid.oX; hGrid.oY = dGrid.oY; hGrid.oZ = dGrid.oZ; hGrid.nX = dGrid.nX; hGrid.nY = dGrid.nY; hGrid.nZ = dGrid.nZ; hGrid.size = dGrid.size; for (int i = 0; i < 10; i++) hIn[i] = dIn[i]; return 0; } int initHost(struct model *hm) { hm->Material = (int *) malloc(MAXP * sizeof(int)); hm->Mass = (float *) malloc(MAXP * 
sizeof(float)); hm->Smooth = (float *) malloc(MAXP * sizeof(float)); hm->PosX = (float *) malloc(MAXP * sizeof(float)); hm->PosY = (float *) malloc(MAXP * sizeof(float)); hm->PosZ = (float *) malloc(MAXP * sizeof(float)); hm->VelX = (float *) malloc(MAXP * sizeof(float)); hm->VelY = (float *) malloc(MAXP * sizeof(float)); hm->VelZ = (float *) malloc(MAXP * sizeof(float)); hm->Density = (float *) malloc(MAXP * sizeof(float)); hm->Energy = (float *) malloc(MAXP * sizeof(float)); hm->Pressure = (float *) malloc(MAXP * sizeof(float)); hm->Sound = (float *) malloc(MAXP * sizeof(float)); hm->VelDotX = (float *) malloc(MAXP * sizeof(float)); hm->VelDotY = (float *) malloc(MAXP * sizeof(float)); hm->VelDotZ = (float *) malloc(MAXP * sizeof(float)); hm->DensityDot = (float *) malloc(MAXP * sizeof(float)); hm->EnergyDot = (float *) malloc(MAXP * sizeof(float)); hm->PosX0 = (float *) malloc(MAXP * sizeof(float)); hm->PosY0 = (float *) malloc(MAXP * sizeof(float)); hm->PosZ0 = (float *) malloc(MAXP * sizeof(float)); hm->VelX0 = (float *) malloc(MAXP * sizeof(float)); hm->VelY0 = (float *) malloc(MAXP * sizeof(float)); hm->VelZ0 = (float *) malloc(MAXP * sizeof(float)); hm->Density0 = (float *) malloc(MAXP * sizeof(float)); hm->Energy0 = (float *) malloc(MAXP * sizeof(float)); hm->Hash = (int *) malloc(MAXP * sizeof(int)); hm->Index = (int *) malloc(MAXP * sizeof(int)); hm->List = (int *) malloc(MAXP * MAXN * sizeof(int)); hm->IntDummy = (int *) malloc(MAXP * sizeof(int)); hm->FloatDummy = (float *) malloc(MAXP * sizeof(float)); hm->SetStart = (int *) malloc(MAXS * sizeof(int)); hm->SetStop = (int *) malloc(MAXS * sizeof(int)); return 0; } int initDevice(struct model *dm) { cudaMalloc((void**) &(dm->Material), (MAXP * sizeof(int))); cudaMalloc((void**) &(dm->Mass), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Smooth), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosX), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosY), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosZ), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelX), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelY), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelZ), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Density), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Energy), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Pressure), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Sound), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelDotX), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelDotY), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelDotZ), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->DensityDot), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->EnergyDot), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosX0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosY0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosZ0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelX0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelY0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelZ0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Density0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Energy0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Hash), (MAXP * sizeof(int))); cudaMalloc((void**) &(dm->Index), (MAXP * sizeof(int))); cudaMalloc((void**) &(dm->List), (MAXP * MAXN * sizeof(int))); cudaMalloc((void**) &(dm->IntDummy), (MAXP * sizeof(int))); cudaMalloc((void**) &(dm->FloatDummy), (MAXP * sizeof(float))); 
cudaMalloc((void**) &(dm->SetStart), (MAXS * sizeof(int))); cudaMalloc((void**) &(dm->SetStop), (MAXS * sizeof(int))); return 0; } int printData(struct model *hm) { /** * \brief Particle data file output * * Saves particle data on a disk file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; int i; // Stream file position stream = fopen("new_pos.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fclose(stream); // Stream file velocity stream = fopen("new_vel.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]); fclose(stream); // Stream file info stream = fopen("new_info.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i %+14.8e %+14.8e \n", hm->Material[i], hm->Mass[i], hm->Smooth[i]); fclose(stream); // Stream file field stream = fopen("new_field.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->Density[i], hm->Pressure[i], hm->Energy[i]); fclose(stream); /* // Stream file add1 stream = fopen("new_debug.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%d %d %d %f %f %f\n", i, hm->Index[i], hm->Hash[i], hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fclose(stream); */ // Stream file add1 stream = fopen("new_debug.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%d %f %f %f %f %f %f\n", i, hm->VelX[i], hm->VelY[i], hm->VelZ[i], hm->Density[i], hm->Energy[i], hm->Pressure[i]); fclose(stream); /* for (i = 0; i < hm->pn; i++) { printf("%d - ", i); for (int j = 0; j < MAXN; j++) printf("%d ", hm->List[i * MAXN +j]); printf("\n"); } */ return 0; } int outputVTK(struct model *hm, int ss) { /** * \brief Output Data file * * Saves vtk data file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; char filename[80]; int i; // Stream position file sprintf(filename, "out%05d.vtk", ss); stream = fopen(filename, "w"); fprintf(stream, "# vtk DataFile Version 2.0\n"); fprintf(stream, "Unstructured Grid Example\n"); fprintf(stream, "ASCII\n"); fprintf(stream, "DATASET UNSTRUCTURED_GRID\n"); fprintf(stream, "POINTS %i float\n", hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e %+e %+e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fprintf(stream, "CELLS %i %i \n", hm->pn, 2*hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i %i \n", 1, i); fprintf(stream, "CELL_TYPES %i \n", hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i \n", 1); fprintf(stream, "POINT_DATA %i \n", hm->pn); fprintf(stream, "SCALARS material int 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+d \n", hm->Material[i]); fprintf(stream, "SCALARS density float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Density[i]); fprintf(stream, "SCALARS pressure float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Pressure[i]); fprintf(stream, "SCALARS energy float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Energy[i]); fprintf(stream, "VECTORS velocity float\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e %+e %+e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]); fclose(stream); /* for (i = 0; i < hm->pn; i++) printf("%d %d \n", i, hm->Hash[i]); printf("\n\n\n"); for (i = 0; i < hm->SetStart.size(); i++) printf("%d %d %d \n", i, 
hm->SetStart[i], hm->SetStop[i]); for (i = 0; i < hm->pn; i++) { printf("%d - ", i); for (j = 0; j < MAXN; j++) printf("%d ", hm->List[i*MAXN +j]); printf("\n"); } */ return 0; } __host__ void addInletHost(const int pn, const int* Index, const float* Smooth, float* PosX, float* PosY, float* PosZ, float* PosX0, float* PosY0, float* PosZ0) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; float dx, dy, dz, d; for (ip = 0; ip < pn; ip++) if (Index[ip] == 1) { dx = PosX[ip] - PosX0[ip]; dy = PosY[ip] - PosY0[ip]; dz = PosZ[ip] - PosZ0[ip]; d = sqrtf(dx*dx + dy*dy + dz*dz); dx *= Smooth[ip] / (1.2f * d); dy *= Smooth[ip] / (1.2f * d); dz *= Smooth[ip] / (1.2f * d); PosX[ip] -= dx; PosY[ip] -= dy; PosZ[ip] -= dz; PosX0[ip] = PosX[ip]; PosY0[ip] = PosY[ip]; PosZ0[ip] = PosZ[ip]; } } __host__ void updateHashHost(const int pn, const struct grid Grid, const float* PosX, const float* PosY, const float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, ix, iy, iz, ic; for (ip = 0; ip < pn; ip++) { ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size); iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size); iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; Hash[ip] = ic; } } __global__ void updateHashDevice(const int pn, const struct grid Grid, const float* PosX, const float* PosY, const float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, ix, iy, iz, ic; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size); iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size); iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; Hash[ip] = ic; } } __host__ void checkBoundariesHost(const int pn, const struct grid Grid, const float* PosX, const float* PosY, const float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; float minX, maxX; float minY, maxY; float minZ, maxZ; minX = Grid.oX; maxX = Grid.oX + Grid.size * Grid.nX; minY = Grid.oY; maxY = Grid.oY + Grid.size * Grid.nY; minZ = Grid.oZ; maxZ = Grid.oZ + Grid.size * Grid.nZ; for (ip = 0; ip < pn; ip++) { if ((PosX[ip] < minX) || (PosX[ip] > maxX) || (PosY[ip] < minY) || (PosY[ip] > maxY) || (PosZ[ip] < minZ) || (PosZ[ip] > maxZ)) { Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } } __global__ void checkBoundariesDevice(const int pn, const struct grid Grid, float* PosX, float* PosY, float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; float minX, maxX; float minY, maxY; float minZ, maxZ; ip = threadIdx.x + blockDim.x * blockIdx.x; minX = Grid.oX; maxX = Grid.oX + Grid.size * Grid.nX; minY = Grid.oY; maxY = Grid.oY + Grid.size * Grid.nY; minZ = Grid.oZ; maxZ = Grid.oZ + Grid.size * Grid.nZ; if (ip < pn) { if ((PosX[ip] < minX) || (PosX[ip] > maxX) || (PosY[ip] < minY) || (PosY[ip] > maxY) || (PosZ[ip] < minZ) || (PosZ[ip] > maxZ)) { PosX[ip] = 0.5f*(minX + maxX); PosY[ip] = 0.5f*(minY + maxY); PosZ[ip] = 0.5f*(minZ + maxZ); //Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } /* if (PosX[ip] < minX) PosX[ip] = minX; if (PosY[ip] < minY) PosY[ip] = minY; if (PosZ[ip] < minZ) PosZ[ip] = minZ; if (PosX[ip] > maxX) PosX[ip] = maxX; if (PosY[ip] > maxY) PosY[ip] = maxY; if (PosZ[ip] > maxZ) PosZ[ip] = maxZ; if (Hash[ip] > Grid.nX * Grid.nY * Grid.nZ) Hash[ip] = Grid.nX 
* Grid.nY * Grid.nZ; */ } } __host__ void checkOutletHost(const int pn, const struct grid Grid, const float* PosX, const float* PosY, const float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ float d; int ip, i; for (ip = 0; ip < pn; ip++) { for (i = 0; i < 10; i++) { d = 0.0f; d += (PosX[ip] - hOut[i].oX) * hOut[i].nX; d += (PosY[ip] - hOut[i].oY) * hOut[i].nY; d += (PosZ[ip] - hOut[i].oZ) * hOut[i].nZ; if (d > 0.0f) { Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } } } __global__ void checkOutletDevice(const int pn, const struct grid Grid, const float* PosX, const float* PosY, const float* PosZ, int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ float d; int ip, i; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { for (i = 0; i < 10; i++) { d = 0.0f; d += (PosX[ip] - dOut[i].oX) * dOut[i].nX; d += (PosY[ip] - dOut[i].oY) * dOut[i].nY; d += (PosZ[ip] - dOut[i].oZ) * dOut[i].nZ; if (d > 0.0f) { Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } } } /* __host__ void makeInletHost(struct model *hm, struct pointer_model *pm) { int i, j, iu, iv; for (i = 0; i < 10; i++) if (hIn[i].Material != 0) { hIn[i].Distance += hIn[i].Velocity * hRun.dt; if ((1.2f * hIn[i].Distance) > hIn[i].Smooth) { hIn[i].Distance = 0.0f; for (iv = 0; iv < hIn[i].nv; iv++) { for (iu = 0; iu < hIn[i].nu; iu++) { hm->pn++; pm->pn++; hm->Material.push_back(hIn[i].Material); hm->Mass.push_back(hIn[i].Mass); hm->Smooth.push_back(hIn[i].Smooth); hm->PosX.push_back(hIn[i].oX + iu * hIn[i].uX + iv * hIn[i].vX); hm->PosY.push_back(hIn[i].oY + iu * hIn[i].uY + iv * hIn[i].vY); hm->PosZ.push_back(hIn[i].oZ + iu * hIn[i].uZ + iv * hIn[i].vZ); hm->VelX.push_back(hIn[i].Velocity * hIn[i].nX); hm->VelY.push_back(hIn[i].Velocity * hIn[i].nY); hm->VelZ.push_back(hIn[i].Velocity * hIn[i].nZ); hm->Density.push_back(hIn[i].Density); hm->Energy.push_back(hIn[i].Energy); hm->PosX0.push_back(hIn[i].oX + iu * hIn[i].uX + iv * hIn[i].vX); hm->PosY0.push_back(hIn[i].oY + iu * hIn[i].uY + iv * hIn[i].vY); hm->PosZ0.push_back(hIn[i].oZ + iu * hIn[i].uZ + iv * hIn[i].vZ); hm->VelX0.push_back(hIn[i].Velocity * hIn[i].nX); hm->VelY0.push_back(hIn[i].Velocity * hIn[i].nY); hm->VelZ0.push_back(hIn[i].Velocity * hIn[i].nZ); hm->Density0.push_back(hIn[i].Density); hm->Energy0.push_back(hIn[i].Energy); hm->VelDotX.push_back(0.0f); hm->VelDotY.push_back(0.0f); hm->VelDotZ.push_back(0.0f); hm->DensityDot.push_back(0.0f); hm->EnergyDot.push_back(0.0f); hm->Pressure.push_back(0.0f); hm->Sound.push_back(0.0f); for (j = 0; j < MAXN; j++) hm->List.push_back(0); hm->Hash.push_back(0); hm->Index.push_back(0); hm->IntDummy.push_back(0); hm->FloatDummy.push_back(0.0f); } } } } copyHostToPointer(hm, pm); } void makeInletDevice(struct model *dm) { int i, j, iu, iv; for (i = 0; i < 10; i++) if (dIn[i].Material != 0) { dIn[i].Distance += dIn[i].Velocity * hRun.dt; if ((1.2f * dIn[i].Distance) > dIn[i].Smooth) { dIn[i].Distance = 0.0f; printf("Inlet!\n"); for (iv = 0; iv < dIn[i].nv; iv++) { for (iu = 0; iu < dIn[i].nu; iu++) { dm->pn++; dm->Material.push_back(dIn[i].Material); dm->Mass.push_back(dIn[i].Mass); dm->Smooth.push_back(dIn[i].Smooth); dm->PosX.push_back(dIn[i].oX + iu * dIn[i].uX + iv * dIn[i].vX); dm->PosY.push_back(dIn[i].oY + iu * dIn[i].uY + iv * dIn[i].vY); dm->PosZ.push_back(dIn[i].oZ + iu * dIn[i].uZ + iv * dIn[i].vZ); dm->VelX.push_back(dIn[i].Velocity * dIn[i].nX); dm->VelY.push_back(dIn[i].Velocity * dIn[i].nY); dm->VelZ.push_back(dIn[i].Velocity * 
dIn[i].nZ); dm->Density.push_back(dIn[i].Density); dm->Energy.push_back(dIn[i].Energy); dm->PosX0.push_back(dIn[i].oX + iu * dIn[i].uX + iv * dIn[i].vX); dm->PosY0.push_back(dIn[i].oY + iu * dIn[i].uY + iv * dIn[i].vY); dm->PosZ0.push_back(dIn[i].oZ + iu * dIn[i].uZ + iv * dIn[i].vZ); dm->VelX0.push_back(dIn[i].Velocity * dIn[i].nX); dm->VelY0.push_back(dIn[i].Velocity * dIn[i].nY); dm->VelZ0.push_back(dIn[i].Velocity * dIn[i].nZ); dm->Density0.push_back(dIn[i].Density); dm->Energy0.push_back(dIn[i].Energy); dm->VelDotX.push_back(0.0f); dm->VelDotY.push_back(0.0f); dm->VelDotZ.push_back(0.0f); dm->DensityDot.push_back(0.0f); dm->EnergyDot.push_back(0.0f); dm->Pressure.push_back(0.0f); dm->Sound.push_back(0.0f); for (j = 0; j < MAXN; j++) dm->List.push_back(0); dm->Hash.push_back(0); dm->Index.push_back(0); dm->IntDummy.push_back(0); dm->FloatDummy.push_back(0.0f); } } } } } */ __host__ void updateSetsHost(const int pn, int *SetStart, int *SetStop, const int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; int hash, nextHash, prevHash; for (ip = 0; ip < pn; ip++) { hash = Hash[ip]; if (ip == 0) prevHash = -1; else prevHash = Hash[ip -1]; if (ip == pn -1) nextHash = -1; else nextHash = Hash[ip +1]; if (hash != prevHash) SetStart[hash] = ip; if (hash != nextHash) SetStop[hash] = ip +1; } } __global__ void updateSetsDevice(const int pn, int *SetStart, int *SetStop, const int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ __shared__ int prevHash[THREADS]; __shared__ int nextHash[THREADS]; int ip; int hash; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= pn) return; hash = Hash[ip]; if (threadIdx.x < THREADS -1) prevHash[threadIdx.x +1] = hash; if (threadIdx.x > 0) nextHash[threadIdx.x -1] = hash; if (threadIdx.x == 0) { if (ip == 0) prevHash[threadIdx.x] = -1; else prevHash[threadIdx.x] = Hash[ip -1]; } if (threadIdx.x == THREADS -1) { if (ip == pn -1) nextHash[threadIdx.x] = -1; else nextHash[threadIdx.x] = Hash[ip +1]; } __syncthreads(); if (hash != prevHash[threadIdx.x]) SetStart[hash] = ip; if (hash != nextHash[threadIdx.x]) SetStop[hash] = ip +1; } __host__ void updateListHost(const int pn, int *List, const int* SetStart, const int* SetStop, const struct grid Grid, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ) { int ip, ic, ix, iy, iz, i, j, k, jp, jc, np; float dx, dy, dz, dr; // Particles list is filled for (ip = 0; ip < pn; ip++) { ix = (int) ((PosX[ip] - Grid.oX) / Grid.size); iy = (int) ((PosY[ip] - Grid.oY) / Grid.size); iz = (int) ((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; np = 0; for (k = -1; k <= 1; k++) { for (j = -1; j <= 1; j++) { for (i = -1; i <= 1; i++) { jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY; for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) { dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) { List[ip * MAXN + np] = jp; np++; } } } } } while (np < MAXN) { List[ip * MAXN + np] = ip; np++; } } } __global__ void updateListDevice(const int pn, int *List, const int* SetStart, const int* SetStop, const struct grid Grid, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ) { int ip, ic, ix, iy, iz, i, j, k, jp, jc, np; float dx, dy, dz, dr; // Particles list is filled ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= pn) return; ix = (int) ((PosX[ip] - Grid.oX) / 
Grid.size); iy = (int) ((PosY[ip] - Grid.oY) / Grid.size); iz = (int) ((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; np = 0; for (k = -1; k <= 1; k++) { for (j = -1; j <= 1; j++) { for (i = -1; i <= 1; i++) { jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY; for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) { dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) { List[ip * MAXN + np] = jp; np++; } } } } } while (np < MAXN) { List[ip * MAXN + np] = ip; np++; } } struct is_in { __host__ __device__ bool operator()(int x) { return x == 1; } }; struct is_out { __host__ __device__ bool operator()(int x) { return x == -1; } }; int neighbourListDevice(struct model *dm) { int blocks, threads; blocks = (dm->pn + THREADS - 1) / THREADS; threads = THREADS; updateHashDevice <<< blocks, threads >>> (dm->pn, dGrid, dm->PosX, dm->PosY, dm->PosZ, dm->Hash); checkOutletDevice <<< blocks, threads >>> (dm->pn, dGrid, dm->PosX, dm->PosY, dm->PosZ, dm->Hash); checkBoundariesDevice <<< blocks, threads >>> (dm->pn, dGrid, dm->PosX, dm->PosY, dm->PosZ, dm->Hash); // wrap raw pointer with a device_ptr thrust::device_ptr<int> tIndex(dm->Index); thrust::device_ptr<int> tHash(dm->Hash); thrust::device_ptr<int> tMaterial(dm->Material); thrust::device_ptr<float> tMass(dm->Mass); thrust::device_ptr<float> tSmooth(dm->Smooth); thrust::device_ptr<float> tPosX(dm->PosX); thrust::device_ptr<float> tPosY(dm->PosY); thrust::device_ptr<float> tPosZ(dm->PosZ); thrust::device_ptr<float> tVelX(dm->VelX); thrust::device_ptr<float> tVelY(dm->VelY); thrust::device_ptr<float> tVelZ(dm->VelZ); thrust::device_ptr<float> tDensity(dm->Density); thrust::device_ptr<float> tEnergy(dm->Energy); thrust::device_ptr<int> tIntDummy(dm->IntDummy); thrust::device_ptr<float> tFloatDummy(dm->FloatDummy); // use device_ptr in thrust algorithms thrust::sequence(tIndex, tIndex + dm->pn, 1); thrust::sort_by_key(tHash, tHash + dm->pn, tIndex); thrust::copy(tMaterial, tMaterial + dm->pn, tIntDummy); thrust::gather(tIndex, tIndex + dm->pn, tIntDummy, tMaterial); thrust::copy(tMass, tMass + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tMass); thrust::copy(tSmooth, tSmooth + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tSmooth); thrust::copy(tPosX, tPosX + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosX); thrust::copy(tPosY, tPosY + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosY); thrust::copy(tPosZ, tPosZ + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosZ); thrust::copy(tVelX, tVelX + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelX); thrust::copy(tVelY, tVelY + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelY); thrust::copy(tVelZ, tVelZ + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelZ); thrust::copy(tDensity, tDensity + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tDensity); thrust::copy(tEnergy, tEnergy + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tEnergy); thrust::device_ptr<int> tSetStart(dm->SetStart); thrust::device_ptr<int> tSetStop(dm->SetStop); thrust::fill(tSetStart, tSetStart + dm->pn, 0); thrust::fill(tSetStop, tSetStop + dm->pn, 0); updateSetsDevice <<< blocks, threads >>> (dm->pn, 
dm->SetStart, dm->SetStop, dm->Hash); updateListDevice <<< blocks, threads >>> (dm->pn, dm->List, dm->SetStart, dm->SetStop, dGrid, dm->Smooth, dm->PosX, dm->PosY, dm->PosZ); return 0; } int iSort(int *array, int *perm, int *dummy, int n) { int i; for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = dummy[perm[i]]; return 0; } int fSort(float *array, int *perm, int n) { int i; static float* dummy = NULL; if (!dummy) dummy = (float *) malloc(MAXP * sizeof(float)); for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = dummy[perm[i]]; return 0; } int mapCompare(const void *a, const void *b) { int c; struct pair m1, m2; c = 0; m1 = *(struct pair*)a; m2 = *(struct pair*)b; if (m1.key < m2.key) c = -1; if (m1.key > m2.key) c = 1; return c; } int neighbourListHost(struct model *hm) { struct pair map[MAXP]; updateHashHost(hm->pn, hGrid, hm->PosX, hm->PosY, hm->PosZ, hm->Hash); /* checkOutletHost(hm->pn, hGrid, hm->PosX, hm->PosY, hm->PosZ, hm->Hash); checkBoundariesHost(hm->pn, hGrid, hm->PosX, hm->PosY, hm->PosZ, hm->Hash); */ //thrust::sequence(hm->Index, hm->Index + hm->pn); for (int ip = 0; ip < hm->pn; ip++) hm->Index[ip] = ip; //for (int ip = 0; ip < hm->pn; ip++) printf("%d\n", hm->Hash[ip]), for (int ip = 0; ip < hm->pn; ip++) { map[ip].key = hm->Hash[ip]; map[ip].value = hm->Index[ip]; } qsort(map, hm->pn, sizeof(struct pair), mapCompare); for (int ip = 0; ip < hm->pn; ip++) { hm->Hash[ip] = map[ip].key; hm->Index[ip] = map[ip].value; } //for (int ip = 0; ip < hm->pn; ip++) hm->Index[ip] = map[ip].value; //thrust::sort_by_key(hm->Hash, hm->Hash + hm->pn, hm->Index); /* thrust::copy(hm->Material, hm->Material + hm->pn, hm->IntDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->IntDummy, hm->Material); thrust::copy(hm->Mass, hm->Mass + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->Mass); thrust::copy(hm->Smooth, hm->Smooth + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->Smooth); thrust::copy(hm->PosX, hm->PosX + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->PosX); thrust::copy(hm->PosY, hm->PosY + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->PosY); thrust::copy(hm->PosZ, hm->PosZ + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->PosZ); thrust::copy(hm->VelX, hm->VelX + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->VelX); thrust::copy(hm->VelY, hm->VelY + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->VelY); thrust::copy(hm->VelZ, hm->VelZ + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->VelZ); thrust::copy(hm->Density, hm->Density + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->Density); thrust::copy(hm->Energy, hm->Energy + hm->pn, hm->FloatDummy); thrust::gather(hm->Index, hm->Index + hm->pn, hm->FloatDummy, hm->Energy); */ //iSort(hm->Hash, hm->Index, hm->pn); iSort(hm->Material, hm->Index, hm->IntDummy, hm->pn); fSort(hm->Mass, hm->Index, hm->pn); fSort(hm->Smooth, hm->Index, hm->pn); fSort(hm->PosX, hm->Index, hm->pn); fSort(hm->PosY, hm->Index, hm->pn); fSort(hm->PosZ, hm->Index, hm->pn); fSort(hm->VelX, hm->Index, hm->pn); fSort(hm->VelY, hm->Index, hm->pn); fSort(hm->VelZ, hm->Index, hm->pn); fSort(hm->Density, hm->Index, hm->pn); 
fSort(hm->Energy, hm->Index, hm->pn); //thrust::fill(hm->SetStart, hm->SetStart + hm->pn, 0); for (int i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStart[i] = 0; //thrust::fill(hm->SetStop, hm->SetStop + hm->pn, 0); for (int i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStop[i] = 0; updateSetsHost(hm->pn, hm->SetStart, hm->SetStop, hm->Hash); updateListHost(hm->pn, hm->List, hm->SetStart, hm->SetStop, hGrid, hm->Smooth, hm->PosX, hm->PosY, hm->PosZ); return 0; } int backupDataHost(struct model *hm) { thrust::copy(hm->PosX, hm->PosX + hm->pn, hm->PosX0); thrust::copy(hm->PosY, hm->PosY + hm->pn, hm->PosY0); thrust::copy(hm->PosZ, hm->PosZ + hm->pn, hm->PosZ0); thrust::copy(hm->VelX, hm->VelX + hm->pn, hm->VelX0); thrust::copy(hm->VelY, hm->VelY + hm->pn, hm->VelY0); thrust::copy(hm->VelZ, hm->VelZ + hm->pn, hm->VelZ0); thrust::copy(hm->Density, hm->Density + hm->pn, hm->Density0); thrust::copy(hm->Energy, hm->Energy + hm->pn, hm->Energy0); return 0; } int backupDataHostOld(struct model *hm) { memcpy(hm->PosX0, hm->PosX, MAXP * sizeof(float)); memcpy(hm->PosY0, hm->PosY, MAXP * sizeof(float)); memcpy(hm->PosZ0, hm->PosZ, MAXP * sizeof(float)); memcpy(hm->VelX0, hm->VelX, MAXP * sizeof(float)); memcpy(hm->VelY0, hm->VelY, MAXP * sizeof(float)); memcpy(hm->VelZ0, hm->VelZ, MAXP * sizeof(float)); memcpy(hm->Density0, hm->Density, MAXP * sizeof(float)); memcpy(hm->Energy0, hm->Energy, MAXP * sizeof(float)); return 0; } int backupDataDevice(struct model *dm) { // wrap raw pointer with a device_ptr thrust::device_ptr<float> tPosX(dm->PosX); thrust::device_ptr<float> tPosY(dm->PosY); thrust::device_ptr<float> tPosZ(dm->PosZ); thrust::device_ptr<float> tVelX(dm->VelX); thrust::device_ptr<float> tVelY(dm->VelY); thrust::device_ptr<float> tVelZ(dm->VelZ); thrust::device_ptr<float> tDensity(dm->Density); thrust::device_ptr<float> tEnergy(dm->Energy); thrust::device_ptr<float> tPosX0(dm->PosX0); thrust::device_ptr<float> tPosY0(dm->PosY0); thrust::device_ptr<float> tPosZ0(dm->PosZ0); thrust::device_ptr<float> tVelX0(dm->VelX0); thrust::device_ptr<float> tVelY0(dm->VelY0); thrust::device_ptr<float> tVelZ0(dm->VelZ0); thrust::device_ptr<float> tDensity0(dm->Density0); thrust::device_ptr<float> tEnergy0(dm->Energy0); // use device_ptr in thrust algorithms thrust::copy(tPosX, tPosX + dm->pn, tPosX0); thrust::copy(tPosY, tPosY + dm->pn, tPosY0); thrust::copy(tPosZ, tPosZ + dm->pn, tPosZ0); thrust::copy(tVelX, tVelX + dm->pn, tVelX0); thrust::copy(tVelY, tVelY + dm->pn, tVelY0); thrust::copy(tVelZ, tVelZ + dm->pn, tVelZ0); thrust::copy(tDensity, tDensity + dm->pn, tDensity0); thrust::copy(tEnergy, tEnergy + dm->pn, tEnergy0); return 0; } __host__ __device__ float pressureGas(float* properties, float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c = (k(k -1) u)^0.5 * * k = properties[1] * pshift = properties[2] * * \date Jun 10, 2010 * \author Luca Massidda */ float p; p = (properties[1] - 1.0f) * rho * u; p += properties[2]; return p; } __host__ __device__ float pressurePoly(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p; mu = 
(rho - properties[0]) / properties[0]; if (mu < 0) p = (properties[6] * mu + properties[7] * mu*mu) + (properties[4] * properties[0] * u); else p = (properties[1] * mu + properties[2] * mu*mu + properties[3] * mu*mu*mu) + ((properties[4] + properties[5] * mu) * properties[0] * u); //if (p < properties[8]) p = properties[8]; return p; } __host__ __device__ float pressureShock(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p, ph; mu = (rho - properties[0]) / properties[0]; ph = (properties[0] * powf(properties[1], 2) * mu*(1.0f +mu)) / powf((1.0f - (properties[3] -1.0f) * mu), 2); p = ph + properties[2] * properties[0] * (u - (0.5f * ph / properties[0] * (mu / (1.0f + mu)))); //if (p < properties[4]) p = properties[4]; return p; } __host__ __device__ float pressureTait(float* properties, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float p; p = properties[0] * powf(properties[1], 2) / 7.0f * (powf((rho / properties[0]), 7) - 1.0f); //if (p < properties[2]) p = properties[2]; return p; } __host__ __device__ float soundGas(float* properties ,float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c = (k(k -1) u)^0.5 * * k = properties[1] * pshift = properties[2] * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = sqrtf(properties[1] * (properties[1] - 1.0f) * u); return c; } __host__ __device__ float soundPoly(float* properties , float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = sqrtf(properties[1] / rho); return c; } __host__ __device__ float soundShock(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = properties[1]; return c; } __host__ __device__ float soundTait(float* properties, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = properties[1]; return c; } __host__ __device__ float densityPoly(float* properties , float rho) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression 
* p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } __host__ __device__ float densityShock(float* properties, float rho) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } __host__ __device__ float densityTait(float* properties, float rho) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } __host__ void updateParticlesHost(const int pn, const float alpha, const int* Material, const float* VelDotX, const float* VelDotY, const float* VelDotZ, const float* DensityDot, const float* EnergyDot, const float* PosX0, const float* PosY0, const float* PosZ0, const float* VelX0, const float* VelY0, const float* VelZ0, const float* Density0, const float* Energy0, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* Density, float* Energy, float* Pressure, float* Sound) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, i; int iMaterial; for (ip = 0; ip < pn; ip++) if (Material[ip] != 0) { PosX[ip] = PosX0[ip] + alpha * (PosX[ip] + hRun.dt * VelX[ip] - PosX0[ip]); PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + hRun.dt * VelY[ip] - PosY0[ip]); PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + hRun.dt * VelZ[ip] - PosZ0[ip]); VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + hRun.dt * VelDotX[ip] - VelX0[ip]); VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + hRun.dt * VelDotY[ip] - VelY0[ip]); VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + hRun.dt * VelDotZ[ip] - VelZ0[ip]); //VelZ[ip] = 0.0f; Density[ip] = Density0[ip] + alpha * (Density[ip] + hRun.dt * DensityDot[ip] - Density0[ip]); Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + hRun.dt * EnergyDot[ip] - Energy0[ip]); iMaterial = Material[ip]; if (iMaterial <= 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; } for (i = 0; i < 10; i++) if ((PosX[ip] > hFix[i].minX) && (PosX[ip] < hFix[i].maxX) && (PosY[ip] > hFix[i].minY) && (PosY[ip] < hFix[i].maxY) && (PosZ[ip] > hFix[i].minZ) && (PosZ[ip] < hFix[i].maxZ)) { VelX[ip] = hFix[i].velX; VelY[ip] = hFix[i].velY; VelZ[ip] = hFix[i].velZ; } iMaterial = abs(iMaterial); if (hMatType[iMaterial] == 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; } switch (hMatType[iMaterial]) { case (0) : // BOUNDARY Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]); Pressure[ip] = 0.0f*pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (1) : // IDEAL GAS EOS Pressure[ip] = pressureGas(hMatProp[iMaterial], Density[ip], 
Energy[ip]); Sound[ip] = soundGas(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS Density[ip] = densityPoly(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressurePoly(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundPoly(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (3) : // MIE-GRUNEISEN SHOCK EOS Density[ip] = densityShock(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureShock(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundShock(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (4) : // TAIT EOS Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], Energy[ip]); break; default : Pressure[ip] = 0.0f; } } } __global__ void updateParticlesDevice(const int pn, const float alpha, const int* Material, const float* VelDotX, const float* VelDotY, const float* VelDotZ, const float* DensityDot, const float* EnergyDot, const float* PosX0, const float* PosY0, const float* PosZ0, const float* VelX0, const float* VelY0, const float* VelZ0, const float* Density0, const float* Energy0, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* Density, float* Energy, float* Pressure, float* Sound) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, i; int iMaterial; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { PosX[ip] = PosX0[ip] + alpha * (PosX[ip] + dRun.dt * VelX[ip] - PosX0[ip]); PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + dRun.dt * VelY[ip] - PosY0[ip]); PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + dRun.dt * VelZ[ip] - PosZ0[ip]); VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + dRun.dt * VelDotX[ip] - VelX0[ip]); VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + dRun.dt * VelDotY[ip] - VelY0[ip]); VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + dRun.dt * VelDotZ[ip] - VelZ0[ip]); //VelZ[ip] = 0.0f; Density[ip] = Density0[ip] + alpha * (Density[ip] + dRun.dt * DensityDot[ip] - Density0[ip]); Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + dRun.dt * EnergyDot[ip] - Energy0[ip]); iMaterial = Material[ip]; for (i = 0; i < 10; i++) if ((PosX[ip] > dFix[i].minX) && (PosX[ip] < dFix[i].maxX) && (PosY[ip] > dFix[i].minY) && (PosY[ip] < dFix[i].maxY) && (PosZ[ip] > dFix[i].minZ) && (PosZ[ip] < dFix[i].maxZ)) { VelX[ip] = dFix[i].velX; VelY[ip] = dFix[i].velY; VelZ[ip] = dFix[i].velZ; } if (dMatType[iMaterial] == 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; } switch (dMatType[iMaterial]) { case (0) : // BOUNDARY //Pressure[ip] = pressureShock(dMatProp[iMaterial], iDensity, iEnergy); //Sound[ip] = soundShock(dMatProp[iMaterial], iDensity, iEnergy); Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]); Pressure[ip] = 0.0f*pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (1) : // IDEAL GAS EOS Pressure[ip] = pressureGas(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundGas(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS Density[ip] = densityPoly(dMatProp[iMaterial], Density[ip]); Pressure[ip] = pressurePoly(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundPoly(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (3) : // MIE-GRUNEISEN SHOCK EOS Density[ip] = densityShock(dMatProp[iMaterial], 
Density[ip]); Pressure[ip] = pressureShock(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundShock(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (4) : // TAIT EOS Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]); break; default : Pressure[ip] = 0.0f; } } } __host__ __device__ float kernelWendland(float r, float h) { float q, alpha, w; /** * \brief Wendland kernel * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D alpha = 15.0f / (16.0f * PI * h * h * h); // for 2D //alpha = 7.0f / (4.0f * PI * h * h); w = 0.0f; if (q < 2) { w = powf((1.0f - 0.5f*q),4); w *= 1.0f + 2.0f*q; w *= alpha; } return w; } __host__ __device__ float kernelDerivWendland(float r, float h) { float q, alpha, dwdr; /** * \brief Wendland kernel derivative * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D alpha = 15.0f / (16.0f * PI * h * h * h); // for 2D //alpha = 7.0f / (4.0f * PI * h * h); dwdr = 0.0f; if (q < 2) { dwdr = 5.0f / 8.0f * q * powf((q - 2.0f), 3) ; dwdr *= alpha / h; } return dwdr; } __host__ __device__ float kernelGauss(float r, float h) { float r2, q2, h2, alpha, w;//, dwdr; /** * \brief Gauss kernel * * \date Dec 21, 2010 * \author Luca Massidda */ r2 = r * r ; h2 = h * h; q2 = r2 / h2; alpha = 1.0 / (pow(h, 1) * pow(3.14, 0.5)); //alpha = 1.0f / (3.14f * h2); w = 0.0f; //dwdr = 0.0; if (q2 < 4.0f) { w = alpha * expf(-q2); //dwdr = w * (-2.0 * r / h2); } return w; } __host__ __device__ float kernelDerivGauss(float r, float h) { float r2, q2, h2, alpha, w, dwdr; /** * \brief Gauss kernel * * \date Dec 21, 2010 * \author Luca Massidda */ r2 = r * r ; h2 = h * h; q2 = r2 / h2; alpha = 1.0f / (h * powf(3.14f, 0.5f)); //alpha = 1.0f / (3.14f * h2); w = 0.0f; dwdr = 0.0f; if (q2 < 4.0f) { w = alpha * expf(-q2); dwdr = w * (-2.0f * r / h2); } return dwdr; } __host__ __device__ float kernelSpiky(float r, float h) { float q, alpha, w; /** * \brief Spiky kernel * * \date Dec 21, 2010 * \author Luca Massidda */ q = r / h; alpha = 15.0f / (64.0f * 3.14f * pow(h, 3)); w = 0.0f; if (q < 2.0f) { w = alpha * powf(2.0f - q, 3); } return w; } __host__ __device__ float kernelDerivSpiky(float r, float h) { float q, alpha, dwdr; /** * \brief Gauss kernel * * \date Dec 21, 2010 * \author Luca Massidda */ q = r / h; alpha = -45.0f / (64.0f * 3.14f * pow(h, 4)); dwdr = 0.0; if (q < 2.0f) { dwdr = alpha * powf(2.0f - q, 2); } return dwdr; } __host__ void updateLoadsHost(const int pn, const int* Material, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* VelDotX, float* VelDotY, float* VelDotZ, float* EnergyDot) { int ip, i; for (ip = 0; ip < pn; ip++) { if (Material[ip] > 0) { for (i = 0; i < 10; i++) { if ((PosX[ip] > hLoad[i].minX) && (PosX[ip] < hLoad[i].maxX) && (PosZ[ip] < hLoad[i].maxZ) && (PosY[ip] > hLoad[i].minY) && (PosY[ip] < hLoad[i].maxY) && (PosZ[ip] > hLoad[i].minZ) && (PosZ[ip] < hLoad[i].maxZ)) { VelDotX[ip] += hLoad[i].gx; VelDotY[ip] += hLoad[i].gy; VelDotZ[ip] += hLoad[i].gz; EnergyDot[ip] += hLoad[i].w; } } } } } __global__ void updateLoadsDevice(const int pn, const int* Material, const float* PosX, const float* PosY, const float* PosZ, float* VelDotX, float* VelDotY, float* VelDotZ, float* EnergyDot) { int ip, i; ip = threadIdx.x + blockDim.x * blockIdx.x; if ((ip < pn) && (Material[ip] > 0)) { for (i = 0; i < 10; i++) { if ((PosX[ip] > dLoad[i].minX) && 
(PosX[ip] < dLoad[i].maxX) && (PosZ[ip] < dLoad[i].maxZ) && (PosY[ip] > dLoad[i].minY) && (PosY[ip] < dLoad[i].maxY) && (PosZ[ip] > dLoad[i].minZ) && (PosZ[ip] < dLoad[i].maxZ)) { VelDotX[ip] += dLoad[i].gx; VelDotY[ip] += dLoad[i].gy; VelDotZ[ip] += dLoad[i].gz; EnergyDot[ip] += dLoad[i].w; } } } } __host__ void balanceMassMomentumHost(const int pn, const int* List, const int* Material, const float* Mass, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const float* VelX, const float* VelY, const float* VelZ, const float* Density, const float* Pressure, const float* Sound, float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; float iSmooth, jMass; float dx, dy, dz, dr, dvr, dwdr, f, w, w0; for (ip = 0; ip < pn; ip++) { iDensityDot = 0.0f; iVelDotX = 0.0f; iVelDotY = 0.0f; iVelDotZ = 0.0f; iSmooth = Smooth[ip]; for (il = 0; il < MAXN; il++) { jp = List[ip * MAXN + il]; // for (jp = 0; jp < pn; jp++) { jMass = Mass[jp]; dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth; w = kernelWendland(dr, iSmooth); w0 = kernelWendland(0.0f, iSmooth); dwdr = kernelDerivWendland(dr, iSmooth); dvr = 0.0f; dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]); dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]); dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]); iDensityDot += jMass * dvr * dwdr / dr; // Calculate interparticle pressure action //f = -(Pressure[ip] + Pressure[jp]) // / (Density[ip] * Density[jp]); f = -(Pressure[ip] / powf(Density[ip], 2) + Pressure[jp] / powf(Density[jp], 2)); iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr; iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr; // Calculate shock correction for mass f = Density[ip] - Density[jp]; f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); iDensityDot += jMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0.0f) f = dvr; else f = 0.0f; f *= iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth); f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); f *= 0.03f; iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr; iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr; // Calculate boundary repulsion if (Material[ip] != Material[jp]) { f = 0.02f * w / w0 * Sound[ip] * Sound[jp] / dr; iVelDotX += jMass / (Mass[ip] + jMass) * f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += jMass / (Mass[ip] + jMass) * f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass / (Mass[ip] + jMass) * f * (PosZ[ip] - PosZ[jp]) / dr; } } DensityDot[ip] += iDensityDot; VelDotX[ip] += iVelDotX; VelDotY[ip] += iVelDotY; VelDotZ[ip] += iVelDotZ; } } __global__ void balanceMassMomentumDevice(const int pn, const int* List, const int* Material, const float* Mass, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const float* VelX, const float* VelY, const float* VelZ, const float* Density, const float* Pressure, const float* Sound, float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; float iSmooth, jMass; volatile float 
dx, dy, dz, dr, dvr, dwdr, f, w, w0, q; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { iDensityDot = 0.0f; iVelDotX = 0.0f; iVelDotY = 0.0f; iVelDotZ = 0.0f; iSmooth = Smooth[ip]; for (il = 0; il < MAXN; il++) { jp = List[ip * MAXN + il]; jMass = Mass[jp]; dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth; w = kernelWendland(dr, iSmooth); dwdr = kernelDerivWendland(dr, iSmooth); if (Material[ip] == Material[jp]) { dvr = 0.0f; dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]); dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]); dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]); iDensityDot += jMass * dvr * dwdr / dr; // Calculate interparticle pressure action f = -(Pressure[ip] / powf(Density[ip], 2) + Pressure[jp] / powf(Density[jp], 2)); f *= jMass * dwdr; iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr; // Calculate shock correction for mass f = Density[ip] - Density[jp]; f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); iDensityDot += jMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0.0f) f = dvr; else f = 0.0f; f *= iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth); f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); f *= 0.03f; f *= jMass * dwdr; iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr; } // Calculate boundary repulsion if (Material[ip] != Material[jp]) { f = 0.25f * w * Mass[jp] / Density[jp] / Smooth[jp] * powf(Sound[jp], 2); iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr; } } DensityDot[ip] += iDensityDot; VelDotX[ip] += iVelDotX; VelDotY[ip] += iVelDotY; VelDotZ[ip] += iVelDotZ; } } __host__ void balanceEnergyHost(const int pn, const float* Pressure, const float* Density, const float* DensityDot, float* EnergyDot) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ int ip; float iPressure, iDensity, iDensityDot; float iEnergyDot; for (ip = 0; ip < pn; ip++) { iPressure = Pressure[ip]; iDensity = Density[ip]; iDensityDot = DensityDot[ip]; iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity); EnergyDot[ip] += iEnergyDot; } } __global__ void balanceEnergyDevice(const int pn, const float* Pressure, const float* Density, const float* DensityDot, float* EnergyDot) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ volatile int ip; float iPressure, iDensity, iDensityDot; float iEnergyDot; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { iPressure = Pressure[ip]; iDensity = Density[ip]; iDensityDot = DensityDot[ip]; iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity); EnergyDot[ip] += iEnergyDot; } } int RKstepHost(struct model *hm, float alpha) { /* thrust::fill(hm->VelDotX, hm->VelDotX + hm->pn, 0.0f); thrust::fill(hm->VelDotY, hm->VelDotY + hm->pn, 0.0f); thrust::fill(hm->VelDotZ, hm->VelDotZ + hm->pn, 0.0f); thrust::fill(hm->DensityDot, hm->DensityDot + hm->pn, 0.0f); thrust::fill(hm->EnergyDot, hm->EnergyDot + hm->pn, 0.0f); */ for (int ip = 0; ip < hm->pn; ip++) { hm->VelDotX[ip] = 0.0f; hm->VelDotY[ip] = 0.0f; hm->VelDotZ[ip] = 0.0f; hm->DensityDot[ip] = 0.0f; hm->EnergyDot[ip] = 0.0f; } // External loads updateLoadsHost(hm->pn, hm->Material, hm->PosX, hm->PosY, hm->PosZ, 
hm->VelX, hm->VelY, hm->VelZ, hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->EnergyDot); // Calculate particle interactions balanceMassMomentumHost(hm->pn, hm->List, hm->Material, hm->Mass, hm->Smooth, hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ, hm->Density, hm->Pressure, hm->Sound, hm->DensityDot, hm->VelDotX, hm->VelDotY, hm->VelDotZ); balanceEnergyHost(hm->pn, hm->Pressure, hm->Density, hm->DensityDot, hm->EnergyDot); // Update particles updateParticlesHost(hm->pn, alpha, hm->Material, hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->DensityDot, hm->EnergyDot, hm->PosX0, hm->PosY0, hm->PosZ0, hm->VelX0, hm->VelY0, hm->VelZ0, hm->Density0, hm->Energy0, hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ, hm->Density, hm->Energy, hm->Pressure, hm->Sound); for (int ip = 0; ip < hm->pn; ip++) { printf("%d %d %f %f %f %f %f \n", hm->Index[ip], hm->Hash[ip], hm->VelX[ip], hm->VelY[ip], hm->VelZ[ip], hm->Density[ip], hm->Pressure[ip]); } return 0; } int RKstepDevice(struct model *dm, float alpha) { int blocks, threads; blocks = (dm->pn + THREADS - 1) / THREADS; threads = THREADS; // wrap raw pointer with a device_ptr thrust::device_ptr<float> tVelDotX(dm->VelDotX); thrust::device_ptr<float> tVelDotY(dm->VelDotY); thrust::device_ptr<float> tVelDotZ(dm->VelDotZ); thrust::device_ptr<float> tDensityDot(dm->DensityDot); thrust::device_ptr<float> tEnergyDot(dm->EnergyDot); // use device_ptr in thrust algorithms thrust::fill(tVelDotX, tVelDotX + dm->pn, 0.0f); thrust::fill(tVelDotY, tVelDotY + dm->pn, 0.0f); thrust::fill(tVelDotZ, tVelDotZ + dm->pn, 0.0f); thrust::fill(tDensityDot, tDensityDot + dm->pn, 0.0f); thrust::fill(tEnergyDot, tEnergyDot + dm->pn, 0.0f); // External loads updateLoadsDevice <<< blocks, threads >>> (dm->pn, dm->Material, dm->PosX, dm->PosY, dm->PosZ, dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->EnergyDot); // Calculate particle interactions balanceMassMomentumDevice <<< blocks, threads >>> (dm->pn, dm->List, dm->Material, dm->Mass, dm->Smooth, dm->PosX, dm->PosY, dm->PosZ, dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Pressure, dm->Sound, dm->DensityDot, dm->VelDotX, dm->VelDotY, dm->VelDotZ); balanceEnergyDevice <<< blocks, threads >>> (dm->pn, dm->Pressure, dm->Density, dm->DensityDot, dm->EnergyDot); // Update particles updateParticlesDevice <<< blocks, threads >>> (dm->pn, alpha, dm->Material, dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->DensityDot, dm->EnergyDot, dm->PosX0, dm->PosY0, dm->PosZ0, dm->VelX0, dm->VelY0, dm->VelZ0, dm->Density0, dm->Energy0, dm->PosX, dm->PosY, dm->PosZ, dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Energy, dm->Pressure, dm->Sound); return 0; } int RKintegrateHost(struct model *hm) { /** * \brief Runge Kutta 3rd order time integration * * Integrate the Navier Stokes equations in time with the * Total Variation Diminishing Runge-Kutta algorithm of the 3rd order * * \date Dec 20, 2010 * \author Luca Massidda */ int ts; // TIME CYCLE for (ts = 0; ts <= hRun.tsn; ts++) { // Output data if ((ts % hRun.ssi) == 0) { printf("Saving time: %g \n", ts * hRun.dt); printData(hm); outputVTK(hm, ts / hRun.ssi); } // Calculate neighbour list neighbourListHost(hm); // Save initial condition backupDataHost(hm); // Step 1 RKstepHost(hm, 1.0f); /* // Step 2 RKstepHost(hm, 1.0f / 4.0f); // Step 3 RKstepHost(hm, 2.0f / 3.0f); */ } return 0; } int RKintegrateDevice(struct model *hm, struct model *dm) { /** * \brief Runge Kutta 3rd order time integration * * Integrate the Navier Stokes equations in time with the * Total Variation Diminishing Runge-Kutta 
algorithm of the 3rd order * * \date Dec 20, 2010 * \author Luca Massidda */ int ts; size_t available, total; // TIME CYCLE // for (ts = 0; ts <= hRun.tsn; ts++) { for (ts = 0; ts < 1; ts++) { // Calculate neighbour list neighbourListDevice(dm); // Save initial condition backupDataDevice(dm); // Step 1 RKstepDevice(dm, 1.0f); /* // Step 2 RKstepDevice(dm, 1.0f / 4.0f); // Step 3 RKstepDevice(dm, 2.0f / 3.0f); */ // Output data if ((ts % hRun.ssi) == 0) { printf("Saving time: %g \n", ts * hRun.dt); copyDeviceToHost(dm, hm); printf("Particles: %d \n", hm->pn); cudaMemGetInfo(&available, &total); printf("Available memory %d MB\n", available/1024/1024); printData(hm); outputVTK(hm, ts / hRun.ssi); } } return 0; } int main() { /** * \brief armando2D v2.0 * * An SPH code for non stationary fluid dynamics. * This is the reviewed and improved C version of Armando v1.0 * developed at CERN in 2008 * * \date May 2, 2012 * \author Luca Massidda */ struct model hModel, dModel; size_t available, total; cudaMemGetInfo(&available, &total); printf("Occupied memory %d of %dMB\n", available/1024/1024, total/1024/1024); for (int i = 0; i < 10; i++) { hLoad[i].gx = 0.0f; hLoad[i].gy = 0.0f; hLoad[i].gz = 0.0f; hLoad[i].w = 0.0f; hOut[i].nX = 0.0f; hOut[i].nY = 0.0f; hOut[i].nZ = 0.0f; } initHost(&hModel); //initSingle(&hModel); //initPump(&hModel); //initBlock(&hModel); //initDamBreak(&hModel); //initChannel(&hModel); initBox(&hModel); /* initDevice(&dModel); cudaMemGetInfo(&available, &total); printf("Available memory %d MB\n", available/1024/1024); //copyHostToDevice(&hModel, &dModel); cudaMemGetInfo(&available, &total); printf("Available memory %d MB\n", available/1024/1024); thrust::device_ptr<int> tIndex((&dModel)->Index); thrust::sequence(tIndex, tIndex + (&dModel)->pn, 1); neighbourListDevice(&dModel); copyDeviceToHost(&dModel, &hModel); printData(&hModel); */ //RKintegrateDevice(&hModel, &dModel); RKintegrateHost(&hModel); return 0; }
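The RKintegrate routines above advertise a three-stage TVD Runge-Kutta scheme but currently run only the first stage; the alpha values 1.0, 1/4 and 2/3 in the commented-out calls match the standard Shu-Osher TVD-RK3 stage weights. The following is a minimal host-side sketch of that scheme for reference only; rk3_step, axpy, euler and rhs are illustrative names, not part of the original code.

// Host-side sketch of the three-stage TVD (Shu-Osher) RK3 scheme implied by the
// alpha values 1.0, 1/4 and 2/3 in RKstepHost/RKstepDevice. rhs() stands in for the
// SPH right-hand side (balanceMassMomentum + balanceEnergy + loads).
#include <cstddef>
#include <vector>

static std::vector<float> rk3_step(const std::vector<float>& u0, float dt,
                                   std::vector<float> (*rhs)(const std::vector<float>&)) {
    auto axpy = [](float a, const std::vector<float>& x, float b, const std::vector<float>& y) {
        std::vector<float> r(x.size());
        for (std::size_t i = 0; i < x.size(); i++) r[i] = a * x[i] + b * y[i];
        return r;
    };
    auto euler = [&](const std::vector<float>& u) {       // u + dt * L(u)
        std::vector<float> f = rhs(u), r(u.size());
        for (std::size_t i = 0; i < u.size(); i++) r[i] = u[i] + dt * f[i];
        return r;
    };
    std::vector<float> u1 = euler(u0);                            // stage 1: alpha = 1
    std::vector<float> u2 = axpy(0.75f, u0, 0.25f, euler(u1));    // stage 2: alpha = 1/4
    return axpy(1.0f / 3.0f, u0, 2.0f / 3.0f, euler(u2));         // stage 3: alpha = 2/3
}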
ea46addd5c6d483866c8a9c59a7efebf6c6669ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/cosine_embedding_criterion_op.h" namespace caffe2 { namespace { __global__ void CECKernel( const int N, const float* S, const int* Y, const float margin, float* output) { CUDA_1D_KERNEL_LOOP(i, N) { output[i] = Y[i] == 1 ? (1. - S[i]) : fmaxf(0.f, S[i] - margin); } } __global__ void CECGradientKernel( const int N, const float* S, const int* Y, const float* dOutput, const float margin, float* dS) { CUDA_1D_KERNEL_LOOP(i, N) { dS[i] = dOutput[i] * (Y[i] == 1 ? -1 : static_cast<float>(S[i] >= margin)); } } } // namespace template <> bool CosineEmbeddingCriterionOp<CUDAContext>::RunOnDevice() { auto& S = Input(0); auto& Y = Input(1); CAFFE_ENFORCE(S.numel() == Y.numel(), "The embedding and label should have the same size."); auto* output = Output(0, S.sizes(), at::dtype<float>()); const float* Sdata = S.data<float>(); const int* Ydata = Y.data<int>(); float* output_data = output->template mutable_data<float>(); hipLaunchKernelGGL(( CECKernel), dim3(CAFFE_GET_BLOCKS(S.numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), S.numel(), Sdata, Ydata, margin_, output_data); return true; } template <> bool CosineEmbeddingCriterionGradientOp<CUDAContext>::RunOnDevice() { auto& S = Input(0); auto& Y = Input(1); auto& dOutput = Input(2); auto* dS = Output(0, S.sizes(), at::dtype<float>()); const float* Sdata = S.data<float>(); const int* Ydata = Y.data<int>(); const float* dOutput_data = dOutput.data<float>(); float* dSdata = dS->template mutable_data<float>(); hipLaunchKernelGGL(( CECGradientKernel), dim3(CAFFE_GET_BLOCKS(S.numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), S.numel(), Sdata, Ydata, dOutput_data, margin_, dSdata); return true; } REGISTER_CUDA_OPERATOR( CosineEmbeddingCriterion, CosineEmbeddingCriterionOp<CUDAContext>); REGISTER_CUDA_OPERATOR( CosineEmbeddingCriterionGradient, CosineEmbeddingCriterionGradientOp<CUDAContext>); } // namespace caffe2
ea46addd5c6d483866c8a9c59a7efebf6c6669ad.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/cosine_embedding_criterion_op.h" namespace caffe2 { namespace { __global__ void CECKernel( const int N, const float* S, const int* Y, const float margin, float* output) { CUDA_1D_KERNEL_LOOP(i, N) { output[i] = Y[i] == 1 ? (1. - S[i]) : fmaxf(0.f, S[i] - margin); } } __global__ void CECGradientKernel( const int N, const float* S, const int* Y, const float* dOutput, const float margin, float* dS) { CUDA_1D_KERNEL_LOOP(i, N) { dS[i] = dOutput[i] * (Y[i] == 1 ? -1 : static_cast<float>(S[i] >= margin)); } } } // namespace template <> bool CosineEmbeddingCriterionOp<CUDAContext>::RunOnDevice() { auto& S = Input(0); auto& Y = Input(1); CAFFE_ENFORCE(S.numel() == Y.numel(), "The embedding and label should have the same size."); auto* output = Output(0, S.sizes(), at::dtype<float>()); const float* Sdata = S.data<float>(); const int* Ydata = Y.data<int>(); float* output_data = output->template mutable_data<float>(); CECKernel<<<CAFFE_GET_BLOCKS(S.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( S.numel(), Sdata, Ydata, margin_, output_data); return true; } template <> bool CosineEmbeddingCriterionGradientOp<CUDAContext>::RunOnDevice() { auto& S = Input(0); auto& Y = Input(1); auto& dOutput = Input(2); auto* dS = Output(0, S.sizes(), at::dtype<float>()); const float* Sdata = S.data<float>(); const int* Ydata = Y.data<int>(); const float* dOutput_data = dOutput.data<float>(); float* dSdata = dS->template mutable_data<float>(); CECGradientKernel<<<CAFFE_GET_BLOCKS(S.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( S.numel(), Sdata, Ydata, dOutput_data, margin_, dSdata); return true; } REGISTER_CUDA_OPERATOR( CosineEmbeddingCriterion, CosineEmbeddingCriterionOp<CUDAContext>); REGISTER_CUDA_OPERATOR( CosineEmbeddingCriterionGradient, CosineEmbeddingCriterionGradientOp<CUDAContext>); } // namespace caffe2
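CECKernel above evaluates the cosine embedding criterion elementwise on the GPU. A small host-side reference of the same formula, handy for sanity-checking kernel output in a test, is sketched below; cosine_embedding_reference is an illustrative name, not part of caffe2.

// Host-side reference for the elementwise loss computed by CECKernel:
//   loss_i = 1 - s_i           if y_i == 1
//          = max(0, s_i - m)   otherwise
#include <algorithm>
#include <vector>

static std::vector<float> cosine_embedding_reference(const std::vector<float>& S,
                                                     const std::vector<int>& Y,
                                                     float margin) {
    std::vector<float> out(S.size());
    for (std::size_t i = 0; i < S.size(); i++) {
        out[i] = (Y[i] == 1) ? (1.0f - S[i]) : std::max(0.0f, S[i] - margin);
    }
    return out;
}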
5e152964e8d0f176519af28056ca7e76ce71474d.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707. // All Rights reserved. See files LICENSE and NOTICE for details. // // This file is part of CEED, a collection of benchmarks, miniapps, software // libraries and APIs for efficient high-order finite element and spectral // element discretizations for exascale applications. For more information and // source code availability see http://github.com/ceed. // // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, // a collaborative effort of two U.S. Department of Energy organizations (Office // of Science and the National Nuclear Security Administration) responsible for // the planning and preparation of a capable exascale ecosystem, including // software, applications, hardware, advanced system engineering and early // testbed platforms, in support of the nation's exascale computing imperative. #include <hip/hip_runtime.h> // for TORCH_HIP_VERSION #include "../common/grad.h" ////////////////////////////////////////////////////////////////////////////////////////// template<typename T, int NCOMP, int P, int Q> static magma_int_t magma_gradn_2d_kernel_driver( const T *dinterp1d, const T *dgrad1d, magma_trans_t transT, const T *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU, T *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV, magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue) { magma_device_t device; magma_getdevice( &device ); magma_int_t shmem_max, nthreads_max; const int MAXPQ = maxpq(P,Q); magma_int_t nthreads = MAXPQ; magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads); magma_int_t shmem = 0; shmem += sizeof(T) * 2*P*Q; // for sTinterp and sTgrad shmem += sizeof(T) * ntcol * (P*MAXPQ); // for reforming rU we need PxP, and for the intermediate output we need PxQ hipDeviceGetAttribute (&nthreads_max, hipDeviceAttributeMaxThreadsPerBlock, device); #if TORCH_HIP_VERSION >= 9000 hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeSharedMemPerBlockOptin, device); if (shmem <= shmem_max) { hipFuncSetAttribute(magma_gradn_2d_kernel<T,NCOMP,P,Q,MAXPQ>, hipFuncAttributeMaxDynamicSharedMemorySize, shmem); } #else hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeMaxSharedMemoryPerBlock, device); #endif // TORCH_HIP_VERSION >= 9000 if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) { return 1; // launch failed } else { magma_int_t nblocks = (nelem + ntcol-1) / ntcol; dim3 threads(nthreads, ntcol, 1); dim3 grid(nblocks, 1, 1); // IMPORTANT: we instantiate with DIM=1 instead of DIM=2 because the kernel handles one dimension at a time // We should instantiate with DIM >= 1 when we fuse the whole operator, because of the q-function hipLaunchKernelGGL(( magma_gradn_2d_kernel<T,NCOMP,P,Q,MAXPQ>), dim3(grid), dim3(threads), shmem, magma_queue_get_cuda_stream(queue), dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem); return (hipPeekAtLastError() == hipSuccess) ? 
0 : 1; } } ////////////////////////////////////////////////////////////////////////////////////////// template<int P, int Q> static magma_int_t magma_gradn_2d_ncomp( magma_int_t ncomp, const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV, magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue) { magma_int_t launch_failed = 0; switch (ncomp) { case 1: launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,1,P,Q> (dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 2: launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,2,P,Q> (dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 3: launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,3,P,Q> (dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; default: launch_failed = 1; } return launch_failed; } ////////////////////////////////////////////////////////////////////////////////////////// template<int P> static magma_int_t magma_gradn_2d_ncomp_q( magma_int_t Q, magma_int_t ncomp, const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV, magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue) { magma_int_t launch_failed = 0; switch (Q) { case 1: launch_failed = magma_gradn_2d_ncomp<P, 1> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 2: launch_failed = magma_gradn_2d_ncomp<P, 2> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 3: launch_failed = magma_gradn_2d_ncomp<P, 3> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 4: launch_failed = magma_gradn_2d_ncomp<P, 4> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 5: launch_failed = magma_gradn_2d_ncomp<P, 5> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 6: launch_failed = magma_gradn_2d_ncomp<P, 6> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 7: launch_failed = magma_gradn_2d_ncomp<P, 7> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 8: launch_failed = magma_gradn_2d_ncomp<P, 8> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 9: launch_failed = magma_gradn_2d_ncomp<P, 9> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 10: launch_failed = magma_gradn_2d_ncomp<P,10> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; default: launch_failed = 1; } return launch_failed; } 
////////////////////////////////////////////////////////////////////////////////////////// static magma_int_t magma_gradn_2d_ncomp_q_p( magma_int_t P, magma_int_t Q, magma_int_t ncomp, const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV, magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue) { magma_int_t launch_failed = 0; switch (P) { case 1: launch_failed = magma_gradn_2d_ncomp_q< 1> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 2: launch_failed = magma_gradn_2d_ncomp_q< 2> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 3: launch_failed = magma_gradn_2d_ncomp_q< 3> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 4: launch_failed = magma_gradn_2d_ncomp_q< 4> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 5: launch_failed = magma_gradn_2d_ncomp_q< 5> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 6: launch_failed = magma_gradn_2d_ncomp_q< 6> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 7: launch_failed = magma_gradn_2d_ncomp_q< 7> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 8: launch_failed = magma_gradn_2d_ncomp_q< 8> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 9: launch_failed = magma_gradn_2d_ncomp_q< 9> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 10: launch_failed = magma_gradn_2d_ncomp_q<10> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; default: launch_failed = 1; } return launch_failed; } ////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_gradn_2d( magma_int_t P, magma_int_t Q, magma_int_t ncomp, const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, CeedTransposeMode tmode, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV, magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue) { magma_int_t launch_failed = 0; magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans; launch_failed = magma_gradn_2d_ncomp_q_p( P, Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); return launch_failed; }
5e152964e8d0f176519af28056ca7e76ce71474d.cu
// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707. // All Rights reserved. See files LICENSE and NOTICE for details. // // This file is part of CEED, a collection of benchmarks, miniapps, software // libraries and APIs for efficient high-order finite element and spectral // element discretizations for exascale applications. For more information and // source code availability see http://github.com/ceed. // // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, // a collaborative effort of two U.S. Department of Energy organizations (Office // of Science and the National Nuclear Security Administration) responsible for // the planning and preparation of a capable exascale ecosystem, including // software, applications, hardware, advanced system engineering and early // testbed platforms, in support of the nation's exascale computing imperative. #include <cuda.h> // for CUDA_VERSION #include "../common/grad.h" ////////////////////////////////////////////////////////////////////////////////////////// template<typename T, int NCOMP, int P, int Q> static magma_int_t magma_gradn_2d_kernel_driver( const T *dinterp1d, const T *dgrad1d, magma_trans_t transT, const T *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU, T *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV, magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue) { magma_device_t device; magma_getdevice( &device ); magma_int_t shmem_max, nthreads_max; const int MAXPQ = maxpq(P,Q); magma_int_t nthreads = MAXPQ; magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads); magma_int_t shmem = 0; shmem += sizeof(T) * 2*P*Q; // for sTinterp and sTgrad shmem += sizeof(T) * ntcol * (P*MAXPQ); // for reforming rU we need PxP, and for the intermediate output we need PxQ cudaDeviceGetAttribute (&nthreads_max, cudaDevAttrMaxThreadsPerBlock, device); #if CUDA_VERSION >= 9000 cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlockOptin, device); if (shmem <= shmem_max) { cudaFuncSetAttribute(magma_gradn_2d_kernel<T,NCOMP,P,Q,MAXPQ>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem); } #else cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlock, device); #endif // CUDA_VERSION >= 9000 if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) { return 1; // launch failed } else { magma_int_t nblocks = (nelem + ntcol-1) / ntcol; dim3 threads(nthreads, ntcol, 1); dim3 grid(nblocks, 1, 1); // IMPORTANT: we instantiate with DIM=1 instead of DIM=2 because the kernel handles one dimension at a time // We should instantiate with DIM >= 1 when we fuse the whole operator, because of the q-function magma_gradn_2d_kernel<T,NCOMP,P,Q,MAXPQ><<<grid, threads, shmem, magma_queue_get_cuda_stream(queue)>>> (dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem); return (cudaPeekAtLastError() == cudaSuccess) ? 
0 : 1; } } ////////////////////////////////////////////////////////////////////////////////////////// template<int P, int Q> static magma_int_t magma_gradn_2d_ncomp( magma_int_t ncomp, const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV, magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue) { magma_int_t launch_failed = 0; switch (ncomp) { case 1: launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,1,P,Q> (dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 2: launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,2,P,Q> (dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 3: launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,3,P,Q> (dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; default: launch_failed = 1; } return launch_failed; } ////////////////////////////////////////////////////////////////////////////////////////// template<int P> static magma_int_t magma_gradn_2d_ncomp_q( magma_int_t Q, magma_int_t ncomp, const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV, magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue) { magma_int_t launch_failed = 0; switch (Q) { case 1: launch_failed = magma_gradn_2d_ncomp<P, 1> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 2: launch_failed = magma_gradn_2d_ncomp<P, 2> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 3: launch_failed = magma_gradn_2d_ncomp<P, 3> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 4: launch_failed = magma_gradn_2d_ncomp<P, 4> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 5: launch_failed = magma_gradn_2d_ncomp<P, 5> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 6: launch_failed = magma_gradn_2d_ncomp<P, 6> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 7: launch_failed = magma_gradn_2d_ncomp<P, 7> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 8: launch_failed = magma_gradn_2d_ncomp<P, 8> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 9: launch_failed = magma_gradn_2d_ncomp<P, 9> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 10: launch_failed = magma_gradn_2d_ncomp<P,10> (ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; default: launch_failed = 1; } return launch_failed; } 
////////////////////////////////////////////////////////////////////////////////////////// static magma_int_t magma_gradn_2d_ncomp_q_p( magma_int_t P, magma_int_t Q, magma_int_t ncomp, const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV, magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue) { magma_int_t launch_failed = 0; switch (P) { case 1: launch_failed = magma_gradn_2d_ncomp_q< 1> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 2: launch_failed = magma_gradn_2d_ncomp_q< 2> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 3: launch_failed = magma_gradn_2d_ncomp_q< 3> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 4: launch_failed = magma_gradn_2d_ncomp_q< 4> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 5: launch_failed = magma_gradn_2d_ncomp_q< 5> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 6: launch_failed = magma_gradn_2d_ncomp_q< 6> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 7: launch_failed = magma_gradn_2d_ncomp_q< 7> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 8: launch_failed = magma_gradn_2d_ncomp_q< 8> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 9: launch_failed = magma_gradn_2d_ncomp_q< 9> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; case 10: launch_failed = magma_gradn_2d_ncomp_q<10> (Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); break; default: launch_failed = 1; } return launch_failed; } ////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_gradn_2d( magma_int_t P, magma_int_t Q, magma_int_t ncomp, const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, CeedTransposeMode tmode, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV, magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue) { magma_int_t launch_failed = 0; magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans; launch_failed = magma_gradn_2d_ncomp_q_p( P, Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue); return launch_failed; }
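The kernel driver above raises the dynamic shared-memory limit before launching when CUDA 9+ is available. Below is a standalone sketch of that opt-in pattern; my_kernel, shmem_bytes and launch_with_shmem are placeholders, not MAGMA symbols.

// Sketch of the dynamic shared-memory opt-in pattern used in magma_gradn_2d_kernel_driver:
// query the opt-in limit, raise the kernel attribute if the request fits, otherwise
// report a launch failure instead of launching.
#include <cuda.h>
#include <cuda_runtime.h>

__global__ void my_kernel() { extern __shared__ char smem[]; (void)smem; }

static int launch_with_shmem(size_t shmem_bytes, int device, cudaStream_t stream) {
    int shmem_max = 0;
#if CUDA_VERSION >= 9000
    cudaDeviceGetAttribute(&shmem_max, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);
    if (shmem_bytes <= (size_t)shmem_max) {
        cudaFuncSetAttribute(my_kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, (int)shmem_bytes);
    }
#else
    cudaDeviceGetAttribute(&shmem_max, cudaDevAttrMaxSharedMemoryPerBlock, device);
#endif
    if (shmem_bytes > (size_t)shmem_max) {
        return 1;  // launch would fail
    }
    my_kernel<<<1, 128, shmem_bytes, stream>>>();
    return (cudaPeekAtLastError() == cudaSuccess) ? 0 : 1;
}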
d78f53c8159a7175c5ded5c8512ae884eb651b5a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #define TILE_WIDTH 2 /*matrix multiplication kernels*/ //non shared __global__ void MatrixMul( float *Md , float *Nd , float *Pd , const int WIDTH ) { // calculate thread id unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ; unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ; for (int k = 0 ; k<WIDTH ; k++ ) { Pd[row*WIDTH + col]+= Md[row * WIDTH + k ] * Nd[ k * WIDTH + col] ; } } // shared __global__ void MatrixMulSh( float *Md , float *Nd , float *Pd , const int WIDTH ) { //Taking shared array to break the MAtrix in Tile widht and fatch them in that array per ele __shared__ float Mds [TILE_WIDTH][TILE_WIDTH] ; __shared__ float Nds [TILE_WIDTH][TILE_WIDTH] ; // calculate thread id unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ; unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ; for (int m = 0 ; m<WIDTH/TILE_WIDTH ; m++ ) // m indicate number of phase { Mds[threadIdx.y][threadIdx.x] = Md[row*WIDTH + (m*TILE_WIDTH + threadIdx.x)] ; Nds[threadIdx.y][threadIdx.x] = Nd[ ( m*TILE_WIDTH + threadIdx.y) * WIDTH + col] ; __syncthreads() ; // for syncronizeing the threads // Do for tile for ( int k = 0; k<TILE_WIDTH ; k++ ) Pd[row*WIDTH + col]+= Mds[threadIdx.x][k] * Nds[k][threadIdx.y] ; __syncthreads() ; // for syncronizeing the threads } } // main routine int main () { const int WIDTH = 6 ; float array1_h[WIDTH][WIDTH] ,array2_h[WIDTH][WIDTH], result_array_h[WIDTH][WIDTH] ,M_result_array_h[WIDTH][WIDTH] ; float *array1_d , *array2_d ,*result_array_d ,*M_result_array_d ; // device array int i , j ; //input in host array for ( i = 0 ; i<WIDTH ; i++ ) { for (j = 0 ; j<WIDTH ; j++ ) { array1_h[i][j] = 1 ; array2_h[i][j] = 2 ; } } //create device array hipMalloc ( (void **)&array_name, sizeofmatrixinbytes) ; hipMalloc((void **) &array1_d , WIDTH*WIDTH*sizeof (int) ) ; hipMalloc((void **) &array2_d , WIDTH*WIDTH*sizeof (int) ) ; //copy host array to device array; hipMemcpy ( dest , source , WIDTH , direction ) hipMemcpy ( array1_d , array1_h , WIDTH*WIDTH*sizeof (int) , hipMemcpyHostToDevice ) ; hipMemcpy ( array2_d , array2_h , WIDTH*WIDTH*sizeof (int) , hipMemcpyHostToDevice ) ; //allocating memory for resultent device array hipMalloc((void **) &result_array_d , WIDTH*WIDTH*sizeof (int) ) ; hipMalloc((void **) &M_result_array_d , WIDTH*WIDTH*sizeof (int) ) ; //calling kernal dim3 dimGrid ( WIDTH/TILE_WIDTH , WIDTH/TILE_WIDTH ,1 ) ; dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ) ; // Change if 0 to if 1 for running non shared code and make if 0 for shared memory code #if 0 hipLaunchKernelGGL(( MatrixMul) , dim3(dimGrid),dim3(dimBlock), 0, 0, array1_d , array2_d ,M_result_array_d , WIDTH) ; #endif #if 1 hipLaunchKernelGGL(( MatrixMulSh), dim3(dimGrid),dim3(dimBlock), 0, 0, array1_d , array2_d ,M_result_array_d , WIDTH) ; #endif // all gpu function blocked till kernel is working //copy back result_array_d to result_array_h hipMemcpy(M_result_array_h , M_result_array_d , WIDTH*WIDTH*sizeof(int) , hipMemcpyDeviceToHost) ; //printf the result array for ( i = 0 ; i<WIDTH ; i++ ) { for ( j = 0 ; j < WIDTH ; j++ ) { printf ("%f ",M_result_array_h[i][j] ) ; } printf ("\n") ; } system("pause") ; }
d78f53c8159a7175c5ded5c8512ae884eb651b5a.cu
#include <stdio.h> #include <math.h> #define TILE_WIDTH 2 /*matrix multiplication kernels*/ //non shared __global__ void MatrixMul( float *Md , float *Nd , float *Pd , const int WIDTH ) { // calculate thread id unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ; unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ; for (int k = 0 ; k<WIDTH ; k++ ) { Pd[row*WIDTH + col]+= Md[row * WIDTH + k ] * Nd[ k * WIDTH + col] ; } } // shared __global__ void MatrixMulSh( float *Md , float *Nd , float *Pd , const int WIDTH ) { //Taking shared array to break the MAtrix in Tile widht and fatch them in that array per ele __shared__ float Mds [TILE_WIDTH][TILE_WIDTH] ; __shared__ float Nds [TILE_WIDTH][TILE_WIDTH] ; // calculate thread id unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ; unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ; for (int m = 0 ; m<WIDTH/TILE_WIDTH ; m++ ) // m indicate number of phase { Mds[threadIdx.y][threadIdx.x] = Md[row*WIDTH + (m*TILE_WIDTH + threadIdx.x)] ; Nds[threadIdx.y][threadIdx.x] = Nd[ ( m*TILE_WIDTH + threadIdx.y) * WIDTH + col] ; __syncthreads() ; // for syncronizeing the threads // Do for tile for ( int k = 0; k<TILE_WIDTH ; k++ ) Pd[row*WIDTH + col]+= Mds[threadIdx.x][k] * Nds[k][threadIdx.y] ; __syncthreads() ; // for syncronizeing the threads } } // main routine int main () { const int WIDTH = 6 ; float array1_h[WIDTH][WIDTH] ,array2_h[WIDTH][WIDTH], result_array_h[WIDTH][WIDTH] ,M_result_array_h[WIDTH][WIDTH] ; float *array1_d , *array2_d ,*result_array_d ,*M_result_array_d ; // device array int i , j ; //input in host array for ( i = 0 ; i<WIDTH ; i++ ) { for (j = 0 ; j<WIDTH ; j++ ) { array1_h[i][j] = 1 ; array2_h[i][j] = 2 ; } } //create device array cudaMalloc ( (void **)&array_name, sizeofmatrixinbytes) ; cudaMalloc((void **) &array1_d , WIDTH*WIDTH*sizeof (int) ) ; cudaMalloc((void **) &array2_d , WIDTH*WIDTH*sizeof (int) ) ; //copy host array to device array; cudaMemcpy ( dest , source , WIDTH , direction ) cudaMemcpy ( array1_d , array1_h , WIDTH*WIDTH*sizeof (int) , cudaMemcpyHostToDevice ) ; cudaMemcpy ( array2_d , array2_h , WIDTH*WIDTH*sizeof (int) , cudaMemcpyHostToDevice ) ; //allocating memory for resultent device array cudaMalloc((void **) &result_array_d , WIDTH*WIDTH*sizeof (int) ) ; cudaMalloc((void **) &M_result_array_d , WIDTH*WIDTH*sizeof (int) ) ; //calling kernal dim3 dimGrid ( WIDTH/TILE_WIDTH , WIDTH/TILE_WIDTH ,1 ) ; dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ) ; // Change if 0 to if 1 for running non shared code and make if 0 for shared memory code #if 0 MatrixMul <<<dimGrid,dimBlock>>> ( array1_d , array2_d ,M_result_array_d , WIDTH) ; #endif #if 1 MatrixMulSh<<<dimGrid,dimBlock>>> ( array1_d , array2_d ,M_result_array_d , WIDTH) ; #endif // all gpu function blocked till kernel is working //copy back result_array_d to result_array_h cudaMemcpy(M_result_array_h , M_result_array_d , WIDTH*WIDTH*sizeof(int) , cudaMemcpyDeviceToHost) ; //printf the result array for ( i = 0 ; i<WIDTH ; i++ ) { for ( j = 0 ; j < WIDTH ; j++ ) { printf ("%f ",M_result_array_h[i][j] ) ; } printf ("\n") ; } system("pause") ; }
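MatrixMulSh above accumulates partial products directly into global Pd and reads the shared tiles with threadIdx.x and threadIdx.y swapped in the inner product. The sketch below shows the conventional tiled inner loop with a per-thread register accumulator for comparison; it assumes, as in the example above, that WIDTH is a multiple of the tile width.

// Conventional shared-memory tiled multiply with a register accumulator
// (compare with MatrixMulSh above). Assumes WIDTH % TILE_W == 0.
#define TILE_W 2

__global__ void MatrixMulTiled(const float *Md, const float *Nd, float *Pd, int WIDTH) {
    __shared__ float Mds[TILE_W][TILE_W];
    __shared__ float Nds[TILE_W][TILE_W];
    unsigned int col = TILE_W * blockIdx.x + threadIdx.x;
    unsigned int row = TILE_W * blockIdx.y + threadIdx.y;
    float acc = 0.0f;                                        // accumulate in a register
    for (int m = 0; m < WIDTH / TILE_W; m++) {
        Mds[threadIdx.y][threadIdx.x] = Md[row * WIDTH + m * TILE_W + threadIdx.x];
        Nds[threadIdx.y][threadIdx.x] = Nd[(m * TILE_W + threadIdx.y) * WIDTH + col];
        __syncthreads();
        for (int k = 0; k < TILE_W; k++)
            acc += Mds[threadIdx.y][k] * Nds[k][threadIdx.x]; // row of M tile times column of N tile
        __syncthreads();
    }
    Pd[row * WIDTH + col] = acc;                             // single write, no prior zero-fill needed
}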
4bdd720fea91459ed01e9b758fa2a8c2e4b80ee7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2019 by XGBoost Contributors * * \file simple_csr_source.cuh * \brief An extension for the simple CSR source in-memory data structure to accept * foreign columnar. */ #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/scan.h> #include <xgboost/base.h> #include <xgboost/data.h> #include <cmath> #include <vector> #include <algorithm> #include "simple_csr_source.h" #include "columnar.h" #include "../common/math.h" #include "../common/bitfield.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace data { template <typename T> __global__ void CountValidKernel(Columnar<T> const column, bool has_missing, float missing, int32_t* flag, common::Span<size_t> offsets) { auto const tid = threadIdx.x + blockDim.x * blockIdx.x; bool const missing_is_nan = common::CheckNAN(missing); if (tid >= column.size) { return; } RBitField8 const mask = column.valid; if (!has_missing) { if ((mask.Data() == nullptr || mask.Check(tid)) && !common::CheckNAN(column.data[tid])) { offsets[tid+1] += 1; } } else if (missing_is_nan) { if (!common::CheckNAN(column.data[tid])) { offsets[tid+1] += 1; } } else { if (!common::CloseTo(column.data[tid], missing)) { offsets[tid+1] += 1; } if (common::CheckNAN(column.data[tid])) { *flag = 1; } } } template <typename T> __device__ void AssignValue(T fvalue, int32_t colid, common::Span<size_t> out_offsets, common::Span<Entry> out_data) { auto const tid = threadIdx.x + blockDim.x * blockIdx.x; int32_t oid = out_offsets[tid]; out_data[oid].fvalue = fvalue; out_data[oid].index = colid; out_offsets[tid] += 1; } template <typename T> __global__ void CreateCSRKernel(Columnar<T> const column, int32_t colid, bool has_missing, float missing, common::Span<size_t> offsets, common::Span<Entry> out_data) { auto const tid = threadIdx.x + blockDim.x * blockIdx.x; if (column.size <= tid) { return; } bool const missing_is_nan = common::CheckNAN(missing); if (!has_missing) { // no missing value is specified if ((column.valid.Data() == nullptr || column.valid.Check(tid)) && !common::CheckNAN(column.data[tid])) { AssignValue(column.data[tid], colid, offsets, out_data); } } else if (missing_is_nan) { // specified missing value, but it's NaN if (!common::CheckNAN(column.data[tid])) { AssignValue(column.data[tid], colid, offsets, out_data); } } else { // specified missing value, and it's not NaN if (!common::CloseTo(column.data[tid], missing)) { AssignValue(column.data[tid], colid, offsets, out_data); } } } template <typename T> void CountValid(std::vector<Json> const& j_columns, uint32_t column_id, bool has_missing, float missing, HostDeviceVector<size_t>* out_offset, dh::caching_device_vector<int32_t>* out_d_flag, uint32_t* out_n_rows) { int32_t constexpr kThreads = 256; auto const& j_column = j_columns[column_id]; auto const& column_obj = get<Object const>(j_column); Columnar<T> foreign_column = ArrayInterfaceHandler::ExtractArray<T>(column_obj); uint32_t const n_rows = foreign_column.size; auto ptr = foreign_column.data.data(); int32_t device = dh::CudaGetPointerDevice(ptr); CHECK_NE(device, -1); dh::safe_cuda(hipSetDevice(device)); if (column_id == 0) { out_offset->SetDevice(device); out_offset->Resize(n_rows + 1); } CHECK_EQ(out_offset->DeviceIdx(), device) << "All columns should use the same device."; CHECK_EQ(out_offset->Size(), n_rows + 1) << "All columns should have same number of rows."; common::Span<size_t> 
s_offsets = out_offset->DeviceSpan(); int32_t const kBlocks = common::DivRoundUp(n_rows, kThreads); hipLaunchKernelGGL(( CountValidKernel<T>), dim3(kBlocks), dim3(kThreads), 0, 0, foreign_column, has_missing, missing, out_d_flag->data().get(), s_offsets); *out_n_rows = n_rows; } template <typename T> void CreateCSR(std::vector<Json> const& j_columns, uint32_t column_id, uint32_t n_rows, bool has_missing, float missing, dh::device_vector<size_t>* tmp_offset, common::Span<Entry> s_data) { int32_t constexpr kThreads = 256; auto const& j_column = j_columns[column_id]; auto const& column_obj = get<Object const>(j_column); Columnar<T> foreign_column = ArrayInterfaceHandler::ExtractArray<T>(column_obj); int32_t kBlocks = common::DivRoundUp(n_rows, kThreads); hipLaunchKernelGGL(( CreateCSRKernel<T>), dim3(kBlocks), dim3(kThreads), 0, 0, foreign_column, column_id, has_missing, missing, dh::ToSpan(*tmp_offset), s_data); } void SimpleCSRSource::FromDeviceColumnar(std::vector<Json> const& columns, bool has_missing, float missing) { auto const n_cols = columns.size(); int32_t constexpr kThreads = 256; dh::caching_device_vector<int32_t> d_flag; if (!common::CheckNAN(missing)) { d_flag.resize(1); thrust::fill(d_flag.begin(), d_flag.end(), 0); } uint32_t n_rows {0}; for (size_t i = 0; i < n_cols; ++i) { auto const& typestr = get<String const>(columns[i]["typestr"]); DISPATCH_TYPE(CountValid, typestr, columns, i, has_missing, missing, &(this->page_.offset), &d_flag, &n_rows); } // don't pay for what you don't use. if (!common::CheckNAN(missing)) { int32_t flag {0}; dh::safe_cuda(hipMemcpy(&flag, d_flag.data().get(), sizeof(int32_t), hipMemcpyDeviceToHost)); CHECK_EQ(flag, 0) << "missing value is specifed but input data contains NaN."; } info.num_col_ = n_cols; info.num_row_ = n_rows; auto s_offsets = this->page_.offset.DeviceSpan(); thrust::device_ptr<size_t> p_offsets(s_offsets.data()); CHECK_GE(s_offsets.size(), n_rows + 1); thrust::inclusive_scan(p_offsets, p_offsets + n_rows + 1, p_offsets); // Created for building csr matrix, where we need to change index after processing each // column. dh::device_vector<size_t> tmp_offset(this->page_.offset.Size()); dh::safe_cuda(hipMemcpy(tmp_offset.data().get(), s_offsets.data(), s_offsets.size_bytes(), hipMemcpyDeviceToDevice)); // We can use null_count from columnar data format, but that will add a non-standard // entry in the array interface, also involves accumulating from all columns. Invoking // one copy seems easier. this->info.num_nonzero_ = tmp_offset.back(); // Device is obtained and set in `CountValid' int32_t const device = this->page_.offset.DeviceIdx(); this->page_.data.SetDevice(device); this->page_.data.Resize(this->info.num_nonzero_); auto s_data = this->page_.data.DeviceSpan(); int32_t kBlocks = common::DivRoundUp(n_rows, kThreads); for (size_t i = 0; i < n_cols; ++i) { auto const& typestr = get<String const>(columns[i]["typestr"]); DISPATCH_TYPE(CreateCSR, typestr, columns, i, n_rows, has_missing, missing, &tmp_offset, s_data); } } } // namespace data } // namespace xgboost
4bdd720fea91459ed01e9b758fa2a8c2e4b80ee7.cu
/*! * Copyright 2019 by XGBoost Contributors * * \file simple_csr_source.cuh * \brief An extension for the simple CSR source in-memory data structure to accept * foreign columnar. */ #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/scan.h> #include <xgboost/base.h> #include <xgboost/data.h> #include <cmath> #include <vector> #include <algorithm> #include "simple_csr_source.h" #include "columnar.h" #include "../common/math.h" #include "../common/bitfield.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace data { template <typename T> __global__ void CountValidKernel(Columnar<T> const column, bool has_missing, float missing, int32_t* flag, common::Span<size_t> offsets) { auto const tid = threadIdx.x + blockDim.x * blockIdx.x; bool const missing_is_nan = common::CheckNAN(missing); if (tid >= column.size) { return; } RBitField8 const mask = column.valid; if (!has_missing) { if ((mask.Data() == nullptr || mask.Check(tid)) && !common::CheckNAN(column.data[tid])) { offsets[tid+1] += 1; } } else if (missing_is_nan) { if (!common::CheckNAN(column.data[tid])) { offsets[tid+1] += 1; } } else { if (!common::CloseTo(column.data[tid], missing)) { offsets[tid+1] += 1; } if (common::CheckNAN(column.data[tid])) { *flag = 1; } } } template <typename T> __device__ void AssignValue(T fvalue, int32_t colid, common::Span<size_t> out_offsets, common::Span<Entry> out_data) { auto const tid = threadIdx.x + blockDim.x * blockIdx.x; int32_t oid = out_offsets[tid]; out_data[oid].fvalue = fvalue; out_data[oid].index = colid; out_offsets[tid] += 1; } template <typename T> __global__ void CreateCSRKernel(Columnar<T> const column, int32_t colid, bool has_missing, float missing, common::Span<size_t> offsets, common::Span<Entry> out_data) { auto const tid = threadIdx.x + blockDim.x * blockIdx.x; if (column.size <= tid) { return; } bool const missing_is_nan = common::CheckNAN(missing); if (!has_missing) { // no missing value is specified if ((column.valid.Data() == nullptr || column.valid.Check(tid)) && !common::CheckNAN(column.data[tid])) { AssignValue(column.data[tid], colid, offsets, out_data); } } else if (missing_is_nan) { // specified missing value, but it's NaN if (!common::CheckNAN(column.data[tid])) { AssignValue(column.data[tid], colid, offsets, out_data); } } else { // specified missing value, and it's not NaN if (!common::CloseTo(column.data[tid], missing)) { AssignValue(column.data[tid], colid, offsets, out_data); } } } template <typename T> void CountValid(std::vector<Json> const& j_columns, uint32_t column_id, bool has_missing, float missing, HostDeviceVector<size_t>* out_offset, dh::caching_device_vector<int32_t>* out_d_flag, uint32_t* out_n_rows) { int32_t constexpr kThreads = 256; auto const& j_column = j_columns[column_id]; auto const& column_obj = get<Object const>(j_column); Columnar<T> foreign_column = ArrayInterfaceHandler::ExtractArray<T>(column_obj); uint32_t const n_rows = foreign_column.size; auto ptr = foreign_column.data.data(); int32_t device = dh::CudaGetPointerDevice(ptr); CHECK_NE(device, -1); dh::safe_cuda(cudaSetDevice(device)); if (column_id == 0) { out_offset->SetDevice(device); out_offset->Resize(n_rows + 1); } CHECK_EQ(out_offset->DeviceIdx(), device) << "All columns should use the same device."; CHECK_EQ(out_offset->Size(), n_rows + 1) << "All columns should have same number of rows."; common::Span<size_t> s_offsets = out_offset->DeviceSpan(); int32_t const kBlocks = common::DivRoundUp(n_rows, 
kThreads); CountValidKernel<T><<<kBlocks, kThreads>>>( foreign_column, has_missing, missing, out_d_flag->data().get(), s_offsets); *out_n_rows = n_rows; } template <typename T> void CreateCSR(std::vector<Json> const& j_columns, uint32_t column_id, uint32_t n_rows, bool has_missing, float missing, dh::device_vector<size_t>* tmp_offset, common::Span<Entry> s_data) { int32_t constexpr kThreads = 256; auto const& j_column = j_columns[column_id]; auto const& column_obj = get<Object const>(j_column); Columnar<T> foreign_column = ArrayInterfaceHandler::ExtractArray<T>(column_obj); int32_t kBlocks = common::DivRoundUp(n_rows, kThreads); CreateCSRKernel<T><<<kBlocks, kThreads>>>(foreign_column, column_id, has_missing, missing, dh::ToSpan(*tmp_offset), s_data); } void SimpleCSRSource::FromDeviceColumnar(std::vector<Json> const& columns, bool has_missing, float missing) { auto const n_cols = columns.size(); int32_t constexpr kThreads = 256; dh::caching_device_vector<int32_t> d_flag; if (!common::CheckNAN(missing)) { d_flag.resize(1); thrust::fill(d_flag.begin(), d_flag.end(), 0); } uint32_t n_rows {0}; for (size_t i = 0; i < n_cols; ++i) { auto const& typestr = get<String const>(columns[i]["typestr"]); DISPATCH_TYPE(CountValid, typestr, columns, i, has_missing, missing, &(this->page_.offset), &d_flag, &n_rows); } // don't pay for what you don't use. if (!common::CheckNAN(missing)) { int32_t flag {0}; dh::safe_cuda(cudaMemcpy(&flag, d_flag.data().get(), sizeof(int32_t), cudaMemcpyDeviceToHost)); CHECK_EQ(flag, 0) << "missing value is specifed but input data contains NaN."; } info.num_col_ = n_cols; info.num_row_ = n_rows; auto s_offsets = this->page_.offset.DeviceSpan(); thrust::device_ptr<size_t> p_offsets(s_offsets.data()); CHECK_GE(s_offsets.size(), n_rows + 1); thrust::inclusive_scan(p_offsets, p_offsets + n_rows + 1, p_offsets); // Created for building csr matrix, where we need to change index after processing each // column. dh::device_vector<size_t> tmp_offset(this->page_.offset.Size()); dh::safe_cuda(cudaMemcpy(tmp_offset.data().get(), s_offsets.data(), s_offsets.size_bytes(), cudaMemcpyDeviceToDevice)); // We can use null_count from columnar data format, but that will add a non-standard // entry in the array interface, also involves accumulating from all columns. Invoking // one copy seems easier. this->info.num_nonzero_ = tmp_offset.back(); // Device is obtained and set in `CountValid' int32_t const device = this->page_.offset.DeviceIdx(); this->page_.data.SetDevice(device); this->page_.data.Resize(this->info.num_nonzero_); auto s_data = this->page_.data.DeviceSpan(); int32_t kBlocks = common::DivRoundUp(n_rows, kThreads); for (size_t i = 0; i < n_cols; ++i) { auto const& typestr = get<String const>(columns[i]["typestr"]); DISPATCH_TYPE(CreateCSR, typestr, columns, i, n_rows, has_missing, missing, &tmp_offset, s_data); } } } // namespace data } // namespace xgboost
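FromDeviceColumnar above builds the CSR row pointers in two passes: a per-element counting kernel followed by an inclusive prefix scan over the offsets. A compact standalone sketch of that count-then-scan step is below; it drops the validity mask and CloseTo comparison of the original, and count_valid / build_offsets are illustrative names only.

// Count-then-scan construction of CSR row offsets, as in FromDeviceColumnar:
// each thread bumps offsets[tid + 1] for a kept entry, then an inclusive scan
// turns counts into row pointers (offsets[0] stays 0).
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/scan.h>

__global__ void count_valid(const float* col, int n_rows, float missing, size_t* offsets) {
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= n_rows) return;
    if (!isnan(col[tid]) && col[tid] != missing) {
        offsets[tid + 1] += 1;
    }
}

void build_offsets(const float* d_column, int n_rows, float missing,
                   thrust::device_vector<size_t>& offsets) {
    offsets.resize(n_rows + 1);
    thrust::fill(offsets.begin(), offsets.end(), (size_t)0);
    int threads = 256, blocks = (n_rows + threads - 1) / threads;
    count_valid<<<blocks, threads>>>(d_column, n_rows, missing, offsets.data().get());
    thrust::inclusive_scan(offsets.begin(), offsets.end(), offsets.begin());
}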
e32559c81a9e9be647c434f53f5ebf0cf63e17cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/declarable/helpers/dynamic.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { template <typename X, typename Y> static _CUDA_G void dynamicPartitionScalarKernel(const void *vx, const Nd4jLong *xShapeInfo, const void *vi, const Nd4jLong *iShapeInfo, void **vz, Nd4jLong **zShapeInfos, const Nd4jLong numOutputs) { auto x = reinterpret_cast<const X*>(vx); auto i = reinterpret_cast<const Y*>(vi); auto xLength = shape::length(xShapeInfo); auto iLength = shape::length(iShapeInfo); extern __shared__ char shmem[]; __shared__ Y *rawIndices; __shared__ Y *trueIndices; if (threadIdx.x == 0) { rawIndices = reinterpret_cast<Y*>(shmem); trueIndices = rawIndices + blockDim.x; } __syncthreads(); // we run things in blocks, 1 partition per block of threads for (Nd4jLong o = blockIdx.x; o < numOutputs; o += gridDim.x) { auto z = reinterpret_cast<X*>(vz[o]); auto zShapeInfo = zShapeInfos[o]; auto zLength = shape::length(zShapeInfo); // iLimit should be multiple of blockDim.x auto iLimit = iLength <= blockDim.x ? 
blockDim.x : (iLength + (blockDim.x - (iLength % blockDim.x))); int cnt = 0; for (Nd4jLong e = threadIdx.x; e < iLimit; e += blockDim.x) { // load set of indices into shared memory if (e < iLength) rawIndices[threadIdx.x] = i[shape::getIndexOffset(e, iShapeInfo)]; __syncthreads(); // now we need to find out where our actual updates will be mapped // TODO: this can be improved obviously, by using prefix-sum like approach if (threadIdx.x == 0) { for (int f = 0; f < blockDim.x; f++) { if (rawIndices[f] == static_cast<Y>(o)) trueIndices[f] = cnt++; else trueIndices[f] = -1; } } __syncthreads(); // doing actual update if (e < iLength) if (trueIndices[threadIdx.x] >= 0) { z[trueIndices[threadIdx.x]] = x[shape::getIndexOffset(e, xShapeInfo)]; } __syncthreads(); } } } template <typename X, typename Y> static _CUDA_G void dynamicPartitionTadKernel(const void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets, Nd4jLong xLength, const void *vindices, const Nd4jLong *iShapeInfo, Nd4jLong iLength, void **vz, Nd4jLong **zTadShapeInfos, Nd4jLong **zTadOffsets, Nd4jLong numOutputs) { auto x = reinterpret_cast<const X*>(vx); auto indices = reinterpret_cast<const Y*>(vindices); // we run things in blocks, 1 partition per block of threads for (int i = blockIdx.x; i < numOutputs; i += gridDim.x) { auto z = reinterpret_cast<X*>(vz[i]); // each thread has own counter for partitions int outCnt = 0; for (Nd4jLong e = 0; e < iLength; e++) { if (indices[shape::getIndexOffset(e, iShapeInfo)] == i) { auto dx = x + xTadOffsets[e]; auto dz = z + zTadOffsets[i][outCnt++]; for (int f = threadIdx.x; f < xLength; f += blockDim.x) { dz[shape::getIndexOffset(f, zTadShapeInfos[i])] = dx[shape::getIndexOffset(f, xTadShapeInfo)]; } } } } } template <typename X, typename Y> static void _dynamicPartitionFunctor(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*>& outputList) { std::vector<std::pair<NDArray *, int>> outputs(outputList.size()); int sourceDimsLen = input->rankOf() - indices->rankOf(); unsigned int outSize = outputList.size(); PointersManager pm(context, "dynamicPartition"); if (sourceDimsLen) { // non-linear case std::vector<int> sourceDims(sourceDimsLen); for (int i = sourceDimsLen; i > 0; i--) sourceDims[sourceDimsLen - i] = input->rankOf() - i; //compute tad array for given dimensions auto packX = ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), sourceDims); std::vector<void *> outBuffers(outSize); std::vector<const Nd4jLong *> tadShapes(outSize); std::vector<const Nd4jLong *> tadOffsets(outSize); std::vector<Nd4jLong> numTads(outSize); // fill up dimensions array for before kernel for (unsigned int i = 0; i < outSize; i++) { outputs[i].first = outputList[i]; std::vector<int> outDims(outputs[i].first->rankOf() - 1); int r = outputs[i].first->rankOf(); for (int k = 1; k < r; k++) outDims[k - 1] = k; auto packZ = ConstantTadHelper::getInstance().tadForDimensions(outputList.at(i)->shapeInfo(), outDims); outBuffers[i] = outputList.at(i)->specialBuffer(); tadShapes[i] = packZ.platformShapeInfo(); tadOffsets[i] = packZ.platformOffsets(); } // we copy pointers to device auto dOutBuffers = reinterpret_cast<void **>(pm.replicatePointer(outBuffers.data(), outBuffers.size() * sizeof(void *))); auto dOutTadShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(tadShapes.data(), tadShapes.size() * sizeof(Nd4jLong *))); auto dOutTadOffsets = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(tadOffsets.data(), tadOffsets.size() * sizeof(Nd4jLong 
*))); // run kernel on device hipLaunchKernelGGL(( dynamicPartitionTadKernel<X,Y>), dim3(256), dim3(256), 1024, *context->getCudaStream(), input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), shape::length(packX.primaryShapeInfo()), indices->specialBuffer(), indices->specialShapeInfo(), indices->lengthOf(), dOutBuffers, dOutTadShapes, dOutTadOffsets, outSize); } else { // linear case auto numThreads = 256; auto shmemSize = numThreads * sizeof(Y) * 2 + 1024; std::vector<void *> outBuffers; std::vector<const Nd4jLong *> outShapes; for (auto v:outputList) { outBuffers.emplace_back(v->specialBuffer()); outShapes.emplace_back(v->specialShapeInfo()); } auto dOutBuffers = reinterpret_cast<void **>(pm.replicatePointer(outBuffers.data(), outBuffers.size() * sizeof(void *))); auto dOutShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(outShapes.data(), outShapes.size() * sizeof(Nd4jLong *))); hipLaunchKernelGGL(( dynamicPartitionScalarKernel<X,Y>), dim3(256), dim3(numThreads), shmemSize, *context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), dOutBuffers, dOutShapes, outSize); } pm.synchronize(); } template <typename X, typename Y> static _CUDA_G void dynamicStitchScalarKernel(void **vx, Nd4jLong **xShapeInfos, void **vindices, Nd4jLong **iShapeInfos, int inputSize, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong zLength) { auto z = reinterpret_cast<X*>(vz); for (int e = blockIdx.x; e < inputSize; e += gridDim.x) { auto x = reinterpret_cast<X*>(vx[e]); auto indices = reinterpret_cast<Y*>(vindices[e]); auto xShapeInfo = xShapeInfos[e]; auto iShapeInfo = iShapeInfos[e]; auto iLength = shape::length(iShapeInfo); for (int i = threadIdx.x; i < iLength; i += blockDim.x) { auto idx = indices[shape::getIndexOffset(i, iShapeInfo)]; if (idx >= 0 && idx < zLength) z[shape::getIndexOffset(idx, zShapeInfo)] = x[shape::getIndexOffset(i, xShapeInfo)]; } } } template <typename X, typename Y> static _CUDA_G void dynamicStitchTadKernel(void **vx, Nd4jLong **xTadShapeInfos, Nd4jLong **xTadOffsets, void **vindices, Nd4jLong **iShapeInfos, int inputSize, void *vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zTadOffsets) { auto bz = reinterpret_cast<X*>(vz); for (int e = blockIdx.x; e < inputSize; e += gridDim.x) { auto indices = reinterpret_cast<Y*>(vindices[e]); auto iShapeInfo = iShapeInfos[e]; if (shape::isEmpty(iShapeInfo)) continue; auto iLength = shape::length(iShapeInfo); auto zLength = shape::length(zTadShapeInfo); auto xShapeInfo = xTadShapeInfos[e]; auto xLength = shape::length(xShapeInfo); for (int i = 0; i < iLength; i++) { auto idx = indices[shape::getIndexOffset(i, iShapeInfo)]; auto z = bz + zTadOffsets[idx]; auto x = reinterpret_cast<X*>(vx[e]) + xTadOffsets[e][i]; for (int f = threadIdx.x; f < zLength; f += blockDim.x) { z[shape::getIndexOffset(f, zTadShapeInfo)] = x[shape::getIndexOffset(f, xShapeInfo)]; } __syncthreads(); } } } template <typename X, typename Y> static int _dynamicStitchFunctor(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray* output){ int inputSize = inputs.size(); PointersManager pm(context, "dynamicStitch"); if (output->isVector()) { std::vector<const void *> inputBuffers(inputSize); std::vector<const Nd4jLong *> inputShapes(inputSize); std::vector<const void *> indicesBuffers(inputSize); std::vector<const Nd4jLong *> indicesShapes(inputSize); for (int e = 0; e < inputSize; e++) { inputBuffers[e] = 
inputs.at(e)->specialBuffer(); indicesBuffers[e] = indices.at(e)->specialBuffer(); inputShapes[e] = inputs.at(e)->specialShapeInfo(); indicesShapes[e] = indices.at(e)->specialShapeInfo(); } // copying pointers to buffers to device auto dInputBuffers = reinterpret_cast<void **>(pm.replicatePointer(inputBuffers.data(), inputSize * sizeof(void *))); auto dIndicesBuffers = reinterpret_cast<void **>(pm.replicatePointer(indicesBuffers.data(), inputSize * sizeof(void *))); auto dInputShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputShapes.data(), inputSize * sizeof(Nd4jLong *))); auto dIndicesShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(indicesShapes.data(), inputSize * sizeof(Nd4jLong *))); hipLaunchKernelGGL(( dynamicStitchScalarKernel<X,Y>), dim3(256), dim3(256), 1024, *context->getCudaStream(), dInputBuffers, dInputShapes, dIndicesBuffers, dIndicesShapes, inputSize, output->specialBuffer(), output->specialShapeInfo(), output->lengthOf()); } else { std::vector<int> restDims(output->rankOf() - 1); for (int i = restDims.size(); i > 0; i--) restDims[restDims.size() - i] = output->rankOf() - i; auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), restDims); std::vector<const void *> inputBuffers(inputSize); std::vector<const Nd4jLong *> inputTadShapes(inputSize); std::vector<const Nd4jLong *> inputTadOffsets(inputSize); std::vector<const void *> indicesBuffers(inputSize); std::vector<const Nd4jLong *> indicesShapes(inputSize); for (int e = 0; e < inputSize; e++) { std::vector<int> sourceDims(inputs[e]->rankOf() - indices[e]->rankOf()); for (int i = sourceDims.size(); i > 0; i--) sourceDims[sourceDims.size() - i] = inputs[e]->rankOf() - i; auto packX = ConstantTadHelper::getInstance().tadForDimensions(inputs[e]->shapeInfo(), sourceDims); indicesBuffers[e] = indices[e]->specialBuffer(); indicesShapes[e] = indices[e]->specialShapeInfo(); inputBuffers[e] = inputs[e]->specialBuffer(); inputTadShapes[e] = packX.platformShapeInfo(); inputTadOffsets[e] = packX.platformOffsets(); } // copying pointers to buffers to device auto dInputBuffers = reinterpret_cast<void **>(pm.replicatePointer(inputBuffers.data(), inputSize * sizeof(void *))); auto dInputTadShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputTadShapes.data(), inputSize * sizeof(Nd4jLong *))); auto dInputTadOffsets = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputTadOffsets.data(), inputSize * sizeof(Nd4jLong *))); auto dIndicesBuffers = reinterpret_cast<void **>(pm.replicatePointer(indicesBuffers.data(), inputSize * sizeof(void *))); auto dIndicesShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(indicesShapes.data(), inputSize * sizeof(Nd4jLong *))); hipLaunchKernelGGL(( dynamicStitchTadKernel<X,Y>), dim3(256), dim3(256), 1024, *context->getCudaStream(), dInputBuffers, dInputTadShapes, dInputTadOffsets, dIndicesBuffers, dIndicesShapes, inputSize, output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets()); } pm.synchronize(); return Status::OK(); } template <typename T> static void _dynamicPartitionFunctorBP(NDArray const* input, NDArray const* indices, std::vector<NDArray*> const& inputGradientList, std::vector<NDArray*>& outputList) { } void dynamicPartitionFunctor(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*>& outputList) { auto xType = input->dataType(); auto yType = indices->dataType(); NDArray::prepareSpecialUse({}, {indices, input}); BUILD_DOUBLE_SELECTOR(xType, yType, 
_dynamicPartitionFunctor, (context, input, indices, outputList), NUMERIC_TYPES, INDEXING_TYPES);
            NDArray::registerSpecialUse({}, {indices, input});
            // TODO: it would be nice to have NDArray::registerSpecialUse signature that accepts something else beyond initializer_list
            for (auto v:outputList) {
                v->tickWriteDevice();
            }
        }

        template <typename T>
        static int _dynamicStitchFunctorBP(std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray const* gradInput, std::vector<NDArray*>& outputList){
            throw std::runtime_error("Not implemented yet");
        }

        int dynamicStitchFunctor(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray* output){
            auto xType = inputs.at(0)->dataType();
            auto yType = indices.at(0)->dataType();

            for (auto v:indices) {
                v->syncToDevice();
                v->tickReadDevice();
            }

            for (auto v:inputs) {
                v->syncToDevice();
                v->tickReadDevice();
            }

            NDArray::prepareSpecialUse({output}, {});
            BUILD_DOUBLE_SELECTOR(xType, yType, _dynamicStitchFunctor, (context, inputs, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
            NDArray::registerSpecialUse({output}, {});

            return Status::OK();
        }

        int dynamicStitchFunctorBP(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray const* gradInput, std::vector<NDArray*>& outputList) {
            auto xType = inputs.at(0)->dataType();

            BUILD_SINGLE_SELECTOR(xType, return _dynamicStitchFunctorBP, (inputs, indices, gradInput, outputList), NUMERIC_TYPES);
        }

        void dynamicPartitionFunctorBP(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*> const& inputGradientList, std::vector<NDArray*>& outputList) {
            auto xType = input->dataType();

            BUILD_SINGLE_SELECTOR(xType, _dynamicPartitionFunctorBP, (input, indices, inputGradientList, outputList), NUMERIC_TYPES);
        }
    }
}
}
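// ------------------------------------------------------------------------------------------------
// Illustrative aside: a minimal host-only sketch of the partition semantics that
// dynamicPartitionScalarKernel / dynamicPartitionTadKernel implement for the linear (vector)
// case -- element e of the input goes to output partition indices[e], preserving input order.
// The helper name and signature below are hypothetical; this is not the libnd4j API, only a
// reference for what the GPU code above computes.
#include <vector>

template <typename X, typename Y>
static std::vector<std::vector<X>> referenceDynamicPartition(const std::vector<X>& input,
                                                             const std::vector<Y>& indices,
                                                             int numPartitions) {
    std::vector<std::vector<X>> out(numPartitions);
    for (size_t e = 0; e < indices.size(); e++) {
        const int p = static_cast<int>(indices[e]);
        if (p >= 0 && p < numPartitions)    // indices outside [0, numPartitions) are skipped here
            out[p].push_back(input[e]);     // order within each partition follows input order
    }
    return out;
}
// Example: input = {10, 20, 30, 40}, indices = {1, 0, 1, 0}, numPartitions = 2
//          -> partition 0 = {20, 40}, partition 1 = {10, 30}
// ------------------------------------------------------------------------------------------------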
e32559c81a9e9be647c434f53f5ebf0cf63e17cb.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/declarable/helpers/dynamic.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { template <typename X, typename Y> static _CUDA_G void dynamicPartitionScalarKernel(const void *vx, const Nd4jLong *xShapeInfo, const void *vi, const Nd4jLong *iShapeInfo, void **vz, Nd4jLong **zShapeInfos, const Nd4jLong numOutputs) { auto x = reinterpret_cast<const X*>(vx); auto i = reinterpret_cast<const Y*>(vi); auto xLength = shape::length(xShapeInfo); auto iLength = shape::length(iShapeInfo); extern __shared__ char shmem[]; __shared__ Y *rawIndices; __shared__ Y *trueIndices; if (threadIdx.x == 0) { rawIndices = reinterpret_cast<Y*>(shmem); trueIndices = rawIndices + blockDim.x; } __syncthreads(); // we run things in blocks, 1 partition per block of threads for (Nd4jLong o = blockIdx.x; o < numOutputs; o += gridDim.x) { auto z = reinterpret_cast<X*>(vz[o]); auto zShapeInfo = zShapeInfos[o]; auto zLength = shape::length(zShapeInfo); // iLimit should be multiple of blockDim.x auto iLimit = iLength <= blockDim.x ? 
blockDim.x : (iLength + (blockDim.x - (iLength % blockDim.x))); int cnt = 0; for (Nd4jLong e = threadIdx.x; e < iLimit; e += blockDim.x) { // load set of indices into shared memory if (e < iLength) rawIndices[threadIdx.x] = i[shape::getIndexOffset(e, iShapeInfo)]; __syncthreads(); // now we need to find out where our actual updates will be mapped // TODO: this can be improved obviously, by using prefix-sum like approach if (threadIdx.x == 0) { for (int f = 0; f < blockDim.x; f++) { if (rawIndices[f] == static_cast<Y>(o)) trueIndices[f] = cnt++; else trueIndices[f] = -1; } } __syncthreads(); // doing actual update if (e < iLength) if (trueIndices[threadIdx.x] >= 0) { z[trueIndices[threadIdx.x]] = x[shape::getIndexOffset(e, xShapeInfo)]; } __syncthreads(); } } } template <typename X, typename Y> static _CUDA_G void dynamicPartitionTadKernel(const void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets, Nd4jLong xLength, const void *vindices, const Nd4jLong *iShapeInfo, Nd4jLong iLength, void **vz, Nd4jLong **zTadShapeInfos, Nd4jLong **zTadOffsets, Nd4jLong numOutputs) { auto x = reinterpret_cast<const X*>(vx); auto indices = reinterpret_cast<const Y*>(vindices); // we run things in blocks, 1 partition per block of threads for (int i = blockIdx.x; i < numOutputs; i += gridDim.x) { auto z = reinterpret_cast<X*>(vz[i]); // each thread has own counter for partitions int outCnt = 0; for (Nd4jLong e = 0; e < iLength; e++) { if (indices[shape::getIndexOffset(e, iShapeInfo)] == i) { auto dx = x + xTadOffsets[e]; auto dz = z + zTadOffsets[i][outCnt++]; for (int f = threadIdx.x; f < xLength; f += blockDim.x) { dz[shape::getIndexOffset(f, zTadShapeInfos[i])] = dx[shape::getIndexOffset(f, xTadShapeInfo)]; } } } } } template <typename X, typename Y> static void _dynamicPartitionFunctor(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*>& outputList) { std::vector<std::pair<NDArray *, int>> outputs(outputList.size()); int sourceDimsLen = input->rankOf() - indices->rankOf(); unsigned int outSize = outputList.size(); PointersManager pm(context, "dynamicPartition"); if (sourceDimsLen) { // non-linear case std::vector<int> sourceDims(sourceDimsLen); for (int i = sourceDimsLen; i > 0; i--) sourceDims[sourceDimsLen - i] = input->rankOf() - i; //compute tad array for given dimensions auto packX = ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), sourceDims); std::vector<void *> outBuffers(outSize); std::vector<const Nd4jLong *> tadShapes(outSize); std::vector<const Nd4jLong *> tadOffsets(outSize); std::vector<Nd4jLong> numTads(outSize); // fill up dimensions array for before kernel for (unsigned int i = 0; i < outSize; i++) { outputs[i].first = outputList[i]; std::vector<int> outDims(outputs[i].first->rankOf() - 1); int r = outputs[i].first->rankOf(); for (int k = 1; k < r; k++) outDims[k - 1] = k; auto packZ = ConstantTadHelper::getInstance().tadForDimensions(outputList.at(i)->shapeInfo(), outDims); outBuffers[i] = outputList.at(i)->specialBuffer(); tadShapes[i] = packZ.platformShapeInfo(); tadOffsets[i] = packZ.platformOffsets(); } // we copy pointers to device auto dOutBuffers = reinterpret_cast<void **>(pm.replicatePointer(outBuffers.data(), outBuffers.size() * sizeof(void *))); auto dOutTadShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(tadShapes.data(), tadShapes.size() * sizeof(Nd4jLong *))); auto dOutTadOffsets = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(tadOffsets.data(), tadOffsets.size() * sizeof(Nd4jLong 
*))); // run kernel on device dynamicPartitionTadKernel<X,Y><<<256, 256, 1024, *context->getCudaStream()>>>(input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), shape::length(packX.primaryShapeInfo()), indices->specialBuffer(), indices->specialShapeInfo(), indices->lengthOf(), dOutBuffers, dOutTadShapes, dOutTadOffsets, outSize); } else { // linear case auto numThreads = 256; auto shmemSize = numThreads * sizeof(Y) * 2 + 1024; std::vector<void *> outBuffers; std::vector<const Nd4jLong *> outShapes; for (auto v:outputList) { outBuffers.emplace_back(v->specialBuffer()); outShapes.emplace_back(v->specialShapeInfo()); } auto dOutBuffers = reinterpret_cast<void **>(pm.replicatePointer(outBuffers.data(), outBuffers.size() * sizeof(void *))); auto dOutShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(outShapes.data(), outShapes.size() * sizeof(Nd4jLong *))); dynamicPartitionScalarKernel<X,Y><<<256, numThreads, shmemSize, *context->getCudaStream()>>>(input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), dOutBuffers, dOutShapes, outSize); } pm.synchronize(); } template <typename X, typename Y> static _CUDA_G void dynamicStitchScalarKernel(void **vx, Nd4jLong **xShapeInfos, void **vindices, Nd4jLong **iShapeInfos, int inputSize, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong zLength) { auto z = reinterpret_cast<X*>(vz); for (int e = blockIdx.x; e < inputSize; e += gridDim.x) { auto x = reinterpret_cast<X*>(vx[e]); auto indices = reinterpret_cast<Y*>(vindices[e]); auto xShapeInfo = xShapeInfos[e]; auto iShapeInfo = iShapeInfos[e]; auto iLength = shape::length(iShapeInfo); for (int i = threadIdx.x; i < iLength; i += blockDim.x) { auto idx = indices[shape::getIndexOffset(i, iShapeInfo)]; if (idx >= 0 && idx < zLength) z[shape::getIndexOffset(idx, zShapeInfo)] = x[shape::getIndexOffset(i, xShapeInfo)]; } } } template <typename X, typename Y> static _CUDA_G void dynamicStitchTadKernel(void **vx, Nd4jLong **xTadShapeInfos, Nd4jLong **xTadOffsets, void **vindices, Nd4jLong **iShapeInfos, int inputSize, void *vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zTadOffsets) { auto bz = reinterpret_cast<X*>(vz); for (int e = blockIdx.x; e < inputSize; e += gridDim.x) { auto indices = reinterpret_cast<Y*>(vindices[e]); auto iShapeInfo = iShapeInfos[e]; if (shape::isEmpty(iShapeInfo)) continue; auto iLength = shape::length(iShapeInfo); auto zLength = shape::length(zTadShapeInfo); auto xShapeInfo = xTadShapeInfos[e]; auto xLength = shape::length(xShapeInfo); for (int i = 0; i < iLength; i++) { auto idx = indices[shape::getIndexOffset(i, iShapeInfo)]; auto z = bz + zTadOffsets[idx]; auto x = reinterpret_cast<X*>(vx[e]) + xTadOffsets[e][i]; for (int f = threadIdx.x; f < zLength; f += blockDim.x) { z[shape::getIndexOffset(f, zTadShapeInfo)] = x[shape::getIndexOffset(f, xShapeInfo)]; } __syncthreads(); } } } template <typename X, typename Y> static int _dynamicStitchFunctor(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray* output){ int inputSize = inputs.size(); PointersManager pm(context, "dynamicStitch"); if (output->isVector()) { std::vector<const void *> inputBuffers(inputSize); std::vector<const Nd4jLong *> inputShapes(inputSize); std::vector<const void *> indicesBuffers(inputSize); std::vector<const Nd4jLong *> indicesShapes(inputSize); for (int e = 0; e < inputSize; e++) { inputBuffers[e] = inputs.at(e)->specialBuffer(); indicesBuffers[e] = 
indices.at(e)->specialBuffer(); inputShapes[e] = inputs.at(e)->specialShapeInfo(); indicesShapes[e] = indices.at(e)->specialShapeInfo(); } // copying pointers to buffers to device auto dInputBuffers = reinterpret_cast<void **>(pm.replicatePointer(inputBuffers.data(), inputSize * sizeof(void *))); auto dIndicesBuffers = reinterpret_cast<void **>(pm.replicatePointer(indicesBuffers.data(), inputSize * sizeof(void *))); auto dInputShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputShapes.data(), inputSize * sizeof(Nd4jLong *))); auto dIndicesShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(indicesShapes.data(), inputSize * sizeof(Nd4jLong *))); dynamicStitchScalarKernel<X,Y><<<256, 256, 1024, *context->getCudaStream()>>>(dInputBuffers, dInputShapes, dIndicesBuffers, dIndicesShapes, inputSize, output->specialBuffer(), output->specialShapeInfo(), output->lengthOf()); } else { std::vector<int> restDims(output->rankOf() - 1); for (int i = restDims.size(); i > 0; i--) restDims[restDims.size() - i] = output->rankOf() - i; auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), restDims); std::vector<const void *> inputBuffers(inputSize); std::vector<const Nd4jLong *> inputTadShapes(inputSize); std::vector<const Nd4jLong *> inputTadOffsets(inputSize); std::vector<const void *> indicesBuffers(inputSize); std::vector<const Nd4jLong *> indicesShapes(inputSize); for (int e = 0; e < inputSize; e++) { std::vector<int> sourceDims(inputs[e]->rankOf() - indices[e]->rankOf()); for (int i = sourceDims.size(); i > 0; i--) sourceDims[sourceDims.size() - i] = inputs[e]->rankOf() - i; auto packX = ConstantTadHelper::getInstance().tadForDimensions(inputs[e]->shapeInfo(), sourceDims); indicesBuffers[e] = indices[e]->specialBuffer(); indicesShapes[e] = indices[e]->specialShapeInfo(); inputBuffers[e] = inputs[e]->specialBuffer(); inputTadShapes[e] = packX.platformShapeInfo(); inputTadOffsets[e] = packX.platformOffsets(); } // copying pointers to buffers to device auto dInputBuffers = reinterpret_cast<void **>(pm.replicatePointer(inputBuffers.data(), inputSize * sizeof(void *))); auto dInputTadShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputTadShapes.data(), inputSize * sizeof(Nd4jLong *))); auto dInputTadOffsets = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(inputTadOffsets.data(), inputSize * sizeof(Nd4jLong *))); auto dIndicesBuffers = reinterpret_cast<void **>(pm.replicatePointer(indicesBuffers.data(), inputSize * sizeof(void *))); auto dIndicesShapes = reinterpret_cast<Nd4jLong **>(pm.replicatePointer(indicesShapes.data(), inputSize * sizeof(Nd4jLong *))); dynamicStitchTadKernel<X,Y><<<256, 256, 1024, *context->getCudaStream()>>>(dInputBuffers, dInputTadShapes, dInputTadOffsets, dIndicesBuffers, dIndicesShapes, inputSize, output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets()); } pm.synchronize(); return Status::OK(); } template <typename T> static void _dynamicPartitionFunctorBP(NDArray const* input, NDArray const* indices, std::vector<NDArray*> const& inputGradientList, std::vector<NDArray*>& outputList) { } void dynamicPartitionFunctor(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*>& outputList) { auto xType = input->dataType(); auto yType = indices->dataType(); NDArray::prepareSpecialUse({}, {indices, input}); BUILD_DOUBLE_SELECTOR(xType, yType, _dynamicPartitionFunctor, (context, input, indices, outputList), NUMERIC_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({}, 
{indices, input});
            // TODO: it would be nice to have NDArray::registerSpecialUse signature that accepts something else beyond initializer_list
            for (auto v:outputList) {
                v->tickWriteDevice();
            }
        }

        template <typename T>
        static int _dynamicStitchFunctorBP(std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray const* gradInput, std::vector<NDArray*>& outputList){
            throw std::runtime_error("Not implemented yet");
        }

        int dynamicStitchFunctor(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray* output){
            auto xType = inputs.at(0)->dataType();
            auto yType = indices.at(0)->dataType();

            for (auto v:indices) {
                v->syncToDevice();
                v->tickReadDevice();
            }

            for (auto v:inputs) {
                v->syncToDevice();
                v->tickReadDevice();
            }

            NDArray::prepareSpecialUse({output}, {});
            BUILD_DOUBLE_SELECTOR(xType, yType, _dynamicStitchFunctor, (context, inputs, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
            NDArray::registerSpecialUse({output}, {});

            return Status::OK();
        }

        int dynamicStitchFunctorBP(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, std::vector<NDArray*> const& indices, NDArray const* gradInput, std::vector<NDArray*>& outputList) {
            auto xType = inputs.at(0)->dataType();

            BUILD_SINGLE_SELECTOR(xType, return _dynamicStitchFunctorBP, (inputs, indices, gradInput, outputList), NUMERIC_TYPES);
        }

        void dynamicPartitionFunctorBP(sd::LaunchContext * context, NDArray const* input, NDArray const* indices, std::vector<NDArray*> const& inputGradientList, std::vector<NDArray*>& outputList) {
            auto xType = input->dataType();

            BUILD_SINGLE_SELECTOR(xType, _dynamicPartitionFunctorBP, (input, indices, inputGradientList, outputList), NUMERIC_TYPES);
        }
    }
}
}
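// ------------------------------------------------------------------------------------------------
// Illustrative aside: a host-only sketch of what dynamicStitchScalarKernel computes for the
// vector case -- value inputs[e][i] is written to output position indices[e][i]. In this sketch
// later inputs overwrite earlier ones on duplicate indices; the GPU kernel gives no ordering
// guarantee when indices collide across inputs. Hypothetical reference code, not the libnd4j API.
#include <vector>

template <typename X, typename Y>
static std::vector<X> referenceDynamicStitch(const std::vector<std::vector<X>>& inputs,
                                             const std::vector<std::vector<Y>>& indices,
                                             size_t outputLength) {
    std::vector<X> out(outputLength, X(0));
    for (size_t e = 0; e < inputs.size(); e++)
        for (size_t i = 0; i < indices[e].size(); i++) {
            const auto idx = static_cast<long long>(indices[e][i]);
            if (idx >= 0 && static_cast<size_t>(idx) < outputLength)
                out[static_cast<size_t>(idx)] = inputs[e][i];
        }
    return out;
}
// ------------------------------------------------------------------------------------------------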
4147346fc0c5914bc894b709c9655efa4152b5b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { // CUDA kernele for forward template <typename Dtype> __global__ void PReLUForward(const int n, const int channels, const int dim, const Dtype* in, Dtype* out, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } } // CUDA kernel for bottom backward template <typename Dtype> __global__ void PReLUBackward(const int n, const int channels, const int dim, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); } } // CUDA kernel for element-wise parameter backward template <typename Dtype> __global__ void PReLUParamBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); } } template <typename Dtype> void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->shape(1); const Dtype* slope_data = this->blobs_[0]->gpu_data(); const int div_factor = channel_shared_ ? channels : 1; // For in-place computation if (top[0] == bottom[0]) { caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data()); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, dim, bottom_data, top_data, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->shape(1); // For in-place computation if (top[0] == bottom[0]) { bottom_data = bottom_memory_.gpu_data(); } // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); int cdim = channels * dim; Dtype dsum = 0.; vector<int> offset_vector(bottom[0]->num_axes(),0); for (int n = 0; n < bottom[0]->shape(0); ++n) { offset_vector[0] = n; // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(cdim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, cdim, top_diff + top[0]->offset(offset_vector), bottom_data + bottom[0]->offset(offset_vector), backward_buff_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1., backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); int div_factor = channel_shared_ ? channels : 1; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer); } // namespace caffe
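// ------------------------------------------------------------------------------------------------
// Illustrative aside: scalar reference for the PReLU math the kernels above apply elementwise
// (helper names are hypothetical, for documentation only):
//   forward:        y = x > 0 ? x : a * x      (PReLUForward)
//   d y / d x factor  = x > 0 ? 1 : a          (PReLUBackward)
//   d y / d a term    = x > 0 ? 0 : x          (accumulated per channel by PReLUParamBackward)
static inline float prelu_forward_ref(float x, float a) { return x > 0.f ? x : a * x; }
static inline float prelu_dx_ref(float x, float a)      { return x > 0.f ? 1.f : a; }
static inline float prelu_da_ref(float x)               { return x > 0.f ? 0.f : x; }
// ------------------------------------------------------------------------------------------------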
4147346fc0c5914bc894b709c9655efa4152b5b6.cu
#include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { // CUDA kernele for forward template <typename Dtype> __global__ void PReLUForward(const int n, const int channels, const int dim, const Dtype* in, Dtype* out, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } } // CUDA kernel for bottom backward template <typename Dtype> __global__ void PReLUBackward(const int n, const int channels, const int dim, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); } } // CUDA kernel for element-wise parameter backward template <typename Dtype> __global__ void PReLUParamBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); } } template <typename Dtype> void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->shape(1); const Dtype* slope_data = this->blobs_[0]->gpu_data(); const int div_factor = channel_shared_ ? channels : 1; // For in-place computation if (top[0] == bottom[0]) { caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data()); } // NOLINT_NEXT_LINE(whitespace/operators) PReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, dim, bottom_data, top_data, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->shape(1); // For in-place computation if (top[0] == bottom[0]) { bottom_data = bottom_memory_.gpu_data(); } // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); int cdim = channels * dim; Dtype dsum = 0.; vector<int> offset_vector(bottom[0]->num_axes(),0); for (int n = 0; n < bottom[0]->shape(0); ++n) { offset_vector[0] = n; // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) PReLUParamBackward<Dtype><<<CAFFE_GET_BLOCKS(cdim), CAFFE_CUDA_NUM_THREADS>>>( cdim, top_diff + top[0]->offset(offset_vector), bottom_data + bottom[0]->offset(offset_vector), backward_buff_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1., backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); int div_factor = channel_shared_ ? channels : 1; // NOLINT_NEXT_LINE(whitespace/operators) PReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer); } // namespace caffe
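// ------------------------------------------------------------------------------------------------
// Illustrative aside: the channel lookup used above, c = (index / dim) % channels / div_factor,
// assumes the usual Caffe NCHW flattening index = ((n * channels + c) * dim + s) with dim = H*W.
// When channel_shared_ is true, div_factor == channels and the lookup collapses to 0, i.e. one
// shared slope. A small host-side check of that arithmetic (hypothetical helper, assumed layout):
#include <cassert>

static void check_prelu_channel_index() {
    const int channels = 3, dim = 4;   // e.g. 3 channels over a 2x2 spatial map
    for (int n = 0; n < 2; ++n)
        for (int c = 0; c < channels; ++c)
            for (int s = 0; s < dim; ++s) {
                const int index = (n * channels + c) * dim + s;
                assert((index / dim) % channels == c);               // per-channel slope index
                assert((index / dim) % channels / channels == 0);    // channel-shared case
            }
}
// ------------------------------------------------------------------------------------------------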
787e4462dc07a480bd54f2b26ffba9a878a311e3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include <sys/time.h> #define MAX_THREADS_PER_BLOCK 512 int no_of_nodes; int edge_list_size; FILE *fp; //Structure to hold a node information struct Node { int starting; int no_of_edges; }; __global__ void Kernel3(Node* g_graph_nodes, int* g_graph_edges,int* cd, bool* f1d, bool *f2d,int no_of_nodes,bool *md) { int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; //int idx, idxi; //printf("\n Thread id: %d",i); int ci, cj; if(f1d[i]==true) { f1d[i]=false; ci=cd[i]; bool cimod=false; int temp; for(int j=g_graph_nodes[i].starting;j<(g_graph_nodes[i].starting + g_graph_nodes[i].no_of_edges); j++) { temp=g_graph_edges[j]; cj = cd[temp]; if ( ci < cj ) { atomicMin(&cd[temp],ci); f2d[temp]=true; *md=true; } else if (ci>cj) { ci=cj; cimod=true; } } if(cimod==true) { atomicMin(&cd[i],ci); f2d[i]=true; *md=true; } } // printf("\n End of kernel: %d", cd[i]); } long long start_timer(); long long stop_timer(long long start_time, char *name); void GPLGraph(int argc, char** argv); //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { no_of_nodes=0; edge_list_size=0; GPLGraph( argc, argv); } void Usage(int argc, char**argv){ fprintf(stderr,"Usage: %s <input_file>\n", argv[0]); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void GPLGraph( int argc, char** argv) { char *input_f; if(argc!=2){ Usage(argc, argv); exit(0); } input_f = argv[1]; printf("Reading File\n"); //Read in Graph from a file fp = fopen(input_f,"r"); if(!fp) { printf("Error Reading graph file\n"); return; } int source = 0; fscanf(fp,"%d",&no_of_nodes); int num_of_blocks = 1; int num_of_threads_per_block = no_of_nodes; //Make execution Parameters according to the number of nodes //Distribute threads across multiple Blocks if necessary if(no_of_nodes>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } // allocate host memory Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes); int start, edgeno; // initalize the memory for( unsigned int i = 0; i < no_of_nodes; i++) { fscanf(fp,"%d %d",&start,&edgeno); h_graph_nodes[i].starting = start; h_graph_nodes[i].no_of_edges = edgeno; } //read the source node from the file fscanf(fp,"%d",&source); source=0; fscanf(fp,"%d",&edge_list_size); int id,cost; int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size); for(unsigned int i=0; i < edge_list_size ; i++) { fscanf(fp,"%d",&id); fscanf(fp,"%d",&cost); h_graph_edges[i] = id; } int* c = (int*) malloc(sizeof(int)*no_of_nodes); bool* f1 = (bool*) malloc(sizeof(bool)*no_of_nodes); bool* f2 = (bool*) malloc(sizeof(bool)*no_of_nodes); bool* f3 = (bool*) malloc(sizeof(bool)*no_of_nodes); for(unsigned int i=0; i < no_of_nodes ; i++) { c[i]=i; f1[i]=true; f2[i]=false; } if(fp) fclose(fp); printf("Read File\n"); //Copy the Node list to device memory Node* d_graph_nodes; hipMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ; hipMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ; //Copy the Edge List to device Memory int* d_graph_edges; hipMalloc( (void**) &d_graph_edges, 
sizeof(int)*edge_list_size) ; hipMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ; //Allocate Color Array in device Memory int* cd; hipMalloc( (void**) &cd, sizeof(int)*no_of_nodes); hipMemcpy( cd, c, sizeof(int)*no_of_nodes,hipMemcpyHostToDevice); //Allocate Boolean Array in current Iteration bool* f1d; hipMalloc( (void**) &f1d, sizeof(bool)*no_of_nodes); hipMemcpy( f1d, f1, sizeof(bool)*no_of_nodes,hipMemcpyHostToDevice); //Allocate Boolean Array for next Iteration bool* f2d; hipMalloc( (void**) &f2d, sizeof(bool)*no_of_nodes); hipMemcpy( f2d, f2, sizeof(bool)*no_of_nodes,hipMemcpyHostToDevice); bool* f3d; hipMalloc( (void**) &f3d, sizeof(bool)*no_of_nodes); bool m; bool *md; hipMalloc( (void**) &md, sizeof(bool)); printf("Copied Everything to Kernel"); // setup execution parameters dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); long long timer; timer = start_timer(); int k=0; printf("Start traversing the tree\n"); //Call the Kernel untill all the elements of Frontier are not false do { m=false; //if no thread changes this value then the loop stops hipMemcpy( md, &m, sizeof(bool), hipMemcpyHostToDevice) ; hipLaunchKernelGGL(( Kernel3), dim3(grid), dim3(threads), 0 , 0, d_graph_nodes, d_graph_edges,cd,f1d,f2d, no_of_nodes,md); // check if kernel execution generated and error hipMemcpy( f1, f1d, sizeof(bool)*no_of_nodes,hipMemcpyDeviceToHost); hipMemcpy( f2, f2d, sizeof(bool)*no_of_nodes,hipMemcpyDeviceToHost); hipMemcpy( f1d, f2, sizeof(bool)*no_of_nodes,hipMemcpyHostToDevice); hipMemcpy( f2d, f1, sizeof(bool)*no_of_nodes,hipMemcpyHostToDevice); k++; hipMemcpy( &m,md , sizeof(bool), hipMemcpyDeviceToHost) ; // printf("\n \n Return from kernel: %d",m); } while(m); printf("Kernel Executed %d times\n",k); // copy result from device to host hipMemcpy( c,cd, sizeof(int)*no_of_nodes, hipMemcpyDeviceToHost) ; //Store the result into a file FILE *fpo = fopen("result.txt","w"); for(unsigned int i=0;i<no_of_nodes;i++) fprintf(fpo,"%d) color:%d\n",i,c[i]); fclose(fpo); printf("Result stored in result.txt\n"); // cleanup memory free( h_graph_nodes); free( h_graph_edges); hipFree(d_graph_nodes); hipFree(d_graph_edges); hipFree(cd); hipFree(f1d); hipFree(f2d); //hipFree(md); stop_timer(timer, "Total Processing time"); } long long start_timer() { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000 + tv.tv_usec; } long long stop_timer(long long start_time, char *label) { struct timeval tv; gettimeofday(&tv, NULL); long long end_time = tv.tv_sec * 1000000 + tv.tv_usec; printf("%s: %.5f sec\n", label, ((float) (end_time - start_time)) / (1000 * 1000)); return end_time - start_time; }
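// ------------------------------------------------------------------------------------------------
// Illustrative aside: Kernel3 above is frontier-based minimum-label propagation -- every node
// starts with color c[i] = i and repeatedly adopts the smallest color seen across its edges
// until no color changes, which labels connected components. A host-only reference of the same
// fixed point, using a hypothetical CSR-style input rather than this file's Node/edge arrays:
#include <vector>

static std::vector<int> referenceMinLabel(const std::vector<int>& rowStart,   // size n + 1
                                          const std::vector<int>& adj) {      // edge targets
    const int n = static_cast<int>(rowStart.size()) - 1;
    std::vector<int> c(n);
    for (int i = 0; i < n; ++i) c[i] = i;
    bool changed = true;
    while (changed) {
        changed = false;
        for (int i = 0; i < n; ++i)
            for (int j = rowStart[i]; j < rowStart[i + 1]; ++j) {
                const int k = adj[j];
                const int m = c[i] < c[k] ? c[i] : c[k];
                if (m < c[i]) { c[i] = m; changed = true; }
                if (m < c[k]) { c[k] = m; changed = true; }
            }
    }
    return c;   // nodes in the same component end up with the same (minimum) label
}
// ------------------------------------------------------------------------------------------------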
787e4462dc07a480bd54f2b26ffba9a878a311e3.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cuda.h> #include <sys/time.h> #define MAX_THREADS_PER_BLOCK 512 int no_of_nodes; int edge_list_size; FILE *fp; //Structure to hold a node information struct Node { int starting; int no_of_edges; }; __global__ void Kernel3(Node* g_graph_nodes, int* g_graph_edges,int* cd, bool* f1d, bool *f2d,int no_of_nodes,bool *md) { int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; //int idx, idxi; //printf("\n Thread id: %d",i); int ci, cj; if(f1d[i]==true) { f1d[i]=false; ci=cd[i]; bool cimod=false; int temp; for(int j=g_graph_nodes[i].starting;j<(g_graph_nodes[i].starting + g_graph_nodes[i].no_of_edges); j++) { temp=g_graph_edges[j]; cj = cd[temp]; if ( ci < cj ) { atomicMin(&cd[temp],ci); f2d[temp]=true; *md=true; } else if (ci>cj) { ci=cj; cimod=true; } } if(cimod==true) { atomicMin(&cd[i],ci); f2d[i]=true; *md=true; } } // printf("\n End of kernel: %d", cd[i]); } long long start_timer(); long long stop_timer(long long start_time, char *name); void GPLGraph(int argc, char** argv); //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { no_of_nodes=0; edge_list_size=0; GPLGraph( argc, argv); } void Usage(int argc, char**argv){ fprintf(stderr,"Usage: %s <input_file>\n", argv[0]); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void GPLGraph( int argc, char** argv) { char *input_f; if(argc!=2){ Usage(argc, argv); exit(0); } input_f = argv[1]; printf("Reading File\n"); //Read in Graph from a file fp = fopen(input_f,"r"); if(!fp) { printf("Error Reading graph file\n"); return; } int source = 0; fscanf(fp,"%d",&no_of_nodes); int num_of_blocks = 1; int num_of_threads_per_block = no_of_nodes; //Make execution Parameters according to the number of nodes //Distribute threads across multiple Blocks if necessary if(no_of_nodes>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } // allocate host memory Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes); int start, edgeno; // initalize the memory for( unsigned int i = 0; i < no_of_nodes; i++) { fscanf(fp,"%d %d",&start,&edgeno); h_graph_nodes[i].starting = start; h_graph_nodes[i].no_of_edges = edgeno; } //read the source node from the file fscanf(fp,"%d",&source); source=0; fscanf(fp,"%d",&edge_list_size); int id,cost; int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size); for(unsigned int i=0; i < edge_list_size ; i++) { fscanf(fp,"%d",&id); fscanf(fp,"%d",&cost); h_graph_edges[i] = id; } int* c = (int*) malloc(sizeof(int)*no_of_nodes); bool* f1 = (bool*) malloc(sizeof(bool)*no_of_nodes); bool* f2 = (bool*) malloc(sizeof(bool)*no_of_nodes); bool* f3 = (bool*) malloc(sizeof(bool)*no_of_nodes); for(unsigned int i=0; i < no_of_nodes ; i++) { c[i]=i; f1[i]=true; f2[i]=false; } if(fp) fclose(fp); printf("Read File\n"); //Copy the Node list to device memory Node* d_graph_nodes; cudaMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ; cudaMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ; //Copy the Edge List to device Memory int* d_graph_edges; cudaMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ; cudaMemcpy( d_graph_edges, 
h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ; //Allocate Color Array in device Memory int* cd; cudaMalloc( (void**) &cd, sizeof(int)*no_of_nodes); cudaMemcpy( cd, c, sizeof(int)*no_of_nodes,cudaMemcpyHostToDevice); //Allocate Boolean Array in current Iteration bool* f1d; cudaMalloc( (void**) &f1d, sizeof(bool)*no_of_nodes); cudaMemcpy( f1d, f1, sizeof(bool)*no_of_nodes,cudaMemcpyHostToDevice); //Allocate Boolean Array for next Iteration bool* f2d; cudaMalloc( (void**) &f2d, sizeof(bool)*no_of_nodes); cudaMemcpy( f2d, f2, sizeof(bool)*no_of_nodes,cudaMemcpyHostToDevice); bool* f3d; cudaMalloc( (void**) &f3d, sizeof(bool)*no_of_nodes); bool m; bool *md; cudaMalloc( (void**) &md, sizeof(bool)); printf("Copied Everything to Kernel"); // setup execution parameters dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); long long timer; timer = start_timer(); int k=0; printf("Start traversing the tree\n"); //Call the Kernel untill all the elements of Frontier are not false do { m=false; //if no thread changes this value then the loop stops cudaMemcpy( md, &m, sizeof(bool), cudaMemcpyHostToDevice) ; Kernel3<<< grid, threads, 0 >>>( d_graph_nodes, d_graph_edges,cd,f1d,f2d, no_of_nodes,md); // check if kernel execution generated and error cudaMemcpy( f1, f1d, sizeof(bool)*no_of_nodes,cudaMemcpyDeviceToHost); cudaMemcpy( f2, f2d, sizeof(bool)*no_of_nodes,cudaMemcpyDeviceToHost); cudaMemcpy( f1d, f2, sizeof(bool)*no_of_nodes,cudaMemcpyHostToDevice); cudaMemcpy( f2d, f1, sizeof(bool)*no_of_nodes,cudaMemcpyHostToDevice); k++; cudaMemcpy( &m,md , sizeof(bool), cudaMemcpyDeviceToHost) ; // printf("\n \n Return from kernel: %d",m); } while(m); printf("Kernel Executed %d times\n",k); // copy result from device to host cudaMemcpy( c,cd, sizeof(int)*no_of_nodes, cudaMemcpyDeviceToHost) ; //Store the result into a file FILE *fpo = fopen("result.txt","w"); for(unsigned int i=0;i<no_of_nodes;i++) fprintf(fpo,"%d) color:%d\n",i,c[i]); fclose(fpo); printf("Result stored in result.txt\n"); // cleanup memory free( h_graph_nodes); free( h_graph_edges); cudaFree(d_graph_nodes); cudaFree(d_graph_edges); cudaFree(cd); cudaFree(f1d); cudaFree(f2d); //cudaFree(md); stop_timer(timer, "Total Processing time"); } long long start_timer() { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000 + tv.tv_usec; } long long stop_timer(long long start_time, char *label) { struct timeval tv; gettimeofday(&tv, NULL); long long end_time = tv.tv_sec * 1000000 + tv.tv_usec; printf("%s: %.5f sec\n", label, ((float) (end_time - start_time)) / (1000 * 1000)); return end_time - start_time; }
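// ------------------------------------------------------------------------------------------------
// Illustrative aside: the loop in GPLGraph swaps the f1d/f2d frontiers by copying both arrays to
// the host and back on every iteration. Swapping the device pointers achieves the same effect
// without the four memcpys, because Kernel3 already clears every frontier entry it consumes; the
// memset below is only a defensive clear. Hypothetical helper, not called by the code above.
static void swapFrontiersOnDevice(bool** f1d, bool** f2d, int no_of_nodes) {
    bool* tmp = *f1d;
    *f1d = *f2d;        // old "next" frontier becomes the current one
    *f2d = tmp;         // old current frontier (already cleared by Kernel3) is reused
    cudaMemset(*f2d, 0, sizeof(bool) * (size_t)no_of_nodes);
}
// Note: Kernel3 reads f1d[i] without an "i < no_of_nodes" guard, so when no_of_nodes exceeds
// MAX_THREADS_PER_BLOCK and is not a multiple of it, the surplus threads of the last block index
// past the allocation; an early "if (i >= no_of_nodes) return;" would make the launch safe.
// ------------------------------------------------------------------------------------------------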
6cea112aa14ad966f5bfdb5175d7c700183a4e27.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "auto_arima.cuh" #include <cuml/tsa/auto_arima.h> #include <common/cumlHandle.hpp> namespace ML { int divide_by_mask_build_index(const cumlHandle& handle, const bool* d_mask, int* d_index, int batch_size) { hipStream_t stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); return ML::TimeSeries::divide_by_mask_build_index(d_mask, d_index, batch_size, allocator, stream); } template <typename DataT> inline void divide_by_mask_execute_helper(const cumlHandle& handle, const DataT* d_in, const bool* d_mask, const int* d_index, DataT* d_out0, DataT* d_out1, int batch_size, int n_obs) { hipStream_t stream = handle.getStream(); ML::TimeSeries::divide_by_mask_execute(d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs, stream); } void divide_by_mask_execute(const cumlHandle& handle, const float* d_in, const bool* d_mask, const int* d_index, float* d_out0, float* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void divide_by_mask_execute(const cumlHandle& handle, const double* d_in, const bool* d_mask, const int* d_index, double* d_out0, double* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void divide_by_mask_execute(const cumlHandle& handle, const int* d_in, const bool* d_mask, const int* d_index, int* d_out0, int* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } template <typename DataT> inline void divide_by_min_build_index_helper(const cumlHandle& handle, const DataT* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { hipStream_t stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); ML::TimeSeries::divide_by_min_build_index( d_matrix, d_batch, d_index, h_size, batch_size, n_sub, allocator, stream); } void divide_by_min_build_index(const cumlHandle& handle, const float* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } void divide_by_min_build_index(const cumlHandle& handle, const double* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } template <typename DataT> inline void divide_by_min_execute_helper(const cumlHandle& handle, const DataT* d_in, const int* d_batch, const int* d_index, DataT** hd_out, int batch_size, int n_sub, int n_obs) { hipStream_t stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); ML::TimeSeries::divide_by_min_execute(d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs, allocator, stream); } void 
divide_by_min_execute(const cumlHandle& handle, const float* d_in, const int* d_batch, const int* d_index, float** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const cumlHandle& handle, const double* d_in, const int* d_batch, const int* d_index, double** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const cumlHandle& handle, const int* d_in, const int* d_batch, const int* d_index, int** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void build_division_map(const cumlHandle& handle, const int* const* hd_id, const int* h_size, int* d_id_to_pos, int* d_id_to_model, int batch_size, int n_sub) { hipStream_t stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); ML::TimeSeries::build_division_map(hd_id, h_size, d_id_to_pos, d_id_to_model, batch_size, n_sub, allocator, stream); } template <typename DataT> inline void merge_series_helper(const cumlHandle& handle, const DataT* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, DataT* d_out, int batch_size, int n_sub, int n_obs) { hipStream_t stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); ML::TimeSeries::merge_series(hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs, allocator, stream); } void merge_series(const cumlHandle& handle, const float* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, float* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } void merge_series(const cumlHandle& handle, const double* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, double* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } } // namespace ML
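// ------------------------------------------------------------------------------------------------
// Illustrative aside: host-only sketch of the mask-splitting step the wrappers above dispatch to.
// A batch of series is split into two sub-batches by a per-series boolean flag; d_index records
// each series' position inside its sub-batch so results can be merged back later. Hypothetical
// reference code (here flagged series go to the second output; the library's convention may
// differ), not the cuML / ML::TimeSeries API.
#include <vector>
#include <utility>

static std::pair<std::vector<float>, std::vector<float>>
referenceDivideByMask(const std::vector<float>& in,    // batch_size * n_obs values, series-major
                      const std::vector<bool>& mask,   // one flag per series
                      int batch_size, int n_obs) {
    std::vector<float> out0, out1;
    for (int b = 0; b < batch_size; ++b) {
        auto& dst = mask[b] ? out1 : out0;
        dst.insert(dst.end(), in.begin() + b * n_obs, in.begin() + (b + 1) * n_obs);
    }
    return {out0, out1};
}
// ------------------------------------------------------------------------------------------------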
6cea112aa14ad966f5bfdb5175d7c700183a4e27.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "auto_arima.cuh" #include <cuml/tsa/auto_arima.h> #include <common/cumlHandle.hpp> namespace ML { int divide_by_mask_build_index(const cumlHandle& handle, const bool* d_mask, int* d_index, int batch_size) { cudaStream_t stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); return ML::TimeSeries::divide_by_mask_build_index(d_mask, d_index, batch_size, allocator, stream); } template <typename DataT> inline void divide_by_mask_execute_helper(const cumlHandle& handle, const DataT* d_in, const bool* d_mask, const int* d_index, DataT* d_out0, DataT* d_out1, int batch_size, int n_obs) { cudaStream_t stream = handle.getStream(); ML::TimeSeries::divide_by_mask_execute(d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs, stream); } void divide_by_mask_execute(const cumlHandle& handle, const float* d_in, const bool* d_mask, const int* d_index, float* d_out0, float* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void divide_by_mask_execute(const cumlHandle& handle, const double* d_in, const bool* d_mask, const int* d_index, double* d_out0, double* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void divide_by_mask_execute(const cumlHandle& handle, const int* d_in, const bool* d_mask, const int* d_index, int* d_out0, int* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } template <typename DataT> inline void divide_by_min_build_index_helper(const cumlHandle& handle, const DataT* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { cudaStream_t stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); ML::TimeSeries::divide_by_min_build_index( d_matrix, d_batch, d_index, h_size, batch_size, n_sub, allocator, stream); } void divide_by_min_build_index(const cumlHandle& handle, const float* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } void divide_by_min_build_index(const cumlHandle& handle, const double* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } template <typename DataT> inline void divide_by_min_execute_helper(const cumlHandle& handle, const DataT* d_in, const int* d_batch, const int* d_index, DataT** hd_out, int batch_size, int n_sub, int n_obs) { cudaStream_t stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); ML::TimeSeries::divide_by_min_execute(d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs, allocator, stream); } void divide_by_min_execute(const cumlHandle& handle, const float* d_in, 
const int* d_batch, const int* d_index, float** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const cumlHandle& handle, const double* d_in, const int* d_batch, const int* d_index, double** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const cumlHandle& handle, const int* d_in, const int* d_batch, const int* d_index, int** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void build_division_map(const cumlHandle& handle, const int* const* hd_id, const int* h_size, int* d_id_to_pos, int* d_id_to_model, int batch_size, int n_sub) { cudaStream_t stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); ML::TimeSeries::build_division_map(hd_id, h_size, d_id_to_pos, d_id_to_model, batch_size, n_sub, allocator, stream); } template <typename DataT> inline void merge_series_helper(const cumlHandle& handle, const DataT* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, DataT* d_out, int batch_size, int n_sub, int n_obs) { cudaStream_t stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); ML::TimeSeries::merge_series(hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs, allocator, stream); } void merge_series(const cumlHandle& handle, const float* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, float* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } void merge_series(const cumlHandle& handle, const double* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, double* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } } // namespace ML
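// ------------------------------------------------------------------------------------------------
// Illustrative aside: a minimal call-order sketch for the mask-splitting entry points defined
// above. All device buffers are assumed to be allocated and filled by the caller; the int
// returned by divide_by_mask_build_index is taken here to be the size of one sub-batch, which is
// an assumption about the helper's contract rather than documented behavior.
static int exampleDivideByMask(const ML::cumlHandle& handle, const float* d_in, const bool* d_mask,
                               int* d_index, float* d_out0, float* d_out1,
                               int batch_size, int n_obs) {
    // 1) build each series' position inside its sub-batch
    const int sub_size = ML::divide_by_mask_build_index(handle, d_mask, d_index, batch_size);
    // 2) scatter the series into the two sub-batches using that index
    ML::divide_by_mask_execute(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs);
    return sub_size;
}
// ------------------------------------------------------------------------------------------------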
279feb1cff48f89dfd5edc9c6b4aa16dfc4b116a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @File nbody.cu * * Implementation of the N-Body problem * * Paraleln programovn na GPU (PCG 2020) * Projekt c. 1 (cuda) * Login: xpavel34 */ #include <cmath> #include <cfloat> #include "nbody.h" /** * CUDA kernel to calculate velocity * @param p_in - particles * @param tmp_vel - temp array for velocities * @param N - Number of particles * @param dt - Size of the time step */ __global__ void calculate_velocity(t_particles p_in, t_particles p_out, int N, float dt) { extern __shared__ float cache[]; float *posX = cache; float *posY = &cache[blockDim.x]; float *posZ = &cache[blockDim.x * 2]; float *velX = &cache[blockDim.x * 3]; float *velY = &cache[blockDim.x * 4]; float *velZ = &cache[blockDim.x * 5]; float *weights = &cache[blockDim.x * 6]; unsigned int threadsTotal = gridDim.x * blockDim.x; unsigned int gridSteps = ceil(float(N) / threadsTotal); unsigned int tileWidth = blockDim.x; unsigned int tileCount = ceil(float(N) / tileWidth); /// Grid stride loop (if there's not enough total threads to cover all particles) for (unsigned int gridIdx = 0; gridIdx < gridSteps; gridIdx++) { float dx, dy, dz; float accVelocityX = 0; float accVelocityY = 0; float accVelocityZ = 0; unsigned int globalIdx = (gridIdx * threadsTotal) + (blockIdx.x * blockDim.x + threadIdx.x); bool inBounds = globalIdx < N; float p1_x = inBounds ? p_in.positionsX[globalIdx] : 0.0f; float p1_y = inBounds ? p_in.positionsY[globalIdx] : 0.0f; float p1_z = inBounds ? p_in.positionsZ[globalIdx] : 0.0f; float p1_vel_x = inBounds ? p_in.velocitiesX[globalIdx] : 0.0f; float p1_vel_y = inBounds ? p_in.velocitiesY[globalIdx] : 0.0f; float p1_vel_z = inBounds ? p_in.velocitiesZ[globalIdx] : 0.0f; float p1_weight = inBounds ? p_in.weights[globalIdx] : 0.0f; /// Loop over all tiles with each thread for (unsigned int tileIdx = 0; tileIdx < tileCount; tileIdx++) { unsigned int tileOffset = tileIdx * blockDim.x; unsigned int threadOffset = tileOffset + threadIdx.x; posX[threadIdx.x] = (threadOffset < N) ? p_in.positionsX[threadOffset] : 0.0f; posY[threadIdx.x] = (threadOffset < N) ? p_in.positionsY[threadOffset] : 0.0f; posZ[threadIdx.x] = (threadOffset < N) ? p_in.positionsZ[threadOffset] : 0.0f; velX[threadIdx.x] = (threadOffset < N) ? p_in.velocitiesX[threadOffset] : 0.0f; velY[threadIdx.x] = (threadOffset < N) ? p_in.velocitiesY[threadOffset] : 0.0f; velZ[threadIdx.x] = (threadOffset < N) ? p_in.velocitiesZ[threadOffset] : 0.0f; weights[threadIdx.x] = (threadOffset < N) ? p_in.weights[threadOffset] : 0.0f; /// Synchronize threads before using shared memory __syncthreads(); /// Loop over all points in a single tile for (int p2_idx = 0; p2_idx < tileWidth; p2_idx++) { dx = p1_x - posX[p2_idx]; dy = p1_y - posY[p2_idx]; dz = p1_z - posZ[p2_idx]; float rr = dx * dx + dy * dy + dz * dz; float r = sqrt(rr); if (r > COLLISION_DISTANCE) { // Fg*dt/m1/r = G*m1*m2*dt / r^3 / m1 = G*dt/r^3 * m2 // vx = - Fx*dt/m2 = - Fg*dt/m2 * dx/r = - Fg*dt/m2/r * dx float r3 = rr * r + FLT_MIN; float G_dt_r3 = -G * dt / r3; float Fg_dt_m2_r = G_dt_r3 * weights[p2_idx]; accVelocityX += Fg_dt_m2_r * dx; accVelocityY += Fg_dt_m2_r * dy; accVelocityZ += Fg_dt_m2_r * dz; } else { float weightSum = p1_weight + weights[p2_idx]; float weightDiff = p1_weight - weights[p2_idx]; float p2_w2 = 2 * weights[p2_idx]; bool colliding = r > 0.0f; accVelocityX += colliding ? ((p1_vel_x * weightDiff + p2_w2 * velX[p2_idx]) / weightSum) - p1_vel_x : 0.0f; accVelocityY += colliding ? 
((p1_vel_y * weightDiff + p2_w2 * velY[p2_idx]) / weightSum) - p1_vel_y : 0.0f; accVelocityZ += colliding ? ((p1_vel_z * weightDiff + p2_w2 * velZ[p2_idx]) / weightSum) - p1_vel_z : 0.0f; } } /// Wait for all threads to finish to avoid overwritten shared memory __syncthreads(); } if (globalIdx < N) { p_out.velocitiesX[globalIdx] = p1_vel_x + accVelocityX; p_out.velocitiesY[globalIdx] = p1_vel_y + accVelocityY; p_out.velocitiesZ[globalIdx] = p1_vel_z + accVelocityZ; p_out.positionsX[globalIdx] = p1_x + p_out.velocitiesX[globalIdx] * dt; p_out.positionsY[globalIdx] = p1_y + p_out.velocitiesY[globalIdx] * dt; p_out.positionsZ[globalIdx] = p1_z + p_out.velocitiesZ[globalIdx] * dt; } } }// end of calculate_gravitation_velocity /** * CUDA kernel to update particles * @param p - particles * @param comX - pointer to a center of mass position in X * @param comY - pointer to a center of mass position in Y * @param comZ - pointer to a center of mass position in Z * @param comW - pointer to a center of mass weight * @param lock - pointer to a user-implemented lock * @param N - Number of particles */ __global__ void centerOfMass(t_particles p, float *comX, float *comY, float *comZ, float *comW, int *lock, const int N) { extern __shared__ float sharedCOMs[]; float *posX = sharedCOMs; float *posY = &sharedCOMs[blockDim.x]; float *posZ = &sharedCOMs[blockDim.x * 2]; float *weights = &sharedCOMs[blockDim.x * 3]; unsigned int threadsTotal = gridDim.x * blockDim.x; unsigned int gridSteps = ceil(float(N) / threadsTotal); /// Local accumulator for COM (in case one thread block runs more than once) float4 comLocal = {0.0f, 0.0f, 0.0f, 0.0f}; /// Grid stride loop (if there's not enough total threads to cover all particles) for (unsigned int gridIdx = 0; gridIdx < gridSteps; gridIdx++) { unsigned int globalIdx = (gridIdx * threadsTotal) + (blockIdx.x * blockDim.x + threadIdx.x); bool inBounds = globalIdx < N; float weight = inBounds ? p.weights[globalIdx] : 0.0f; float dx = inBounds ? p.positionsX[globalIdx] : 0.0f; float dy = inBounds ? p.positionsY[globalIdx] : 0.0f; float dz = inBounds ? p.positionsZ[globalIdx] : 0.0f; float dw = (weight > 0.0f) ? 1.0f : 0.0f; posX[threadIdx.x] = dx * dw; posY[threadIdx.x] = dy * dw; posZ[threadIdx.x] = dz * dw; weights[threadIdx.x] = weight; __syncthreads(); /// Block level reduction in shared memory for (unsigned int stride = blockDim.x >> 1ul; stride > 0; stride >>= 1ul) { if (threadIdx.x < stride) { dx = posX[threadIdx.x + stride] - posX[threadIdx.x]; dy = posY[threadIdx.x + stride] - posY[threadIdx.x]; dz = posZ[threadIdx.x + stride] - posZ[threadIdx.x]; weight = weights[threadIdx.x + stride]; dw = ((weight + weights[threadIdx.x]) > 0.0f) ? (weight / (weight + weights[threadIdx.x])) : 0.0f; posX[threadIdx.x] += dx * dw; posY[threadIdx.x] += dy * dw; posZ[threadIdx.x] += dz * dw; weights[threadIdx.x] += weights[threadIdx.x + stride]; } __syncthreads(); } /// Merge COMs across multile grid steps (if there's more than 1, otherwise result is unchanged) if (threadIdx.x == 0 && globalIdx < N) { dw = ((weights[0] + comLocal.w) > 0.0f) ? (weights[0] / (weights[0] + comLocal.w)) : 0.0f; comLocal.x += (posX[0] - comLocal.x) * dw; comLocal.y += (posY[0] - comLocal.y) * dw; comLocal.z += (posZ[0] - comLocal.z) * dw; comLocal.w += weights[0]; } } /// Global reduction if (threadIdx.x == 0) { while (atomicExch(lock, 1u) != 0u); /// Lock float dw = ((comLocal.w + *comW) > 0.0f) ? 
(comLocal.w / (comLocal.w + *comW)) : 0.0f; *comX += (comLocal.x - *comX) * dw; *comY += (comLocal.y - *comY) * dw; *comZ += (comLocal.z - *comZ) * dw; *comW += comLocal.w; atomicExch(lock, 0u); /// Unlock } }// end of centerOfMass //---------------------------------------------------------------------------------------------------------------------- /** * CPU implementation of the Center of Mass calculation * @param particles - All particles in the system * @param N - Number of particles */ __host__ float4 centerOfMassCPU(MemDesc &memDesc) { float4 com = {0, 0, 0, 0}; for (int i = 0; i < memDesc.getDataSize(); i++) { // Calculate the vector on the line connecting points and most recent position of center-of-mass const float dx = memDesc.getPosX(i) - com.x; const float dy = memDesc.getPosY(i) - com.y; const float dz = memDesc.getPosZ(i) - com.z; // Calculate weight ratio only if at least one particle isn't massless const float dw = ((memDesc.getWeight(i) + com.w) > 0.0f) ? (memDesc.getWeight(i) / (memDesc.getWeight(i) + com.w)) : 0.0f; // Update position and weight of the center-of-mass according to the weight ration and vector com.x += dx * dw; com.y += dy * dw; com.z += dz * dw; com.w += memDesc.getWeight(i); } return com; }// enf of centerOfMassCPU //----------------------------------------------------------------------------------------------------------------------
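A minimal launch sketch for the HIP variant above, assuming t_particles (from nbody.h) carries device pointers and that a thread-block size of 128 is chosen; the names nbody_step_hip, d_in and d_out are hypothetical, not project code. The kernel tiles seven per-particle float arrays in dynamic shared memory, so the launch must pass 7 * blockDim.x * sizeof(float) bytes.

#include <hip/hip_runtime.h>
#include "nbody.h"

// Hypothetical host-side step: d_in/d_out and the block size are assumptions.
void nbody_step_hip(t_particles d_in, t_particles d_out, int N, float dt)
{
    const int block = 128;                       // assumed thread-block size
    const int grid  = (N + block - 1) / block;   // one thread per particle
    // calculate_velocity partitions its extern __shared__ cache into
    // posX/posY/posZ/velX/velY/velZ/weights, each blockDim.x floats wide.
    const size_t sharedBytes = 7u * block * sizeof(float);

    hipLaunchKernelGGL(calculate_velocity, dim3(grid), dim3(block), sharedBytes, 0,
                       d_in, d_out, N, dt);
}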
279feb1cff48f89dfd5edc9c6b4aa16dfc4b116a.cu
/** * @File nbody.cu * * Implementation of the N-Body problem * * Paralelní programování na GPU (PCG 2020) * Projekt c. 1 (cuda) * Login: xpavel34 */ #include <cmath> #include <cfloat> #include "nbody.h" /** * CUDA kernel to calculate velocity * @param p_in - particles * @param tmp_vel - temp array for velocities * @param N - Number of particles * @param dt - Size of the time step */ __global__ void calculate_velocity(t_particles p_in, t_particles p_out, int N, float dt) { extern __shared__ float cache[]; float *posX = cache; float *posY = &cache[blockDim.x]; float *posZ = &cache[blockDim.x * 2]; float *velX = &cache[blockDim.x * 3]; float *velY = &cache[blockDim.x * 4]; float *velZ = &cache[blockDim.x * 5]; float *weights = &cache[blockDim.x * 6]; unsigned int threadsTotal = gridDim.x * blockDim.x; unsigned int gridSteps = ceil(float(N) / threadsTotal); unsigned int tileWidth = blockDim.x; unsigned int tileCount = ceil(float(N) / tileWidth); /// Grid stride loop (if there's not enough total threads to cover all particles) for (unsigned int gridIdx = 0; gridIdx < gridSteps; gridIdx++) { float dx, dy, dz; float accVelocityX = 0; float accVelocityY = 0; float accVelocityZ = 0; unsigned int globalIdx = (gridIdx * threadsTotal) + (blockIdx.x * blockDim.x + threadIdx.x); bool inBounds = globalIdx < N; float p1_x = inBounds ? p_in.positionsX[globalIdx] : 0.0f; float p1_y = inBounds ? p_in.positionsY[globalIdx] : 0.0f; float p1_z = inBounds ? p_in.positionsZ[globalIdx] : 0.0f; float p1_vel_x = inBounds ? p_in.velocitiesX[globalIdx] : 0.0f; float p1_vel_y = inBounds ? p_in.velocitiesY[globalIdx] : 0.0f; float p1_vel_z = inBounds ? p_in.velocitiesZ[globalIdx] : 0.0f; float p1_weight = inBounds ? p_in.weights[globalIdx] : 0.0f; /// Loop over all tiles with each thread for (unsigned int tileIdx = 0; tileIdx < tileCount; tileIdx++) { unsigned int tileOffset = tileIdx * blockDim.x; unsigned int threadOffset = tileOffset + threadIdx.x; posX[threadIdx.x] = (threadOffset < N) ? p_in.positionsX[threadOffset] : 0.0f; posY[threadIdx.x] = (threadOffset < N) ? p_in.positionsY[threadOffset] : 0.0f; posZ[threadIdx.x] = (threadOffset < N) ? p_in.positionsZ[threadOffset] : 0.0f; velX[threadIdx.x] = (threadOffset < N) ? p_in.velocitiesX[threadOffset] : 0.0f; velY[threadIdx.x] = (threadOffset < N) ? p_in.velocitiesY[threadOffset] : 0.0f; velZ[threadIdx.x] = (threadOffset < N) ? p_in.velocitiesZ[threadOffset] : 0.0f; weights[threadIdx.x] = (threadOffset < N) ? p_in.weights[threadOffset] : 0.0f; /// Synchronize threads before using shared memory __syncthreads(); /// Loop over all points in a single tile for (int p2_idx = 0; p2_idx < tileWidth; p2_idx++) { dx = p1_x - posX[p2_idx]; dy = p1_y - posY[p2_idx]; dz = p1_z - posZ[p2_idx]; float rr = dx * dx + dy * dy + dz * dz; float r = sqrt(rr); if (r > COLLISION_DISTANCE) { // Fg*dt/m1/r = G*m1*m2*dt / r^3 / m1 = G*dt/r^3 * m2 // vx = - Fx*dt/m2 = - Fg*dt/m2 * dx/r = - Fg*dt/m2/r * dx float r3 = rr * r + FLT_MIN; float G_dt_r3 = -G * dt / r3; float Fg_dt_m2_r = G_dt_r3 * weights[p2_idx]; accVelocityX += Fg_dt_m2_r * dx; accVelocityY += Fg_dt_m2_r * dy; accVelocityZ += Fg_dt_m2_r * dz; } else { float weightSum = p1_weight + weights[p2_idx]; float weightDiff = p1_weight - weights[p2_idx]; float p2_w2 = 2 * weights[p2_idx]; bool colliding = r > 0.0f; accVelocityX += colliding ? ((p1_vel_x * weightDiff + p2_w2 * velX[p2_idx]) / weightSum) - p1_vel_x : 0.0f; accVelocityY += colliding ? 
((p1_vel_y * weightDiff + p2_w2 * velY[p2_idx]) / weightSum) - p1_vel_y : 0.0f; accVelocityZ += colliding ? ((p1_vel_z * weightDiff + p2_w2 * velZ[p2_idx]) / weightSum) - p1_vel_z : 0.0f; } } /// Wait for all threads to finish to avoid overwritten shared memory __syncthreads(); } if (globalIdx < N) { p_out.velocitiesX[globalIdx] = p1_vel_x + accVelocityX; p_out.velocitiesY[globalIdx] = p1_vel_y + accVelocityY; p_out.velocitiesZ[globalIdx] = p1_vel_z + accVelocityZ; p_out.positionsX[globalIdx] = p1_x + p_out.velocitiesX[globalIdx] * dt; p_out.positionsY[globalIdx] = p1_y + p_out.velocitiesY[globalIdx] * dt; p_out.positionsZ[globalIdx] = p1_z + p_out.velocitiesZ[globalIdx] * dt; } } }// end of calculate_gravitation_velocity /** * CUDA kernel to update particles * @param p - particles * @param comX - pointer to a center of mass position in X * @param comY - pointer to a center of mass position in Y * @param comZ - pointer to a center of mass position in Z * @param comW - pointer to a center of mass weight * @param lock - pointer to a user-implemented lock * @param N - Number of particles */ __global__ void centerOfMass(t_particles p, float *comX, float *comY, float *comZ, float *comW, int *lock, const int N) { extern __shared__ float sharedCOMs[]; float *posX = sharedCOMs; float *posY = &sharedCOMs[blockDim.x]; float *posZ = &sharedCOMs[blockDim.x * 2]; float *weights = &sharedCOMs[blockDim.x * 3]; unsigned int threadsTotal = gridDim.x * blockDim.x; unsigned int gridSteps = ceil(float(N) / threadsTotal); /// Local accumulator for COM (in case one thread block runs more than once) float4 comLocal = {0.0f, 0.0f, 0.0f, 0.0f}; /// Grid stride loop (if there's not enough total threads to cover all particles) for (unsigned int gridIdx = 0; gridIdx < gridSteps; gridIdx++) { unsigned int globalIdx = (gridIdx * threadsTotal) + (blockIdx.x * blockDim.x + threadIdx.x); bool inBounds = globalIdx < N; float weight = inBounds ? p.weights[globalIdx] : 0.0f; float dx = inBounds ? p.positionsX[globalIdx] : 0.0f; float dy = inBounds ? p.positionsY[globalIdx] : 0.0f; float dz = inBounds ? p.positionsZ[globalIdx] : 0.0f; float dw = (weight > 0.0f) ? 1.0f : 0.0f; posX[threadIdx.x] = dx * dw; posY[threadIdx.x] = dy * dw; posZ[threadIdx.x] = dz * dw; weights[threadIdx.x] = weight; __syncthreads(); /// Block level reduction in shared memory for (unsigned int stride = blockDim.x >> 1ul; stride > 0; stride >>= 1ul) { if (threadIdx.x < stride) { dx = posX[threadIdx.x + stride] - posX[threadIdx.x]; dy = posY[threadIdx.x + stride] - posY[threadIdx.x]; dz = posZ[threadIdx.x + stride] - posZ[threadIdx.x]; weight = weights[threadIdx.x + stride]; dw = ((weight + weights[threadIdx.x]) > 0.0f) ? (weight / (weight + weights[threadIdx.x])) : 0.0f; posX[threadIdx.x] += dx * dw; posY[threadIdx.x] += dy * dw; posZ[threadIdx.x] += dz * dw; weights[threadIdx.x] += weights[threadIdx.x + stride]; } __syncthreads(); } /// Merge COMs across multile grid steps (if there's more than 1, otherwise result is unchanged) if (threadIdx.x == 0 && globalIdx < N) { dw = ((weights[0] + comLocal.w) > 0.0f) ? (weights[0] / (weights[0] + comLocal.w)) : 0.0f; comLocal.x += (posX[0] - comLocal.x) * dw; comLocal.y += (posY[0] - comLocal.y) * dw; comLocal.z += (posZ[0] - comLocal.z) * dw; comLocal.w += weights[0]; } } /// Global reduction if (threadIdx.x == 0) { while (atomicExch(lock, 1u) != 0u); /// Lock float dw = ((comLocal.w + *comW) > 0.0f) ? 
(comLocal.w / (comLocal.w + *comW)) : 0.0f; *comX += (comLocal.x - *comX) * dw; *comY += (comLocal.y - *comY) * dw; *comZ += (comLocal.z - *comZ) * dw; *comW += comLocal.w; atomicExch(lock, 0u); /// Unlock } }// end of centerOfMass //---------------------------------------------------------------------------------------------------------------------- /** * CPU implementation of the Center of Mass calculation * @param particles - All particles in the system * @param N - Number of particles */ __host__ float4 centerOfMassCPU(MemDesc &memDesc) { float4 com = {0, 0, 0, 0}; for (int i = 0; i < memDesc.getDataSize(); i++) { // Calculate the vector on the line connecting points and most recent position of center-of-mass const float dx = memDesc.getPosX(i) - com.x; const float dy = memDesc.getPosY(i) - com.y; const float dz = memDesc.getPosZ(i) - com.z; // Calculate weight ratio only if at least one particle isn't massless const float dw = ((memDesc.getWeight(i) + com.w) > 0.0f) ? (memDesc.getWeight(i) / (memDesc.getWeight(i) + com.w)) : 0.0f; // Update position and weight of the center-of-mass according to the weight ration and vector com.x += dx * dw; com.y += dy * dw; com.z += dz * dw; com.w += memDesc.getWeight(i); } return com; }// enf of centerOfMassCPU //----------------------------------------------------------------------------------------------------------------------
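For the .cu variant, a corresponding sketch for the centerOfMass kernel; the helper name, buffer names and block size are again assumptions rather than project code. The kernel reduces four shared-memory tiles (positions and weights), so it needs 4 * blockDim.x * sizeof(float) bytes of dynamic shared memory, and it expects the output accumulators and the spin lock to start at zero.

#include <cuda_runtime.h>
#include "nbody.h"

// Hypothetical helper: computes the centre of mass of d_p into *h_com.
void compute_com_cuda(t_particles d_p, int N, float4 *h_com)
{
    float *d_com = nullptr;   // packed as comX, comY, comZ, comW
    int   *d_lock = nullptr;
    cudaMalloc(&d_com, 4 * sizeof(float));
    cudaMalloc(&d_lock, sizeof(int));
    cudaMemset(d_com, 0, 4 * sizeof(float));   // kernel accumulates into these
    cudaMemset(d_lock, 0, sizeof(int));        // lock must start unlocked

    const int block = 128;                                   // assumed block size
    const int grid  = (N + block - 1) / block;
    const size_t sharedBytes = 4u * block * sizeof(float);   // posX/posY/posZ/weights

    centerOfMass<<<grid, block, sharedBytes>>>(d_p, &d_com[0], &d_com[1],
                                               &d_com[2], &d_com[3], d_lock, N);

    float com[4];
    cudaMemcpy(com, d_com, sizeof(com), cudaMemcpyDeviceToHost);
    *h_com = make_float4(com[0], com[1], com[2], com[3]);
    cudaFree(d_com);
    cudaFree(d_lock);
}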
e021f2d6361162ac747f6e5f68e8d1922f9584f4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaErrorCheck.cu" #include "cudaSolver.h" /* * Author: Mikers, Azlehria, lwYeo * Date: March 4 - September 2018 for 0xbitcoin dev * * based off of https://github.com/Dunhili/SHA3-gpu-brute-force-cracker/blob/master/sha3.cu * * Author: Brian Bowden * Date: 5/12/14 * * This is the parallel version of SHA-3. */ typedef union { uint2 uint2; uint64_t uint64; uint8_t uint8[UINT64_LENGTH]; } nonce_t; __constant__ int const mod5[8] = { 0, 1, 2, 3, 4, 0, 1, 2 }; __constant__ uint64_t d_midstate[25]; __constant__ uint64_t d_target[1]; __device__ __forceinline__ nonce_t bswap_64(nonce_t const input) { nonce_t output; asm("{" " prmt.b32 %0, %3, 0, 0x0123;" " prmt.b32 %1, %2, 0, 0x0123;" "}" : "=r"(output.uint2.x), "=r"(output.uint2.y) : "r"(input.uint2.x), "r"(input.uint2.y)); return output; } __device__ __forceinline__ nonce_t xor5(nonce_t const a, nonce_t const b, nonce_t const c, nonce_t const d, nonce_t const e) { nonce_t output; #if __CUDA_ARCH__ >= 500 asm("{" " lop3.b32 %0, %2, %4, %6, 0x96;" " lop3.b32 %1, %3, %5, %7, 0x96;" " lop3.b32 %0, %0, %8, %10, 0x96;" " lop3.b32 %1, %1, %9, %11, 0x96;" "}" : "=r"(output.uint2.x), "=r"(output.uint2.y) : "r"(a.uint2.x), "r"(a.uint2.y), "r"(b.uint2.x), "r"(b.uint2.y), "r"(c.uint2.x), "r"(c.uint2.y), "r"(d.uint2.x), "r"(d.uint2.y), "r"(e.uint2.x), "r"(e.uint2.y)); #else asm("{" " xor.b64 %0, %1, %2;" " xor.b64 %0, %0, %3;" " xor.b64 %0, %0, %4;" " xor.b64 %0, %0, %5;" "}" : "=l"(output.uint64) : "l"(a.uint64), "l"(b.uint64), "l"(c.uint64), "l"(d.uint64), "l"(e.uint64)); #endif return output; } __device__ __forceinline__ nonce_t xor3(nonce_t const a, nonce_t const b, nonce_t const c) { nonce_t output; #if __CUDA_ARCH__ >= 500 asm("{" " lop3.b32 %0, %2, %4, %6, 0x96;" " lop3.b32 %1, %3, %5, %7, 0x96;" "}" : "=r"(output.uint2.x), "=r"(output.uint2.y) : "r"(a.uint2.x), "r"(a.uint2.y), "r"(b.uint2.x), "r"(b.uint2.y), "r"(c.uint2.x), "r"(c.uint2.y)); #else asm("{" " xor.b64 %0, %1, %2;" " xor.b64 %0, %0, %3;" "}" : "=l"(output.uint64) : "l"(a.uint64), "l"(b.uint64), "l"(c.uint64)); #endif return output; } __device__ __forceinline__ nonce_t chi(nonce_t const a, nonce_t const b, nonce_t const c) { nonce_t output; #if __CUDA_ARCH__ >= 500 asm("{" " lop3.b32 %0, %2, %4, %6, 0xD2;" " lop3.b32 %1, %3, %5, %7, 0xD2;" "}" : "=r"(output.uint2.x), "=r"(output.uint2.y) : "r"(a.uint2.x), "r"(a.uint2.y), "r"(b.uint2.x), "r"(b.uint2.y), "r"(c.uint2.x), "r"(c.uint2.y)); #else output.uint64 = a.uint64 ^ ((~b.uint64) & c.uint64); #endif return output; } __device__ __forceinline__ nonce_t rotl(nonce_t input, uint32_t const offset) { #if __CUDA_ARCH__ >= 320 asm("{" " .reg .b32 tmp;" " shf.l.wrap.b32 tmp, %1, %0, %2;" " shf.l.wrap.b32 %1, %0, %1, %2;" " mov.b32 %0, tmp;" "}" : "+r"(input.uint2.x), "+r"(input.uint2.y) : "r"(offset)); #else input.uint64 = (input.uint64 << offset) ^ (input.uint64 >> (64u - offset)); #endif return input; } __device__ __forceinline__ nonce_t rotr(nonce_t input, uint32_t const offset) { #if __CUDA_ARCH__ >= 320 asm("{" " .reg .b32 tmp;" " shf.r.wrap.b32 tmp, %0, %1, %2;" " shf.r.wrap.b32 %1, %1, %0, %2;" " mov.b32 %0, tmp;" "}" : "+r"(input.uint2.x), "+r"(input.uint2.y) : "r"(offset)); #else input.uint64 = (input.uint64 >> offset) ^ (input.uint64 << (64u - offset)); #endif return input; } __global__ void hashMidstate(uint64_t *__restrict__ solutions, uint32_t *__restrict__ solutionCount, uint64_t startPosition) { nonce_t nonce, state[25], C[5], D[5], 
n[11]; nonce.uint64 = blockDim.x * blockIdx.x + threadIdx.x + startPosition; n[0] = rotl(nonce, 7); n[1] = rotl(n[0], 1); n[2] = rotl(n[1], 6); n[3] = rotl(n[2], 2); n[4] = rotl(n[3], 4); n[5] = rotl(n[4], 7); n[6] = rotl(n[5], 12); n[7] = rotl(n[6], 5); n[8] = rotl(n[7], 11); n[9] = rotl(n[8], 7); n[10] = rotl(n[9], 1); C[0].uint64 = d_midstate[0]; C[1].uint64 = d_midstate[1]; C[2].uint64 = d_midstate[2] ^ n[7].uint64; C[3].uint64 = d_midstate[3]; C[4].uint64 = d_midstate[4] ^ n[2].uint64; state[0].uint64 = chi(C[0], C[1], C[2]).uint64 ^ Keccak_f1600_RC[0]; state[1] = chi(C[1], C[2], C[3]); state[2] = chi(C[2], C[3], C[4]); state[3] = chi(C[3], C[4], C[0]); state[4] = chi(C[4], C[0], C[1]); C[0].uint64 = d_midstate[5]; C[1].uint64 = d_midstate[6] ^ n[4].uint64; C[2].uint64 = d_midstate[7]; C[3].uint64 = d_midstate[8]; C[4].uint64 = d_midstate[9] ^ n[9].uint64; state[5] = chi(C[0], C[1], C[2]); state[6] = chi(C[1], C[2], C[3]); state[7] = chi(C[2], C[3], C[4]); state[8] = chi(C[3], C[4], C[0]); state[9] = chi(C[4], C[0], C[1]); C[0].uint64 = d_midstate[10]; C[1].uint64 = d_midstate[11] ^ n[0].uint64; C[2].uint64 = d_midstate[12]; C[3].uint64 = d_midstate[13] ^ n[1].uint64; C[4].uint64 = d_midstate[14]; state[10] = chi(C[0], C[1], C[2]); state[11] = chi(C[1], C[2], C[3]); state[12] = chi(C[2], C[3], C[4]); state[13] = chi(C[3], C[4], C[0]); state[14] = chi(C[4], C[0], C[1]); C[0].uint64 = d_midstate[15] ^ n[5].uint64; C[1].uint64 = d_midstate[16]; C[2].uint64 = d_midstate[17]; C[3].uint64 = d_midstate[18] ^ n[3].uint64; C[4].uint64 = d_midstate[19]; state[15] = chi(C[0], C[1], C[2]); state[16] = chi(C[1], C[2], C[3]); state[17] = chi(C[2], C[3], C[4]); state[18] = chi(C[3], C[4], C[0]); state[19] = chi(C[4], C[0], C[1]); C[0].uint64 = d_midstate[20] ^ n[10].uint64; C[1].uint64 = d_midstate[21] ^ n[8].uint64; C[2].uint64 = d_midstate[22] ^ n[6].uint64; C[3].uint64 = d_midstate[23]; C[4].uint64 = d_midstate[24]; state[20] = chi(C[0], C[1], C[2]); state[21] = chi(C[1], C[2], C[3]); state[22] = chi(C[2], C[3], C[4]); state[23] = chi(C[3], C[4], C[0]); state[24] = chi(C[4], C[0], C[1]); #if __CUDA_ARCH__ >= 350 # pragma unroll #endif for (int i{ 1 }; i < 23; ++i) { C[1] = xor5(state[0], state[5], state[10], state[15], state[20]); C[2] = xor5(state[1], state[6], state[11], state[16], state[21]); C[3] = xor5(state[2], state[7], state[12], state[17], state[22]); C[4] = xor5(state[3], state[8], state[13], state[18], state[23]); C[0] = xor5(state[4], state[9], state[14], state[19], state[24]); #if __CUDA_ARCH__ >= 350 # pragma unroll #endif for (int x{ 0 }; x < 5; ++x) { #if __CUDA_ARCH__ >= 350 D[x] = rotl(C[mod5[x + 2]], 1); state[x] = xor3(state[x], D[x], C[x]); state[x + 5] = xor3(state[x + 5], D[x], C[x]); state[x + 10] = xor3(state[x + 10], D[x], C[x]); state[x + 15] = xor3(state[x + 15], D[x], C[x]); state[x + 20] = xor3(state[x + 20], D[x], C[x]); #else D[x].uint64 = rotl(C[mod5[x + 2]], 1).uint64 ^ C[x].uint64; state[x].uint64 = state[x].uint64 ^ D[x].uint64; state[x + 5].uint64 = state[x + 5].uint64 ^ D[x].uint64; state[x + 10].uint64 = state[x + 10].uint64 ^ D[x].uint64; state[x + 15].uint64 = state[x + 15].uint64 ^ D[x].uint64; state[x + 20].uint64 = state[x + 20].uint64 ^ D[x].uint64; #endif } C[0] = state[1]; state[1] = rotr(state[6], 20); state[6] = rotl(state[9], 20); state[9] = rotr(state[22], 3); state[22] = rotr(state[14], 25); state[14] = rotl(state[20], 18); state[20] = rotr(state[2], 2); state[2] = rotr(state[12], 21); state[12] = rotl(state[13], 25); state[13] = rotl(state[19], 
8); state[19] = rotr(state[23], 8); state[23] = rotr(state[15], 23); state[15] = rotl(state[4], 27); state[4] = rotl(state[24], 14); state[24] = rotl(state[21], 2); state[21] = rotr(state[8], 9); state[8] = rotr(state[16], 19); state[16] = rotr(state[5], 28); state[5] = rotl(state[3], 28); state[3] = rotl(state[18], 21); state[18] = rotl(state[17], 15); state[17] = rotl(state[11], 10); state[11] = rotl(state[7], 6); state[7] = rotl(state[10], 3); state[10] = rotl(C[0], 1); #if __CUDA_ARCH__ >= 350 # pragma unroll #endif for (int x{ 0 }; x < 25; x += 5) { C[0] = state[x]; C[1] = state[x + 1]; C[2] = state[x + 2]; C[3] = state[x + 3]; C[4] = state[x + 4]; state[x] = chi(C[0], C[1], C[2]); state[x + 1] = chi(C[1], C[2], C[3]); state[x + 2] = chi(C[2], C[3], C[4]); state[x + 3] = chi(C[3], C[4], C[0]); state[x + 4] = chi(C[4], C[0], C[1]); } state[0].uint64 = state[0].uint64 ^ Keccak_f1600_RC[i]; } C[1] = xor5(state[0], state[5], state[10], state[15], state[20]); C[2] = xor5(state[1], state[6], state[11], state[16], state[21]); C[3] = xor5(state[2], state[7], state[12], state[17], state[22]); C[4] = xor5(state[3], state[8], state[13], state[18], state[23]); C[0] = xor5(state[4], state[9], state[14], state[19], state[24]); D[0] = rotl(C[2], 1); D[1] = rotl(C[3], 1); D[2] = rotl(C[4], 1); state[0] = xor3(state[0], D[0], C[0]); state[6] = xor3(state[6], D[1], C[1]); state[12] = xor3(state[12], D[2], C[2]); state[6] = rotr(state[6], 20); state[12] = rotr(state[12], 21); state[0].uint64 = chi(state[0], state[6], state[12]).uint64 ^ Keccak_f1600_RC[23]; if (bswap_64(state[0]).uint64 <= d_target[0]) // LTE is allowed because d_target is high 64 bits of uint256 (let CPU do the verification) { (*solutionCount)++; if ((*solutionCount) < MAX_SOLUTION_COUNT_DEVICE) solutions[(*solutionCount) - 1] = nonce.uint64; } } // -------------------------------------------------------------------- // CudaSolver // -------------------------------------------------------------------- namespace CUDASolver { void CudaSolver::pushMessage(std::unique_ptr<Device> &device) { hipMemcpyToSymbol(d_midstate, &device->currentMidstate, SPONGE_LENGTH, 0, hipMemcpyHostToDevice); device->isNewMessage = false; } void CudaSolver::pushTarget(std::unique_ptr<Device> &device) { hipMemcpyToSymbol(d_target, &device->currentHigh64Target, UINT64_LENGTH, 0, hipMemcpyHostToDevice); device->isNewTarget = false; } void CudaSolver::findSolution(int const deviceID) { std::string errorMessage; auto& device = *std::find_if(m_devices.begin(), m_devices.end(), [&](std::unique_ptr<Device>& device) { return device->deviceID == deviceID; }); if (!device->initialized) return; while (!(device->isNewTarget || device->isNewMessage)) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); } errorMessage = CudaSafeCall(hipSetDevice(device->deviceID)); if (!errorMessage.empty()) onMessage(device->deviceID, "Error", errorMessage); char *c_currentChallenge = (char *)malloc(s_challenge.size()); #ifdef __linux__ strcpy(c_currentChallenge, s_challenge.c_str()); #else strcpy_s(c_currentChallenge, s_challenge.size() + 1, s_challenge.c_str()); #endif onMessage(device->deviceID, "Info", "Start mining..."); onMessage(device->deviceID, "Debug", "Threads: " + std::to_string(device->threads()) + " Grid size: " + std::to_string(device->grid().x) + " Block size:" + std::to_string(device->block().x)); device->mining = true; device->hashCount.store(0ull); device->hashStartTime = std::chrono::steady_clock::now() - std::chrono::milliseconds(1000); // reduce excessive high 
hashrate reporting at start do { while (m_pause) { device->hashCount.store(0ull); device->hashStartTime = std::chrono::steady_clock::now(); std::this_thread::sleep_for(std::chrono::milliseconds(500)); } checkInputs(device, c_currentChallenge); hipLaunchKernelGGL(( hashMidstate), dim3(device->grid()), dim3(device->block()), 0, 0, device->d_Solutions, device->d_SolutionCount, getNextWorkPosition(device)); errorMessage = CudaSyncAndCheckError(); if (!errorMessage.empty()) { onMessage(device->deviceID, "Error", "Kernel launch failed: " + errorMessage); device->mining = false; break; } if (*device->h_SolutionCount > 0u) { std::set<uint64_t> uniqueSolutions; for (uint32_t i{ 0u }; i < MAX_SOLUTION_COUNT_DEVICE && i < *device->h_SolutionCount; ++i) { uint64_t const tempSolution{ device->h_Solutions[i] }; if (tempSolution != 0u && uniqueSolutions.find(tempSolution) == uniqueSolutions.end()) uniqueSolutions.emplace(tempSolution); } std::thread t{ &CudaSolver::submitSolutions, this, uniqueSolutions, std::string{ c_currentChallenge }, device->deviceID }; t.detach(); std::memset(device->h_SolutionCount, 0u, UINT32_LENGTH); } } while (device->mining); onMessage(device->deviceID, "Info", "Stop mining..."); device->hashCount.store(0ull); errorMessage = CudaSafeCall(hipHostFree(device->h_SolutionCount)); if (!errorMessage.empty()) onMessage(device->deviceID, "Error", errorMessage); errorMessage = CudaSafeCall(hipHostFree(device->h_Solutions)); if (!errorMessage.empty()) onMessage(device->deviceID, "Error", errorMessage); errorMessage = CudaSafeCall(hipDeviceReset()); if (!errorMessage.empty()) onMessage(device->deviceID, "Error", errorMessage); device->initialized = false; onMessage(device->deviceID, "Info", "Mining stopped."); } }
e021f2d6361162ac747f6e5f68e8d1922f9584f4.cu
#include "cudaErrorCheck.cu" #include "cudaSolver.h" /* * Author: Mikers, Azlehria, lwYeo * Date: March 4 - September 2018 for 0xbitcoin dev * * based off of https://github.com/Dunhili/SHA3-gpu-brute-force-cracker/blob/master/sha3.cu * * Author: Brian Bowden * Date: 5/12/14 * * This is the parallel version of SHA-3. */ typedef union { uint2 uint2; uint64_t uint64; uint8_t uint8[UINT64_LENGTH]; } nonce_t; __constant__ int const mod5[8] = { 0, 1, 2, 3, 4, 0, 1, 2 }; __constant__ uint64_t d_midstate[25]; __constant__ uint64_t d_target[1]; __device__ __forceinline__ nonce_t bswap_64(nonce_t const input) { nonce_t output; asm("{" " prmt.b32 %0, %3, 0, 0x0123;" " prmt.b32 %1, %2, 0, 0x0123;" "}" : "=r"(output.uint2.x), "=r"(output.uint2.y) : "r"(input.uint2.x), "r"(input.uint2.y)); return output; } __device__ __forceinline__ nonce_t xor5(nonce_t const a, nonce_t const b, nonce_t const c, nonce_t const d, nonce_t const e) { nonce_t output; #if __CUDA_ARCH__ >= 500 asm("{" " lop3.b32 %0, %2, %4, %6, 0x96;" " lop3.b32 %1, %3, %5, %7, 0x96;" " lop3.b32 %0, %0, %8, %10, 0x96;" " lop3.b32 %1, %1, %9, %11, 0x96;" "}" : "=r"(output.uint2.x), "=r"(output.uint2.y) : "r"(a.uint2.x), "r"(a.uint2.y), "r"(b.uint2.x), "r"(b.uint2.y), "r"(c.uint2.x), "r"(c.uint2.y), "r"(d.uint2.x), "r"(d.uint2.y), "r"(e.uint2.x), "r"(e.uint2.y)); #else asm("{" " xor.b64 %0, %1, %2;" " xor.b64 %0, %0, %3;" " xor.b64 %0, %0, %4;" " xor.b64 %0, %0, %5;" "}" : "=l"(output.uint64) : "l"(a.uint64), "l"(b.uint64), "l"(c.uint64), "l"(d.uint64), "l"(e.uint64)); #endif return output; } __device__ __forceinline__ nonce_t xor3(nonce_t const a, nonce_t const b, nonce_t const c) { nonce_t output; #if __CUDA_ARCH__ >= 500 asm("{" " lop3.b32 %0, %2, %4, %6, 0x96;" " lop3.b32 %1, %3, %5, %7, 0x96;" "}" : "=r"(output.uint2.x), "=r"(output.uint2.y) : "r"(a.uint2.x), "r"(a.uint2.y), "r"(b.uint2.x), "r"(b.uint2.y), "r"(c.uint2.x), "r"(c.uint2.y)); #else asm("{" " xor.b64 %0, %1, %2;" " xor.b64 %0, %0, %3;" "}" : "=l"(output.uint64) : "l"(a.uint64), "l"(b.uint64), "l"(c.uint64)); #endif return output; } __device__ __forceinline__ nonce_t chi(nonce_t const a, nonce_t const b, nonce_t const c) { nonce_t output; #if __CUDA_ARCH__ >= 500 asm("{" " lop3.b32 %0, %2, %4, %6, 0xD2;" " lop3.b32 %1, %3, %5, %7, 0xD2;" "}" : "=r"(output.uint2.x), "=r"(output.uint2.y) : "r"(a.uint2.x), "r"(a.uint2.y), "r"(b.uint2.x), "r"(b.uint2.y), "r"(c.uint2.x), "r"(c.uint2.y)); #else output.uint64 = a.uint64 ^ ((~b.uint64) & c.uint64); #endif return output; } __device__ __forceinline__ nonce_t rotl(nonce_t input, uint32_t const offset) { #if __CUDA_ARCH__ >= 320 asm("{" " .reg .b32 tmp;" " shf.l.wrap.b32 tmp, %1, %0, %2;" " shf.l.wrap.b32 %1, %0, %1, %2;" " mov.b32 %0, tmp;" "}" : "+r"(input.uint2.x), "+r"(input.uint2.y) : "r"(offset)); #else input.uint64 = (input.uint64 << offset) ^ (input.uint64 >> (64u - offset)); #endif return input; } __device__ __forceinline__ nonce_t rotr(nonce_t input, uint32_t const offset) { #if __CUDA_ARCH__ >= 320 asm("{" " .reg .b32 tmp;" " shf.r.wrap.b32 tmp, %0, %1, %2;" " shf.r.wrap.b32 %1, %1, %0, %2;" " mov.b32 %0, tmp;" "}" : "+r"(input.uint2.x), "+r"(input.uint2.y) : "r"(offset)); #else input.uint64 = (input.uint64 >> offset) ^ (input.uint64 << (64u - offset)); #endif return input; } __global__ void hashMidstate(uint64_t *__restrict__ solutions, uint32_t *__restrict__ solutionCount, uint64_t startPosition) { nonce_t nonce, state[25], C[5], D[5], n[11]; nonce.uint64 = blockDim.x * blockIdx.x + threadIdx.x + startPosition; n[0] = 
rotl(nonce, 7); n[1] = rotl(n[0], 1); n[2] = rotl(n[1], 6); n[3] = rotl(n[2], 2); n[4] = rotl(n[3], 4); n[5] = rotl(n[4], 7); n[6] = rotl(n[5], 12); n[7] = rotl(n[6], 5); n[8] = rotl(n[7], 11); n[9] = rotl(n[8], 7); n[10] = rotl(n[9], 1); C[0].uint64 = d_midstate[0]; C[1].uint64 = d_midstate[1]; C[2].uint64 = d_midstate[2] ^ n[7].uint64; C[3].uint64 = d_midstate[3]; C[4].uint64 = d_midstate[4] ^ n[2].uint64; state[0].uint64 = chi(C[0], C[1], C[2]).uint64 ^ Keccak_f1600_RC[0]; state[1] = chi(C[1], C[2], C[3]); state[2] = chi(C[2], C[3], C[4]); state[3] = chi(C[3], C[4], C[0]); state[4] = chi(C[4], C[0], C[1]); C[0].uint64 = d_midstate[5]; C[1].uint64 = d_midstate[6] ^ n[4].uint64; C[2].uint64 = d_midstate[7]; C[3].uint64 = d_midstate[8]; C[4].uint64 = d_midstate[9] ^ n[9].uint64; state[5] = chi(C[0], C[1], C[2]); state[6] = chi(C[1], C[2], C[3]); state[7] = chi(C[2], C[3], C[4]); state[8] = chi(C[3], C[4], C[0]); state[9] = chi(C[4], C[0], C[1]); C[0].uint64 = d_midstate[10]; C[1].uint64 = d_midstate[11] ^ n[0].uint64; C[2].uint64 = d_midstate[12]; C[3].uint64 = d_midstate[13] ^ n[1].uint64; C[4].uint64 = d_midstate[14]; state[10] = chi(C[0], C[1], C[2]); state[11] = chi(C[1], C[2], C[3]); state[12] = chi(C[2], C[3], C[4]); state[13] = chi(C[3], C[4], C[0]); state[14] = chi(C[4], C[0], C[1]); C[0].uint64 = d_midstate[15] ^ n[5].uint64; C[1].uint64 = d_midstate[16]; C[2].uint64 = d_midstate[17]; C[3].uint64 = d_midstate[18] ^ n[3].uint64; C[4].uint64 = d_midstate[19]; state[15] = chi(C[0], C[1], C[2]); state[16] = chi(C[1], C[2], C[3]); state[17] = chi(C[2], C[3], C[4]); state[18] = chi(C[3], C[4], C[0]); state[19] = chi(C[4], C[0], C[1]); C[0].uint64 = d_midstate[20] ^ n[10].uint64; C[1].uint64 = d_midstate[21] ^ n[8].uint64; C[2].uint64 = d_midstate[22] ^ n[6].uint64; C[3].uint64 = d_midstate[23]; C[4].uint64 = d_midstate[24]; state[20] = chi(C[0], C[1], C[2]); state[21] = chi(C[1], C[2], C[3]); state[22] = chi(C[2], C[3], C[4]); state[23] = chi(C[3], C[4], C[0]); state[24] = chi(C[4], C[0], C[1]); #if __CUDA_ARCH__ >= 350 # pragma unroll #endif for (int i{ 1 }; i < 23; ++i) { C[1] = xor5(state[0], state[5], state[10], state[15], state[20]); C[2] = xor5(state[1], state[6], state[11], state[16], state[21]); C[3] = xor5(state[2], state[7], state[12], state[17], state[22]); C[4] = xor5(state[3], state[8], state[13], state[18], state[23]); C[0] = xor5(state[4], state[9], state[14], state[19], state[24]); #if __CUDA_ARCH__ >= 350 # pragma unroll #endif for (int x{ 0 }; x < 5; ++x) { #if __CUDA_ARCH__ >= 350 D[x] = rotl(C[mod5[x + 2]], 1); state[x] = xor3(state[x], D[x], C[x]); state[x + 5] = xor3(state[x + 5], D[x], C[x]); state[x + 10] = xor3(state[x + 10], D[x], C[x]); state[x + 15] = xor3(state[x + 15], D[x], C[x]); state[x + 20] = xor3(state[x + 20], D[x], C[x]); #else D[x].uint64 = rotl(C[mod5[x + 2]], 1).uint64 ^ C[x].uint64; state[x].uint64 = state[x].uint64 ^ D[x].uint64; state[x + 5].uint64 = state[x + 5].uint64 ^ D[x].uint64; state[x + 10].uint64 = state[x + 10].uint64 ^ D[x].uint64; state[x + 15].uint64 = state[x + 15].uint64 ^ D[x].uint64; state[x + 20].uint64 = state[x + 20].uint64 ^ D[x].uint64; #endif } C[0] = state[1]; state[1] = rotr(state[6], 20); state[6] = rotl(state[9], 20); state[9] = rotr(state[22], 3); state[22] = rotr(state[14], 25); state[14] = rotl(state[20], 18); state[20] = rotr(state[2], 2); state[2] = rotr(state[12], 21); state[12] = rotl(state[13], 25); state[13] = rotl(state[19], 8); state[19] = rotr(state[23], 8); state[23] = rotr(state[15], 23); state[15] = 
rotl(state[4], 27); state[4] = rotl(state[24], 14); state[24] = rotl(state[21], 2); state[21] = rotr(state[8], 9); state[8] = rotr(state[16], 19); state[16] = rotr(state[5], 28); state[5] = rotl(state[3], 28); state[3] = rotl(state[18], 21); state[18] = rotl(state[17], 15); state[17] = rotl(state[11], 10); state[11] = rotl(state[7], 6); state[7] = rotl(state[10], 3); state[10] = rotl(C[0], 1); #if __CUDA_ARCH__ >= 350 # pragma unroll #endif for (int x{ 0 }; x < 25; x += 5) { C[0] = state[x]; C[1] = state[x + 1]; C[2] = state[x + 2]; C[3] = state[x + 3]; C[4] = state[x + 4]; state[x] = chi(C[0], C[1], C[2]); state[x + 1] = chi(C[1], C[2], C[3]); state[x + 2] = chi(C[2], C[3], C[4]); state[x + 3] = chi(C[3], C[4], C[0]); state[x + 4] = chi(C[4], C[0], C[1]); } state[0].uint64 = state[0].uint64 ^ Keccak_f1600_RC[i]; } C[1] = xor5(state[0], state[5], state[10], state[15], state[20]); C[2] = xor5(state[1], state[6], state[11], state[16], state[21]); C[3] = xor5(state[2], state[7], state[12], state[17], state[22]); C[4] = xor5(state[3], state[8], state[13], state[18], state[23]); C[0] = xor5(state[4], state[9], state[14], state[19], state[24]); D[0] = rotl(C[2], 1); D[1] = rotl(C[3], 1); D[2] = rotl(C[4], 1); state[0] = xor3(state[0], D[0], C[0]); state[6] = xor3(state[6], D[1], C[1]); state[12] = xor3(state[12], D[2], C[2]); state[6] = rotr(state[6], 20); state[12] = rotr(state[12], 21); state[0].uint64 = chi(state[0], state[6], state[12]).uint64 ^ Keccak_f1600_RC[23]; if (bswap_64(state[0]).uint64 <= d_target[0]) // LTE is allowed because d_target is high 64 bits of uint256 (let CPU do the verification) { (*solutionCount)++; if ((*solutionCount) < MAX_SOLUTION_COUNT_DEVICE) solutions[(*solutionCount) - 1] = nonce.uint64; } } // -------------------------------------------------------------------- // CudaSolver // -------------------------------------------------------------------- namespace CUDASolver { void CudaSolver::pushMessage(std::unique_ptr<Device> &device) { cudaMemcpyToSymbol(d_midstate, &device->currentMidstate, SPONGE_LENGTH, 0, cudaMemcpyHostToDevice); device->isNewMessage = false; } void CudaSolver::pushTarget(std::unique_ptr<Device> &device) { cudaMemcpyToSymbol(d_target, &device->currentHigh64Target, UINT64_LENGTH, 0, cudaMemcpyHostToDevice); device->isNewTarget = false; } void CudaSolver::findSolution(int const deviceID) { std::string errorMessage; auto& device = *std::find_if(m_devices.begin(), m_devices.end(), [&](std::unique_ptr<Device>& device) { return device->deviceID == deviceID; }); if (!device->initialized) return; while (!(device->isNewTarget || device->isNewMessage)) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); } errorMessage = CudaSafeCall(cudaSetDevice(device->deviceID)); if (!errorMessage.empty()) onMessage(device->deviceID, "Error", errorMessage); char *c_currentChallenge = (char *)malloc(s_challenge.size()); #ifdef __linux__ strcpy(c_currentChallenge, s_challenge.c_str()); #else strcpy_s(c_currentChallenge, s_challenge.size() + 1, s_challenge.c_str()); #endif onMessage(device->deviceID, "Info", "Start mining..."); onMessage(device->deviceID, "Debug", "Threads: " + std::to_string(device->threads()) + " Grid size: " + std::to_string(device->grid().x) + " Block size:" + std::to_string(device->block().x)); device->mining = true; device->hashCount.store(0ull); device->hashStartTime = std::chrono::steady_clock::now() - std::chrono::milliseconds(1000); // reduce excessive high hashrate reporting at start do { while (m_pause) { 
device->hashCount.store(0ull); device->hashStartTime = std::chrono::steady_clock::now(); std::this_thread::sleep_for(std::chrono::milliseconds(500)); } checkInputs(device, c_currentChallenge); hashMidstate<<<device->grid(), device->block()>>>(device->d_Solutions, device->d_SolutionCount, getNextWorkPosition(device)); errorMessage = CudaSyncAndCheckError(); if (!errorMessage.empty()) { onMessage(device->deviceID, "Error", "Kernel launch failed: " + errorMessage); device->mining = false; break; } if (*device->h_SolutionCount > 0u) { std::set<uint64_t> uniqueSolutions; for (uint32_t i{ 0u }; i < MAX_SOLUTION_COUNT_DEVICE && i < *device->h_SolutionCount; ++i) { uint64_t const tempSolution{ device->h_Solutions[i] }; if (tempSolution != 0u && uniqueSolutions.find(tempSolution) == uniqueSolutions.end()) uniqueSolutions.emplace(tempSolution); } std::thread t{ &CudaSolver::submitSolutions, this, uniqueSolutions, std::string{ c_currentChallenge }, device->deviceID }; t.detach(); std::memset(device->h_SolutionCount, 0u, UINT32_LENGTH); } } while (device->mining); onMessage(device->deviceID, "Info", "Stop mining..."); device->hashCount.store(0ull); errorMessage = CudaSafeCall(cudaFreeHost(device->h_SolutionCount)); if (!errorMessage.empty()) onMessage(device->deviceID, "Error", errorMessage); errorMessage = CudaSafeCall(cudaFreeHost(device->h_Solutions)); if (!errorMessage.empty()) onMessage(device->deviceID, "Error", errorMessage); errorMessage = CudaSafeCall(cudaDeviceReset()); if (!errorMessage.empty()) onMessage(device->deviceID, "Error", errorMessage); device->initialized = false; onMessage(device->deviceID, "Info", "Mining stopped."); } }
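A sketch of one way the host buffers used by findSolution() could be set up. This is an assumption, not the project's allocator: because the mining loop reads *h_SolutionCount right after the kernel completes with no explicit copy from d_SolutionCount, the host and device pointers are assumed to alias the same mapped, pinned allocation (which also matches the cudaFreeHost calls at shutdown). The constants and the Device fields come from cudaSolver.h.

#include <memory>
#include <cuda_runtime.h>
#include "cudaSolver.h"   // Device, MAX_SOLUTION_COUNT_DEVICE, UINT32_LENGTH, UINT64_LENGTH

// Hypothetical allocation helper; field names follow the Device struct used above.
void allocateSolutionBuffers(std::unique_ptr<Device> &device)
{
    cudaSetDevice(device->deviceID);
    cudaSetDeviceFlags(cudaDeviceMapHost);   // enable mapped pinned memory

    cudaHostAlloc((void **)&device->h_SolutionCount, UINT32_LENGTH, cudaHostAllocMapped);
    cudaHostAlloc((void **)&device->h_Solutions,
                  MAX_SOLUTION_COUNT_DEVICE * UINT64_LENGTH, cudaHostAllocMapped);

    // Device-side aliases of the same allocations, as passed to hashMidstate().
    cudaHostGetDevicePointer((void **)&device->d_SolutionCount, device->h_SolutionCount, 0);
    cudaHostGetDevicePointer((void **)&device->d_Solutions, device->h_Solutions, 0);

    *device->h_SolutionCount = 0u;
}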
matrix_mul_fp32_simt_gemv_batched_strided_1x64x32_1x2x4.hip
// !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_gemv_batched_strided_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl"
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 32>;
using ThreadShape = cutlass::gemm::GemmShape<1, 2, 4>;
using GemvKernel = cutlass::gemm::kernel::DefaultGemv<
        ThreadBlockShape, ThreadShape, float, cutlass::layout::RowMajor, float,
        cutlass::layout::RowMajor, float, cutlass::layout::RowMajor>;
template void megdnn::cuda::cutlass_wrapper::
        cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>(
                BatchedGemmCoord const& problem_size,
                const typename GemvKernel::ElementA* d_A, size_t lda,
                size_t batch_stride_a,
                const typename GemvKernel::ElementB* d_B, size_t ldb,
                size_t batch_stride_b,
                typename GemvKernel::ElementCD* d_C, size_t ldc,
                size_t batch_stride_c, hipStream_t stream);
#pragma GCC diagnostic pop
#endif
matrix_mul_fp32_simt_gemv_batched_strided_1x64x32_1x2x4.cu
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_gemv_batched_strided_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl"
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 32>;
using ThreadShape = cutlass::gemm::GemmShape<1, 2, 4>;
using GemvKernel = cutlass::gemm::kernel::DefaultGemv<
        ThreadBlockShape, ThreadShape, float, cutlass::layout::RowMajor, float,
        cutlass::layout::RowMajor, float, cutlass::layout::RowMajor>;
template void megdnn::cuda::cutlass_wrapper::
        cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>(
                BatchedGemmCoord const& problem_size,
                const typename GemvKernel::ElementA* d_A, size_t lda,
                size_t batch_stride_a,
                const typename GemvKernel::ElementB* d_B, size_t ldb,
                size_t batch_stride_b,
                typename GemvKernel::ElementCD* d_C, size_t ldc,
                size_t batch_stride_c, cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
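A hypothetical call-site sketch for the instantiation above (not MegDNN code). The type aliases repeat the ones in the file; the BatchedGemmCoord problem size is taken as a parameter so nothing is assumed about how it is constructed, and namespaces and includes are assumed to resolve exactly as they do in the translation unit above.

#include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl"

using namespace megdnn::cuda::cutlass_wrapper;

// Same kernel configuration as the explicit instantiation above.
using TileShape = cutlass::gemm::GemmShape<1, 64, 32>;
using PerThread = cutlass::gemm::GemmShape<1, 2, 4>;
using Gemv = cutlass::gemm::kernel::DefaultGemv<
        TileShape, PerThread, float, cutlass::layout::RowMajor, float,
        cutlass::layout::RowMajor, float, cutlass::layout::RowMajor>;

// Hypothetical helper that forwards to the instantiated wrapper.
void run_f32_gemv_1x64x32_1x2x4(BatchedGemmCoord const& problem_size,
                                const float* d_A, size_t lda, size_t batch_stride_a,
                                const float* d_B, size_t ldb, size_t batch_stride_b,
                                float* d_C, size_t ldc, size_t batch_stride_c,
                                cudaStream_t stream)
{
    cutlass_vector_matrix_mul_batched_strided_wrapper<Gemv>(
            problem_size, d_A, lda, batch_stride_a, d_B, ldb, batch_stride_b,
            d_C, ldc, batch_stride_c, stream);
}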
e33be5d19d495c07ddd7661002bf57a87ee96a56.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <time.h> #include <limits.h> #include "mex.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "hip/driver_types.h" #define PI acos(-1) #define BLOCK_X 16 #define BLOCK_Y 16 /*M value for Linear Congruential Generator (LCG); use GCC's value*/ long M = INT_MAX; /*A value for LCG*/ int A = 1103515245; /*C value for LCG*/ int C = 12345; const int threads_per_block = 128; /***************************** *GET_TIME *returns a long int representing the time *****************************/ long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } /* Returns the number of seconds elapsed between the two specified times */ float elapsed_time(long long start_time, long long end_time) { return (float) (end_time - start_time) / (1000 * 1000); } /***************************** * CHECK_ERROR * Checks for CUDA errors and prints them to the screen to help with * debugging of CUDA related programming *****************************/ void check_error(hipError_t e) { if (e != hipSuccess) { printf("\nCUDA error: %s\n", hipGetErrorString(e)); exit(1); } } __device__ int d_findIndexSeq(float * CDF, int lengthCDF, float value) { int index = -1; int x; for(x = 0; x < lengthCDF; x++) { if(CDF[x] >= value) { index = x; break; } } if(index == -1) return lengthCDF-1; return index; } __device__ int d_findIndexBin(float * CDF, int beginIndex, int endIndex, float value) { if(endIndex < beginIndex) return -1; int middleIndex; while(endIndex > beginIndex) { middleIndex = beginIndex + ((endIndex-beginIndex)/2); if(CDF[middleIndex] >= value) { if(middleIndex == 0) return middleIndex; else if(CDF[middleIndex-1] < value) return middleIndex; else if(CDF[middleIndex-1] == value) { while(CDF[middleIndex] == value && middleIndex >= 0) middleIndex--; middleIndex++; return middleIndex; } } if(CDF[middleIndex] > value) endIndex = middleIndex-1; else beginIndex = middleIndex+1; } return -1; } /***************************** * CUDA Kernel Function to replace FindIndex * param1: arrayX * param2: arrayY * param3: CDF * param4: u * param5: xj * param6: yj * param7: Nparticles *****************************/ __global__ void kernel(float * arrayX, float * arrayY, float * CDF, float * u, float * xj, float * yj, int Nparticles){ int block_id = blockIdx.x;// + gridDim.x * blockIdx.y; int i = blockDim.x * block_id + threadIdx.x; if(i < Nparticles){ int index = d_findIndexSeq(CDF, Nparticles, u[i]); if(index == -1){ index = Nparticles-1; } xj[i] = arrayX[index]; yj[i] = arrayY[index]; } } /***************************** *ROUND *takes in a double and returns an integer that approximates to that double *if the mantissa < .5 => return value < input value *else return value > input value *****************************/ double roundDouble(double value){ int newValue = (int)(value); if(value - newValue < .5) return newValue; else return newValue++; } /****************************** * SETIF * set values of the 3D array to a newValue if that value is equal to the testValue * param1: value to test * param2: 3D array * param3: dim X * param4: dim Y * param5: dim Z ******************************/ void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue) array3D[x * 
*dimY * *dimZ + y * *dimZ + z] = newValue; } } } } /***************************** * RANDU * GENERATES A UNIFORM DISTRIBUTION * returns a double representing a randomily generated number from a uniform distribution with range [0, 1) ******************************/ double randu(int * seed, int index) { int num = A*seed[index] + C; seed[index] = num % M; return fabs(seed[index]/((double) M)); } /****************************** * RANDN * GENERATES A NORMAL DISTRIBUTION * returns a double representing random number generated using Irwin-Hall distribution method * see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution ******************************/ double randn(int * seed, int index){ /*Box-Muller algorithm*/ double u = randu(seed, index); double v = randu(seed, index); double cosine = cos(2*PI*v); double rt = -2*log(u); return sqrt(rt)*cosine; } /****************************** * ADDNOISE * sets values of 3D matrix using randomly generated numbers from a normal distribution * param matrix ******************************/ void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0)); } } } } /****************************** * STRELDISK * param: pointer to the disk to be made * creates a 9x9 matrix representing the disk ******************************/ void strelDisk(int * disk, int radius) { int diameter = radius*2 - 1; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2)); if(distance < radius) disk[x*diameter + y] = 1; } } } /****************************** * DILATE_MATRIX * param1: matrix to be dilated * param2: current x position * param3: current y position * param4: current z position * param5: x length * param6: y length * param7: z length * param8: error radius *******************************/ void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) { int startX = posX - error; while(startX < 0) startX++; int startY = posY - error; while(startY < 0) startY++; /*int startZ = posZ - error; while(startZ < 0) startZ++;*/ int endX = posX + error; while(endX > dimX) endX--; int endY = posY + error; while(endY > dimY) endY--; /*int endZ = posZ + error; while(endZ > dimZ) endZ--;*/ int x,y; for(x = startX; x < endX; x++){ for(y = startY; y < endY; y++){ double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) ); if(distance < error) matrix[x*dimY*dimZ + y*dimZ + posZ] = 1; } } } /****************************** * IMDILATE_DISK * param1: target 3d matrix * param2: dimX * param3: dimY * param4: dimZ * param5: radius * param6: error * returns the dilated matrix * dilates the target matrix using the radius as a guide ******************************/ void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix) { int x, y, z; for(z = 0; z < dimZ; z++){ for(x = 0; x < dimX; x++){ for(y = 0; y < dimY; y++){ if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){ dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error); } } } } } /***************************** * GET NEIGHBORS * returns a 2D array describing the offets * param 1 strel object * param 2 dimX of object * param 3 dimY of object *******************************/ void getneighbors(int * se, int 
numOnes, double * neighbors, int radius){ int x, y; int neighY = 0; int center = radius - 1; int diameter = radius*2 -1; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(se[x*diameter + y]){ neighbors[neighY*2] = (int)(y - center); neighbors[neighY*2 + 1] = (int)(x - center); neighY++; } } } } /****************************** * VIDEO SEQUENCE * the synthetic video sequence we will work with here is composed of a * single moving object, circular in shape (fixed radius) * The motion here is a linear motion * the foreground intensity and the backgrounf intensity is known * the image is corrupted with zero mean Gaussian noise *******************************/ void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){ int k; int max_size = IszX*IszY*Nfr; /*get object centers*/ int x0 = (int)roundDouble(IszY/2.0); int y0 = (int)roundDouble(IszX/2.0); I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1; /*move point*/ int xk, yk, pos; for(k = 1; k < Nfr; k++){ xk = abs(x0 + (k-1)); yk = abs(y0 - 2*(k-1)); pos = yk * IszY * Nfr + xk *Nfr + k; if(pos >= max_size) pos = 0; I[pos] = 1; } /*dilate matrix*/ int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix); int x, y; for(x = 0; x < IszX; x++){ for(y = 0; y < IszY; y++){ for(k = 0; k < Nfr; k++){ I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k]; } } } free(newMatrix); /*define background, add noise*/ setIf(0, 100, I, &IszX, &IszY, &Nfr); setIf(1, 228, I, &IszX, &IszY, &Nfr); /*add noise*/ addNoise(I, &IszX, &IszY, &Nfr, seed); } /******************************** * CALC LIKELIHOOD SUM * DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100 * param 1 I 3D matrix * param 2 current ind array * param 3 length of ind array * returns a double representing the sum ********************************/ double calcLikelihoodSum(int * I, int * ind, int numOnes){ double likelihoodSum = 0.0; int y; for(y = 0; y < numOnes; y++) likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0; return likelihoodSum; } /****************************** * FIND INDEX * FINDS THE FIRST OCCURRENCE OF AN ELEMENT IN CDF GREATER THAN THE PROVIDED VALUE AND RETURNS THAT INDEX * param1 CDF * param2 length of CDF * param3 value *******************************/ int findIndex(double * CDF, int lengthCDF, double value){ int index = -1; int x; for(x = 0; x < lengthCDF; x++){ if(CDF[x] >= value){ index = x; break; } } if(index == -1){ return lengthCDF-1; } return index; } int findIndexBin(double * CDF, int beginIndex, int endIndex, double value){ if(endIndex < beginIndex) return -1; int middleIndex = beginIndex + ((endIndex - beginIndex)/2); /*check the value*/ if(CDF[middleIndex] >= value) { /*check that it's good*/ if(middleIndex == 0) return middleIndex; else if(CDF[middleIndex-1] < value) return middleIndex; else if(CDF[middleIndex-1] == value) { while(middleIndex > 0 && CDF[middleIndex-1] == value) middleIndex--; return middleIndex; } } if(CDF[middleIndex] > value) return findIndexBin(CDF, beginIndex, middleIndex+1, value); return findIndexBin(CDF, middleIndex-1, endIndex, value); } void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles, double * x_loc, double * y_loc, double xe, double ye){ long long start = get_time(); /*original particle centroid*/ x_loc[0] = xe; y_loc[0] = ye; /*expected object locations, compared to center*/ int radius = 5; int diameter = radius*2 -1; int * disk = (int 
*)mxCalloc(diameter*diameter, sizeof(int)); strelDisk(disk, radius); int countOnes = 0; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(disk[x*diameter + y] == 1) countOnes++; } } double * objxy = (double *)mxCalloc(countOnes*2, sizeof(double)); getneighbors(disk, countOnes, objxy, radius); long long get_neighbors = get_time(); printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors)); /*initial weights are all equal (1/Nparticles)*/ double * weights = (double *)malloc(sizeof(double)*Nparticles); #pragma omp parallel for shared(weights, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); } long long get_weights = get_time(); printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights)); /*initial likelihood to 0.0*/ double * likelihood = (double *)mxCalloc(Nparticles, sizeof(double)); double * arrayX = (double *)mxCalloc(Nparticles, sizeof(double)); double * arrayY = (double *)mxCalloc(Nparticles, sizeof(double)); double * xj = (double *)mxCalloc(Nparticles, sizeof(double)); double * yj = (double *)mxCalloc(Nparticles, sizeof(double)); double * CDF = (double *)mxCalloc(Nparticles, sizeof(double)); double * u = (double *)mxCalloc(Nparticles, sizeof(double)); //int * ind = (int*)mxCalloc(countOnes*Nparticles, sizeof(int)); mxArray * arguments[4]; mxArray * mxIK = mxCreateDoubleMatrix(IszX, IszY, mxREAL); mxArray * mxObj = mxCreateDoubleMatrix(countOnes, 2, mxREAL); mxArray * mxX = mxCreateDoubleMatrix(1, Nparticles, mxREAL); mxArray * mxY = mxCreateDoubleMatrix(1, Nparticles, mxREAL); double * Ik = (double *)mxCalloc(IszX*IszY, sizeof(double)); mxArray * result = mxCreateDoubleMatrix(1, Nparticles, mxREAL); float * farrayX = (float *)mxCalloc(Nparticles, sizeof(float)); float * farrayY = (float *)mxCalloc(Nparticles, sizeof(float)); float * fxj = (float *)mxCalloc(Nparticles, sizeof(float)); float * fyj = (float *)mxCalloc(Nparticles, sizeof(float)); float * fCDF = (float *)mxCalloc(Nparticles, sizeof(float)); float * fu = (float *)mxCalloc(Nparticles, sizeof(float)); //GPU copies of arrays float * arrayX_GPU; float * arrayY_GPU; float * xj_GPU; float * yj_GPU; float * CDF_GPU; float * u_GPU; //CUDA memory allocation check_error(hipMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles)); check_error(hipMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles)); check_error(hipMalloc((void **) &xj_GPU, sizeof(float)*Nparticles)); check_error(hipMalloc((void **) &yj_GPU, sizeof(float)*Nparticles)); check_error(hipMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles)); check_error(hipMalloc((void **) &u_GPU, sizeof(float)*Nparticles)); #pragma omp parallel for shared(arrayX, arrayY, xe, ye) private(x) for(x = 0; x < Nparticles; x++){ arrayX[x] = xe; arrayY[x] = ye; } int k; printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time())); for(k = 1; k < Nfr; k++){ long long set_arrays = get_time(); /*apply motion model //draws sample from motion model (random walk). 
The only prior information //is that the object moves 2x as fast as in the y direction*/ #pragma omp parallel for shared(arrayX, arrayY, Nparticles, seed) private(x) for(x = 0; x < Nparticles; x++){ arrayX[x] += 1 + 5*randn(seed, x); arrayY[x] += -2 + 2*randn(seed, x); } long long error = get_time(); printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error)); //get the current image for(x = 0; x < IszX; x++) { for(y = 0; y < IszY; y++) { Ik[x*IszX + y] = (double)I[k*IszX*IszY + x*IszY + y]; } } //copy arguments memcpy(mxGetPr(mxIK), Ik, sizeof(double)*IszX*IszY); memcpy(mxGetPr(mxObj), objxy, sizeof(double)*countOnes); memcpy(mxGetPr(mxX), arrayX, sizeof(double)*Nparticles); memcpy(mxGetPr(mxY), arrayY, sizeof(double)*Nparticles); arguments[0] = mxIK; arguments[1] = mxObj; arguments[2] = mxX; arguments[3] = mxY; mexCallMATLAB(1, &result, 4, arguments, "GetSimpleLikelihood"); memcpy(likelihood, result, sizeof(double)*Nparticles); long long likelihood_time = get_time(); printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time)); /* update & normalize weights // using equation (63) of Arulampalam Tutorial*/ #pragma omp parallel for shared(Nparticles, weights, likelihood) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = weights[x] * exp(likelihood[x]); } long long exponential = get_time(); printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential)); double sumWeights = 0; #pragma omp parallel for private(x) reduction(+:sumWeights) for(x = 0; x < Nparticles; x++){ sumWeights += weights[x]; } long long sum_time = get_time(); printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time)); #pragma omp parallel for shared(sumWeights, weights) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = weights[x]/sumWeights; } long long normalize = get_time(); printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize)); xe = 0; ye = 0; /* estimate the object location by expected values*/ #pragma omp parallel for private(x) reduction(+:xe, ye) for(x = 0; x < Nparticles; x++){ xe += arrayX[x] * weights[x]; ye += arrayY[x] * weights[x]; } long long move_time = get_time(); printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time)); printf("XE: %lf\n", xe); printf("YE: %lf\n", ye); x_loc[k] = xe; y_loc[k] = ye; double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) ); printf("%lf\n", distance); /*display(hold off for now) //pause(hold off for now) //resampling*/ CDF[0] = weights[0]; for(x = 1; x < Nparticles; x++){ CDF[x] = weights[x] + CDF[x-1]; } long long cum_sum = get_time(); printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum)); double u1 = (1/((double)(Nparticles)))*randu(seed, 0); #pragma omp parallel for shared(u, u1, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ u[x] = u1 + x/((double)(Nparticles)); } long long u_time = get_time(); printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time)); for(x = 0; x < Nparticles; x++) { farrayX[x] = (float)arrayX[x]; farrayY[x] = (float)arrayY[x]; fxj[x] = (float)xj[x]; fyj[x] = (float)yj[x]; fCDF[x] = (float)CDF[x]; fu[x] = (float)u[x]; } long long start_copy = get_time(); //CUDA memory copying from CPU memory to GPU memory hipMemcpy(arrayX_GPU, farrayX, sizeof(float)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(arrayY_GPU, farrayY, sizeof(float)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(xj_GPU, fxj, sizeof(float)*Nparticles, 
hipMemcpyHostToDevice); hipMemcpy(yj_GPU, fyj, sizeof(float)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(CDF_GPU, fCDF, sizeof(float)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(u_GPU, fu, sizeof(float)*Nparticles, hipMemcpyHostToDevice); long long end_copy = get_time(); //Set number of threads int num_blocks = ceil((double) Nparticles/(double) threads_per_block); //KERNEL FUNCTION CALL hipLaunchKernelGGL(( kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles); long long start_copy_back = get_time(); //CUDA memory copying back from GPU to CPU memory hipMemcpy(fyj, yj_GPU, sizeof(float)*Nparticles, hipMemcpyDeviceToHost); hipMemcpy(fxj, xj_GPU, sizeof(float)*Nparticles, hipMemcpyDeviceToHost); for(x = 0; x < Nparticles; x++) { xj[x] = (double)fxj[x]; yj[x] = (double)fyj[x]; } long long end_copy_back = get_time(); printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy)); printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back)); printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back)); long long xyj_time = get_time(); printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time)); /*reassign arrayX and arrayY*/ #pragma omp parallel for shared(weights, arrayX, arrayY, xj, yj, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); arrayX[x] = xj[x]; arrayY[x] = yj[x]; } long long reset = get_time(); printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset)); } mxFree(disk); mxFree(weights); mxFree(objxy); mxFree(likelihood); mxFree(arrayX); mxFree(arrayY); mxFree(CDF); mxFree(u); //mxFree(ind); mxFree(xj); mxFree(yj); mxFree(Ik); mxFree(farrayX); mxFree(farrayY); mxFree(fxj); mxFree(fyj); mxFree(fCDF); mxFree(fu); //CUDA freeing of memory hipFree(u_GPU); hipFree(CDF_GPU); hipFree(yj_GPU); hipFree(xj_GPU); hipFree(arrayY_GPU); hipFree(arrayX_GPU); } void particleFilter1F(int * I, int IszX, int IszY, int * seed, int Nparticles, double * x_loc, double * y_loc, double prevX, double prevY){ long long start = get_time(); /*original particle centroid*/ double xe = prevX; double ye = prevY; /*expected object locations, compared to center*/ int radius = 5; int diameter = radius*2 -1; int * disk = (int *)mxCalloc(diameter*diameter, sizeof(int)); strelDisk(disk, radius); int countOnes = 0; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(disk[x*diameter + y] == 1) countOnes++; } } double * objxy = (double *)mxCalloc(countOnes*2, sizeof(double)); getneighbors(disk, countOnes, objxy, radius); long long get_neighbors = get_time(); printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors)); /*initial weights are all equal (1/Nparticles)*/ double * weights = (double *)malloc(sizeof(double)*Nparticles); #pragma omp parallel for shared(weights, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); } long long get_weights = get_time(); printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights)); /*initial likelihood to 0.0*/ double * likelihood = (double *)mxCalloc(Nparticles, sizeof(double)); double * arrayX = (double *)mxCalloc(Nparticles, sizeof(double)); double * arrayY = (double *)mxCalloc(Nparticles, sizeof(double)); double * xj = (double *)mxCalloc(Nparticles, sizeof(double)); double * yj = (double *)mxCalloc(Nparticles, sizeof(double)); double * CDF = (double *)mxCalloc(Nparticles, 
sizeof(double)); double * u = (double *)mxCalloc(Nparticles, sizeof(double)); //int * ind = (int*)mxCalloc(countOnes*Nparticles, sizeof(int)); float * farrayX = (float *)mxCalloc(Nparticles, sizeof(float)); float * farrayY = (float *)mxCalloc(Nparticles, sizeof(float)); float * fxj = (float *)mxCalloc(Nparticles, sizeof(float)); float * fyj = (float *)mxCalloc(Nparticles, sizeof(float)); float * fCDF = (float *)mxCalloc(Nparticles, sizeof(float)); float * fu = (float *)mxCalloc(Nparticles, sizeof(float)); //GPU copies of arrays float * arrayX_GPU; float * arrayY_GPU; float * xj_GPU; float * yj_GPU; float * CDF_GPU; float * u_GPU; //CUDA memory allocation check_error(hipMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles)); check_error(hipMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles)); check_error(hipMalloc((void **) &xj_GPU, sizeof(float)*Nparticles)); check_error(hipMalloc((void **) &yj_GPU, sizeof(float)*Nparticles)); check_error(hipMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles)); check_error(hipMalloc((void **) &u_GPU, sizeof(float)*Nparticles)); mxArray * arguments[4]; mxArray * mxIK = mxCreateDoubleMatrix(IszX, IszY, mxREAL); mxArray * mxObj = mxCreateDoubleMatrix(countOnes, 2, mxREAL); mxArray * mxX = mxCreateDoubleMatrix(1, Nparticles, mxREAL); mxArray * mxY = mxCreateDoubleMatrix(1, Nparticles, mxREAL); double * Ik = (double *)mxCalloc(IszX*IszY, sizeof(double)); mxArray * result = mxCreateDoubleMatrix(1, Nparticles, mxREAL); #pragma omp parallel for shared(arrayX, arrayY, xe, ye) private(x) for(x = 0; x < Nparticles; x++){ arrayX[x] = xe; arrayY[x] = ye; } printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time())); long long set_arrays = get_time(); /*apply motion model //draws sample from motion model (random walk). 
The only prior information //is that the object moves 2x as fast as in the y direction*/ #pragma omp parallel for shared(arrayX, arrayY, Nparticles, seed) private(x) for(x = 0; x < Nparticles; x++){ arrayX[x] += 1 + 5*randn(seed, x); arrayY[x] += -2 + 2*randn(seed, x); } long long error = get_time(); printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error)); /*particle filter likelihood*/ #pragma omp parallel for shared(likelihood, I, arrayX, arrayY, objxy, ind) private(x, y, indX, indY) //get the current image for(x = 0; x < IszX; x++) { for(y = 0; y < IszY; y++) { Ik[x*IszX + y] = (double)I[x*IszY + y]; } } //copy arguments memcpy(mxGetPr(mxIK), Ik, sizeof(double)*IszX*IszY); memcpy(mxGetPr(mxObj), objxy, sizeof(double)*countOnes); memcpy(mxGetPr(mxX), arrayX, sizeof(double)*Nparticles); memcpy(mxGetPr(mxY), arrayY, sizeof(double)*Nparticles); arguments[0] = mxIK; arguments[1] = mxObj; arguments[2] = mxX; arguments[3] = mxY; mexCallMATLAB(1, &result, 4, arguments, "GetSimpleLikelihood"); memcpy(likelihood, result, sizeof(double)*Nparticles); long long likelihood_time = get_time(); printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time)); /* update & normalize weights // using equation (63) of Arulampalam Tutorial*/ #pragma omp parallel for shared(Nparticles, weights, likelihood) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = weights[x] * exp(likelihood[x]); } long long exponential = get_time(); printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential)); double sumWeights = 0; #pragma omp parallel for private(x) reduction(+:sumWeights) for(x = 0; x < Nparticles; x++){ sumWeights += weights[x]; } long long sum_time = get_time(); printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time)); #pragma omp parallel for shared(sumWeights, weights) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = weights[x]/sumWeights; } long long normalize = get_time(); printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize)); xe = 0; ye = 0; /* estimate the object location by expected values*/ #pragma omp parallel for private(x) reduction(+:xe, ye) for(x = 0; x < Nparticles; x++){ xe += arrayX[x] * weights[x]; ye += arrayY[x] * weights[x]; /*printf("POSX[%d]: %lf \t WGT[%d]: %lf\n", x, arrayX[x], x, weights[x]); printf("POSY[%d]: %lf \t WGT[%d]: %lf\n", x, arrayY[x], x, weights[x]);*/ } long long move_time = get_time(); printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time)); printf("XE: %lf\n", xe); printf("YE: %lf\n", ye); x_loc[0] = xe+.5; y_loc[0] = ye+.5; double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) ); printf("%lf\n", distance); /*display(hold off for now) //pause(hold off for now) //resampling*/ CDF[0] = weights[0]; for(x = 1; x < Nparticles; x++){ CDF[x] = weights[x] + CDF[x-1]; } long long cum_sum = get_time(); printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum)); double u1 = (1/((double)(Nparticles)))*randu(seed, 0); #pragma omp parallel for shared(u, u1, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ u[x] = u1 + x/((double)(Nparticles)); } long long u_time = get_time(); printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time)); for(x = 0; x < Nparticles; x++) { farrayX[x] = (float)arrayX[x]; farrayY[x] = (float)arrayY[x]; fxj[x] = (float)xj[x]; fyj[x] = (float)yj[x]; fCDF[x] = (float)CDF[x]; fu[x] = (float)u[x]; } long long start_copy = 
get_time(); //CUDA memory copying from CPU memory to GPU memory hipMemcpy(arrayX_GPU, farrayX, sizeof(float)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(arrayY_GPU, farrayY, sizeof(float)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(xj_GPU, fxj, sizeof(float)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(yj_GPU, fyj, sizeof(float)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(CDF_GPU, fCDF, sizeof(float)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(u_GPU, fu, sizeof(float)*Nparticles, hipMemcpyHostToDevice); long long end_copy = get_time(); //Set number of threads int num_blocks = ceil((double) Nparticles/(double) threads_per_block); //KERNEL FUNCTION CALL hipLaunchKernelGGL(( kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles); long long start_copy_back = get_time(); //CUDA memory copying back from GPU to CPU memory hipMemcpy(fyj, yj_GPU, sizeof(float)*Nparticles, hipMemcpyDeviceToHost); hipMemcpy(fxj, xj_GPU, sizeof(float)*Nparticles, hipMemcpyDeviceToHost); for(x = 0; x < Nparticles; x++) { xj[x] = (double)fxj[x]; yj[x] = (double)fyj[x]; } long long end_copy_back = get_time(); printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy)); printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back)); printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back)); long long xyj_time = get_time(); printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time)); /*reassign arrayX and arrayY*/ #pragma omp parallel for shared(weights, arrayX, arrayY, xj, yj, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); arrayX[x] = xj[x]; arrayY[x] = yj[x]; } long long reset = get_time(); printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset)); mxFree(disk); mxFree(weights); mxFree(objxy); mxFree(likelihood); mxFree(arrayX); mxFree(arrayY); mxFree(CDF); mxFree(u); //mxFree(ind); mxFree(xj); mxFree(yj); mxFree(Ik); mxFree(farrayX); mxFree(farrayY); mxFree(fxj); mxFree(fyj); mxFree(fCDF); mxFree(fu); //CUDA freeing of memory hipFree(u_GPU); hipFree(CDF_GPU); hipFree(yj_GPU); hipFree(xj_GPU); hipFree(arrayY_GPU); hipFree(arrayX_GPU); } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){ int * I; int IszX, IszY, Nfr, Nparticles; if(nrhs < 6) { printf("ERROR: TOO FEW ARGS HAVE BEEN ENTERED\n"); printf("EXITING\n"); exit(0); } else if(nrhs == 7) { IszX = (int)(mxGetScalar(prhs[1])); IszY = (int)(mxGetScalar(prhs[2])); Nfr = (int)(mxGetScalar(prhs[3])); Nparticles = (int)(mxGetScalar(prhs[4])); printf("ISZX: %d\n", IszX); printf("ISZY: %d\n", IszY); printf("Nfr: %d\n", Nfr); printf("Nparticles: %d\n", Nparticles); unsigned char * cI = (unsigned char *)mxGetData(prhs[0]); I = (int *)mxCalloc(IszX*IszY*Nfr, sizeof(int)); int x, y, z; for(x = 0; x < IszX; x++){ for(y = 0; y < IszY; y++){ for(z = 0; z < Nfr; z++){ I[x*IszY*Nfr + y*Nfr + z] = (int)cI[x*IszY*Nfr + y*Nfr + z]; } } } double xe = (double)mxGetScalar(prhs[5]); double ye = (double)mxGetScalar(prhs[6]); int * seed = (int *)mxCalloc(Nparticles, sizeof(int)); int i; for(i = 0; i < Nparticles; i++) seed[i] = time(0)*i; double * posX = (double *)mxCalloc(Nfr, sizeof(double)); double * posY = (double *)mxCalloc(Nfr, sizeof(double)); long long start = get_time(); particleFilter(I, IszX, IszY, Nfr, seed, Nparticles, posX, posY, xe, ye); long long end = get_time(); mxFree(I); mxFree(seed); printf("PARTICLE FILTER TOOK %f\n", 
elapsed_time(start, end)); plhs[0] = mxCreateDoubleMatrix(Nfr, 1, mxREAL); plhs[1] = mxCreateDoubleMatrix(Nfr, 1, mxREAL); double * bufferX = mxGetPr(plhs[0]); double * bufferY = mxGetPr(plhs[1]); for(i = 0; i < Nfr; i++) { bufferX[i] = posX[i]; bufferY[i] = posY[i]; } mxFree(posX); mxFree(posY); } else if(nrhs == 6) { IszX = (int)(mxGetScalar(prhs[1])); IszY = (int)(mxGetScalar(prhs[2])); Nparticles = (int)(mxGetScalar(prhs[3])); printf("ISZX: %d\n", IszX); printf("ISZY: %d\n", IszY); printf("Nparticles: %d\n", Nparticles); double startX = (double)mxGetScalar(prhs[4]); double startY = (double)mxGetScalar(prhs[5]); printf("Starting PosX: %lf\n", startX); printf("Starting PosY: %lf\n", startY); unsigned char * cI = (unsigned char *)mxGetData(prhs[0]); I = (int *)mxCalloc(IszX*IszY, sizeof(int)); int x, y; for(x = 0; x < IszX; x++){ for(y = 0; y < IszY; y++){ I[x*IszX + y] = (int)cI[x*IszX + y]; } } int * seed = (int *)mxCalloc(Nparticles, sizeof(int)); int i; for(i = 0; i < Nparticles; i++) seed[i] = time(0)*i; double posX[1]; double posY[1]; long long start = get_time(); particleFilter1F(I, IszX, IszY, seed, Nparticles, posX, posY, startX, startY); long long end = get_time(); mxFree(I); mxFree(seed); printf("PARTICLE FILTER TOOK %f\n", elapsed_time(start, end)); plhs[0] = mxCreateDoubleMatrix(1,1,mxREAL); plhs[1] = mxCreateDoubleMatrix(1,1,mxREAL); double * bufferX = mxGetPr(plhs[0]); double * bufferY = mxGetPr(plhs[1]); bufferX[0] = posX[0]; bufferY[0] = posY[0]; } else { printf("ERROR: TOO MANY ARGS\n"); printf("EXITING\n"); exit(0); } } int main(){ /*3D matrix consisting the picture and the frames*/ int * I; /*dimension X of the picture in pixels*/ int IszX = 128; /*dimension Y of the picture in pixels*/ int IszY = 128; /*number of frames*/ int Nfr = 10; /*define number of particles*/ int Nparticles = 100000; /*establish seed*/ int * seed = (int *)malloc(sizeof(int)*Nparticles); int i; for(i = 0; i < Nparticles; i++) seed[i] = time(0)*i; /*malloc matrix*/ I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); long long start = get_time(); /*call video sequence*/ videoSequence(I, IszX, IszY, Nfr, seed); long long endVideoSequence = get_time(); printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence)); double * posX = (double *)mxCalloc(Nfr, sizeof(double)); double * posY = (double *)mxCalloc(Nfr, sizeof(double)); double xe = IszX/2.0; double ye = IszY/2.0; /*call particle filter*/ particleFilter(I, IszX, IszY, Nfr, seed, Nparticles, posX, posY, xe, ye); free(I); free(seed); long long endParticleFilter = get_time(); printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter)); printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter)); return 0; }
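/*
 * Editor's sketch (not part of the original Rodinia source): the resampling
 * kernel launch above is timed with host timestamps only, so the reported
 * "CUDA EXEC" interval mostly captures launch overhead; the device work is
 * absorbed into the following hipMemcpy, which synchronizes implicitly.
 * Below is a minimal, hedged illustration of checking the launch and
 * synchronizing before timing. launch_resample_checked is a hypothetical
 * helper name introduced here for illustration only; it reuses the file's
 * kernel and threads_per_block definitions.
 */
static void launch_resample_checked(float * arrayX_GPU, float * arrayY_GPU,
                                    float * CDF_GPU, float * u_GPU,
                                    float * xj_GPU, float * yj_GPU,
                                    int Nparticles)
{
    /* same grid shape as the original call sites */
    int num_blocks = (Nparticles + threads_per_block - 1) / threads_per_block;
    hipLaunchKernelGGL(kernel, dim3(num_blocks), dim3(threads_per_block), 0, 0,
                       arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU,
                       xj_GPU, yj_GPU, Nparticles);
    /* a bad launch configuration is only reported asynchronously, so query it */
    hipError_t e = hipGetLastError();
    if (e != hipSuccess) {
        printf("\nkernel launch error: %s\n", hipGetErrorString(e));
        exit(1);
    }
    /* wait for the kernel so host-side timers bracket the real execution time */
    e = hipDeviceSynchronize();
    if (e != hipSuccess) {
        printf("\nkernel execution error: %s\n", hipGetErrorString(e));
        exit(1);
    }
}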
e33be5d19d495c07ddd7661002bf57a87ee96a56.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <time.h> #include <limits.h> #include "mex.h" #include "cuda.h" #include "cuda_runtime.h" #include "driver_types.h" #define PI acos(-1) #define BLOCK_X 16 #define BLOCK_Y 16 /*M value for Linear Congruential Generator (LCG); use GCC's value*/ long M = INT_MAX; /*A value for LCG*/ int A = 1103515245; /*C value for LCG*/ int C = 12345; const int threads_per_block = 128; /***************************** *GET_TIME *returns a long int representing the time *****************************/ long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } /* Returns the number of seconds elapsed between the two specified times */ float elapsed_time(long long start_time, long long end_time) { return (float) (end_time - start_time) / (1000 * 1000); } /***************************** * CHECK_ERROR * Checks for CUDA errors and prints them to the screen to help with * debugging of CUDA related programming *****************************/ void check_error(cudaError e) { if (e != cudaSuccess) { printf("\nCUDA error: %s\n", cudaGetErrorString(e)); exit(1); } } __device__ int d_findIndexSeq(float * CDF, int lengthCDF, float value) { int index = -1; int x; for(x = 0; x < lengthCDF; x++) { if(CDF[x] >= value) { index = x; break; } } if(index == -1) return lengthCDF-1; return index; } __device__ int d_findIndexBin(float * CDF, int beginIndex, int endIndex, float value) { if(endIndex < beginIndex) return -1; int middleIndex; while(endIndex > beginIndex) { middleIndex = beginIndex + ((endIndex-beginIndex)/2); if(CDF[middleIndex] >= value) { if(middleIndex == 0) return middleIndex; else if(CDF[middleIndex-1] < value) return middleIndex; else if(CDF[middleIndex-1] == value) { while(CDF[middleIndex] == value && middleIndex >= 0) middleIndex--; middleIndex++; return middleIndex; } } if(CDF[middleIndex] > value) endIndex = middleIndex-1; else beginIndex = middleIndex+1; } return -1; } /***************************** * CUDA Kernel Function to replace FindIndex * param1: arrayX * param2: arrayY * param3: CDF * param4: u * param5: xj * param6: yj * param7: Nparticles *****************************/ __global__ void kernel(float * arrayX, float * arrayY, float * CDF, float * u, float * xj, float * yj, int Nparticles){ int block_id = blockIdx.x;// + gridDim.x * blockIdx.y; int i = blockDim.x * block_id + threadIdx.x; if(i < Nparticles){ int index = d_findIndexSeq(CDF, Nparticles, u[i]); if(index == -1){ index = Nparticles-1; } xj[i] = arrayX[index]; yj[i] = arrayY[index]; } } /***************************** *ROUND *takes in a double and returns an integer that approximates to that double *if the mantissa < .5 => return value < input value *else return value > input value *****************************/ double roundDouble(double value){ int newValue = (int)(value); if(value - newValue < .5) return newValue; else return newValue++; } /****************************** * SETIF * set values of the 3D array to a newValue if that value is equal to the testValue * param1: value to test * param2: 3D array * param3: dim X * param4: dim Y * param5: dim Z ******************************/ void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue) array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue; } } } } 
/***************************** * RANDU * GENERATES A UNIFORM DISTRIBUTION * returns a double representing a randomily generated number from a uniform distribution with range [0, 1) ******************************/ double randu(int * seed, int index) { int num = A*seed[index] + C; seed[index] = num % M; return fabs(seed[index]/((double) M)); } /****************************** * RANDN * GENERATES A NORMAL DISTRIBUTION * returns a double representing random number generated using Irwin-Hall distribution method * see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution ******************************/ double randn(int * seed, int index){ /*Box-Muller algorithm*/ double u = randu(seed, index); double v = randu(seed, index); double cosine = cos(2*PI*v); double rt = -2*log(u); return sqrt(rt)*cosine; } /****************************** * ADDNOISE * sets values of 3D matrix using randomly generated numbers from a normal distribution * param matrix ******************************/ void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0)); } } } } /****************************** * STRELDISK * param: pointer to the disk to be made * creates a 9x9 matrix representing the disk ******************************/ void strelDisk(int * disk, int radius) { int diameter = radius*2 - 1; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2)); if(distance < radius) disk[x*diameter + y] = 1; } } } /****************************** * DILATE_MATRIX * param1: matrix to be dilated * param2: current x position * param3: current y position * param4: current z position * param5: x length * param6: y length * param7: z length * param8: error radius *******************************/ void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) { int startX = posX - error; while(startX < 0) startX++; int startY = posY - error; while(startY < 0) startY++; /*int startZ = posZ - error; while(startZ < 0) startZ++;*/ int endX = posX + error; while(endX > dimX) endX--; int endY = posY + error; while(endY > dimY) endY--; /*int endZ = posZ + error; while(endZ > dimZ) endZ--;*/ int x,y; for(x = startX; x < endX; x++){ for(y = startY; y < endY; y++){ double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) ); if(distance < error) matrix[x*dimY*dimZ + y*dimZ + posZ] = 1; } } } /****************************** * IMDILATE_DISK * param1: target 3d matrix * param2: dimX * param3: dimY * param4: dimZ * param5: radius * param6: error * returns the dilated matrix * dilates the target matrix using the radius as a guide ******************************/ void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix) { int x, y, z; for(z = 0; z < dimZ; z++){ for(x = 0; x < dimX; x++){ for(y = 0; y < dimY; y++){ if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){ dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error); } } } } } /***************************** * GET NEIGHBORS * returns a 2D array describing the offets * param 1 strel object * param 2 dimX of object * param 3 dimY of object *******************************/ void getneighbors(int * se, int numOnes, double * neighbors, int radius){ int x, y; 
int neighY = 0; int center = radius - 1; int diameter = radius*2 -1; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(se[x*diameter + y]){ neighbors[neighY*2] = (int)(y - center); neighbors[neighY*2 + 1] = (int)(x - center); neighY++; } } } } /****************************** * VIDEO SEQUENCE * the synthetic video sequence we will work with here is composed of a * single moving object, circular in shape (fixed radius) * The motion here is a linear motion * the foreground intensity and the backgrounf intensity is known * the image is corrupted with zero mean Gaussian noise *******************************/ void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){ int k; int max_size = IszX*IszY*Nfr; /*get object centers*/ int x0 = (int)roundDouble(IszY/2.0); int y0 = (int)roundDouble(IszX/2.0); I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1; /*move point*/ int xk, yk, pos; for(k = 1; k < Nfr; k++){ xk = abs(x0 + (k-1)); yk = abs(y0 - 2*(k-1)); pos = yk * IszY * Nfr + xk *Nfr + k; if(pos >= max_size) pos = 0; I[pos] = 1; } /*dilate matrix*/ int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix); int x, y; for(x = 0; x < IszX; x++){ for(y = 0; y < IszY; y++){ for(k = 0; k < Nfr; k++){ I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k]; } } } free(newMatrix); /*define background, add noise*/ setIf(0, 100, I, &IszX, &IszY, &Nfr); setIf(1, 228, I, &IszX, &IszY, &Nfr); /*add noise*/ addNoise(I, &IszX, &IszY, &Nfr, seed); } /******************************** * CALC LIKELIHOOD SUM * DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100 * param 1 I 3D matrix * param 2 current ind array * param 3 length of ind array * returns a double representing the sum ********************************/ double calcLikelihoodSum(int * I, int * ind, int numOnes){ double likelihoodSum = 0.0; int y; for(y = 0; y < numOnes; y++) likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0; return likelihoodSum; } /****************************** * FIND INDEX * FINDS THE FIRST OCCURRENCE OF AN ELEMENT IN CDF GREATER THAN THE PROVIDED VALUE AND RETURNS THAT INDEX * param1 CDF * param2 length of CDF * param3 value *******************************/ int findIndex(double * CDF, int lengthCDF, double value){ int index = -1; int x; for(x = 0; x < lengthCDF; x++){ if(CDF[x] >= value){ index = x; break; } } if(index == -1){ return lengthCDF-1; } return index; } int findIndexBin(double * CDF, int beginIndex, int endIndex, double value){ if(endIndex < beginIndex) return -1; int middleIndex = beginIndex + ((endIndex - beginIndex)/2); /*check the value*/ if(CDF[middleIndex] >= value) { /*check that it's good*/ if(middleIndex == 0) return middleIndex; else if(CDF[middleIndex-1] < value) return middleIndex; else if(CDF[middleIndex-1] == value) { while(middleIndex > 0 && CDF[middleIndex-1] == value) middleIndex--; return middleIndex; } } if(CDF[middleIndex] > value) return findIndexBin(CDF, beginIndex, middleIndex+1, value); return findIndexBin(CDF, middleIndex-1, endIndex, value); } void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles, double * x_loc, double * y_loc, double xe, double ye){ long long start = get_time(); /*original particle centroid*/ x_loc[0] = xe; y_loc[0] = ye; /*expected object locations, compared to center*/ int radius = 5; int diameter = radius*2 -1; int * disk = (int *)mxCalloc(diameter*diameter, sizeof(int)); strelDisk(disk, radius); 
int countOnes = 0; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(disk[x*diameter + y] == 1) countOnes++; } } double * objxy = (double *)mxCalloc(countOnes*2, sizeof(double)); getneighbors(disk, countOnes, objxy, radius); long long get_neighbors = get_time(); printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors)); /*initial weights are all equal (1/Nparticles)*/ double * weights = (double *)malloc(sizeof(double)*Nparticles); #pragma omp parallel for shared(weights, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); } long long get_weights = get_time(); printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights)); /*initial likelihood to 0.0*/ double * likelihood = (double *)mxCalloc(Nparticles, sizeof(double)); double * arrayX = (double *)mxCalloc(Nparticles, sizeof(double)); double * arrayY = (double *)mxCalloc(Nparticles, sizeof(double)); double * xj = (double *)mxCalloc(Nparticles, sizeof(double)); double * yj = (double *)mxCalloc(Nparticles, sizeof(double)); double * CDF = (double *)mxCalloc(Nparticles, sizeof(double)); double * u = (double *)mxCalloc(Nparticles, sizeof(double)); //int * ind = (int*)mxCalloc(countOnes*Nparticles, sizeof(int)); mxArray * arguments[4]; mxArray * mxIK = mxCreateDoubleMatrix(IszX, IszY, mxREAL); mxArray * mxObj = mxCreateDoubleMatrix(countOnes, 2, mxREAL); mxArray * mxX = mxCreateDoubleMatrix(1, Nparticles, mxREAL); mxArray * mxY = mxCreateDoubleMatrix(1, Nparticles, mxREAL); double * Ik = (double *)mxCalloc(IszX*IszY, sizeof(double)); mxArray * result = mxCreateDoubleMatrix(1, Nparticles, mxREAL); float * farrayX = (float *)mxCalloc(Nparticles, sizeof(float)); float * farrayY = (float *)mxCalloc(Nparticles, sizeof(float)); float * fxj = (float *)mxCalloc(Nparticles, sizeof(float)); float * fyj = (float *)mxCalloc(Nparticles, sizeof(float)); float * fCDF = (float *)mxCalloc(Nparticles, sizeof(float)); float * fu = (float *)mxCalloc(Nparticles, sizeof(float)); //GPU copies of arrays float * arrayX_GPU; float * arrayY_GPU; float * xj_GPU; float * yj_GPU; float * CDF_GPU; float * u_GPU; //CUDA memory allocation check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles)); check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles)); check_error(cudaMalloc((void **) &xj_GPU, sizeof(float)*Nparticles)); check_error(cudaMalloc((void **) &yj_GPU, sizeof(float)*Nparticles)); check_error(cudaMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles)); check_error(cudaMalloc((void **) &u_GPU, sizeof(float)*Nparticles)); #pragma omp parallel for shared(arrayX, arrayY, xe, ye) private(x) for(x = 0; x < Nparticles; x++){ arrayX[x] = xe; arrayY[x] = ye; } int k; printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time())); for(k = 1; k < Nfr; k++){ long long set_arrays = get_time(); /*apply motion model //draws sample from motion model (random walk). 
The only prior information //is that the object moves 2x as fast as in the y direction*/ #pragma omp parallel for shared(arrayX, arrayY, Nparticles, seed) private(x) for(x = 0; x < Nparticles; x++){ arrayX[x] += 1 + 5*randn(seed, x); arrayY[x] += -2 + 2*randn(seed, x); } long long error = get_time(); printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error)); //get the current image for(x = 0; x < IszX; x++) { for(y = 0; y < IszY; y++) { Ik[x*IszX + y] = (double)I[k*IszX*IszY + x*IszY + y]; } } //copy arguments memcpy(mxGetPr(mxIK), Ik, sizeof(double)*IszX*IszY); memcpy(mxGetPr(mxObj), objxy, sizeof(double)*countOnes); memcpy(mxGetPr(mxX), arrayX, sizeof(double)*Nparticles); memcpy(mxGetPr(mxY), arrayY, sizeof(double)*Nparticles); arguments[0] = mxIK; arguments[1] = mxObj; arguments[2] = mxX; arguments[3] = mxY; mexCallMATLAB(1, &result, 4, arguments, "GetSimpleLikelihood"); memcpy(likelihood, result, sizeof(double)*Nparticles); long long likelihood_time = get_time(); printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time)); /* update & normalize weights // using equation (63) of Arulampalam Tutorial*/ #pragma omp parallel for shared(Nparticles, weights, likelihood) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = weights[x] * exp(likelihood[x]); } long long exponential = get_time(); printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential)); double sumWeights = 0; #pragma omp parallel for private(x) reduction(+:sumWeights) for(x = 0; x < Nparticles; x++){ sumWeights += weights[x]; } long long sum_time = get_time(); printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time)); #pragma omp parallel for shared(sumWeights, weights) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = weights[x]/sumWeights; } long long normalize = get_time(); printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize)); xe = 0; ye = 0; /* estimate the object location by expected values*/ #pragma omp parallel for private(x) reduction(+:xe, ye) for(x = 0; x < Nparticles; x++){ xe += arrayX[x] * weights[x]; ye += arrayY[x] * weights[x]; } long long move_time = get_time(); printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time)); printf("XE: %lf\n", xe); printf("YE: %lf\n", ye); x_loc[k] = xe; y_loc[k] = ye; double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) ); printf("%lf\n", distance); /*display(hold off for now) //pause(hold off for now) //resampling*/ CDF[0] = weights[0]; for(x = 1; x < Nparticles; x++){ CDF[x] = weights[x] + CDF[x-1]; } long long cum_sum = get_time(); printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum)); double u1 = (1/((double)(Nparticles)))*randu(seed, 0); #pragma omp parallel for shared(u, u1, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ u[x] = u1 + x/((double)(Nparticles)); } long long u_time = get_time(); printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time)); for(x = 0; x < Nparticles; x++) { farrayX[x] = (float)arrayX[x]; farrayY[x] = (float)arrayY[x]; fxj[x] = (float)xj[x]; fyj[x] = (float)yj[x]; fCDF[x] = (float)CDF[x]; fu[x] = (float)u[x]; } long long start_copy = get_time(); //CUDA memory copying from CPU memory to GPU memory cudaMemcpy(arrayX_GPU, farrayX, sizeof(float)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(arrayY_GPU, farrayY, sizeof(float)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(xj_GPU, fxj, sizeof(float)*Nparticles, 
cudaMemcpyHostToDevice); cudaMemcpy(yj_GPU, fyj, sizeof(float)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(CDF_GPU, fCDF, sizeof(float)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(u_GPU, fu, sizeof(float)*Nparticles, cudaMemcpyHostToDevice); long long end_copy = get_time(); //Set number of threads int num_blocks = ceil((double) Nparticles/(double) threads_per_block); //KERNEL FUNCTION CALL kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles); long long start_copy_back = get_time(); //CUDA memory copying back from GPU to CPU memory cudaMemcpy(fyj, yj_GPU, sizeof(float)*Nparticles, cudaMemcpyDeviceToHost); cudaMemcpy(fxj, xj_GPU, sizeof(float)*Nparticles, cudaMemcpyDeviceToHost); for(x = 0; x < Nparticles; x++) { xj[x] = (double)fxj[x]; yj[x] = (double)fyj[x]; } long long end_copy_back = get_time(); printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy)); printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back)); printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back)); long long xyj_time = get_time(); printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time)); /*reassign arrayX and arrayY*/ #pragma omp parallel for shared(weights, arrayX, arrayY, xj, yj, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); arrayX[x] = xj[x]; arrayY[x] = yj[x]; } long long reset = get_time(); printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset)); } mxFree(disk); mxFree(weights); mxFree(objxy); mxFree(likelihood); mxFree(arrayX); mxFree(arrayY); mxFree(CDF); mxFree(u); //mxFree(ind); mxFree(xj); mxFree(yj); mxFree(Ik); mxFree(farrayX); mxFree(farrayY); mxFree(fxj); mxFree(fyj); mxFree(fCDF); mxFree(fu); //CUDA freeing of memory cudaFree(u_GPU); cudaFree(CDF_GPU); cudaFree(yj_GPU); cudaFree(xj_GPU); cudaFree(arrayY_GPU); cudaFree(arrayX_GPU); } void particleFilter1F(int * I, int IszX, int IszY, int * seed, int Nparticles, double * x_loc, double * y_loc, double prevX, double prevY){ long long start = get_time(); /*original particle centroid*/ double xe = prevX; double ye = prevY; /*expected object locations, compared to center*/ int radius = 5; int diameter = radius*2 -1; int * disk = (int *)mxCalloc(diameter*diameter, sizeof(int)); strelDisk(disk, radius); int countOnes = 0; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(disk[x*diameter + y] == 1) countOnes++; } } double * objxy = (double *)mxCalloc(countOnes*2, sizeof(double)); getneighbors(disk, countOnes, objxy, radius); long long get_neighbors = get_time(); printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors)); /*initial weights are all equal (1/Nparticles)*/ double * weights = (double *)malloc(sizeof(double)*Nparticles); #pragma omp parallel for shared(weights, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); } long long get_weights = get_time(); printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights)); /*initial likelihood to 0.0*/ double * likelihood = (double *)mxCalloc(Nparticles, sizeof(double)); double * arrayX = (double *)mxCalloc(Nparticles, sizeof(double)); double * arrayY = (double *)mxCalloc(Nparticles, sizeof(double)); double * xj = (double *)mxCalloc(Nparticles, sizeof(double)); double * yj = (double *)mxCalloc(Nparticles, sizeof(double)); double * CDF = (double *)mxCalloc(Nparticles, sizeof(double)); 
double * u = (double *)mxCalloc(Nparticles, sizeof(double)); //int * ind = (int*)mxCalloc(countOnes*Nparticles, sizeof(int)); float * farrayX = (float *)mxCalloc(Nparticles, sizeof(float)); float * farrayY = (float *)mxCalloc(Nparticles, sizeof(float)); float * fxj = (float *)mxCalloc(Nparticles, sizeof(float)); float * fyj = (float *)mxCalloc(Nparticles, sizeof(float)); float * fCDF = (float *)mxCalloc(Nparticles, sizeof(float)); float * fu = (float *)mxCalloc(Nparticles, sizeof(float)); //GPU copies of arrays float * arrayX_GPU; float * arrayY_GPU; float * xj_GPU; float * yj_GPU; float * CDF_GPU; float * u_GPU; //CUDA memory allocation check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles)); check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles)); check_error(cudaMalloc((void **) &xj_GPU, sizeof(float)*Nparticles)); check_error(cudaMalloc((void **) &yj_GPU, sizeof(float)*Nparticles)); check_error(cudaMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles)); check_error(cudaMalloc((void **) &u_GPU, sizeof(float)*Nparticles)); mxArray * arguments[4]; mxArray * mxIK = mxCreateDoubleMatrix(IszX, IszY, mxREAL); mxArray * mxObj = mxCreateDoubleMatrix(countOnes, 2, mxREAL); mxArray * mxX = mxCreateDoubleMatrix(1, Nparticles, mxREAL); mxArray * mxY = mxCreateDoubleMatrix(1, Nparticles, mxREAL); double * Ik = (double *)mxCalloc(IszX*IszY, sizeof(double)); mxArray * result = mxCreateDoubleMatrix(1, Nparticles, mxREAL); #pragma omp parallel for shared(arrayX, arrayY, xe, ye) private(x) for(x = 0; x < Nparticles; x++){ arrayX[x] = xe; arrayY[x] = ye; } printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time())); long long set_arrays = get_time(); /*apply motion model //draws sample from motion model (random walk). 
The only prior information //is that the object moves 2x as fast as in the y direction*/ #pragma omp parallel for shared(arrayX, arrayY, Nparticles, seed) private(x) for(x = 0; x < Nparticles; x++){ arrayX[x] += 1 + 5*randn(seed, x); arrayY[x] += -2 + 2*randn(seed, x); } long long error = get_time(); printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error)); /*particle filter likelihood*/ #pragma omp parallel for shared(likelihood, I, arrayX, arrayY, objxy, ind) private(x, y, indX, indY) //get the current image for(x = 0; x < IszX; x++) { for(y = 0; y < IszY; y++) { Ik[x*IszX + y] = (double)I[x*IszY + y]; } } //copy arguments memcpy(mxGetPr(mxIK), Ik, sizeof(double)*IszX*IszY); memcpy(mxGetPr(mxObj), objxy, sizeof(double)*countOnes); memcpy(mxGetPr(mxX), arrayX, sizeof(double)*Nparticles); memcpy(mxGetPr(mxY), arrayY, sizeof(double)*Nparticles); arguments[0] = mxIK; arguments[1] = mxObj; arguments[2] = mxX; arguments[3] = mxY; mexCallMATLAB(1, &result, 4, arguments, "GetSimpleLikelihood"); memcpy(likelihood, result, sizeof(double)*Nparticles); long long likelihood_time = get_time(); printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time)); /* update & normalize weights // using equation (63) of Arulampalam Tutorial*/ #pragma omp parallel for shared(Nparticles, weights, likelihood) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = weights[x] * exp(likelihood[x]); } long long exponential = get_time(); printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential)); double sumWeights = 0; #pragma omp parallel for private(x) reduction(+:sumWeights) for(x = 0; x < Nparticles; x++){ sumWeights += weights[x]; } long long sum_time = get_time(); printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time)); #pragma omp parallel for shared(sumWeights, weights) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = weights[x]/sumWeights; } long long normalize = get_time(); printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize)); xe = 0; ye = 0; /* estimate the object location by expected values*/ #pragma omp parallel for private(x) reduction(+:xe, ye) for(x = 0; x < Nparticles; x++){ xe += arrayX[x] * weights[x]; ye += arrayY[x] * weights[x]; /*printf("POSX[%d]: %lf \t WGT[%d]: %lf\n", x, arrayX[x], x, weights[x]); printf("POSY[%d]: %lf \t WGT[%d]: %lf\n", x, arrayY[x], x, weights[x]);*/ } long long move_time = get_time(); printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time)); printf("XE: %lf\n", xe); printf("YE: %lf\n", ye); x_loc[0] = xe+.5; y_loc[0] = ye+.5; double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) ); printf("%lf\n", distance); /*display(hold off for now) //pause(hold off for now) //resampling*/ CDF[0] = weights[0]; for(x = 1; x < Nparticles; x++){ CDF[x] = weights[x] + CDF[x-1]; } long long cum_sum = get_time(); printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum)); double u1 = (1/((double)(Nparticles)))*randu(seed, 0); #pragma omp parallel for shared(u, u1, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ u[x] = u1 + x/((double)(Nparticles)); } long long u_time = get_time(); printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time)); for(x = 0; x < Nparticles; x++) { farrayX[x] = (float)arrayX[x]; farrayY[x] = (float)arrayY[x]; fxj[x] = (float)xj[x]; fyj[x] = (float)yj[x]; fCDF[x] = (float)CDF[x]; fu[x] = (float)u[x]; } long long start_copy = 
get_time(); //CUDA memory copying from CPU memory to GPU memory cudaMemcpy(arrayX_GPU, farrayX, sizeof(float)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(arrayY_GPU, farrayY, sizeof(float)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(xj_GPU, fxj, sizeof(float)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(yj_GPU, fyj, sizeof(float)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(CDF_GPU, fCDF, sizeof(float)*Nparticles, cudaMemcpyHostToDevice); cudaMemcpy(u_GPU, fu, sizeof(float)*Nparticles, cudaMemcpyHostToDevice); long long end_copy = get_time(); //Set number of threads int num_blocks = ceil((double) Nparticles/(double) threads_per_block); //KERNEL FUNCTION CALL kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles); long long start_copy_back = get_time(); //CUDA memory copying back from GPU to CPU memory cudaMemcpy(fyj, yj_GPU, sizeof(float)*Nparticles, cudaMemcpyDeviceToHost); cudaMemcpy(fxj, xj_GPU, sizeof(float)*Nparticles, cudaMemcpyDeviceToHost); for(x = 0; x < Nparticles; x++) { xj[x] = (double)fxj[x]; yj[x] = (double)fyj[x]; } long long end_copy_back = get_time(); printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy)); printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back)); printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back)); long long xyj_time = get_time(); printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time)); /*reassign arrayX and arrayY*/ #pragma omp parallel for shared(weights, arrayX, arrayY, xj, yj, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); arrayX[x] = xj[x]; arrayY[x] = yj[x]; } long long reset = get_time(); printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset)); mxFree(disk); mxFree(weights); mxFree(objxy); mxFree(likelihood); mxFree(arrayX); mxFree(arrayY); mxFree(CDF); mxFree(u); //mxFree(ind); mxFree(xj); mxFree(yj); mxFree(Ik); mxFree(farrayX); mxFree(farrayY); mxFree(fxj); mxFree(fyj); mxFree(fCDF); mxFree(fu); //CUDA freeing of memory cudaFree(u_GPU); cudaFree(CDF_GPU); cudaFree(yj_GPU); cudaFree(xj_GPU); cudaFree(arrayY_GPU); cudaFree(arrayX_GPU); } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){ int * I; int IszX, IszY, Nfr, Nparticles; if(nrhs < 6) { printf("ERROR: TOO FEW ARGS HAVE BEEN ENTERED\n"); printf("EXITING\n"); exit(0); } else if(nrhs == 7) { IszX = (int)(mxGetScalar(prhs[1])); IszY = (int)(mxGetScalar(prhs[2])); Nfr = (int)(mxGetScalar(prhs[3])); Nparticles = (int)(mxGetScalar(prhs[4])); printf("ISZX: %d\n", IszX); printf("ISZY: %d\n", IszY); printf("Nfr: %d\n", Nfr); printf("Nparticles: %d\n", Nparticles); unsigned char * cI = (unsigned char *)mxGetData(prhs[0]); I = (int *)mxCalloc(IszX*IszY*Nfr, sizeof(int)); int x, y, z; for(x = 0; x < IszX; x++){ for(y = 0; y < IszY; y++){ for(z = 0; z < Nfr; z++){ I[x*IszY*Nfr + y*Nfr + z] = (int)cI[x*IszY*Nfr + y*Nfr + z]; } } } double xe = (double)mxGetScalar(prhs[5]); double ye = (double)mxGetScalar(prhs[6]); int * seed = (int *)mxCalloc(Nparticles, sizeof(int)); int i; for(i = 0; i < Nparticles; i++) seed[i] = time(0)*i; double * posX = (double *)mxCalloc(Nfr, sizeof(double)); double * posY = (double *)mxCalloc(Nfr, sizeof(double)); long long start = get_time(); particleFilter(I, IszX, IszY, Nfr, seed, Nparticles, posX, posY, xe, ye); long long end = get_time(); mxFree(I); mxFree(seed); printf("PARTICLE FILTER TOOK %f\n", 
elapsed_time(start, end)); plhs[0] = mxCreateDoubleMatrix(Nfr, 1, mxREAL); plhs[1] = mxCreateDoubleMatrix(Nfr, 1, mxREAL); double * bufferX = mxGetPr(plhs[0]); double * bufferY = mxGetPr(plhs[1]); for(i = 0; i < Nfr; i++) { bufferX[i] = posX[i]; bufferY[i] = posY[i]; } mxFree(posX); mxFree(posY); } else if(nrhs == 6) { IszX = (int)(mxGetScalar(prhs[1])); IszY = (int)(mxGetScalar(prhs[2])); Nparticles = (int)(mxGetScalar(prhs[3])); printf("ISZX: %d\n", IszX); printf("ISZY: %d\n", IszY); printf("Nparticles: %d\n", Nparticles); double startX = (double)mxGetScalar(prhs[4]); double startY = (double)mxGetScalar(prhs[5]); printf("Starting PosX: %lf\n", startX); printf("Starting PosY: %lf\n", startY); unsigned char * cI = (unsigned char *)mxGetData(prhs[0]); I = (int *)mxCalloc(IszX*IszY, sizeof(int)); int x, y; for(x = 0; x < IszX; x++){ for(y = 0; y < IszY; y++){ I[x*IszX + y] = (int)cI[x*IszX + y]; } } int * seed = (int *)mxCalloc(Nparticles, sizeof(int)); int i; for(i = 0; i < Nparticles; i++) seed[i] = time(0)*i; double posX[1]; double posY[1]; long long start = get_time(); particleFilter1F(I, IszX, IszY, seed, Nparticles, posX, posY, startX, startY); long long end = get_time(); mxFree(I); mxFree(seed); printf("PARTICLE FILTER TOOK %f\n", elapsed_time(start, end)); plhs[0] = mxCreateDoubleMatrix(1,1,mxREAL); plhs[1] = mxCreateDoubleMatrix(1,1,mxREAL); double * bufferX = mxGetPr(plhs[0]); double * bufferY = mxGetPr(plhs[1]); bufferX[0] = posX[0]; bufferY[0] = posY[0]; } else { printf("ERROR: TOO MANY ARGS\n"); printf("EXITING\n"); exit(0); } } int main(){ /*3D matrix consisting the picture and the frames*/ int * I; /*dimension X of the picture in pixels*/ int IszX = 128; /*dimension Y of the picture in pixels*/ int IszY = 128; /*number of frames*/ int Nfr = 10; /*define number of particles*/ int Nparticles = 100000; /*establish seed*/ int * seed = (int *)malloc(sizeof(int)*Nparticles); int i; for(i = 0; i < Nparticles; i++) seed[i] = time(0)*i; /*malloc matrix*/ I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); long long start = get_time(); /*call video sequence*/ videoSequence(I, IszX, IszY, Nfr, seed); long long endVideoSequence = get_time(); printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence)); double * posX = (double *)mxCalloc(Nfr, sizeof(double)); double * posY = (double *)mxCalloc(Nfr, sizeof(double)); double xe = IszX/2.0; double ye = IszY/2.0; /*call particle filter*/ particleFilter(I, IszX, IszY, Nfr, seed, Nparticles, posX, posY, xe, ye); free(I); free(seed); long long endParticleFilter = get_time(); printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter)); printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter)); return 0; }
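/*
 * Editor's sketch (not part of the original Rodinia source): a plain CPU
 * reference for the resampling step the kernel above performs, handy for
 * spot-checking the GPU results after the device-to-host copies. It reuses
 * the file's own findIndex() on the double-precision CDF; cpu_resample_check
 * and the 1e-3 tolerance are introduced here for illustration only (the GPU
 * path stages data through float buffers, so an exact comparison would fail).
 */
static int cpu_resample_check(double * CDF, double * u,
                              double * arrayX, double * arrayY,
                              double * xj, double * yj, int Nparticles)
{
    int mismatches = 0;
    int i;
    for (i = 0; i < Nparticles; i++) {
        /* same lookup the kernel does with d_findIndexSeq, in double precision */
        int index = findIndex(CDF, Nparticles, u[i]);
        /* compare loosely because of the float staging on the GPU side */
        if (fabs(xj[i] - arrayX[index]) > 1e-3 ||
            fabs(yj[i] - arrayY[index]) > 1e-3) {
            mismatches++;
        }
    }
    return mismatches;
}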
d8063319cb146b5978b23a3906b81dbdc1db3284.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ============================================================================= // === GPUQREngine/Include/Kernel/Factorize/factorize_vt.cu ==================== // ============================================================================= // GPUQREngine, Copyright (c) 2013, Timothy A Davis, Sencer Nuri Yeralan, // and Sanjay Ranka. All Rights Reserved. // SPDX-License-Identifier: GPL-2.0+ //------------------------------------------------------------------------------ // Constraints: // MCHUNK = M / BITTYROWS must be an integer // MCHUNK * N must be <= NUMTHREADS #ifdef FACTORIZE // TODO allow EPSILON to be defined by the caller #define EPSILON (1e-90) __device__ void FACTORIZE ( ) { //-------------------------------------------------------------------------- // The bitty block //-------------------------------------------------------------------------- // The matrix A is M by N, T is N by N (if present) // Each thread owns an r-by-1 bitty column of A. If its first entry is // A(ia,ja), and then it owns entries in every mchunk rows after that // [A(ia,ja), A(ia+mchunk,ja), ... ]. // Each column is operated on by mchunk threads. #define MCHUNK (M / BITTYROWS) #define MYBITTYROW(ii) (ii*MCHUNK + (threadIdx.x % MCHUNK)) #define MYBITTYCOL (threadIdx.x / MCHUNK) #define ATHREADS (MCHUNK * N) #define WORKER (ATHREADS == NUMTHREADS || threadIdx.x < ATHREADS) double rbitA [BITTYROWS] ; // bitty block for A double rbitV [BITTYROWS] ; // bitty block for V double sigma ; // only used by thread zero //-------------------------------------------------------------------------- // shared memory usage //-------------------------------------------------------------------------- #define shA shMemory.factorize.A #define shZ shMemory.factorize.Z #define shRdiag shMemory.factorize.A1 #define RSIGMA(i) shMemory.factorize.V1 [i] #ifdef WHOLE_FRONT // T is not computed, and there is no list of tiles #define TAU shMemory.factorize.tau #else // T is computed and saved in the VT tile, work on a set of tiles #define TAU shT [k][k] #define shT shMemory.factorize.T #define shV1 shMemory.factorize.V1 #endif //-------------------------------------------------------------------------- // Grab my task from the queue. //-------------------------------------------------------------------------- int fn = myTask.fn ; #ifdef WHOLE_FRONT int fm = myTask.fm ; int nv = MIN (fm, fn) ; // If nv is a constant, it allows the outer loop to be unrolled: // #define nv N #define j1 0 // The whole front always considers the edge case #ifndef EDGE_CASE #define EDGE_CASE #endif #else int j1 = myTask.extra[4] ; #ifdef EDGE_CASE int fm = myTask.fm ; int nv = MIN (fm, fn) - j1 ; nv = MIN (nv, N) ; nv = MIN (nv, M) ; #else #define nv N #endif double (*glVT)[TILESIZE] = (double (*)[TILESIZE]) myTask.AuxAddress[0] ; #endif #ifdef EDGE_CASE // Check if an entry is inside the front. #define INSIDE(test) (test) #else // The entry is guaranteed to reside inside the frontal matrix. #define INSIDE(test) (1) #endif // bool is_false = (fn < 0) ; #define glA(i,j) (myTask.F[((i)*fn + (j))]) //-------------------------------------------------------------------------- // Load A into shared memory //-------------------------------------------------------------------------- // ACHUNKSIZE must be an integer #define it (threadIdx.x / N) #define jt (threadIdx.x % N) #define ACHUNKSIZE (NUMTHREADS / N) #ifdef WHOLE_FRONT // all threads load the entire front (no tiles). 
// always check the edge case. // M / ACHUNKSIZE must be an integer. #define NACHUNKS (M / ACHUNKSIZE) for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; shA [i][jt] = (i < fm && jt < fn) ? glA (i, jt) : 0 ; } #else // when all threads work on a tile. // (N*N / NUMTHREADS) does not have to be an integer. With a tile // size of N=32, and NUMTHREADS=384, it isn't. So compute the ceiling, // and handle the clean up by testing i < N below. #define NACHUNKS CEIL (N*N, NUMTHREADS) /* If we're not coming from an apply-factorize, load from F. */ if(IsApplyFactorize == 0) { // Load tiles from the frontal matrix // accounts for 25% of the total time on Kepler, 13% on Fermi for (int t = 0 ; t < ROW_PANELSIZE ; t++) { int rowTile = myTask.extra[t]; if (INSIDE (rowTile != EMPTY)) { /* load the tile of A from global memory */ for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; if (ii < NACHUNKS-1 || i < N) { shA [i + t*TILESIZE][jt] = (INSIDE (i+rowTile < fm) && INSIDE (jt+j1 < fn)) ? glA (i+rowTile, jt+j1) : 0 ; } } } else { /* clear the tile of A */ for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; if (ii < NACHUNKS-1 || i < N) { shA [i + t*TILESIZE][jt] = 0 ; } } } } } // clear the tile T for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; if (ii < NACHUNKS-1 || i < N) { shT [i][jt] = 0 ; } } #endif /* We need all of A to be loaded and T to be cleared before proceeding. */ __syncthreads(); //-------------------------------------------------------------------------- // load A into the bitty block //-------------------------------------------------------------------------- if (WORKER) { #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; rbitA [ii] = shA [i][MYBITTYCOL] ; } } //-------------------------------------------------------------------------- // compute the first sigma = sum (A (1:m,1).^2) //-------------------------------------------------------------------------- if (WORKER && MYBITTYCOL == 0) { // each thread that owns column 0 computes sigma for its // own bitty block double s = 0 ; #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; if (i >= 1) { s += rbitA [ii] * rbitA [ii] ; } } RSIGMA (threadIdx.x) = s ; } // thread zero must wait for RSIGMA __syncthreads ( ) ; if (threadIdx.x == 0) { sigma = 0 ; #pragma unroll for (int ii = 0 ; ii < MCHUNK ; ii++) { sigma += RSIGMA (ii) ; } } //-------------------------------------------------------------------------- // Do the block householder factorization //-------------------------------------------------------------------------- // loop unrolling has no effect on the edge case (it is not unrolled), // but greatly speeds up the non-edge case. #pragma unroll for (int k = 0 ; k < nv ; k++) { //---------------------------------------------------------------------- // write the kth column of A back into shared memory //---------------------------------------------------------------------- if (WORKER && MYBITTYCOL == k && k > 0) { // the bitty block for threads that own column k contains // the kth column of R and the kth column of v. #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; shA [i][k] = rbitA [ii] ; } } __syncthreads ( ) ; // At this point, A (:,k) is held in both shared memory, and in the // threads that own that column. A (:,k) is not yet the kth // Householder vector, except for the diagnal (which is computed // below). 
A (0:k-1,k) is now the kth column of R (above the // diagonal). //---------------------------------------------------------------------- // compute the Householder coefficients //---------------------------------------------------------------------- // This is costly, accounting for about 25% of the total time on // Kepler, and 22% on Fermi, when A is loaded from global memory. This // means the work here is even a higher fraction when A is in shared. if (threadIdx.x == 0) { double x1 = shA [k][k] ; // the diagonal A (k,k) double s, v1, tau ; if (sigma <= EPSILON) { // printf ("hit eps %g\n", sigma) ; s = x1 ; v1 = 0 ; tau = 0 ; } else { s = sqrt (x1*x1 + sigma) ; v1 = (x1 <= 0) ? (x1 - s) : (-sigma / (x1 + s)) ; tau = -1 / (s * v1) ; } shRdiag [k] = s ; // the diagonal entry of R shA [k][k] = v1 ; // the topmost entry of the vector v TAU = tau ; // tile case: T (k,k) = tau } // All threads need v1, and later on they need tau __syncthreads ( ) ; // A (0:k-1,k) now holds the kth column of R (excluding the diagonal). // A (k:m-1,k) holds the kth Householder vector (incl. the diagonal). //---------------------------------------------------------------------- // z = (-tau) * v' * A (k:m-1,:), where v is A (k:m-1,k) //---------------------------------------------------------------------- if (WORKER) // && (COMPUTE_T || MYBITTYCOL > k)) { // Load the vector v from A (k:m-1,k) into the V bitty block. // If T is not computed and MYBITTYCOL <= k, then this code can // be skipped, but the code is slower when that test is made. #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; // only i >= k is needed, but it's faster to load it all rbitV [ii] = shA [i][k] ; } // compute v' * A (k:m-1,:), each thread in its own column { double z = 0.0 ; #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; if (i >= k) { z += rbitV [ii] * rbitA [ii] ; } } // store z into the reduction space in shared memory shZ [MYBITTYROW(0)][MYBITTYCOL] = z ; } } // All threads need to see the reduction space for Z __syncthreads ( ) ; // Reduce Z into a single row vector z, using the first warp only if (threadIdx.x < N) // && (COMPUTE_T || threadIdx.x > k)) { double z = 0 ; #pragma unroll for (int ii = 0 ; ii < MCHUNK ; ii++) { z += shZ [ii][threadIdx.x] ; } shZ [0][threadIdx.x] = - z * TAU ; } // All threads need to see the z vector __syncthreads ( ) ; //---------------------------------------------------------------------- // update A (in register) and compute the next sigma //---------------------------------------------------------------------- if (WORKER && MYBITTYCOL > k) { // A (k:m,k+1:n) = A (k:,k+1:n) + v * z (k+1:n) ; // only threads that own a column MYBITTYCOL > k do any work { double z = shZ [0][MYBITTYCOL] ; #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; if (i >= k) { rbitA [ii] += rbitV [ii] * z ; } } } // sigma = sum (A ((k+2):m,k+1).^2), except for the reduction if (MYBITTYCOL == k+1) { // each thread that owns column k+1 computes sigma for its // own bitty block double s = 0 ; #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; if (i >= k+2) { s += rbitA [ii] * rbitA [ii] ; } } RSIGMA (MYBITTYROW(0)) = s ; } } //---------------------------------------------------------------------- // construct the kth column of T //---------------------------------------------------------------------- #ifndef WHOLE_FRONT // T (0:k-1,k) = T (0:k-1,0:k-1) * z (0:k-1)' if (threadIdx.x < k) { double 
t_ik = 0 ; for (int jj = 0 ; jj < k ; jj++) { t_ik += shT [threadIdx.x][jj] * shZ [0][jj] ; } shT [threadIdx.x][k] = t_ik ; } #endif //---------------------------------------------------------------------- // reduce sigma into a single scalar for the next iteration //---------------------------------------------------------------------- // Thread zero must wait for RSIGMA __syncthreads ( ) ; if (threadIdx.x == 0) { sigma = 0 ; #pragma unroll for (int ii = 0 ; ii < MCHUNK ; ii++) { sigma += RSIGMA (ii) ; } } } // tril (A) now holds all the Householder vectors, including the diagonal. // triu (A,1) now holds R, excluding the diagonal. // shRdiag holds the diagonal of R. //-------------------------------------------------------------------------- // write out the remaining columns of R, if any //-------------------------------------------------------------------------- if (WORKER && MYBITTYCOL >= nv) { for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; shA [i][MYBITTYCOL] = rbitA [ii] ; } } //-------------------------------------------------------------------------- /* Have a warp shuffle memory around. */ if (threadIdx.x < N) { #ifndef WHOLE_FRONT shV1 [threadIdx.x] = shA [threadIdx.x][threadIdx.x]; #endif shA [threadIdx.x][threadIdx.x] = shRdiag [threadIdx.x]; } // Wait for the memory shuffle to finish before saving A to global memory __syncthreads(); //-------------------------------------------------------------------------- // save results back to global memory //-------------------------------------------------------------------------- #ifdef WHOLE_FRONT if (jt < fn) { for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; if (i < fm) glA (i, jt) = shA [i][jt] ; } } #else // Save VT back to global memory & clear out // lower-triangular part of the first tile (leaving R). for (int th=threadIdx.x; th<TILESIZE*TILESIZE; th+=blockDim.x) { int i = th / 32; int j = th % 32; /* The upper triangular part (including diagonal) is T. */ if(i <= j) { glVT[i][j] = shT[i][j]; } /* The lower triangular part is V. * Note we clear the tril part leaving only R in this tile. */ else { glVT[i+1][j] = shA[i][j]; shA[i][j] = 0.0; } } // Save the diagonal if (threadIdx.x < N) { glVT[threadIdx.x+1][threadIdx.x] = shV1[threadIdx.x]; } // Wait for this operation to complete before saving A back to global // memory __syncthreads(); // save the tiles in A back into the front in global memory for (int t = 0 ; t < ROW_PANELSIZE ; t++) { int rowTile = myTask.extra[t]; if (INSIDE (rowTile != EMPTY)) { for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; if (ii < NACHUNKS-1 || i < N) { if (INSIDE (i+rowTile < fm) && INSIDE (jt+j1 < fn)) { glA (i+rowTile, jt+j1) = shA [i + t*TILESIZE][jt]; } } } } } #endif } //------------------------------------------------------------------------------ // undefine macros //------------------------------------------------------------------------------ #undef EPSILON #undef FACTORIZE #undef M #undef N #undef glA #undef WORKER #undef ATHREADS #undef MCHUNK #undef BITTYROWS #undef MYBITTYROW #undef MYBITTYCOL #undef shA #undef shT #undef shZ #undef shRdiag #undef shV1 #undef RSIGMA #undef TAU #undef INSIDE #undef INSIDE #undef nv #undef it #undef jt #undef j1 #undef ACHUNKSIZE #undef SAFELOAD #undef NACHUNKS #undef EDGE_CASE #undef WHOLE_FRONT #undef ROW_PANELSIZE #endif
d8063319cb146b5978b23a3906b81dbdc1db3284.cu
// ============================================================================= // === GPUQREngine/Include/Kernel/Factorize/factorize_vt.cu ==================== // ============================================================================= // GPUQREngine, Copyright (c) 2013, Timothy A Davis, Sencer Nuri Yeralan, // and Sanjay Ranka. All Rights Reserved. // SPDX-License-Identifier: GPL-2.0+ //------------------------------------------------------------------------------ // Constraints: // MCHUNK = M / BITTYROWS must be an integer // MCHUNK * N must be <= NUMTHREADS #ifdef FACTORIZE // TODO allow EPSILON to be defined by the caller #define EPSILON (1e-90) __device__ void FACTORIZE ( ) { //-------------------------------------------------------------------------- // The bitty block //-------------------------------------------------------------------------- // The matrix A is M by N, T is N by N (if present) // Each thread owns an r-by-1 bitty column of A. If its first entry is // A(ia,ja), and then it owns entries in every mchunk rows after that // [A(ia,ja), A(ia+mchunk,ja), ... ]. // Each column is operated on by mchunk threads. #define MCHUNK (M / BITTYROWS) #define MYBITTYROW(ii) (ii*MCHUNK + (threadIdx.x % MCHUNK)) #define MYBITTYCOL (threadIdx.x / MCHUNK) #define ATHREADS (MCHUNK * N) #define WORKER (ATHREADS == NUMTHREADS || threadIdx.x < ATHREADS) double rbitA [BITTYROWS] ; // bitty block for A double rbitV [BITTYROWS] ; // bitty block for V double sigma ; // only used by thread zero //-------------------------------------------------------------------------- // shared memory usage //-------------------------------------------------------------------------- #define shA shMemory.factorize.A #define shZ shMemory.factorize.Z #define shRdiag shMemory.factorize.A1 #define RSIGMA(i) shMemory.factorize.V1 [i] #ifdef WHOLE_FRONT // T is not computed, and there is no list of tiles #define TAU shMemory.factorize.tau #else // T is computed and saved in the VT tile, work on a set of tiles #define TAU shT [k][k] #define shT shMemory.factorize.T #define shV1 shMemory.factorize.V1 #endif //-------------------------------------------------------------------------- // Grab my task from the queue. //-------------------------------------------------------------------------- int fn = myTask.fn ; #ifdef WHOLE_FRONT int fm = myTask.fm ; int nv = MIN (fm, fn) ; // If nv is a constant, it allows the outer loop to be unrolled: // #define nv N #define j1 0 // The whole front always considers the edge case #ifndef EDGE_CASE #define EDGE_CASE #endif #else int j1 = myTask.extra[4] ; #ifdef EDGE_CASE int fm = myTask.fm ; int nv = MIN (fm, fn) - j1 ; nv = MIN (nv, N) ; nv = MIN (nv, M) ; #else #define nv N #endif double (*glVT)[TILESIZE] = (double (*)[TILESIZE]) myTask.AuxAddress[0] ; #endif #ifdef EDGE_CASE // Check if an entry is inside the front. #define INSIDE(test) (test) #else // The entry is guaranteed to reside inside the frontal matrix. #define INSIDE(test) (1) #endif // bool is_false = (fn < 0) ; #define glA(i,j) (myTask.F[((i)*fn + (j))]) //-------------------------------------------------------------------------- // Load A into shared memory //-------------------------------------------------------------------------- // ACHUNKSIZE must be an integer #define it (threadIdx.x / N) #define jt (threadIdx.x % N) #define ACHUNKSIZE (NUMTHREADS / N) #ifdef WHOLE_FRONT // all threads load the entire front (no tiles). // always check the edge case. // M / ACHUNKSIZE must be an integer. 
#define NACHUNKS (M / ACHUNKSIZE) for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; shA [i][jt] = (i < fm && jt < fn) ? glA (i, jt) : 0 ; } #else // when all threads work on a tile. // (N*N / NUMTHREADS) does not have to be an integer. With a tile // size of N=32, and NUMTHREADS=384, it isn't. So compute the ceiling, // and handle the clean up by testing i < N below. #define NACHUNKS CEIL (N*N, NUMTHREADS) /* If we're not coming from an apply-factorize, load from F. */ if(IsApplyFactorize == 0) { // Load tiles from the frontal matrix // accounts for 25% of the total time on Kepler, 13% on Fermi for (int t = 0 ; t < ROW_PANELSIZE ; t++) { int rowTile = myTask.extra[t]; if (INSIDE (rowTile != EMPTY)) { /* load the tile of A from global memory */ for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; if (ii < NACHUNKS-1 || i < N) { shA [i + t*TILESIZE][jt] = (INSIDE (i+rowTile < fm) && INSIDE (jt+j1 < fn)) ? glA (i+rowTile, jt+j1) : 0 ; } } } else { /* clear the tile of A */ for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; if (ii < NACHUNKS-1 || i < N) { shA [i + t*TILESIZE][jt] = 0 ; } } } } } // clear the tile T for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; if (ii < NACHUNKS-1 || i < N) { shT [i][jt] = 0 ; } } #endif /* We need all of A to be loaded and T to be cleared before proceeding. */ __syncthreads(); //-------------------------------------------------------------------------- // load A into the bitty block //-------------------------------------------------------------------------- if (WORKER) { #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; rbitA [ii] = shA [i][MYBITTYCOL] ; } } //-------------------------------------------------------------------------- // compute the first sigma = sum (A (1:m,1).^2) //-------------------------------------------------------------------------- if (WORKER && MYBITTYCOL == 0) { // each thread that owns column 0 computes sigma for its // own bitty block double s = 0 ; #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; if (i >= 1) { s += rbitA [ii] * rbitA [ii] ; } } RSIGMA (threadIdx.x) = s ; } // thread zero must wait for RSIGMA __syncthreads ( ) ; if (threadIdx.x == 0) { sigma = 0 ; #pragma unroll for (int ii = 0 ; ii < MCHUNK ; ii++) { sigma += RSIGMA (ii) ; } } //-------------------------------------------------------------------------- // Do the block householder factorization //-------------------------------------------------------------------------- // loop unrolling has no effect on the edge case (it is not unrolled), // but greatly speeds up the non-edge case. #pragma unroll for (int k = 0 ; k < nv ; k++) { //---------------------------------------------------------------------- // write the kth column of A back into shared memory //---------------------------------------------------------------------- if (WORKER && MYBITTYCOL == k && k > 0) { // the bitty block for threads that own column k contains // the kth column of R and the kth column of v. #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; shA [i][k] = rbitA [ii] ; } } __syncthreads ( ) ; // At this point, A (:,k) is held in both shared memory, and in the // threads that own that column. A (:,k) is not yet the kth // Householder vector, except for the diagnal (which is computed // below). A (0:k-1,k) is now the kth column of R (above the // diagonal). 
//---------------------------------------------------------------------- // compute the Householder coefficients //---------------------------------------------------------------------- // This is costly, accounting for about 25% of the total time on // Kepler, and 22% on Fermi, when A is loaded from global memory. This // means the work here is even a higher fraction when A is in shared. if (threadIdx.x == 0) { double x1 = shA [k][k] ; // the diagonal A (k,k) double s, v1, tau ; if (sigma <= EPSILON) { // printf ("hit eps %g\n", sigma) ; s = x1 ; v1 = 0 ; tau = 0 ; } else { s = sqrt (x1*x1 + sigma) ; v1 = (x1 <= 0) ? (x1 - s) : (-sigma / (x1 + s)) ; tau = -1 / (s * v1) ; } shRdiag [k] = s ; // the diagonal entry of R shA [k][k] = v1 ; // the topmost entry of the vector v TAU = tau ; // tile case: T (k,k) = tau } // All threads need v1, and later on they need tau __syncthreads ( ) ; // A (0:k-1,k) now holds the kth column of R (excluding the diagonal). // A (k:m-1,k) holds the kth Householder vector (incl. the diagonal). //---------------------------------------------------------------------- // z = (-tau) * v' * A (k:m-1,:), where v is A (k:m-1,k) //---------------------------------------------------------------------- if (WORKER) // && (COMPUTE_T || MYBITTYCOL > k)) { // Load the vector v from A (k:m-1,k) into the V bitty block. // If T is not computed and MYBITTYCOL <= k, then this code can // be skipped, but the code is slower when that test is made. #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; // only i >= k is needed, but it's faster to load it all rbitV [ii] = shA [i][k] ; } // compute v' * A (k:m-1,:), each thread in its own column { double z = 0.0 ; #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; if (i >= k) { z += rbitV [ii] * rbitA [ii] ; } } // store z into the reduction space in shared memory shZ [MYBITTYROW(0)][MYBITTYCOL] = z ; } } // All threads need to see the reduction space for Z __syncthreads ( ) ; // Reduce Z into a single row vector z, using the first warp only if (threadIdx.x < N) // && (COMPUTE_T || threadIdx.x > k)) { double z = 0 ; #pragma unroll for (int ii = 0 ; ii < MCHUNK ; ii++) { z += shZ [ii][threadIdx.x] ; } shZ [0][threadIdx.x] = - z * TAU ; } // All threads need to see the z vector __syncthreads ( ) ; //---------------------------------------------------------------------- // update A (in register) and compute the next sigma //---------------------------------------------------------------------- if (WORKER && MYBITTYCOL > k) { // A (k:m,k+1:n) = A (k:,k+1:n) + v * z (k+1:n) ; // only threads that own a column MYBITTYCOL > k do any work { double z = shZ [0][MYBITTYCOL] ; #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; if (i >= k) { rbitA [ii] += rbitV [ii] * z ; } } } // sigma = sum (A ((k+2):m,k+1).^2), except for the reduction if (MYBITTYCOL == k+1) { // each thread that owns column k+1 computes sigma for its // own bitty block double s = 0 ; #pragma unroll for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; if (i >= k+2) { s += rbitA [ii] * rbitA [ii] ; } } RSIGMA (MYBITTYROW(0)) = s ; } } //---------------------------------------------------------------------- // construct the kth column of T //---------------------------------------------------------------------- #ifndef WHOLE_FRONT // T (0:k-1,k) = T (0:k-1,0:k-1) * z (0:k-1)' if (threadIdx.x < k) { double t_ik = 0 ; for (int jj = 0 ; jj < k ; jj++) { t_ik += shT 
[threadIdx.x][jj] * shZ [0][jj] ; } shT [threadIdx.x][k] = t_ik ; } #endif //---------------------------------------------------------------------- // reduce sigma into a single scalar for the next iteration //---------------------------------------------------------------------- // Thread zero must wait for RSIGMA __syncthreads ( ) ; if (threadIdx.x == 0) { sigma = 0 ; #pragma unroll for (int ii = 0 ; ii < MCHUNK ; ii++) { sigma += RSIGMA (ii) ; } } } // tril (A) now holds all the Householder vectors, including the diagonal. // triu (A,1) now holds R, excluding the diagonal. // shRdiag holds the diagonal of R. //-------------------------------------------------------------------------- // write out the remaining columns of R, if any //-------------------------------------------------------------------------- if (WORKER && MYBITTYCOL >= nv) { for (int ii = 0 ; ii < BITTYROWS ; ii++) { int i = MYBITTYROW (ii) ; shA [i][MYBITTYCOL] = rbitA [ii] ; } } //-------------------------------------------------------------------------- /* Have a warp shuffle memory around. */ if (threadIdx.x < N) { #ifndef WHOLE_FRONT shV1 [threadIdx.x] = shA [threadIdx.x][threadIdx.x]; #endif shA [threadIdx.x][threadIdx.x] = shRdiag [threadIdx.x]; } // Wait for the memory shuffle to finish before saving A to global memory __syncthreads(); //-------------------------------------------------------------------------- // save results back to global memory //-------------------------------------------------------------------------- #ifdef WHOLE_FRONT if (jt < fn) { for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; if (i < fm) glA (i, jt) = shA [i][jt] ; } } #else // Save VT back to global memory & clear out // lower-triangular part of the first tile (leaving R). for (int th=threadIdx.x; th<TILESIZE*TILESIZE; th+=blockDim.x) { int i = th / 32; int j = th % 32; /* The upper triangular part (including diagonal) is T. */ if(i <= j) { glVT[i][j] = shT[i][j]; } /* The lower triangular part is V. * Note we clear the tril part leaving only R in this tile. */ else { glVT[i+1][j] = shA[i][j]; shA[i][j] = 0.0; } } // Save the diagonal if (threadIdx.x < N) { glVT[threadIdx.x+1][threadIdx.x] = shV1[threadIdx.x]; } // Wait for this operation to complete before saving A back to global // memory __syncthreads(); // save the tiles in A back into the front in global memory for (int t = 0 ; t < ROW_PANELSIZE ; t++) { int rowTile = myTask.extra[t]; if (INSIDE (rowTile != EMPTY)) { for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + it ; if (ii < NACHUNKS-1 || i < N) { if (INSIDE (i+rowTile < fm) && INSIDE (jt+j1 < fn)) { glA (i+rowTile, jt+j1) = shA [i + t*TILESIZE][jt]; } } } } } #endif } //------------------------------------------------------------------------------ // undefine macros //------------------------------------------------------------------------------ #undef EPSILON #undef FACTORIZE #undef M #undef N #undef glA #undef WORKER #undef ATHREADS #undef MCHUNK #undef BITTYROWS #undef MYBITTYROW #undef MYBITTYCOL #undef shA #undef shT #undef shZ #undef shRdiag #undef shV1 #undef RSIGMA #undef TAU #undef INSIDE #undef INSIDE #undef nv #undef it #undef jt #undef j1 #undef ACHUNKSIZE #undef SAFELOAD #undef NACHUNKS #undef EDGE_CASE #undef WHOLE_FRONT #undef ROW_PANELSIZE #endif
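Editor note: the visible kernel body of this factorize_vt pair is the same in its HIP and CUDA forms; its numerical core is the Householder reflector computed by thread zero (s, v1, tau from the diagonal entry x1 and sigma, the sum of squares below the diagonal). Below is a minimal host-side sketch of that same formula, not part of GPUQREngine; the array x and the helper householder() are hypothetical names used only for illustration.

#include <cmath>
#include <cstdio>

// Host-side model of the reflector formula used by thread zero above:
// given the diagonal entry x1 and sigma = sum(x(2:m).^2), compute the
// R diagonal s, the reflector head v1, and the scalar tau.
static void householder(double x1, double sigma, double eps,
                        double* s, double* v1, double* tau) {
    if (sigma <= eps) {       // column is already (numerically) zero below the diagonal
        *s = x1; *v1 = 0; *tau = 0;
    } else {
        *s  = std::sqrt(x1 * x1 + sigma);
        *v1 = (x1 <= 0) ? (x1 - *s) : (-sigma / (x1 + *s));  // stable form avoids cancellation when x1 > 0
        *tau = -1.0 / (*s * *v1);
    }
}

int main() {
    const double x[4] = {3.0, 4.0, 0.0, 0.0};   // hypothetical column with norm 5
    double sigma = 0.0;
    for (int i = 1; i < 4; ++i) sigma += x[i] * x[i];
    double s, v1, tau;
    householder(x[0], sigma, 1e-90, &s, &v1, &tau);
    std::printf("s = %g, v1 = %g, tau = %g\n", s, v1, tau);  // s plays the role of shRdiag[k] above
    return 0;
}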
12e3c9ab2baedda32016ac3b4648d3a73537c2f9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>

#include "compat.cuh"
#include "utils.cuh"
#include "threads.cuh"

__global__ void uniform_rw_kernel(
    const int64_t *__restrict__ row, const int64_t *__restrict__ col,
    const int64_t *__restrict__ deg, const int64_t *__restrict__ start,
    const float *__restrict__ rand, int64_t *__restrict__ out,
    const size_t walk_length, const size_t numel) {
  const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
  const size_t stride = blockDim.x * gridDim.x;
  for (ptrdiff_t n = index; n < numel; n += stride) {
    out[n] = start[n];
    for (ptrdiff_t l = 1; l <= walk_length; l++) {
      auto i = (l - 1) * numel + n;
      auto cur = out[i];
      out[l * numel + n] = col[row[cur] + int64_t(rand[i] * deg[cur])];
    }
  }
}

at::Tensor rw_cuda(at::Tensor row, at::Tensor col, at::Tensor start,
                   size_t walk_length, float p, float q, size_t num_nodes) {
  hipSetDevice(row.get_device());

  auto deg = degree(row, num_nodes);
  row = at::cat({at::zeros(1, deg.options()), deg.cumsum(0)}, 0);

  auto rand = at::rand({(int64_t)walk_length, start.size(0)},
                       start.options().dtype(at::kFloat));

  auto out = at::full({(int64_t)walk_length + 1, start.size(0)}, -1, start.options());

  hipLaunchKernelGGL(( uniform_rw_kernel), dim3(BLOCKS(start.numel())), dim3(THREADS), 0, 0,
      row.DATA_PTR<int64_t>(), col.DATA_PTR<int64_t>(), deg.DATA_PTR<int64_t>(),
      start.DATA_PTR<int64_t>(), rand.DATA_PTR<float>(), out.DATA_PTR<int64_t>(),
      walk_length, start.numel());

  return out.t().contiguous();
}
12e3c9ab2baedda32016ac3b4648d3a73537c2f9.cu
#include <ATen/ATen.h>

#include "compat.cuh"
#include "utils.cuh"
#include "threads.cuh"

__global__ void uniform_rw_kernel(
    const int64_t *__restrict__ row, const int64_t *__restrict__ col,
    const int64_t *__restrict__ deg, const int64_t *__restrict__ start,
    const float *__restrict__ rand, int64_t *__restrict__ out,
    const size_t walk_length, const size_t numel) {
  const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
  const size_t stride = blockDim.x * gridDim.x;
  for (ptrdiff_t n = index; n < numel; n += stride) {
    out[n] = start[n];
    for (ptrdiff_t l = 1; l <= walk_length; l++) {
      auto i = (l - 1) * numel + n;
      auto cur = out[i];
      out[l * numel + n] = col[row[cur] + int64_t(rand[i] * deg[cur])];
    }
  }
}

at::Tensor rw_cuda(at::Tensor row, at::Tensor col, at::Tensor start,
                   size_t walk_length, float p, float q, size_t num_nodes) {
  cudaSetDevice(row.get_device());

  auto deg = degree(row, num_nodes);
  row = at::cat({at::zeros(1, deg.options()), deg.cumsum(0)}, 0);

  auto rand = at::rand({(int64_t)walk_length, start.size(0)},
                       start.options().dtype(at::kFloat));

  auto out = at::full({(int64_t)walk_length + 1, start.size(0)}, -1, start.options());

  uniform_rw_kernel<<<BLOCKS(start.numel()), THREADS>>>(
      row.DATA_PTR<int64_t>(), col.DATA_PTR<int64_t>(), deg.DATA_PTR<int64_t>(),
      start.DATA_PTR<int64_t>(), rand.DATA_PTR<float>(), out.DATA_PTR<int64_t>(),
      walk_length, start.numel());

  return out.t().contiguous();
}
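Editor note: the substantive differences between the .hip and .cu versions of this pair are the runtime header, hipSetDevice vs cudaSetDevice, and the kernel launch form. A minimal sketch of that launch mapping follows, using a hypothetical scale_kernel rather than the real uniform_rw_kernel.

#include <cuda_runtime.h>

__global__ void scale_kernel(float* x, float alpha, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= alpha;
}

void launch_scale(float* d_x, float alpha, int n, cudaStream_t stream) {
    dim3 blocks((n + 255) / 256), threads(256);
    // CUDA triple-chevron form, as in the .cu file above:
    scale_kernel<<<blocks, threads, 0, stream>>>(d_x, alpha, n);
    // hipify rewrites this into the macro form seen in the .hip file:
    //   hipLaunchKernelGGL(( scale_kernel), dim3(blocks), dim3(threads), 0, stream,
    //                      d_x, alpha, n);
    // with cudaSetDevice / cudaStream_t mapped to hipSetDevice / hipStream_t.
}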
c50b23e635b2211b22e8689343fabf582689f6d6.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "paddle/framework/lod_tensor.h" #include "paddle/platform/assert.h" #include <gtest/gtest.h> __global__ void test(size_t* a, int size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) { a[i] *= 2; } } TEST(LoDTensor, LoDInGPU) { paddle::framework::LoDTensor lod_tensor; paddle::platform::GPUPlace place(0); paddle::framework::LoD src_lod; src_lod.push_back(std::vector<size_t>{0, 2, 4, 6, 8, 10, 12, 14}); lod_tensor.Resize({14, 16}); lod_tensor.mutable_data<float>(place); lod_tensor.set_lod(src_lod); EXPECT_EQ(lod_tensor.lod_element(0, 2).first, 4UL); EXPECT_EQ(lod_tensor.lod_element(0, 4).first, 8UL); auto lod = lod_tensor.lod(); hipLaunchKernelGGL(( test), dim3(1), dim3(8), 0, 0, lod[0].data(), lod[0].size()); hipDeviceSynchronize(); for (size_t i = 0; i < src_lod[0].size(); ++i) { EXPECT_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2); } }
c50b23e635b2211b22e8689343fabf582689f6d6.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cuda.h> #include <cuda_runtime.h> #include "paddle/framework/lod_tensor.h" #include "paddle/platform/assert.h" #include <gtest/gtest.h> __global__ void test(size_t* a, int size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) { a[i] *= 2; } } TEST(LoDTensor, LoDInGPU) { paddle::framework::LoDTensor lod_tensor; paddle::platform::GPUPlace place(0); paddle::framework::LoD src_lod; src_lod.push_back(std::vector<size_t>{0, 2, 4, 6, 8, 10, 12, 14}); lod_tensor.Resize({14, 16}); lod_tensor.mutable_data<float>(place); lod_tensor.set_lod(src_lod); EXPECT_EQ(lod_tensor.lod_element(0, 2).first, 4UL); EXPECT_EQ(lod_tensor.lod_element(0, 4).first, 8UL); auto lod = lod_tensor.lod(); test<<<1, 8>>>(lod[0].data(), lod[0].size()); cudaDeviceSynchronize(); for (size_t i = 0; i < src_lod[0].size(); ++i) { EXPECT_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2); } }
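Editor note: the test kernel in this pair walks its buffer with a grid-stride loop, so the <<<1, 8>>> launch still covers all 14 elements. A self-contained sketch of the same doubling pattern on a plain device buffer, without LoDTensor; all names here are hypothetical.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void double_all(size_t* a, int size) {
  // grid-stride loop: each thread handles indices i, i+stride, i+2*stride, ...
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
       i += blockDim.x * gridDim.x) {
    a[i] *= 2;
  }
}

int main() {
  const int n = 14;
  size_t h[n];
  for (int i = 0; i < n; ++i) h[i] = i;
  size_t* d = nullptr;
  cudaMalloc(&d, n * sizeof(size_t));
  cudaMemcpy(d, h, n * sizeof(size_t), cudaMemcpyHostToDevice);
  double_all<<<1, 8>>>(d, n);          // same <<<1, 8>>> shape as the test above
  cudaDeviceSynchronize();
  cudaMemcpy(h, d, n * sizeof(size_t), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) std::printf("%zu ", h[i]);  // expect 0 2 4 ... 26
  std::printf("\n");
  cudaFree(d);
  return 0;
}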
6dddcf697a0940ba16b36073f3a95d16d40f0b0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu_runtime.h" __global__ void add_const_kernel(const float *input, float *output, float value, size_t size) { size_t ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= size) return; output[ind] = input[ind] + value; } int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val, DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){ if(p != NULL){ int size_input = 1, size_output = 1; for(int i = 0; i < input -> ndim; i++) size_input *= input -> shape[i]; for(int i = 0; i < output -> ndim; i++) size_output *= output -> shape[i]; p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024; p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024; p -> workspace_memory = 0; // Insert the begin and end event. hipEvent_t start, stop; hipEventCreate(&start); hipEventRecord(start,0); size_t size = 1; for (index_t i = 0; i < input->ndim; i++) { size *= input->shape[i]; } dim3 blocks; dim3 threads; float *output_data = (float *)output->data; const float *input_data = (const float *)input->data; if (size <= 1024) { threads.x = size; blocks.x = 1; } else { threads.x = 1024; blocks.x = (size + 1023) / 1024; } if (stream_handle) hipLaunchKernelGGL(( add_const_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, input_data, output_data, val, size); else hipLaunchKernelGGL(( add_const_kernel), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, val, size); float elapsedTime; hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start,stop); hipEventDestroy(start); hipEventDestroy(stop); p->time = elapsedTime; } else{ size_t size = 1; for (index_t i = 0; i < input->ndim; i++) { size *= input->shape[i]; } dim3 blocks; dim3 threads; float *output_data = (float *)output->data; const float *input_data = (const float *)input->data; if (size <= 1024) { threads.x = size; blocks.x = 1; } else { threads.x = 1024; blocks.x = (size + 1023) / 1024; } if (stream_handle) hipLaunchKernelGGL(( add_const_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, input_data, output_data, val, size); else hipLaunchKernelGGL(( add_const_kernel), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, val, size); } return 0; }
6dddcf697a0940ba16b36073f3a95d16d40f0b0f.cu
#include "gpu_runtime.h" __global__ void add_const_kernel(const float *input, float *output, float value, size_t size) { size_t ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= size) return; output[ind] = input[ind] + value; } int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val, DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){ if(p != NULL){ int size_input = 1, size_output = 1; for(int i = 0; i < input -> ndim; i++) size_input *= input -> shape[i]; for(int i = 0; i < output -> ndim; i++) size_output *= output -> shape[i]; p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024; p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024; p -> workspace_memory = 0; // Insert the begin and end event. cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventRecord(start,0); size_t size = 1; for (index_t i = 0; i < input->ndim; i++) { size *= input->shape[i]; } dim3 blocks; dim3 threads; float *output_data = (float *)output->data; const float *input_data = (const float *)input->data; if (size <= 1024) { threads.x = size; blocks.x = 1; } else { threads.x = 1024; blocks.x = (size + 1023) / 1024; } if (stream_handle) add_const_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(input_data, output_data, val, size); else add_const_kernel<<<blocks, threads>>>(input_data, output_data, val, size); float elapsedTime; cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start,stop); cudaEventDestroy(start); cudaEventDestroy(stop); p->time = elapsedTime; } else{ size_t size = 1; for (index_t i = 0; i < input->ndim; i++) { size *= input->shape[i]; } dim3 blocks; dim3 threads; float *output_data = (float *)output->data; const float *input_data = (const float *)input->data; if (size <= 1024) { threads.x = size; blocks.x = 1; } else { threads.x = 1024; blocks.x = (size + 1023) / 1024; } if (stream_handle) add_const_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(input_data, output_data, val, size); else add_const_kernel<<<blocks, threads>>>(input_data, output_data, val, size); } return 0; }
27b73f4c2cfd3ca624c51dd53240d0a3051c4508.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <catch/catch.hpp> #include <cuMat/src/DisableCompilerWarnings.h> #include <cuMat/src/Matrix.h> #include <cuMat/src/EigenInteropHelpers.h> template<typename MatrixType> __global__ void TestEigenWriteCoeffKernel(dim3 virtual_size, MatrixType matrix) { CUMAT_KERNEL_3D_LOOP(i, j, k, virtual_size) matrix.coeff(i, j, k, -1) = i + j * 100 + k * 100 * 100; CUMAT_KERNEL_3D_LOOP_END } template<typename _Matrix> void testMatrixToEigen(const _Matrix& m) { cuMat::Context& ctx = cuMat::Context::current(); int sx = m.rows(); int sy = m.cols(); cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig3D(sx, sy, 1, TestEigenWriteCoeffKernel<_Matrix>); hipLaunchKernelGGL(( TestEigenWriteCoeffKernel) , dim3(cfg.block_count), dim3(cfg.thread_per_block), 0, ctx.stream() , cfg.virtual_size, m); CUMAT_CHECK_ERROR(); auto host = m.toEigen(); for (int y = 0; y<sy; ++y) { for (int x = 0; x<sx; ++x) { REQUIRE(host(x, y) == x + y * 100); } } } TEST_CASE("matrix_to_eigen", "[eigen-interop]") { testMatrixToEigen(cuMat::Matrix<float, 1, 1, 1, cuMat::ColumnMajor>(1, 1, 1)); testMatrixToEigen(cuMat::Matrix<float, 4, 8, 1, cuMat::ColumnMajor>(4, 8, 1)); testMatrixToEigen(cuMat::Matrix<int, 16, 8, 1, cuMat::ColumnMajor>(16, 8, 1)); testMatrixToEigen(cuMat::Matrix<float, cuMat::Dynamic, cuMat::Dynamic, 1, cuMat::ColumnMajor>(32, 6, 1)); testMatrixToEigen(cuMat::Matrix<float, 1, 1, 1, cuMat::RowMajor>(1, 1, 1)); testMatrixToEigen(cuMat::Matrix<float, 4, 8, 1, cuMat::RowMajor>(4, 8, 1)); testMatrixToEigen(cuMat::Matrix<int, 16, 8, 1, cuMat::RowMajor>(16, 8, 1)); testMatrixToEigen(cuMat::Matrix<float, cuMat::Dynamic, cuMat::Dynamic, 1, cuMat::RowMajor>(32, 6, 1)); } template<typename MatrixType> __global__ void TestEigenReadCoeffKernel(dim3 virtual_size, MatrixType matrix, int* failure) { CUMAT_KERNEL_3D_LOOP(i, j, k, virtual_size) if (matrix.coeff(i, j, k, -1) != i + j * 100 + k * 100 * 100) failure[0] = 1; CUMAT_KERNEL_3D_LOOP_END } template <typename _Matrix> void testMatrixFromEigen(const _Matrix& m) { int sx = m.rows(); int sy = m.cols(); _Matrix host = m; for (int y = 0; y<sy; ++y) { for (int x = 0; x<sx; ++x) { host(x, y) = x + y * 100; } } cuMat::Context& ctx = cuMat::Context::current(); typedef typename cuMat::eigen::MatrixEigenToCuMat<_Matrix>::type matrix_t; matrix_t mat = matrix_t::fromEigen(host); cuMat::DevicePointer<int> successFlag(1); CUMAT_SAFE_CALL(hipMemset(successFlag.pointer(), 0, sizeof(int))); cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig3D(sx, sy, 1, TestEigenReadCoeffKernel<matrix_t>); hipLaunchKernelGGL(( TestEigenReadCoeffKernel) , dim3(cfg.block_count), dim3(cfg.thread_per_block), 0, ctx.stream() , cfg.virtual_size, mat, successFlag.pointer()); CUMAT_CHECK_ERROR(); int successFlagHost; hipMemcpy(&successFlagHost, successFlag.pointer(), sizeof(int), hipMemcpyDeviceToHost); REQUIRE(successFlagHost == 0); } TEST_CASE("matrix_from_eigen", "[eigen-interop]") { testMatrixFromEigen(Eigen::Matrix<float, 8, 6, Eigen::RowMajor>()); { auto m = Eigen::Matrix<float, Eigen::Dynamic, 6, Eigen::RowMajor>(); m.resize(12, 6); testMatrixFromEigen(m); } { auto m = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(); m.resize(12, 24); testMatrixFromEigen(m); } testMatrixFromEigen(Eigen::Matrix<float, 8, 6, Eigen::ColMajor>()); { auto m = Eigen::Matrix<float, 16, Eigen::Dynamic, Eigen::ColMajor>(); m.resize(16, 8); testMatrixFromEigen(m); } { auto m = Eigen::Matrix<float, Eigen::Dynamic, 
Eigen::Dynamic, Eigen::ColMajor>(); m.resize(12, 24); testMatrixFromEigen(m); } }
27b73f4c2cfd3ca624c51dd53240d0a3051c4508.cu
#include <catch/catch.hpp> #include <cuMat/src/DisableCompilerWarnings.h> #include <cuMat/src/Matrix.h> #include <cuMat/src/EigenInteropHelpers.h> template<typename MatrixType> __global__ void TestEigenWriteCoeffKernel(dim3 virtual_size, MatrixType matrix) { CUMAT_KERNEL_3D_LOOP(i, j, k, virtual_size) matrix.coeff(i, j, k, -1) = i + j * 100 + k * 100 * 100; CUMAT_KERNEL_3D_LOOP_END } template<typename _Matrix> void testMatrixToEigen(const _Matrix& m) { cuMat::Context& ctx = cuMat::Context::current(); int sx = m.rows(); int sy = m.cols(); cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig3D(sx, sy, 1, TestEigenWriteCoeffKernel<_Matrix>); TestEigenWriteCoeffKernel <<< cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>> (cfg.virtual_size, m); CUMAT_CHECK_ERROR(); auto host = m.toEigen(); for (int y = 0; y<sy; ++y) { for (int x = 0; x<sx; ++x) { REQUIRE(host(x, y) == x + y * 100); } } } TEST_CASE("matrix_to_eigen", "[eigen-interop]") { testMatrixToEigen(cuMat::Matrix<float, 1, 1, 1, cuMat::ColumnMajor>(1, 1, 1)); testMatrixToEigen(cuMat::Matrix<float, 4, 8, 1, cuMat::ColumnMajor>(4, 8, 1)); testMatrixToEigen(cuMat::Matrix<int, 16, 8, 1, cuMat::ColumnMajor>(16, 8, 1)); testMatrixToEigen(cuMat::Matrix<float, cuMat::Dynamic, cuMat::Dynamic, 1, cuMat::ColumnMajor>(32, 6, 1)); testMatrixToEigen(cuMat::Matrix<float, 1, 1, 1, cuMat::RowMajor>(1, 1, 1)); testMatrixToEigen(cuMat::Matrix<float, 4, 8, 1, cuMat::RowMajor>(4, 8, 1)); testMatrixToEigen(cuMat::Matrix<int, 16, 8, 1, cuMat::RowMajor>(16, 8, 1)); testMatrixToEigen(cuMat::Matrix<float, cuMat::Dynamic, cuMat::Dynamic, 1, cuMat::RowMajor>(32, 6, 1)); } template<typename MatrixType> __global__ void TestEigenReadCoeffKernel(dim3 virtual_size, MatrixType matrix, int* failure) { CUMAT_KERNEL_3D_LOOP(i, j, k, virtual_size) if (matrix.coeff(i, j, k, -1) != i + j * 100 + k * 100 * 100) failure[0] = 1; CUMAT_KERNEL_3D_LOOP_END } template <typename _Matrix> void testMatrixFromEigen(const _Matrix& m) { int sx = m.rows(); int sy = m.cols(); _Matrix host = m; for (int y = 0; y<sy; ++y) { for (int x = 0; x<sx; ++x) { host(x, y) = x + y * 100; } } cuMat::Context& ctx = cuMat::Context::current(); typedef typename cuMat::eigen::MatrixEigenToCuMat<_Matrix>::type matrix_t; matrix_t mat = matrix_t::fromEigen(host); cuMat::DevicePointer<int> successFlag(1); CUMAT_SAFE_CALL(cudaMemset(successFlag.pointer(), 0, sizeof(int))); cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig3D(sx, sy, 1, TestEigenReadCoeffKernel<matrix_t>); TestEigenReadCoeffKernel <<< cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>> (cfg.virtual_size, mat, successFlag.pointer()); CUMAT_CHECK_ERROR(); int successFlagHost; cudaMemcpy(&successFlagHost, successFlag.pointer(), sizeof(int), cudaMemcpyDeviceToHost); REQUIRE(successFlagHost == 0); } TEST_CASE("matrix_from_eigen", "[eigen-interop]") { testMatrixFromEigen(Eigen::Matrix<float, 8, 6, Eigen::RowMajor>()); { auto m = Eigen::Matrix<float, Eigen::Dynamic, 6, Eigen::RowMajor>(); m.resize(12, 6); testMatrixFromEigen(m); } { auto m = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(); m.resize(12, 24); testMatrixFromEigen(m); } testMatrixFromEigen(Eigen::Matrix<float, 8, 6, Eigen::ColMajor>()); { auto m = Eigen::Matrix<float, 16, Eigen::Dynamic, Eigen::ColMajor>(); m.resize(16, 8); testMatrixFromEigen(m); } { auto m = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>(); m.resize(12, 24); testMatrixFromEigen(m); } }
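Editor note: both cuMat tests above report device-side mismatches through a single int flag in device memory, cleared with cudaMemset/hipMemset, set by the kernel, and read back with cudaMemcpy/hipMemcpy. A minimal sketch of that failure-flag pattern outside cuMat; the kernel and names are hypothetical.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void check_values(const int* data, int n, int expected, int* failure) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n && data[i] != expected) failure[0] = 1;  // any mismatch flips the flag
}

int main() {
  const int n = 256, expected = 7;
  int h[n];
  for (int i = 0; i < n; ++i) h[i] = expected;
  int *d_data = nullptr, *d_flag = nullptr;
  cudaMalloc(&d_data, n * sizeof(int));
  cudaMalloc(&d_flag, sizeof(int));
  cudaMemcpy(d_data, h, n * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemset(d_flag, 0, sizeof(int));                // clear the flag before the kernel
  check_values<<<(n + 127) / 128, 128>>>(d_data, n, expected, d_flag);
  int failed = 0;
  cudaMemcpy(&failed, d_flag, sizeof(int), cudaMemcpyDeviceToHost);
  std::printf(failed ? "mismatch found\n" : "all values match\n");
  cudaFree(d_data);
  cudaFree(d_flag);
  return failed;
}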
aaaf99dce43a56df71ce3638c2ec71ed727b77d3.hip
// !!! This is a file automatically generated by hipify!!!
#include "includes.h"

/*
 * CudaOperations.cu
 *
 *  Created on: Feb 6, 2019
 *      Author: alexander
 */

__global__ void quickSum(double* energyTempor, int size) {
  long long offset = 1;
  int wIndex;
  while (offset < size * size) {
    wIndex = threadIdx.x;
    while ((wIndex * 2 + 1) * offset < size * size) {
      energyTempor[wIndex * 2 * offset] += energyTempor[(wIndex * 2 + 1) * offset];
      wIndex = wIndex + blockDim.x;
    }
    offset *= 2;
    __syncthreads();
  }
}
aaaf99dce43a56df71ce3638c2ec71ed727b77d3.cu
#include "includes.h" /* * CudaOperations.cu * * Created on: Feb 6, 2019 * Author: alexander */ __global__ void quickSum(double* energyTempor, int size) { long long offset = 1; int wIndex; while (offset < size * size) { wIndex = threadIdx.x; while ((wIndex * 2 + 1) * offset < size * size) { energyTempor[wIndex * 2 * offset] += energyTempor[(wIndex * 2 + 1) * offset]; wIndex = wIndex + blockDim.x; } offset *= 2; __syncthreads(); } }
c8235f1d48910e9d0e5785a76772489bdf90b53c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "hl_base.h" #include "hl_device_functions.cuh" #include "paddle/utils/Logging.h" __global__ void KeMaxSequenceForward(real* input, const int* sequence, real* output, int* index, int numSequences, int dim) { int dimIdx = threadIdx.x; int sequenceId = blockIdx.x; if (sequenceId >= numSequences) return; int start = sequence[sequenceId]; int end = sequence[sequenceId + 1]; for (int i = dimIdx; i < dim; i += blockDim.x) { real tmp = -HL_FLOAT_MAX; int tmpId = -1; for (int insId = start; insId < end; insId++) { if (tmp < input[insId * dim + i]) { tmp = input[insId * dim + i]; tmpId = insId; } } output[sequenceId * dim + i] = tmp; index[sequenceId * dim + i] = tmpId; } } void hl_max_sequence_forward(real* input, const int* sequence, real* output, int* index, int numSequences, int dim) { CHECK_NOTNULL(input); CHECK_NOTNULL(sequence); CHECK_NOTNULL(output); CHECK_NOTNULL(index); dim3 threads(256, 1); dim3 grid(numSequences, 1); hipLaunchKernelGGL(( KeMaxSequenceForward), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, input, sequence, output, index, numSequences, dim); CHECK_SYNC("hl_max_sequence_forward failed"); } __global__ void KeMaxSequenceBackward( real* outputGrad, int* index, real* inputGrad, int numSequences, int dim) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int colIdx = idx % dim; if (idx < numSequences * dim) { int insId = index[idx]; inputGrad[insId * dim + colIdx] += outputGrad[idx]; } } void hl_max_sequence_backward( real* outputGrad, int* index, real* inputGrad, int numSequences, int dim) { CHECK_NOTNULL(outputGrad); CHECK_NOTNULL(index); CHECK_NOTNULL(inputGrad); unsigned int blocks = (numSequences * dim + 128 - 1) / 128; dim3 threads(128, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KeMaxSequenceBackward), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, outputGrad, index, inputGrad, numSequences, dim); CHECK_SYNC("hl_max_sequence_backward failed"); } template <int blockDimX, int blockDimY, int gridDimX, bool AddRow> __global__ void KeMatrixAddRows(real* output, real* table, int* ids, int numSamples, int tableSize, int dim) { int idx = threadIdx.x; int idy = threadIdx.y; int sampleId = blockIdx.x + idy * gridDimX; while (sampleId < numSamples) { int tableId = ids[sampleId]; if ((0 <= tableId) && (tableId < tableSize)) { real* outputData = output + sampleId * dim; real* tableData = table + tableId * dim; for (int i = idx; i < dim; i += blockDimX) { if (AddRow == 0) { outputData[i] += tableData[i]; } else { paddle::paddleAtomicAdd(&tableData[i], outputData[i]); } } } sampleId += blockDimY * gridDimX; } } template <int blockDimX, int blockDimY, int gridDimX, bool seq2batch, bool isAdd> __global__ void KeSequence2Batch(real* batch, real* sequence, const int* batchIndex, int seqWidth, int batchCount) { int idx = threadIdx.x; int idy = threadIdx.y; int id = blockIdx.x + idy * gridDimX; while (id < batchCount) { 
int seqId = batchIndex[id]; real* batchData = batch + id * seqWidth; real* seqData = sequence + seqId * seqWidth; for (int i = idx; i < seqWidth; i += blockDimX) { if (seq2batch) { if (isAdd) { batchData[i] += seqData[i]; } else { batchData[i] = seqData[i]; } } else { if (isAdd) { seqData[i] += batchData[i]; } else { seqData[i] = batchData[i]; } } } id += blockDimY * gridDimX; } } void hl_sequence2batch_copy(real* batch, real* sequence, const int* batchIndex, int seqWidth, int batchCount, bool seq2batch) { CHECK_NOTNULL(sequence); CHECK_NOTNULL(batch); CHECK_NOTNULL(batchIndex); dim3 threads(128, 8); dim3 grid(8, 1); if (seq2batch) { hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 1, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, batch, sequence, batchIndex, seqWidth, batchCount); } else { hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 0, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, batch, sequence, batchIndex, seqWidth, batchCount); } CHECK_SYNC("hl_sequence2batch_copy failed"); } void hl_sequence2batch_add(real* batch, real* sequence, int* batchIndex, int seqWidth, int batchCount, bool seq2batch) { CHECK_NOTNULL(sequence); CHECK_NOTNULL(batch); CHECK_NOTNULL(batchIndex); dim3 threads(128, 8); dim3 grid(8, 1); if (seq2batch) { hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 1, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, batch, sequence, batchIndex, seqWidth, batchCount); } else { hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 0, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, batch, sequence, batchIndex, seqWidth, batchCount); } CHECK_SYNC("hl_sequence2batch_add failed"); } template <bool normByTimes, bool seq2batch> __global__ void KeSequence2BatchPadding(real* batch, real* sequence, const int* sequenceStartPositions, const size_t sequenceWidth, const size_t maxSequenceLength, const size_t numSequences) { int batchIdx = blockIdx.y; int sequenceStart = sequenceStartPositions[batchIdx]; int sequenceLength = sequenceStartPositions[batchIdx + 1] - sequenceStart; int sequenceIdx = blockIdx.x * blockDim.y + threadIdx.y; int batchBaseIdx = (sequenceIdx * numSequences + batchIdx) * sequenceWidth; int sequenceBaseIdx = (sequenceStart + sequenceIdx) * sequenceWidth; real scale = normByTimes ? 
(1.0f / (real)sequenceLength) : 1.0f; if (sequenceIdx < sequenceLength) { if (seq2batch) { /* sequence -> batch */ for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) { batch[batchBaseIdx + i] = scale * sequence[sequenceBaseIdx + i]; } } else { /* batch -> sequence */ for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) { sequence[sequenceBaseIdx + i] = scale * batch[batchBaseIdx + i]; } } } else if (sequenceIdx < maxSequenceLength) { if (seq2batch) { /* sequence -> batch */ for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) { batch[batchBaseIdx + i] = 0; } } } } void hl_sequence2batch_copy_padding(real* batch, real* sequence, const int* sequenceStartPositions, const size_t sequenceWidth, const size_t maxSequenceLength, const size_t numSequences, bool normByTimes, bool seq2batch) { CHECK_NOTNULL(batch); CHECK_NOTNULL(sequence); CHECK_NOTNULL(sequenceStartPositions); if (!normByTimes && numSequences == 1) { size_t elementCount = maxSequenceLength * sequenceWidth; if (seq2batch) { /* sequence -> batch */ hl_memcpy_device2device(batch, sequence, sizeof(real) * elementCount); } else { /* batch -> sequence */ hl_memcpy_device2device(sequence, batch, sizeof(real) * elementCount); } return; } const int CUDA_BLOCK_SIZE = 512; /* At least use 32 threads to copy sequenceWidth elements, and at least 8 elements for each thread. */ int blockDimX = ((((sequenceWidth + 7) >> 3) + 31) >> 5) << 5; blockDimX = (blockDimX < CUDA_BLOCK_SIZE) ? blockDimX : CUDA_BLOCK_SIZE; int blockDimY = CUDA_BLOCK_SIZE / blockDimX; dim3 threads(blockDimX, blockDimY); int gridDimX = (maxSequenceLength + blockDimY - 1) / blockDimY; int gridDimY = numSequences; dim3 grid(gridDimX, gridDimY); if (seq2batch) { /* sequence -> batch */ if (normByTimes) { hipLaunchKernelGGL(( KeSequence2BatchPadding<1, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, batch, sequence, sequenceStartPositions, sequenceWidth, maxSequenceLength, numSequences); } else { hipLaunchKernelGGL(( KeSequence2BatchPadding<0, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, batch, sequence, sequenceStartPositions, sequenceWidth, maxSequenceLength, numSequences); } } else { /* batch -> sequence */ if (normByTimes) { hipLaunchKernelGGL(( KeSequence2BatchPadding<1, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, batch, sequence, sequenceStartPositions, sequenceWidth, maxSequenceLength, numSequences); } else { hipLaunchKernelGGL(( KeSequence2BatchPadding<0, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, batch, sequence, sequenceStartPositions, sequenceWidth, maxSequenceLength, numSequences); } } CHECK_SYNC("hl_sequence2batch_copy_padding failed"); } __device__ inline float my_rsqrt(float x) { return rsqrtf(x); } __device__ inline double my_rsqrt(double x) { return rsqrt(x); } __global__ void KeSequenceAvgForward(real* dst, real* src, const int* starts, int height, int width, const int mode) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int row = gid / width; int col = gid % width; if (gid < height * width) { int start = starts[row]; int end = starts[row + 1]; int seqLength = end - start; if (seqLength == 0) return; real sum = 0.0; for (int i = start; i < end; i++) { sum += src[i * width + col]; } sum = mode == 1 ? sum : (mode == 0 ? 
sum / seqLength : sum * my_rsqrt((real)seqLength)); dst[gid] += sum; } } void hl_sequence_avg_forward(real* dst, real* src, const int* starts, int height, int width, const int mode) { CHECK_NOTNULL(dst); CHECK_NOTNULL(src); CHECK_NOTNULL(starts); int block = 512; int grid = DIVUP(width * height, 512); CHECK(mode == 0 || mode == 1 || mode == 2) << "mode error in hl_sequence_avg_forward!"; hipLaunchKernelGGL(( KeSequenceAvgForward), dim3(grid), dim3(block), 0, STREAM_DEFAULT, dst, src, starts, height, width, mode); CHECK_SYNC("hl_sequence_avg_forward failed"); } __global__ void KeSequenceAvgBackward(real* dst, real* src, const int* starts, int height, int width, const int mode) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int row = gid / width; int col = gid % width; if (gid < height * width) { int start = starts[row]; int end = starts[row + 1]; int seqLength = end - start; if (seqLength == 0) return; real grad = src[gid]; grad = mode == 1 ? grad : (mode == 0 ? grad / seqLength : grad * my_rsqrt((real)seqLength)); for (int i = start; i < end; i++) { dst[i * width + col] += grad; } } } void hl_sequence_avg_backward(real* dst, real* src, const int* starts, int height, int width, const int mode) { CHECK_NOTNULL(dst); CHECK_NOTNULL(src); CHECK_NOTNULL(starts); int block = 512; int grid = DIVUP(width * height, 512); CHECK(mode == 0 || mode == 1 || mode == 2) << "mode error in hl_sequence_avg_backward!"; hipLaunchKernelGGL(( KeSequenceAvgBackward), dim3(grid), dim3(block), 0, STREAM_DEFAULT, dst, src, starts, height, width, mode); CHECK_SYNC("hl_sequence_avg_backward failed"); }
c8235f1d48910e9d0e5785a76772489bdf90b53c.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "hl_base.h" #include "hl_device_functions.cuh" #include "paddle/utils/Logging.h" __global__ void KeMaxSequenceForward(real* input, const int* sequence, real* output, int* index, int numSequences, int dim) { int dimIdx = threadIdx.x; int sequenceId = blockIdx.x; if (sequenceId >= numSequences) return; int start = sequence[sequenceId]; int end = sequence[sequenceId + 1]; for (int i = dimIdx; i < dim; i += blockDim.x) { real tmp = -HL_FLOAT_MAX; int tmpId = -1; for (int insId = start; insId < end; insId++) { if (tmp < input[insId * dim + i]) { tmp = input[insId * dim + i]; tmpId = insId; } } output[sequenceId * dim + i] = tmp; index[sequenceId * dim + i] = tmpId; } } void hl_max_sequence_forward(real* input, const int* sequence, real* output, int* index, int numSequences, int dim) { CHECK_NOTNULL(input); CHECK_NOTNULL(sequence); CHECK_NOTNULL(output); CHECK_NOTNULL(index); dim3 threads(256, 1); dim3 grid(numSequences, 1); KeMaxSequenceForward<<<grid, threads, 0, STREAM_DEFAULT>>>( input, sequence, output, index, numSequences, dim); CHECK_SYNC("hl_max_sequence_forward failed"); } __global__ void KeMaxSequenceBackward( real* outputGrad, int* index, real* inputGrad, int numSequences, int dim) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int colIdx = idx % dim; if (idx < numSequences * dim) { int insId = index[idx]; inputGrad[insId * dim + colIdx] += outputGrad[idx]; } } void hl_max_sequence_backward( real* outputGrad, int* index, real* inputGrad, int numSequences, int dim) { CHECK_NOTNULL(outputGrad); CHECK_NOTNULL(index); CHECK_NOTNULL(inputGrad); unsigned int blocks = (numSequences * dim + 128 - 1) / 128; dim3 threads(128, 1); dim3 grid(blocks, 1); KeMaxSequenceBackward<<<grid, threads, 0, STREAM_DEFAULT>>>( outputGrad, index, inputGrad, numSequences, dim); CHECK_SYNC("hl_max_sequence_backward failed"); } template <int blockDimX, int blockDimY, int gridDimX, bool AddRow> __global__ void KeMatrixAddRows(real* output, real* table, int* ids, int numSamples, int tableSize, int dim) { int idx = threadIdx.x; int idy = threadIdx.y; int sampleId = blockIdx.x + idy * gridDimX; while (sampleId < numSamples) { int tableId = ids[sampleId]; if ((0 <= tableId) && (tableId < tableSize)) { real* outputData = output + sampleId * dim; real* tableData = table + tableId * dim; for (int i = idx; i < dim; i += blockDimX) { if (AddRow == 0) { outputData[i] += tableData[i]; } else { paddle::paddleAtomicAdd(&tableData[i], outputData[i]); } } } sampleId += blockDimY * gridDimX; } } template <int blockDimX, int blockDimY, int gridDimX, bool seq2batch, bool isAdd> __global__ void KeSequence2Batch(real* batch, real* sequence, const int* batchIndex, int seqWidth, int batchCount) { int idx = threadIdx.x; int idy = threadIdx.y; int id = blockIdx.x + idy * gridDimX; while (id < batchCount) { int seqId = batchIndex[id]; real* batchData = batch + id * seqWidth; real* seqData = sequence + seqId * seqWidth; for (int i = idx; i < seqWidth; i 
+= blockDimX) { if (seq2batch) { if (isAdd) { batchData[i] += seqData[i]; } else { batchData[i] = seqData[i]; } } else { if (isAdd) { seqData[i] += batchData[i]; } else { seqData[i] = batchData[i]; } } } id += blockDimY * gridDimX; } } void hl_sequence2batch_copy(real* batch, real* sequence, const int* batchIndex, int seqWidth, int batchCount, bool seq2batch) { CHECK_NOTNULL(sequence); CHECK_NOTNULL(batch); CHECK_NOTNULL(batchIndex); dim3 threads(128, 8); dim3 grid(8, 1); if (seq2batch) { KeSequence2Batch<128, 8, 8, 1, 0><<<grid, threads, 0, STREAM_DEFAULT>>>( batch, sequence, batchIndex, seqWidth, batchCount); } else { KeSequence2Batch<128, 8, 8, 0, 0><<<grid, threads, 0, STREAM_DEFAULT>>>( batch, sequence, batchIndex, seqWidth, batchCount); } CHECK_SYNC("hl_sequence2batch_copy failed"); } void hl_sequence2batch_add(real* batch, real* sequence, int* batchIndex, int seqWidth, int batchCount, bool seq2batch) { CHECK_NOTNULL(sequence); CHECK_NOTNULL(batch); CHECK_NOTNULL(batchIndex); dim3 threads(128, 8); dim3 grid(8, 1); if (seq2batch) { KeSequence2Batch<128, 8, 8, 1, 1><<<grid, threads, 0, STREAM_DEFAULT>>>( batch, sequence, batchIndex, seqWidth, batchCount); } else { KeSequence2Batch<128, 8, 8, 0, 1><<<grid, threads, 0, STREAM_DEFAULT>>>( batch, sequence, batchIndex, seqWidth, batchCount); } CHECK_SYNC("hl_sequence2batch_add failed"); } template <bool normByTimes, bool seq2batch> __global__ void KeSequence2BatchPadding(real* batch, real* sequence, const int* sequenceStartPositions, const size_t sequenceWidth, const size_t maxSequenceLength, const size_t numSequences) { int batchIdx = blockIdx.y; int sequenceStart = sequenceStartPositions[batchIdx]; int sequenceLength = sequenceStartPositions[batchIdx + 1] - sequenceStart; int sequenceIdx = blockIdx.x * blockDim.y + threadIdx.y; int batchBaseIdx = (sequenceIdx * numSequences + batchIdx) * sequenceWidth; int sequenceBaseIdx = (sequenceStart + sequenceIdx) * sequenceWidth; real scale = normByTimes ? (1.0f / (real)sequenceLength) : 1.0f; if (sequenceIdx < sequenceLength) { if (seq2batch) { /* sequence -> batch */ for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) { batch[batchBaseIdx + i] = scale * sequence[sequenceBaseIdx + i]; } } else { /* batch -> sequence */ for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) { sequence[sequenceBaseIdx + i] = scale * batch[batchBaseIdx + i]; } } } else if (sequenceIdx < maxSequenceLength) { if (seq2batch) { /* sequence -> batch */ for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) { batch[batchBaseIdx + i] = 0; } } } } void hl_sequence2batch_copy_padding(real* batch, real* sequence, const int* sequenceStartPositions, const size_t sequenceWidth, const size_t maxSequenceLength, const size_t numSequences, bool normByTimes, bool seq2batch) { CHECK_NOTNULL(batch); CHECK_NOTNULL(sequence); CHECK_NOTNULL(sequenceStartPositions); if (!normByTimes && numSequences == 1) { size_t elementCount = maxSequenceLength * sequenceWidth; if (seq2batch) { /* sequence -> batch */ hl_memcpy_device2device(batch, sequence, sizeof(real) * elementCount); } else { /* batch -> sequence */ hl_memcpy_device2device(sequence, batch, sizeof(real) * elementCount); } return; } const int CUDA_BLOCK_SIZE = 512; /* At least use 32 threads to copy sequenceWidth elements, and at least 8 elements for each thread. */ int blockDimX = ((((sequenceWidth + 7) >> 3) + 31) >> 5) << 5; blockDimX = (blockDimX < CUDA_BLOCK_SIZE) ? 
blockDimX : CUDA_BLOCK_SIZE; int blockDimY = CUDA_BLOCK_SIZE / blockDimX; dim3 threads(blockDimX, blockDimY); int gridDimX = (maxSequenceLength + blockDimY - 1) / blockDimY; int gridDimY = numSequences; dim3 grid(gridDimX, gridDimY); if (seq2batch) { /* sequence -> batch */ if (normByTimes) { KeSequence2BatchPadding<1, 1><<<grid, threads, 0, STREAM_DEFAULT>>>( batch, sequence, sequenceStartPositions, sequenceWidth, maxSequenceLength, numSequences); } else { KeSequence2BatchPadding<0, 1><<<grid, threads, 0, STREAM_DEFAULT>>>( batch, sequence, sequenceStartPositions, sequenceWidth, maxSequenceLength, numSequences); } } else { /* batch -> sequence */ if (normByTimes) { KeSequence2BatchPadding<1, 0><<<grid, threads, 0, STREAM_DEFAULT>>>( batch, sequence, sequenceStartPositions, sequenceWidth, maxSequenceLength, numSequences); } else { KeSequence2BatchPadding<0, 0><<<grid, threads, 0, STREAM_DEFAULT>>>( batch, sequence, sequenceStartPositions, sequenceWidth, maxSequenceLength, numSequences); } } CHECK_SYNC("hl_sequence2batch_copy_padding failed"); } __device__ inline float my_rsqrt(float x) { return rsqrtf(x); } __device__ inline double my_rsqrt(double x) { return rsqrt(x); } __global__ void KeSequenceAvgForward(real* dst, real* src, const int* starts, int height, int width, const int mode) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int row = gid / width; int col = gid % width; if (gid < height * width) { int start = starts[row]; int end = starts[row + 1]; int seqLength = end - start; if (seqLength == 0) return; real sum = 0.0; for (int i = start; i < end; i++) { sum += src[i * width + col]; } sum = mode == 1 ? sum : (mode == 0 ? sum / seqLength : sum * my_rsqrt((real)seqLength)); dst[gid] += sum; } } void hl_sequence_avg_forward(real* dst, real* src, const int* starts, int height, int width, const int mode) { CHECK_NOTNULL(dst); CHECK_NOTNULL(src); CHECK_NOTNULL(starts); int block = 512; int grid = DIVUP(width * height, 512); CHECK(mode == 0 || mode == 1 || mode == 2) << "mode error in hl_sequence_avg_forward!"; KeSequenceAvgForward<<<grid, block, 0, STREAM_DEFAULT>>>( dst, src, starts, height, width, mode); CHECK_SYNC("hl_sequence_avg_forward failed"); } __global__ void KeSequenceAvgBackward(real* dst, real* src, const int* starts, int height, int width, const int mode) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int row = gid / width; int col = gid % width; if (gid < height * width) { int start = starts[row]; int end = starts[row + 1]; int seqLength = end - start; if (seqLength == 0) return; real grad = src[gid]; grad = mode == 1 ? grad : (mode == 0 ? grad / seqLength : grad * my_rsqrt((real)seqLength)); for (int i = start; i < end; i++) { dst[i * width + col] += grad; } } } void hl_sequence_avg_backward(real* dst, real* src, const int* starts, int height, int width, const int mode) { CHECK_NOTNULL(dst); CHECK_NOTNULL(src); CHECK_NOTNULL(starts); int block = 512; int grid = DIVUP(width * height, 512); CHECK(mode == 0 || mode == 1 || mode == 2) << "mode error in hl_sequence_avg_backward!"; KeSequenceAvgBackward<<<grid, block, 0, STREAM_DEFAULT>>>( dst, src, starts, height, width, mode); CHECK_SYNC("hl_sequence_avg_backward failed"); }
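Editor note: the notable translations in this pair are the templated-kernel launches, e.g. KeSequence2Batch<128, 8, 8, 1, 0><<<grid, threads, 0, STREAM_DEFAULT>>>(...) becoming hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 1, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, ...). A minimal sketch of the same pattern with a hypothetical templated kernel:

#include <cuda_runtime.h>

template <int BLOCK_X, bool IS_ADD>
__global__ void copy_or_add(float* dst, const float* src, int n) {
  for (int i = blockIdx.x * BLOCK_X + threadIdx.x; i < n; i += gridDim.x * BLOCK_X) {
    if (IS_ADD) dst[i] += src[i];   // accumulate, like the isAdd=1 instantiations above
    else        dst[i]  = src[i];   // plain copy, like the isAdd=0 instantiations
  }
}

void launch_copy_or_add(float* dst, const float* src, int n, bool add, cudaStream_t stream) {
  dim3 threads(128), grid(8);
  if (add) {
    // CUDA form; hipify turns this into
    //   hipLaunchKernelGGL(( copy_or_add<128, true>), dim3(grid), dim3(threads), 0, stream, dst, src, n);
    copy_or_add<128, true><<<grid, threads, 0, stream>>>(dst, src, n);
  } else {
    copy_or_add<128, false><<<grid, threads, 0, stream>>>(dst, src, n);
  }
}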
6a9445a201a740a5e5d756763fa3004706839c77.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2015-2019 by Contributors * \file elementwise_metric.cc * \brief evaluation metrics for elementwise binary or regression. * \author Kailong Chen, Tianqi Chen * * The expressions like wsum == 0 ? esum : esum / wsum is used to handle empty dataset. */ #include <rabit/rabit.h> #include <xgboost/metric.h> #include <dmlc/registry.h> #include <cmath> #include "metric_common.h" #include "../common/math.h" #include "../common/common.h" #if defined(XGBOOST_USE_CUDA) #include <thrust/execution_policy.h> // thrust::hip::par #include <thrust/functional.h> // thrust::plus<> #include <thrust/transform_reduce.h> #include <thrust/iterator/counting_iterator.h> #include "../common/device_helpers.cuh" #endif // XGBOOST_USE_CUDA namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(elementwise_metric); template <typename EvalRow> class ElementWiseMetricsReduction { public: explicit ElementWiseMetricsReduction(EvalRow policy) : policy_(std::move(policy)) {} PackedReduceResult CpuReduceMetrics( const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds) const { size_t ndata = labels.Size(); const auto& h_labels = labels.HostVector(); const auto& h_weights = weights.HostVector(); const auto& h_preds = preds.HostVector(); bst_float residue_sum = 0; bst_float weights_sum = 0; #pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static) for (omp_ulong i = 0; i < ndata; ++i) { const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f; residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt; weights_sum += wt; } PackedReduceResult res { residue_sum, weights_sum }; return res; } #if defined(XGBOOST_USE_CUDA) ~ElementWiseMetricsReduction() { if (device_ >= 0) { dh::safe_cuda(hipSetDevice(device_)); allocator_.Free(); } } PackedReduceResult DeviceReduceMetrics( const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds) { size_t n_data = preds.Size(); thrust::counting_iterator<size_t> begin(0); thrust::counting_iterator<size_t> end = begin + n_data; auto s_label = labels.DeviceSpan(); auto s_preds = preds.DeviceSpan(); auto s_weights = weights.DeviceSpan(); bool const is_null_weight = weights.Size() == 0; auto d_policy = policy_; PackedReduceResult result = thrust::transform_reduce( thrust::hip::par(allocator_), begin, end, [=] XGBOOST_DEVICE(size_t idx) { bst_float weight = is_null_weight ? 
1.0f : s_weights[idx]; bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]); residue *= weight; return PackedReduceResult{ residue, weight }; }, PackedReduceResult(), thrust::plus<PackedReduceResult>()); return result; } #endif // XGBOOST_USE_CUDA PackedReduceResult Reduce( const GenericParameter &tparam, int device, const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds) { PackedReduceResult result; if (device < 0) { result = CpuReduceMetrics(weights, labels, preds); } #if defined(XGBOOST_USE_CUDA) else { // NOLINT device_ = device; preds.SetDevice(device_); labels.SetDevice(device_); weights.SetDevice(device_); dh::safe_cuda(hipSetDevice(device_)); result = DeviceReduceMetrics(weights, labels, preds); } #endif // defined(XGBOOST_USE_CUDA) return result; } private: EvalRow policy_; #if defined(XGBOOST_USE_CUDA) int device_{-1}; dh::CubMemory allocator_; #endif // defined(XGBOOST_USE_CUDA) }; struct EvalRowRMSE { char const *Name() const { return "rmse"; } XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { bst_float diff = label - pred; return diff * diff; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum); } }; struct EvalRowRMSLE { char const* Name() const { return "rmsle"; } XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { bst_float diff = std::log1p(label) - std::log1p(pred); return diff * diff; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum); } }; struct EvalRowMAE { const char *Name() const { return "mae"; } XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { return std::abs(label - pred); } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? esum : esum / wsum; } }; struct EvalRowLogLoss { const char *Name() const { return "logloss"; } XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const { const bst_float eps = 1e-16f; const bst_float pneg = 1.0f - py; if (py < eps) { return -y * ::log(eps) - (1.0f - y) * ::log(1.0f - eps); } else if (pneg < eps) { return -y * ::log(1.0f - eps) - (1.0f - y) * ::log(eps); } else { return -y * ::log(py) - (1.0f - y) * ::log(pneg); } } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? esum : esum / wsum; } }; struct EvalError { explicit EvalError(const char* param) { if (param != nullptr) { CHECK_EQ(sscanf(param, "%f", &threshold_), 1) << "unable to parse the threshold value for the error metric"; has_param_ = true; } else { threshold_ = 0.5f; has_param_ = false; } } const char *Name() const { static std::string name; if (has_param_) { std::ostringstream os; os << "error"; if (threshold_ != 0.5f) os << '@' << threshold_; name = os.str(); return name.c_str(); } else { return "error"; } } XGBOOST_DEVICE bst_float EvalRow( bst_float label, bst_float pred) const { // assume label is in [0,1] return pred > threshold_ ? 1.0f - label : label; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? 
esum : esum / wsum; } private: bst_float threshold_; bool has_param_; }; struct EvalPoissonNegLogLik { const char *Name() const { return "poisson-nloglik"; } XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const { const bst_float eps = 1e-16f; if (py < eps) py = eps; return common::LogGamma(y + 1.0f) + py - ::log(py) * y; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? esum : esum / wsum; } }; struct EvalGammaDeviance { const char *Name() const { return "gamma-deviance"; } XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { bst_float epsilon = 1.0e-9; bst_float tmp = label / (pred + epsilon); return tmp - ::log(tmp) - 1; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return 2 * esum; } }; struct EvalGammaNLogLik { static const char *Name() { return "gamma-nloglik"; } XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const { bst_float psi = 1.0; bst_float theta = -1. / py; bst_float a = psi; bst_float b = -::log(-theta); bst_float c = 1. / psi * ::log(y/psi) - ::log(y) - common::LogGamma(1. / psi); return -((y * theta - b) / a + c); } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? esum : esum / wsum; } }; struct EvalTweedieNLogLik { explicit EvalTweedieNLogLik(const char* param) { CHECK(param != nullptr) << "tweedie-nloglik must be in format tweedie-nloglik@rho"; rho_ = atof(param); CHECK(rho_ < 2 && rho_ >= 1) << "tweedie variance power must be in interval [1, 2)"; } const char *Name() const { static std::string name; std::ostringstream os; os << "tweedie-nloglik@" << rho_; name = os.str(); return name.c_str(); } XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const { bst_float a = y * ::exp((1 - rho_) * ::log(p)) / (1 - rho_); bst_float b = ::exp((2 - rho_) * ::log(p)) / (2 - rho_); return -a + b; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? esum : esum / wsum; } protected: bst_float rho_; }; /*! 
* \brief base class of element-wise evaluation * \tparam Derived the name of subclass */ template<typename Policy> struct EvalEWiseBase : public Metric { EvalEWiseBase() = default; explicit EvalEWiseBase(char const* policy_param) : policy_{policy_param}, reducer_{policy_} {} bst_float Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, bool distributed) override { if (info.labels_.Size() == 0) { LOG(WARNING) << "label set is empty"; } CHECK_EQ(preds.Size(), info.labels_.Size()) << "label and prediction size not match, " << "hint: use merror or mlogloss for multi-class classification"; int device = tparam_->gpu_id; auto result = reducer_.Reduce(*tparam_, device, info.weights_, info.labels_, preds); double dat[2] { result.Residue(), result.Weights() }; if (distributed) { rabit::Allreduce<rabit::op::Sum>(dat, 2); } return Policy::GetFinal(dat[0], dat[1]); } const char* Name() const override { return policy_.Name(); } private: Policy policy_; ElementWiseMetricsReduction<Policy> reducer_{policy_}; }; XGBOOST_REGISTER_METRIC(RMSE, "rmse") .describe("Rooted mean square error.") .set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); }); XGBOOST_REGISTER_METRIC(RMSLE, "rmsle") .describe("Rooted mean square log error.") .set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSLE>(); }); XGBOOST_REGISTER_METRIC(MAE, "mae") .describe("Mean absolute error.") .set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); }); XGBOOST_REGISTER_METRIC(LogLoss, "logloss") .describe("Negative loglikelihood for logistic regression.") .set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); }); XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik") .describe("Negative loglikelihood for poisson regression.") .set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); }); XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance") .describe("Residual deviance for gamma regression.") .set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); }); XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik") .describe("Negative log-likelihood for gamma regression.") .set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); }); XGBOOST_REGISTER_METRIC(Error, "error") .describe("Binary classification error.") .set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); }); XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik") .describe("tweedie-nloglik@rho for tweedie regression.") .set_body([](const char* param) { return new EvalEWiseBase<EvalTweedieNLogLik>(param); }); } // namespace metric } // namespace xgboost
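The DeviceReduceMetrics method above computes both accumulators in one pass: a single thrust::transform_reduce maps each index to a PackedReduceResult holding {weighted residue, weight} and sums the pairs. A minimal self-contained sketch of that pattern (illustrative names and a squared-error row metric, not the XGBoost types):

#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <vector>
#include <cmath>
#include <cstdio>

struct PairSum {                       // stand-in for PackedReduceResult
  double residue; double weight;
  __host__ __device__ PairSum operator+(const PairSum &o) const {
    return PairSum{residue + o.residue, weight + o.weight};
  }
};

struct WeightedSquaredError {          // stand-in for an EvalRow policy plus weighting
  const float *labels; const float *preds; const float *weights;
  __host__ __device__ PairSum operator()(size_t i) const {
    float d = labels[i] - preds[i];
    return PairSum{double(d) * d * weights[i], double(weights[i])};
  }
};

int main() {
  std::vector<float> hl{1.f, 0.f, 1.f}, hp{0.9f, 0.2f, 0.4f}, hw{1.f, 2.f, 1.f};
  thrust::device_vector<float> l(hl), p(hp), w(hw);
  WeightedSquaredError row{thrust::raw_pointer_cast(l.data()),
                           thrust::raw_pointer_cast(p.data()),
                           thrust::raw_pointer_cast(w.data())};
  PairSum r = thrust::transform_reduce(thrust::device,
                                       thrust::counting_iterator<size_t>(0),
                                       thrust::counting_iterator<size_t>(hl.size()),
                                       row, PairSum{0.0, 0.0}, thrust::plus<PairSum>());
  std::printf("weighted rmse ~ %f\n", std::sqrt(r.residue / r.weight));
  return 0;
}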
6a9445a201a740a5e5d756763fa3004706839c77.cu
/*! * Copyright 2015-2019 by Contributors * \file elementwise_metric.cc * \brief evaluation metrics for elementwise binary or regression. * \author Kailong Chen, Tianqi Chen * * The expressions like wsum == 0 ? esum : esum / wsum is used to handle empty dataset. */ #include <rabit/rabit.h> #include <xgboost/metric.h> #include <dmlc/registry.h> #include <cmath> #include "metric_common.h" #include "../common/math.h" #include "../common/common.h" #if defined(XGBOOST_USE_CUDA) #include <thrust/execution_policy.h> // thrust::cuda::par #include <thrust/functional.h> // thrust::plus<> #include <thrust/transform_reduce.h> #include <thrust/iterator/counting_iterator.h> #include "../common/device_helpers.cuh" #endif // XGBOOST_USE_CUDA namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(elementwise_metric); template <typename EvalRow> class ElementWiseMetricsReduction { public: explicit ElementWiseMetricsReduction(EvalRow policy) : policy_(std::move(policy)) {} PackedReduceResult CpuReduceMetrics( const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds) const { size_t ndata = labels.Size(); const auto& h_labels = labels.HostVector(); const auto& h_weights = weights.HostVector(); const auto& h_preds = preds.HostVector(); bst_float residue_sum = 0; bst_float weights_sum = 0; #pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static) for (omp_ulong i = 0; i < ndata; ++i) { const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f; residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt; weights_sum += wt; } PackedReduceResult res { residue_sum, weights_sum }; return res; } #if defined(XGBOOST_USE_CUDA) ~ElementWiseMetricsReduction() { if (device_ >= 0) { dh::safe_cuda(cudaSetDevice(device_)); allocator_.Free(); } } PackedReduceResult DeviceReduceMetrics( const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds) { size_t n_data = preds.Size(); thrust::counting_iterator<size_t> begin(0); thrust::counting_iterator<size_t> end = begin + n_data; auto s_label = labels.DeviceSpan(); auto s_preds = preds.DeviceSpan(); auto s_weights = weights.DeviceSpan(); bool const is_null_weight = weights.Size() == 0; auto d_policy = policy_; PackedReduceResult result = thrust::transform_reduce( thrust::cuda::par(allocator_), begin, end, [=] XGBOOST_DEVICE(size_t idx) { bst_float weight = is_null_weight ? 
1.0f : s_weights[idx]; bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]); residue *= weight; return PackedReduceResult{ residue, weight }; }, PackedReduceResult(), thrust::plus<PackedReduceResult>()); return result; } #endif // XGBOOST_USE_CUDA PackedReduceResult Reduce( const GenericParameter &tparam, int device, const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds) { PackedReduceResult result; if (device < 0) { result = CpuReduceMetrics(weights, labels, preds); } #if defined(XGBOOST_USE_CUDA) else { // NOLINT device_ = device; preds.SetDevice(device_); labels.SetDevice(device_); weights.SetDevice(device_); dh::safe_cuda(cudaSetDevice(device_)); result = DeviceReduceMetrics(weights, labels, preds); } #endif // defined(XGBOOST_USE_CUDA) return result; } private: EvalRow policy_; #if defined(XGBOOST_USE_CUDA) int device_{-1}; dh::CubMemory allocator_; #endif // defined(XGBOOST_USE_CUDA) }; struct EvalRowRMSE { char const *Name() const { return "rmse"; } XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { bst_float diff = label - pred; return diff * diff; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum); } }; struct EvalRowRMSLE { char const* Name() const { return "rmsle"; } XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { bst_float diff = std::log1p(label) - std::log1p(pred); return diff * diff; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum); } }; struct EvalRowMAE { const char *Name() const { return "mae"; } XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { return std::abs(label - pred); } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? esum : esum / wsum; } }; struct EvalRowLogLoss { const char *Name() const { return "logloss"; } XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const { const bst_float eps = 1e-16f; const bst_float pneg = 1.0f - py; if (py < eps) { return -y * std::log(eps) - (1.0f - y) * std::log(1.0f - eps); } else if (pneg < eps) { return -y * std::log(1.0f - eps) - (1.0f - y) * std::log(eps); } else { return -y * std::log(py) - (1.0f - y) * std::log(pneg); } } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? esum : esum / wsum; } }; struct EvalError { explicit EvalError(const char* param) { if (param != nullptr) { CHECK_EQ(sscanf(param, "%f", &threshold_), 1) << "unable to parse the threshold value for the error metric"; has_param_ = true; } else { threshold_ = 0.5f; has_param_ = false; } } const char *Name() const { static std::string name; if (has_param_) { std::ostringstream os; os << "error"; if (threshold_ != 0.5f) os << '@' << threshold_; name = os.str(); return name.c_str(); } else { return "error"; } } XGBOOST_DEVICE bst_float EvalRow( bst_float label, bst_float pred) const { // assume label is in [0,1] return pred > threshold_ ? 1.0f - label : label; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? 
esum : esum / wsum; } private: bst_float threshold_; bool has_param_; }; struct EvalPoissonNegLogLik { const char *Name() const { return "poisson-nloglik"; } XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const { const bst_float eps = 1e-16f; if (py < eps) py = eps; return common::LogGamma(y + 1.0f) + py - std::log(py) * y; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? esum : esum / wsum; } }; struct EvalGammaDeviance { const char *Name() const { return "gamma-deviance"; } XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { bst_float epsilon = 1.0e-9; bst_float tmp = label / (pred + epsilon); return tmp - std::log(tmp) - 1; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return 2 * esum; } }; struct EvalGammaNLogLik { static const char *Name() { return "gamma-nloglik"; } XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const { bst_float psi = 1.0; bst_float theta = -1. / py; bst_float a = psi; bst_float b = -std::log(-theta); bst_float c = 1. / psi * std::log(y/psi) - std::log(y) - common::LogGamma(1. / psi); return -((y * theta - b) / a + c); } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? esum : esum / wsum; } }; struct EvalTweedieNLogLik { explicit EvalTweedieNLogLik(const char* param) { CHECK(param != nullptr) << "tweedie-nloglik must be in format tweedie-nloglik@rho"; rho_ = atof(param); CHECK(rho_ < 2 && rho_ >= 1) << "tweedie variance power must be in interval [1, 2)"; } const char *Name() const { static std::string name; std::ostringstream os; os << "tweedie-nloglik@" << rho_; name = os.str(); return name.c_str(); } XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const { bst_float a = y * std::exp((1 - rho_) * std::log(p)) / (1 - rho_); bst_float b = std::exp((2 - rho_) * std::log(p)) / (2 - rho_); return -a + b; } static bst_float GetFinal(bst_float esum, bst_float wsum) { return wsum == 0 ? esum : esum / wsum; } protected: bst_float rho_; }; /*! 
* \brief base class of element-wise evaluation * \tparam Derived the name of subclass */ template<typename Policy> struct EvalEWiseBase : public Metric { EvalEWiseBase() = default; explicit EvalEWiseBase(char const* policy_param) : policy_{policy_param}, reducer_{policy_} {} bst_float Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, bool distributed) override { if (info.labels_.Size() == 0) { LOG(WARNING) << "label set is empty"; } CHECK_EQ(preds.Size(), info.labels_.Size()) << "label and prediction size not match, " << "hint: use merror or mlogloss for multi-class classification"; int device = tparam_->gpu_id; auto result = reducer_.Reduce(*tparam_, device, info.weights_, info.labels_, preds); double dat[2] { result.Residue(), result.Weights() }; if (distributed) { rabit::Allreduce<rabit::op::Sum>(dat, 2); } return Policy::GetFinal(dat[0], dat[1]); } const char* Name() const override { return policy_.Name(); } private: Policy policy_; ElementWiseMetricsReduction<Policy> reducer_{policy_}; }; XGBOOST_REGISTER_METRIC(RMSE, "rmse") .describe("Rooted mean square error.") .set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); }); XGBOOST_REGISTER_METRIC(RMSLE, "rmsle") .describe("Rooted mean square log error.") .set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSLE>(); }); XGBOOST_REGISTER_METRIC(MAE, "mae") .describe("Mean absolute error.") .set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); }); XGBOOST_REGISTER_METRIC(LogLoss, "logloss") .describe("Negative loglikelihood for logistic regression.") .set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); }); XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik") .describe("Negative loglikelihood for poisson regression.") .set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); }); XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance") .describe("Residual deviance for gamma regression.") .set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); }); XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik") .describe("Negative log-likelihood for gamma regression.") .set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); }); XGBOOST_REGISTER_METRIC(Error, "error") .describe("Binary classification error.") .set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); }); XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik") .describe("tweedie-nloglik@rho for tweedie regression.") .set_body([](const char* param) { return new EvalEWiseBase<EvalTweedieNLogLik>(param); }); } // namespace metric } // namespace xgboost
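Diffing this .cu body against its .hip counterpart above shows the renames hipify applied to this file: cudaSetDevice becomes hipSetDevice, the Thrust policy thrust::cuda::par(allocator_) becomes thrust::hip::par(allocator_), and the std::log/std::exp calls inside the device row policies become the global ::log/::exp (while std::log1p, std::abs and the host-side std::sqrt are left as they were). A tiny runnable CUDA snippet for the first rename, with the HIP spelling kept to comments:

#include <cuda_runtime.h>
#include <cstdio>

int main() {
  // .cu body:  dh::safe_cuda(cudaSetDevice(device_));
  // .hip body: dh::safe_cuda(hipSetDevice(device_));
  cudaError_t err = cudaSetDevice(0);
  // Same pair, Thrust policy: thrust::cuda::par(alloc) -> thrust::hip::par(alloc)
  std::printf("cudaSetDevice(0): %s\n", cudaGetErrorString(err));
  return 0;
}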
a13e965dd87567a66c7007fcc1cf971f36c1e936.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // Copyright 2017 Antonio Carrasco Valero. All rights reserved. // //////////////////////////////////////////////////////////////////////////// /* memcpystreamsonehostthread.cu * 201709050310 * * Exercise copying memory from host to device and back to PINNED memory on the host, scheduling the kernel in a variable number of blocks. * */ /* Started from Template project which demonstrates the basics on how to setup a project * example application. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes CUDA #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> // includes, project #include "helper_cuda.h" #include "helper_functions.h" // helper functions for SDK examples //////////////////////////////////////////////////////////////////////////////// // declaration, forward void do_memcpytodevtohost(int argc, char **argv); extern "C" void computeGold(float *reference, float *idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// __device__ void memcpytodevtohostKernel_coalescing(int *g_idata, int *g_odata, int theIntsToCopyPerThread) { // access thread id const unsigned int tid = threadIdx.x; // access number of threads in this block const unsigned int num_threads = blockDim.x; const unsigned int block = blockIdx.x; const unsigned int blockstart = block * num_threads * theIntsToCopyPerThread; for (unsigned int anIntIdx = 0; anIntIdx < theIntsToCopyPerThread; ++anIntIdx) { unsigned int aDataIndex = 0; aDataIndex = blockstart + ( anIntIdx * num_threads) + tid; int aDataValue = g_idata[ aDataIndex]; g_odata[ aDataIndex] = aDataValue; } } __global__ void memcpytodevtohostKernel(int *g_idata, int *g_odata, int theIntsToCopyPerThread) { memcpytodevtohostKernel_coalescing( g_idata, g_odata, theIntsToCopyPerThread); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { do_memcpytodevtohost(argc, argv); } void cleanOuput( int *theH_odata, int theIntsToCopyPerThread, int theNumTreads) { // initalize the memory for (unsigned int anIntIdx = 0; anIntIdx < theIntsToCopyPerThread; ++anIntIdx) { for (unsigned int aThreadIdx = 0; aThreadIdx < theNumTreads; ++aThreadIdx) { int aDataIndex = ( anIntIdx * theNumTreads) + aThreadIdx; theH_odata[ aDataIndex] = 0; } } } void do_memcpystreamsonehostthread_general(int argc, char **argv, int theIntsToCopyPerThread, int theNumThreads, int theNumBatches, int theBlocksPerPatch, char *theTitle, void (*theInitInputFunct)(int *, int, int), int (*theCheckOutputFunct)( int *, int, int)) { printf("%s Starting...\n\n", argv[0]); // use command-line specified CUDA device, otherwise use device with highest Gflops/s int devID = findCudaDevice(argc, (const char **)argv); unsigned int intstocopy_perthread = theIntsToCopyPerThread; unsigned int num_threads = theNumThreads; unsigned int num_batches = theNumBatches; unsigned int blocks_per_batch = theBlocksPerPatch; unsigned int num_ints = num_batches * blocks_per_batch * num_threads * 
intstocopy_perthread; unsigned int mem_size = num_ints * sizeof(int); printf("%s\n", theTitle); printf("intstocopy_perthread=%u; num_threads=%u; num_batches=%u; blocks_per_batch=%u; num_ints=%u; mem_size=%u\n", intstocopy_perthread, num_threads, num_batches, blocks_per_batch, num_ints, mem_size); int *h_idata = 0; int *h_odata = 0; // allocate mem for the result on host side checkCudaErrors( hipHostMalloc( &h_idata, num_ints * sizeof( int))); (*theInitInputFunct)( h_idata, intstocopy_perthread, num_threads); // allocate mem for the result on host side checkCudaErrors( hipHostMalloc( &h_odata, num_ints * sizeof( int))); cleanOuput( h_odata, intstocopy_perthread, num_threads); StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkStartTimer(&timer); // allocate device memory int *d_idata; checkCudaErrors(hipMalloc((void **) &d_idata, mem_size)); // allocate device memory for result int *d_odata; checkCudaErrors(hipMalloc((void **) &d_odata, mem_size)); hipStream_t aStream; hipStreamCreate( &aStream); // setup execution parameters dim3 grid( blocks_per_batch, 1, 1); dim3 threads( num_threads, 1, 1); unsigned int aNumIntsPerBatch = blocks_per_batch * num_threads * intstocopy_perthread; unsigned int anIntsPerBatchMemsize = aNumIntsPerBatch * sizeof( int); for( int aBatchIdx=0; aBatchIdx < num_batches; aBatchIdx++) { unsigned int aBatch_idata_Idx = aBatchIdx * aNumIntsPerBatch; int *h_idata_batch = &h_idata[ aBatch_idata_Idx]; int *d_idata_batch = &d_idata[ aBatch_idata_Idx]; checkCudaErrors( hipMemcpyAsync(d_idata_batch, h_idata_batch, anIntsPerBatchMemsize, hipMemcpyHostToDevice, aStream)); hipLaunchKernelGGL(( memcpytodevtohostKernel), dim3(grid), dim3(threads),0, aStream, d_idata, d_odata, intstocopy_perthread); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); int *h_odata_batch = &h_odata[ aBatch_idata_Idx]; int *d_odata_batch = &d_odata[ aBatch_idata_Idx]; // copy result from device to host checkCudaErrors( hipMemcpyAsync(h_odata_batch, d_odata_batch, anIntsPerBatchMemsize, hipMemcpyDeviceToHost, aStream)); } checkCudaErrors( hipStreamSynchronize( aStream)); // checkCudaErrors( hipStreamDestroy( aStream)); sdkStopTimer(&timer); printf("Memory and processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); int aCheckedOk = (*theCheckOutputFunct)( h_odata, intstocopy_perthread, num_threads); // cleanup device memory checkCudaErrors(hipFree(d_idata)); checkCudaErrors(hipFree(d_odata)); StopWatchInterface *timercpu = 0; sdkCreateTimer(&timercpu); sdkStartTimer(&timercpu); /* for( int aHostIdx=0; aHostIdx < num_ints; aHostIdx++) { h_odata[ aHostIdx] = h_idata[ aHostIdx]; } */ int *aHost_iptr = h_idata; int *aHost_optr = h_odata; for( int aHostIdx=0; aHostIdx < num_ints; aHostIdx++) { *aHost_optr = *aHost_iptr; aHost_optr++; aHost_iptr++; } sdkStopTimer(&timercpu); printf("CPU Processing time: %f (ms)\n", sdkGetTimerValue(&timercpu)); // cleanup host memory checkCudaErrors( hipHostFree( h_idata)); checkCudaErrors( hipHostFree( h_odata)); if( aCheckedOk) { printf("%s All output OK\n", theTitle); } else { printf("%s Error. 
Exiting.\n", theTitle); exit( EXIT_FAILURE); } } // FASTER transfer from device to host /* */ #define NUM_BATCHES 30 #define BLOCKSPERBATCH 10 // #define NUM_BLOCKS ( NUM_BATCHES * BLOCKSPERBATCH) #define NUM_THREADS 4 * 32 #define INTSTOCOPY_PERTHREAD 6976 /* */ // SLOWER transfer from device to host /* #define NUM_BLOCKS 30 #define NUM_THREADS 32 #define INTSTOCOPY_PERTHREAD 279618 */ void initInputFunct_contiguousInThread( int *theH_idata, int theIntsToCopyPerThread, int theNumTreads) { // initalize the memory for (unsigned int anIntIdx = 0; anIntIdx < theIntsToCopyPerThread; ++anIntIdx) { for (unsigned int aThreadIdx = 0; aThreadIdx < theNumTreads; ++aThreadIdx) { int aDataIndex = ( anIntIdx * theNumTreads) + aThreadIdx; int aDataValue = ( aThreadIdx * theIntsToCopyPerThread) + anIntIdx; theH_idata[ aDataIndex] = aDataValue; } } } int checkOutputFunct_contiguousInThread( int *theH_odata, int theIntsToCopyPerThread, int theNumTreads) { for (unsigned int anIntIdx = 0; anIntIdx < theIntsToCopyPerThread; ++anIntIdx) { for (unsigned int aThreadIdx = 0; aThreadIdx < theNumTreads; ++aThreadIdx) { int aDataIndex = ( anIntIdx * theNumTreads) + aThreadIdx; int anExpected = ( aThreadIdx * theIntsToCopyPerThread) + anIntIdx; int anActual = theH_odata[ aDataIndex]; if( !( anActual == anExpected)) { printf("h_odata[ %d] = %d NOT THE EXPECTED %d\n", aDataIndex, anActual, anExpected); return 0; } } } return 1; } void do_do_memcpytstreamsonehostthread(int argc, char **argv) { do_memcpystreamsonehostthread_general(argc, argv, INTSTOCOPY_PERTHREAD, NUM_THREADS, NUM_BATCHES, BLOCKSPERBATCH, "do_do_memcpytstreamsonehostthread", &initInputFunct_contiguousInThread, &checkOutputFunct_contiguousInThread); } void do_memcpytodevtohost(int argc, char **argv) { do_do_memcpytstreamsonehostthread( argc, argv); hipProfilerStop(); exit(EXIT_SUCCESS); }
a13e965dd87567a66c7007fcc1cf971f36c1e936.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 2017 Antonio Carrasco Valero. All rights reserved. // //////////////////////////////////////////////////////////////////////////// /* memcpystreamsonehostthread.cu * 201709050310 * * Exercise copying memory from host to device and back to PINNED memory on the host, scheduling the kernel in a variable number of blocks. * */ /* Started from Template project which demonstrates the basics on how to setup a project * example application. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes CUDA #include <cuda_runtime.h> #include <cuda_profiler_api.h> // includes, project #include "helper_cuda.h" #include "helper_functions.h" // helper functions for SDK examples //////////////////////////////////////////////////////////////////////////////// // declaration, forward void do_memcpytodevtohost(int argc, char **argv); extern "C" void computeGold(float *reference, float *idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// __device__ void memcpytodevtohostKernel_coalescing(int *g_idata, int *g_odata, int theIntsToCopyPerThread) { // access thread id const unsigned int tid = threadIdx.x; // access number of threads in this block const unsigned int num_threads = blockDim.x; const unsigned int block = blockIdx.x; const unsigned int blockstart = block * num_threads * theIntsToCopyPerThread; for (unsigned int anIntIdx = 0; anIntIdx < theIntsToCopyPerThread; ++anIntIdx) { unsigned int aDataIndex = 0; aDataIndex = blockstart + ( anIntIdx * num_threads) + tid; int aDataValue = g_idata[ aDataIndex]; g_odata[ aDataIndex] = aDataValue; } } __global__ void memcpytodevtohostKernel(int *g_idata, int *g_odata, int theIntsToCopyPerThread) { memcpytodevtohostKernel_coalescing( g_idata, g_odata, theIntsToCopyPerThread); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { do_memcpytodevtohost(argc, argv); } void cleanOuput( int *theH_odata, int theIntsToCopyPerThread, int theNumTreads) { // initalize the memory for (unsigned int anIntIdx = 0; anIntIdx < theIntsToCopyPerThread; ++anIntIdx) { for (unsigned int aThreadIdx = 0; aThreadIdx < theNumTreads; ++aThreadIdx) { int aDataIndex = ( anIntIdx * theNumTreads) + aThreadIdx; theH_odata[ aDataIndex] = 0; } } } void do_memcpystreamsonehostthread_general(int argc, char **argv, int theIntsToCopyPerThread, int theNumThreads, int theNumBatches, int theBlocksPerPatch, char *theTitle, void (*theInitInputFunct)(int *, int, int), int (*theCheckOutputFunct)( int *, int, int)) { printf("%s Starting...\n\n", argv[0]); // use command-line specified CUDA device, otherwise use device with highest Gflops/s int devID = findCudaDevice(argc, (const char **)argv); unsigned int intstocopy_perthread = theIntsToCopyPerThread; unsigned int num_threads = theNumThreads; unsigned int num_batches = theNumBatches; unsigned int blocks_per_batch = theBlocksPerPatch; unsigned int num_ints = num_batches * blocks_per_batch * num_threads * intstocopy_perthread; unsigned int mem_size = num_ints * 
sizeof(int); printf("%s\n", theTitle); printf("intstocopy_perthread=%u; num_threads=%u; num_batches=%u; blocks_per_batch=%u; num_ints=%u; mem_size=%u\n", intstocopy_perthread, num_threads, num_batches, blocks_per_batch, num_ints, mem_size); int *h_idata = 0; int *h_odata = 0; // allocate mem for the result on host side checkCudaErrors( cudaMallocHost( &h_idata, num_ints * sizeof( int))); (*theInitInputFunct)( h_idata, intstocopy_perthread, num_threads); // allocate mem for the result on host side checkCudaErrors( cudaMallocHost( &h_odata, num_ints * sizeof( int))); cleanOuput( h_odata, intstocopy_perthread, num_threads); StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkStartTimer(&timer); // allocate device memory int *d_idata; checkCudaErrors(cudaMalloc((void **) &d_idata, mem_size)); // allocate device memory for result int *d_odata; checkCudaErrors(cudaMalloc((void **) &d_odata, mem_size)); cudaStream_t aStream; cudaStreamCreate( &aStream); // setup execution parameters dim3 grid( blocks_per_batch, 1, 1); dim3 threads( num_threads, 1, 1); unsigned int aNumIntsPerBatch = blocks_per_batch * num_threads * intstocopy_perthread; unsigned int anIntsPerBatchMemsize = aNumIntsPerBatch * sizeof( int); for( int aBatchIdx=0; aBatchIdx < num_batches; aBatchIdx++) { unsigned int aBatch_idata_Idx = aBatchIdx * aNumIntsPerBatch; int *h_idata_batch = &h_idata[ aBatch_idata_Idx]; int *d_idata_batch = &d_idata[ aBatch_idata_Idx]; checkCudaErrors( cudaMemcpyAsync(d_idata_batch, h_idata_batch, anIntsPerBatchMemsize, cudaMemcpyHostToDevice, aStream)); memcpytodevtohostKernel<<< grid, threads,0, aStream>>>(d_idata, d_odata, intstocopy_perthread); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); int *h_odata_batch = &h_odata[ aBatch_idata_Idx]; int *d_odata_batch = &d_odata[ aBatch_idata_Idx]; // copy result from device to host checkCudaErrors( cudaMemcpyAsync(h_odata_batch, d_odata_batch, anIntsPerBatchMemsize, cudaMemcpyDeviceToHost, aStream)); } checkCudaErrors( cudaStreamSynchronize( aStream)); // checkCudaErrors( cudaStreamDestroy( aStream)); sdkStopTimer(&timer); printf("Memory and processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); int aCheckedOk = (*theCheckOutputFunct)( h_odata, intstocopy_perthread, num_threads); // cleanup device memory checkCudaErrors(cudaFree(d_idata)); checkCudaErrors(cudaFree(d_odata)); StopWatchInterface *timercpu = 0; sdkCreateTimer(&timercpu); sdkStartTimer(&timercpu); /* for( int aHostIdx=0; aHostIdx < num_ints; aHostIdx++) { h_odata[ aHostIdx] = h_idata[ aHostIdx]; } */ int *aHost_iptr = h_idata; int *aHost_optr = h_odata; for( int aHostIdx=0; aHostIdx < num_ints; aHostIdx++) { *aHost_optr = *aHost_iptr; aHost_optr++; aHost_iptr++; } sdkStopTimer(&timercpu); printf("CPU Processing time: %f (ms)\n", sdkGetTimerValue(&timercpu)); // cleanup host memory checkCudaErrors( cudaFreeHost( h_idata)); checkCudaErrors( cudaFreeHost( h_odata)); if( aCheckedOk) { printf("%s All output OK\n", theTitle); } else { printf("%s Error. 
Exiting.\n", theTitle); exit( EXIT_FAILURE); } } // FASTER transfer from device to host /* */ #define NUM_BATCHES 30 #define BLOCKSPERBATCH 10 // #define NUM_BLOCKS ( NUM_BATCHES * BLOCKSPERBATCH) #define NUM_THREADS 4 * 32 #define INTSTOCOPY_PERTHREAD 6976 /* */ // SLOWER transfer from device to host /* #define NUM_BLOCKS 30 #define NUM_THREADS 32 #define INTSTOCOPY_PERTHREAD 279618 */ void initInputFunct_contiguousInThread( int *theH_idata, int theIntsToCopyPerThread, int theNumTreads) { // initalize the memory for (unsigned int anIntIdx = 0; anIntIdx < theIntsToCopyPerThread; ++anIntIdx) { for (unsigned int aThreadIdx = 0; aThreadIdx < theNumTreads; ++aThreadIdx) { int aDataIndex = ( anIntIdx * theNumTreads) + aThreadIdx; int aDataValue = ( aThreadIdx * theIntsToCopyPerThread) + anIntIdx; theH_idata[ aDataIndex] = aDataValue; } } } int checkOutputFunct_contiguousInThread( int *theH_odata, int theIntsToCopyPerThread, int theNumTreads) { for (unsigned int anIntIdx = 0; anIntIdx < theIntsToCopyPerThread; ++anIntIdx) { for (unsigned int aThreadIdx = 0; aThreadIdx < theNumTreads; ++aThreadIdx) { int aDataIndex = ( anIntIdx * theNumTreads) + aThreadIdx; int anExpected = ( aThreadIdx * theIntsToCopyPerThread) + anIntIdx; int anActual = theH_odata[ aDataIndex]; if( !( anActual == anExpected)) { printf("h_odata[ %d] = %d NOT THE EXPECTED %d\n", aDataIndex, anActual, anExpected); return 0; } } } return 1; } void do_do_memcpytstreamsonehostthread(int argc, char **argv) { do_memcpystreamsonehostthread_general(argc, argv, INTSTOCOPY_PERTHREAD, NUM_THREADS, NUM_BATCHES, BLOCKSPERBATCH, "do_do_memcpytstreamsonehostthread", &initInputFunct_contiguousInThread, &checkOutputFunct_contiguousInThread); } void do_memcpytodevtohost(int argc, char **argv) { do_do_memcpytstreamsonehostthread( argc, argv); cudaProfilerStop(); exit(EXIT_SUCCESS); }
8b01b071624614362087fe8a20b5a22db80ff146.hip
// !!! This is a file automatically generated by hipify!!! /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <assert.h> // includes CUDA Runtime #include <hip/hip_runtime.h> /** * Convenience function for checking CUDA runtime API results * can be wrapped around any runtime API call. No-op in release builds. */ inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } __global__ void addVector(int *a, int *b, int *c, unsigned int n) { int thread_idxx = blockIdx.x * blockDim.x + threadIdx.x; if (thread_idxx < n) { c[thread_idxx] = a[thread_idxx] + b[thread_idxx]; } } void printVector(int *v, unsigned int n) { for (int i = 0; i < n; i++) { if (i + 1 < n) { printf("%d, ", v[i]); } else { printf("%d", v[i]); } } printf("\n"); } int main(int argc, char *argv[]) { unsigned int N = 32; // size of vectors const unsigned int size = N * sizeof(int); int T = 32, B = 1; // threads per block and blocks per grid int *a, *b, *c; // host pointers int *dev_a, *dev_b, *dev_c; // device pointers to host memory // runtime must be placed into a state enabling to allocate zero-copy buffers. checkCuda(hipSetDeviceFlags(hipDeviceMapHost)); checkCuda( hipHostMalloc((void**) &a, size, hipHostMallocWriteCombined | hipHostMallocMapped)); checkCuda( hipHostMalloc((void**) &b, size, hipHostMallocWriteCombined | hipHostMallocMapped)); checkCuda(hipHostMalloc((void**) &c, size, hipHostMallocMapped)); // init vectors for (int i = 0; i < N; i++) { a[i] = i; b[i] = i; } printf("Vector A: \n"); printVector(a, N); printf("Vector B: \n"); printVector(b, N); // mem. copy to device not need now, but ptrs needed instead checkCuda(hipHostGetDevicePointer(&dev_a, a, 0)); checkCuda(hipHostGetDevicePointer(&dev_b, b, 0)); checkCuda(hipHostGetDevicePointer(&dev_c, c, 0)); // to measure time hipEvent_t start, stop; float elapsed_time_ms; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( addVector), dim3(B), dim3(T), 0, 0, dev_a, dev_b, dev_c, N); // copy back not needed but now need thread synchronization hipDeviceSynchronize(); hipEventRecord(stop, 0); // print results printf("Vector C: \n"); printVector(c, N); hipEventElapsedTime(&elapsed_time_ms, start, stop); // print out execution time printf("Time to calculate results: %.2f ms.\n", elapsed_time_ms); // clean up hipHostFree(a); hipHostFree(b); hipHostFree(c); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
8b01b071624614362087fe8a20b5a22db80ff146.cu
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <assert.h> // includes CUDA Runtime #include <cuda_runtime.h> /** * Convenience function for checking CUDA runtime API results * can be wrapped around any runtime API call. No-op in release builds. */ inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } __global__ void addVector(int *a, int *b, int *c, unsigned int n) { int thread_idxx = blockIdx.x * blockDim.x + threadIdx.x; if (thread_idxx < n) { c[thread_idxx] = a[thread_idxx] + b[thread_idxx]; } } void printVector(int *v, unsigned int n) { for (int i = 0; i < n; i++) { if (i + 1 < n) { printf("%d, ", v[i]); } else { printf("%d", v[i]); } } printf("\n"); } int main(int argc, char *argv[]) { unsigned int N = 32; // size of vectors const unsigned int size = N * sizeof(int); int T = 32, B = 1; // threads per block and blocks per grid int *a, *b, *c; // host pointers int *dev_a, *dev_b, *dev_c; // device pointers to host memory // runtime must be placed into a state enabling to allocate zero-copy buffers. checkCuda(cudaSetDeviceFlags(cudaDeviceMapHost)); checkCuda( cudaHostAlloc((void**) &a, size, cudaHostAllocWriteCombined | cudaHostAllocMapped)); checkCuda( cudaHostAlloc((void**) &b, size, cudaHostAllocWriteCombined | cudaHostAllocMapped)); checkCuda(cudaHostAlloc((void**) &c, size, cudaHostAllocMapped)); // init vectors for (int i = 0; i < N; i++) { a[i] = i; b[i] = i; } printf("Vector A: \n"); printVector(a, N); printf("Vector B: \n"); printVector(b, N); // mem. copy to device not need now, but ptrs needed instead checkCuda(cudaHostGetDevicePointer(&dev_a, a, 0)); checkCuda(cudaHostGetDevicePointer(&dev_b, b, 0)); checkCuda(cudaHostGetDevicePointer(&dev_c, c, 0)); // to measure time cudaEvent_t start, stop; float elapsed_time_ms; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); addVector<<<B, T>>>(dev_a, dev_b, dev_c, N); // copy back not needed but now need thread synchronization cudaThreadSynchronize(); cudaEventRecord(stop, 0); // print results printf("Vector C: \n"); printVector(c, N); cudaEventElapsedTime(&elapsed_time_ms, start, stop); // print out execution time printf("Time to calculate results: %.2f ms.\n", elapsed_time_ms); // clean up cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
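This pair exercises zero-copy (mapped, write-combined) host memory rather than explicit copies; hipify maps cudaSetDeviceFlags(cudaDeviceMapHost) to hipSetDeviceFlags(hipDeviceMapHost), cudaHostAlloc with cudaHostAllocWriteCombined|cudaHostAllocMapped to hipHostMalloc with the hipHostMalloc* flags, cudaHostGetDevicePointer to hipHostGetDevicePointer, and the deprecated cudaThreadSynchronize to hipDeviceSynchronize. A minimal runnable CUDA sketch of the same zero-copy flow (mapped flag only):

#include <cuda_runtime.h>
#include <cstdio>

__global__ void addOne(int *v, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] += 1;
}

int main() {
  const int n = 32;
  // .hip equivalent: hipSetDeviceFlags(hipDeviceMapHost)
  cudaSetDeviceFlags(cudaDeviceMapHost);
  int *h = nullptr, *d = nullptr;
  // .hip equivalent: hipHostMalloc(..., hipHostMallocMapped)
  cudaHostAlloc((void**)&h, n * sizeof(int), cudaHostAllocMapped);
  for (int i = 0; i < n; ++i) h[i] = i;
  // .hip equivalent: hipHostGetDevicePointer
  cudaHostGetDevicePointer((void**)&d, h, 0);
  addOne<<<1, n>>>(d, n);
  // the .cu body above uses the deprecated cudaThreadSynchronize; hipify emits hipDeviceSynchronize
  cudaDeviceSynchronize();
  std::printf("h[0]=%d h[%d]=%d\n", h[0], n - 1, h[n - 1]);
  cudaFreeHost(h);   // .hip: hipHostFree
  return 0;
}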
16dfd4611cd133eb5b5ffa81020a3b764172e02e.hip
// !!! This is a file automatically generated by hipify!!! #ifndef CU_OCCULTQUAD_CWRAPPER #define CU_OCCULTQUAD_CWRAPPER #include "exofast_cuda_util.cuh" #include "occultquad.cu" // occultquad_wrapper_c: // C wrapper function // inputs: // z: // u1: // u2: // p: // num_data: integer size of input z array // num_model: integer size of input model parameter arrays // ph_muo1: pointer to beginning element of array of doubles // ph_mu1: pointer to beginning element of array of doubles // outputs: // ph_muo1: values overwritten with model flux for quadratic lim darkening law // ph_mu1: values overwritten with model flux for uniform limb darkening law // assumptions: // z array has at least num_data elements // ph_muo1 and ph_mu1 arrays have at least num_data*num_model elements // other arrays have at least num_param elements __host__ void occultquad_wrapper_c(const double *ph_z, const double *ph_u1, const double *ph_u2, const double *ph_p, const int num_data, const int num_model, double *ph_muo1, double *ph_mu1) { int gpuid = ebf::init_cuda(); // put vectors in thrust format from raw points int num = num_data*num_model; thrust::host_vector<double> h_z(ph_z,ph_z+num); thrust::host_vector<double> h_u1(ph_u1,ph_u1+num_model); thrust::host_vector<double> h_u2(ph_u2,ph_u2+num_model); thrust::host_vector<double> h_p(ph_p,ph_p+num_model); thrust::counting_iterator<int> index_begin(0); thrust::counting_iterator<int> index_end(num); if(gpuid>=0) { // allocate mem on GPU thrust::device_vector<double> d_z(num); thrust::device_vector<double> d_u1(num_model); thrust::device_vector<double> d_u2(num_model); thrust::device_vector<double> d_p(num_model); thrust::device_vector<double> d_muo1(num); thrust::device_vector<double> d_mu1(num); // transfer input params to GPU // start_timer_upload(); d_z = h_z; d_u1 = h_u1; d_u2 = h_u2; d_p = h_p; // stop_timer_upload(); // distribute the computation to the GPU hipDeviceSynchronize(); // start_timer_kernel(); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( d_z.begin(), thrust::make_permutation_iterator(d_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( d_z.end(), thrust::make_permutation_iterator(d_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple(d_muo1.begin(), d_mu1.begin() )), occultquad_functor() ); hipDeviceSynchronize(); // stop_timer_kernel(); // transfer results back to host // start_timer_download(); thrust::copy(d_muo1.begin(),d_muo1.end(),ph_muo1); thrust::copy(d_mu1.begin(), d_mu1.end(), ph_mu1); // stop_timer_download(); } else { // distribute the computation to the CPU // start_timer_kernel(); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( h_z.begin(), thrust::make_permutation_iterator(h_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), 
thrust::make_permutation_iterator(h_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( h_z.end(), thrust::make_permutation_iterator(h_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple(ph_muo1, ph_mu1)), occultquad_functor() ); // stop_timer_kernel(); } } // occultquad_only_wrapper_c: // C wrapper function // inputs: // z: // u1: // u2: // p: // num_data: integer size of input z array // num_model: integer size of input model parameter arrays // ph_muo1: pointer to beginning element of array of doubles // ph_mu1: pointer to beginning element of array of doubles // outputs: // ph_muo1: values overwritten with model flux for quadratic lim darkening law // assumptions: // z array has at least num_data elements // ph_muo1 array has at least num_data*num_model elements // other arrays have at least num_param elements __host__ void occultquad_only_wrapper_c(const double *ph_z, const double *ph_u1, const double *ph_u2, const double *ph_p, const int num_data, const int num_model, double *ph_muo1) { int gpuid = ebf::init_cuda(); // put vectors in thrust format from raw points int num = num_data*num_model; thrust::host_vector<double> h_z(ph_z,ph_z+num); thrust::host_vector<double> h_u1(ph_u1,ph_u1+num_model); thrust::host_vector<double> h_u2(ph_u2,ph_u2+num_model); thrust::host_vector<double> h_p(ph_p,ph_p+num_model); thrust::counting_iterator<int> index_begin(0); thrust::counting_iterator<int> index_end(num); if(gpuid>=0) { // allocate mem on GPU thrust::device_vector<double> d_z(num); thrust::device_vector<double> d_u1(num_model); thrust::device_vector<double> d_u2(num_model); thrust::device_vector<double> d_p(num_model); thrust::device_vector<double> d_muo1(num); // transfer input params to GPU // start_timer_upload(); d_z = h_z; d_u1 = h_u1; d_u2 = h_u2; d_p = h_p; // stop_timer_upload(); // distribute the computation to the GPU hipDeviceSynchronize(); // start_timer_kernel(); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( d_z.begin(), thrust::make_permutation_iterator(d_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( d_z.end(), thrust::make_permutation_iterator(d_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), d_muo1.begin(), occultquad_only_functor() ); hipDeviceSynchronize(); // stop_timer_kernel(); // transfer results back to host // start_timer_download(); thrust::copy(d_muo1.begin(),d_muo1.end(),ph_muo1); // 
stop_timer_download(); } else { // distribute the computation to the CPU // start_timer_kernel(); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( h_z.begin(), thrust::make_permutation_iterator(h_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( h_z.end(), thrust::make_permutation_iterator(h_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), ph_muo1, occultquad_only_functor() ); // stop_timer_kernel(); } } // occult_uniform_wrapper_c: // C wrapper function // inputs: // z: // u1: // u2: // p: // num_data: integer size of input z array // num_model: integer size of input model parameter arrays // ph_mu1: pointer to beginning element of array of doubles // outputs: // ph_mu1: values overwritten with model flux for uniform limb darkening law // assumptions: // z array has at least num_data elements // ph_mu1 array has at least num_data*num_model elements // other arrays have at least num_param elements __host__ void occult_uniform_wrapper_c(const double *ph_z, const double *ph_u1, const double *ph_u2, const double *ph_p, const int num_data, const int num_model, double *ph_mu1) { int gpuid = ebf::init_cuda(); // put vectors in thrust format from raw points int num = num_data*num_model; thrust::host_vector<double> h_z(ph_z,ph_z+num); thrust::host_vector<double> h_u1(ph_u1,ph_u1+num_model); thrust::host_vector<double> h_u2(ph_u2,ph_u2+num_model); thrust::host_vector<double> h_p(ph_p,ph_p+num_model); thrust::counting_iterator<int> index_begin(0); thrust::counting_iterator<int> index_end(num); if(gpuid>=0) { // allocate mem on GPU thrust::device_vector<double> d_z(num); thrust::device_vector<double> d_u1(num_model); thrust::device_vector<double> d_u2(num_model); thrust::device_vector<double> d_p(num_model); thrust::device_vector<double> d_mu1(num); // transfer input params to GPU // start_timer_upload(); d_z = h_z; d_u1 = h_u1; d_u2 = h_u2; d_p = h_p; // stop_timer_upload(); // distribute the computation to the GPU hipDeviceSynchronize(); // start_timer_kernel(); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( d_z.begin(), thrust::make_permutation_iterator(d_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( d_z.end(), thrust::make_permutation_iterator(d_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), d_mu1.begin(), 
occult_uniform_slow_functor() ); hipDeviceSynchronize(); // stop_timer_kernel(); // transfer results back to host // start_timer_download(); thrust::copy(d_mu1.begin(), d_mu1.end(), ph_mu1); // stop_timer_download(); } else { // distribute the computation to the CPU // start_timer_kernel(); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( h_z.begin(), thrust::make_permutation_iterator(h_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( h_z.end(), thrust::make_permutation_iterator(h_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), ph_mu1, occult_uniform_slow_functor() ); // stop_timer_kernel(); } } #endif
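All three wrappers above broadcast per-model parameters (length num_model) over per-datum work items (length num_data*num_model) by composing a permutation_iterator with a transform_iterator over a counting_iterator and the inverse_stride_functor from exofast_cuda_util.cuh. That functor's definition is not shown here; the sketch below assumes it maps a flat index i to the model index i / num_data, which is what the surrounding layout suggests. A minimal self-contained illustration of the broadcast:

#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/copy.h>
#include <vector>
#include <cstdio>

// Assumed stand-in for inverse_stride_functor (defined in exofast_cuda_util.cuh,
// not shown above): flat index -> model index, i.e. i / num_data.
struct index_to_model {
  int num_data;
  __host__ __device__ int operator()(int i) const { return i / num_data; }
};

int main() {
  const int num_data = 4, num_model = 3, num = num_data * num_model;
  std::vector<float> hp{0.1f, 0.2f, 0.3f};           // one parameter p per model
  thrust::device_vector<float> d_p(hp);
  thrust::counting_iterator<int> idx(0);
  // Each of the num_data*num_model work items sees the p of its own model,
  // without materialising a length-num array of repeated parameters.
  auto p_broadcast = thrust::make_permutation_iterator(
      d_p.begin(), thrust::make_transform_iterator(idx, index_to_model{num_data}));
  thrust::device_vector<float> out(num);
  thrust::copy(p_broadcast, p_broadcast + num, out.begin());
  for (int i = 0; i < num; ++i) std::printf("%.1f ", (float)out[i]);
  std::printf("\n");   // 0.1 x4, 0.2 x4, 0.3 x4
  return 0;
}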
16dfd4611cd133eb5b5ffa81020a3b764172e02e.cu
#ifndef CU_OCCULTQUAD_CWRAPPER #define CU_OCCULTQUAD_CWRAPPER #include "exofast_cuda_util.cuh" #include "occultquad.cu" // occultquad_wrapper_c: // C wrapper function // inputs: // z: // u1: // u2: // p: // num_data: integer size of input z array // num_model: integer size of input model parameter arrays // ph_muo1: pointer to beginning element of array of doubles // ph_mu1: pointer to beginning element of array of doubles // outputs: // ph_muo1: values overwritten with model flux for quadratic lim darkening law // ph_mu1: values overwritten with model flux for uniform limb darkening law // assumptions: // z array has at least num_data elements // ph_muo1 and ph_mu1 arrays have at least num_data*num_model elements // other arrays have at least num_param elements __host__ void occultquad_wrapper_c(const double *ph_z, const double *ph_u1, const double *ph_u2, const double *ph_p, const int num_data, const int num_model, double *ph_muo1, double *ph_mu1) { int gpuid = ebf::init_cuda(); // put vectors in thrust format from raw points int num = num_data*num_model; thrust::host_vector<double> h_z(ph_z,ph_z+num); thrust::host_vector<double> h_u1(ph_u1,ph_u1+num_model); thrust::host_vector<double> h_u2(ph_u2,ph_u2+num_model); thrust::host_vector<double> h_p(ph_p,ph_p+num_model); thrust::counting_iterator<int> index_begin(0); thrust::counting_iterator<int> index_end(num); if(gpuid>=0) { // allocate mem on GPU thrust::device_vector<double> d_z(num); thrust::device_vector<double> d_u1(num_model); thrust::device_vector<double> d_u2(num_model); thrust::device_vector<double> d_p(num_model); thrust::device_vector<double> d_muo1(num); thrust::device_vector<double> d_mu1(num); // transfer input params to GPU // start_timer_upload(); d_z = h_z; d_u1 = h_u1; d_u2 = h_u2; d_p = h_p; // stop_timer_upload(); // distribute the computation to the GPU cudaThreadSynchronize(); // start_timer_kernel(); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( d_z.begin(), thrust::make_permutation_iterator(d_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( d_z.end(), thrust::make_permutation_iterator(d_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple(d_muo1.begin(), d_mu1.begin() )), occultquad_functor() ); cudaThreadSynchronize(); // stop_timer_kernel(); // transfer results back to host // start_timer_download(); thrust::copy(d_muo1.begin(),d_muo1.end(),ph_muo1); thrust::copy(d_mu1.begin(), d_mu1.end(), ph_mu1); // stop_timer_download(); } else { // distribute the computation to the CPU // start_timer_kernel(); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( h_z.begin(), thrust::make_permutation_iterator(h_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), 
thrust::make_permutation_iterator(h_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( h_z.end(), thrust::make_permutation_iterator(h_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple(ph_muo1, ph_mu1)), occultquad_functor() ); // stop_timer_kernel(); } } // occultquad_only_wrapper_c: // C wrapper function // inputs: // z: // u1: // u2: // p: // num_data: integer size of input z array // num_model: integer size of input model parameter arrays // ph_muo1: pointer to beginning element of array of doubles // ph_mu1: pointer to beginning element of array of doubles // outputs: // ph_muo1: values overwritten with model flux for quadratic lim darkening law // assumptions: // z array has at least num_data elements // ph_muo1 array has at least num_data*num_model elements // other arrays have at least num_param elements __host__ void occultquad_only_wrapper_c(const double *ph_z, const double *ph_u1, const double *ph_u2, const double *ph_p, const int num_data, const int num_model, double *ph_muo1) { int gpuid = ebf::init_cuda(); // put vectors in thrust format from raw points int num = num_data*num_model; thrust::host_vector<double> h_z(ph_z,ph_z+num); thrust::host_vector<double> h_u1(ph_u1,ph_u1+num_model); thrust::host_vector<double> h_u2(ph_u2,ph_u2+num_model); thrust::host_vector<double> h_p(ph_p,ph_p+num_model); thrust::counting_iterator<int> index_begin(0); thrust::counting_iterator<int> index_end(num); if(gpuid>=0) { // allocate mem on GPU thrust::device_vector<double> d_z(num); thrust::device_vector<double> d_u1(num_model); thrust::device_vector<double> d_u2(num_model); thrust::device_vector<double> d_p(num_model); thrust::device_vector<double> d_muo1(num); // transfer input params to GPU // start_timer_upload(); d_z = h_z; d_u1 = h_u1; d_u2 = h_u2; d_p = h_p; // stop_timer_upload(); // distribute the computation to the GPU cudaThreadSynchronize(); // start_timer_kernel(); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( d_z.begin(), thrust::make_permutation_iterator(d_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( d_z.end(), thrust::make_permutation_iterator(d_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), d_muo1.begin(), occultquad_only_functor() ); cudaThreadSynchronize(); // stop_timer_kernel(); // transfer results back to host // start_timer_download(); thrust::copy(d_muo1.begin(),d_muo1.end(),ph_muo1); // stop_timer_download(); } else { // distribute the computation to the CPU // start_timer_kernel(); thrust::transform( 
thrust::make_zip_iterator(thrust::make_tuple( h_z.begin(), thrust::make_permutation_iterator(h_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( h_z.end(), thrust::make_permutation_iterator(h_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), ph_muo1, occultquad_only_functor() ); // stop_timer_kernel(); } } // occult_uniform_wrapper_c: // C wrapper function // inputs: // z: // u1: // u2: // p: // num_data: integer size of input z array // num_model: integer size of input model parameter arrays // ph_mu1: pointer to beginning element of array of doubles // outputs: // ph_mu1: values overwritten with model flux for uniform limb darkening law // assumptions: // z array has at least num_data elements // ph_mu1 array has at least num_data*num_model elements // other arrays have at least num_param elements __host__ void occult_uniform_wrapper_c(const double *ph_z, const double *ph_u1, const double *ph_u2, const double *ph_p, const int num_data, const int num_model, double *ph_mu1) { int gpuid = ebf::init_cuda(); // put vectors in thrust format from raw points int num = num_data*num_model; thrust::host_vector<double> h_z(ph_z,ph_z+num); thrust::host_vector<double> h_u1(ph_u1,ph_u1+num_model); thrust::host_vector<double> h_u2(ph_u2,ph_u2+num_model); thrust::host_vector<double> h_p(ph_p,ph_p+num_model); thrust::counting_iterator<int> index_begin(0); thrust::counting_iterator<int> index_end(num); if(gpuid>=0) { // allocate mem on GPU thrust::device_vector<double> d_z(num); thrust::device_vector<double> d_u1(num_model); thrust::device_vector<double> d_u2(num_model); thrust::device_vector<double> d_p(num_model); thrust::device_vector<double> d_mu1(num); // transfer input params to GPU // start_timer_upload(); d_z = h_z; d_u1 = h_u1; d_u2 = h_u2; d_p = h_p; // stop_timer_upload(); // distribute the computation to the GPU cudaThreadSynchronize(); // start_timer_kernel(); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( d_z.begin(), thrust::make_permutation_iterator(d_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( d_z.end(), thrust::make_permutation_iterator(d_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(d_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), d_mu1.begin(), occult_uniform_slow_functor() ); cudaThreadSynchronize(); // stop_timer_kernel(); // transfer results back to host // 
start_timer_download(); thrust::copy(d_mu1.begin(), d_mu1.end(), ph_mu1); // stop_timer_download(); } else { // distribute the computation to the CPU // start_timer_kernel(); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( h_z.begin(), thrust::make_permutation_iterator(h_u1.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_u2.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_p.begin(),thrust::make_transform_iterator(index_begin,inverse_stride_functor(num_data))) )), thrust::make_zip_iterator(thrust::make_tuple( h_z.end(), thrust::make_permutation_iterator(h_u1.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_u2.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))), thrust::make_permutation_iterator(h_p.end(),thrust::make_transform_iterator(index_end,inverse_stride_functor(num_data))) )), ph_mu1, occult_uniform_slow_functor() ); // stop_timer_kernel(); } } #endif
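// ---------------------------------------------------------------------------
// An illustrative host-side sketch of calling the wrapper above; it is not
// part of the original source.  All sizes and parameter values are made up,
// and it assumes it is compiled together with (or linked against) this file.
// Note that the wrapper copies num_data*num_model values from ph_z, so z is
// laid out here as one contiguous block of num_data separations per model.
#include <vector>
#include <cstdio>

void example_occultquad_call()
{
  const int num_data  = 4;              // separations per model (hypothetical)
  const int num_model = 2;              // parameter sets (hypothetical)
  const int num       = num_data * num_model;

  // projected star-planet separations, one contiguous block per model
  std::vector<double> z(num);
  for (int m = 0; m < num_model; ++m)
    for (int i = 0; i < num_data; ++i)
      z[m * num_data + i] = 0.25 * i;

  // per-model quadratic limb-darkening coefficients and radius ratio (made up)
  std::vector<double> u1(num_model, 0.1), u2(num_model, 0.3), p(num_model, 0.1);

  std::vector<double> muo1(num), mu1(num);   // overwritten with model fluxes
  occultquad_wrapper_c(z.data(), u1.data(), u2.data(), p.data(),
                       num_data, num_model, muo1.data(), mu1.data());

  for (int i = 0; i < num; ++i)
    std::printf("z=%g  quad=%g  uniform=%g\n", z[i], muo1[i], mu1[i]);
}
// ---------------------------------------------------------------------------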
0ba5fe48b23f1f3d6bc1716120176e21cc9167aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <chrono> #include <ctime> #include "kernels.h" /** @brief: lookup_scale_pos_dropout forward of embedding layer in fairseq, including lookup table, scale, add position embedding and dropout. @thread gridDim.x = batch_size gridDim.y = threads_per_seq blockDim.x = tokens_per_thread blockDim.y = min(embedding_dim, MAX_THREADS) @param input: [batch_size, seq_len] output: [batch_size, seq_len, embedding_dim] embeddings: [vocab_size, embedding_dim] pos_embeddings: [max_seq_len, embedding_dim] dropout_mask: [batch_size, seq_len, embedding_dim] batch_size: the size of the current batch seq_len: the sequence length of the current batch embedding_dim: dim of the embeddings padding_idx: padding index of the sentences (default: 2) step: only used to calculate correct position in inference (default: 0 in training and valid) */ template <typename T> __global__ void lookup_scale_pos_dropout( T *output, const int *input, const T *embeddings, const T *pos_embeddings, uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale, int step, int seed); template <> __global__ void lookup_scale_pos_dropout<float>( float *output, const int *input, const float *embeddings, const float *pos_embeddings, uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale, int step, int seed) { int batch_id = blockIdx.x; int seq_id = blockIdx.y * blockDim.x + threadIdx.x; if (seq_id >= seq_len) return; int target_pos = batch_id * seq_len + seq_id; int start = target_pos * embedding_dim + threadIdx.y; int end = (target_pos + 1) * embedding_dim; int tid = input[target_pos]; float4 *output4 = reinterpret_cast<float4 *>(output); const float4 *embeddings4 = reinterpret_cast<const float4 *>(embeddings); const float4 *pos_embeddings4 = reinterpret_cast<const float4 *>(pos_embeddings); uint32_t *dropout_mask4 = reinterpret_cast<uint32_t *>(dropout_mask); // no need to calculate dropout_mask if (tid == padding_idx) { float4 zero4; zero4.x = zero4.y = zero4.z = zero4.w = 0.f; for (uint i = start; i < end; i += blockDim.y) { output4[i] = zero4; } return; } const float scale = 1.f / (1.f - dropout_ratio); hiprandStatePhilox4_32_10_t state; for (uint i = start; i < end; i += blockDim.y) { hiprand_init(seed, i, 0, &state); float4 rand4 = hiprand_uniform4(&state); uint8_t m[4]; m[0] = (uint8_t)(rand4.x > dropout_ratio); m[1] = (uint8_t)(rand4.y > dropout_ratio); m[2] = (uint8_t)(rand4.z > dropout_ratio); m[3] = (uint8_t)(rand4.w > dropout_ratio); uint32_t *m4 = reinterpret_cast<uint32_t *>(m); dropout_mask4[i] = m4[0]; int offset = i - target_pos * embedding_dim; float4 e4 = embeddings4[tid * embedding_dim + offset]; // step is non-zero only in inference float4 pe4 = pos_embeddings4[(seq_id + step) * embedding_dim + offset]; float4 res4; res4.x = (emb_scale * e4.x + pe4.x) * scale * m[0]; res4.y = (emb_scale * e4.y + pe4.y) * scale * m[1]; res4.z = (emb_scale * e4.z + pe4.z) * scale * m[2]; res4.w = (emb_scale * e4.w + pe4.w) * scale * m[3]; output4[i] = res4; } } template <> __global__ void lookup_scale_pos_dropout<__half>( __half *output, const int *input, const __half *embeddings, const __half *pos_embeddings, uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale, int step, int seed) { int batch_id = blockIdx.x; int seq_id = blockIdx.y * blockDim.x + threadIdx.x; if (seq_id >= seq_len) return; 
int target_pos = batch_id * seq_len + seq_id; int start = target_pos * embedding_dim + threadIdx.y; int end = (target_pos + 1) * embedding_dim; int tid = input[target_pos]; float4 *output4 = reinterpret_cast<float4 *>(output); const float4 *embeddings4 = reinterpret_cast<const float4 *>(embeddings); const float4 *pos_embeddings4 = reinterpret_cast<const float4 *>(pos_embeddings); uint64_t *dropout_mask8 = reinterpret_cast<uint64_t *>(dropout_mask); // no need to calculate dropout_mask if (tid == padding_idx) { float4 zero4; zero4.x = zero4.y = zero4.z = zero4.w = 0.f; for (uint i = start; i < end; i += blockDim.y) { output4[i] = zero4; } return; } const float scale = 1.f / (1.f - dropout_ratio); hiprandStatePhilox4_32_10_t state; for (uint i = start; i < end; i += blockDim.y) { hiprand_init(seed, i, 0, &state); float4 rand4 = hiprand_uniform4(&state); uint8_t m[8]; m[0] = (uint8_t)(rand4.x > dropout_ratio); m[1] = (uint8_t)(rand4.y > dropout_ratio); m[2] = (uint8_t)(rand4.z > dropout_ratio); m[3] = (uint8_t)(rand4.w > dropout_ratio); rand4 = hiprand_uniform4(&state); m[4] = (uint8_t)(rand4.x > dropout_ratio); m[5] = (uint8_t)(rand4.y > dropout_ratio); m[6] = (uint8_t)(rand4.z > dropout_ratio); m[7] = (uint8_t)(rand4.w > dropout_ratio); uint64_t *m8 = reinterpret_cast<uint64_t *>(m); dropout_mask8[i] = m8[0]; int offset = i - target_pos * embedding_dim; float4 e4 = embeddings4[tid * embedding_dim + offset]; // step is non-zero only in inference float4 pe4 = pos_embeddings4[(seq_id + step) * embedding_dim + offset]; float4 res4; __half2 *e_h2 = reinterpret_cast<__half2 *>(&e4); __half2 *pe_h2 = reinterpret_cast<__half2 *>(&pe4); __half2 *res_h2 = reinterpret_cast<__half2 *>(&res4); __half2 scale_mask_h2[4]; #pragma unroll for (uint j = 0; j < 4; ++j) { scale_mask_h2[j] = __floats2half2_rn(scale * m[j << 1], scale * m[(j << 1) | 1]); } __half2 emb_scale_h2 = __floats2half2_rn(emb_scale, emb_scale); #pragma unroll for (uint j = 0; j < 4; ++j) { res_h2[j] = __hmul2(e_h2[j], emb_scale_h2); res_h2[j] = __hadd2(res_h2[j], pe_h2[j]); res_h2[j] = __hmul2(res_h2[j], scale_mask_h2[j]); } output4[i] = res4; } } template <> void launch_lookup_scale_pos_dropout<float>( float *output, const int *input, const float *embeddings, const float *pos_embeddings, uint8_t *dropout_mask, int batch_size, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, int step, hipStream_t &stream) { float emb_scale = sqrt(embedding_dim); embedding_dim >>= 2; int tokens_per_thread = (MAX_THREADS + embedding_dim - 1) / embedding_dim; int threads_per_seq = (seq_len + tokens_per_thread - 1) / tokens_per_thread; dim3 grid_dim(batch_size, threads_per_seq); dim3 block_dim(tokens_per_thread, min(embedding_dim, MAX_THREADS)); int seed = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count(); hipLaunchKernelGGL(( lookup_scale_pos_dropout<float>), dim3(grid_dim), dim3(block_dim), 0, stream, output, input, embeddings, pos_embeddings, dropout_mask, seq_len, embedding_dim, padding_idx, dropout_ratio, emb_scale, step, seed); } template <> void launch_lookup_scale_pos_dropout<__half>( __half *output, const int *input, const __half *embeddings, const __half *pos_embeddings, uint8_t *dropout_mask, int batch_size, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, int step, hipStream_t &stream) { float emb_scale = sqrt(embedding_dim); embedding_dim >>= 3; int tokens_per_thread = (MAX_THREADS + embedding_dim - 1) / embedding_dim; int threads_per_seq 
= (seq_len + tokens_per_thread - 1) / tokens_per_thread; dim3 grid_dim(batch_size, threads_per_seq); dim3 block_dim(tokens_per_thread, min(embedding_dim, MAX_THREADS)); int seed = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count(); hipLaunchKernelGGL(( lookup_scale_pos_dropout<__half>), dim3(grid_dim), dim3(block_dim), 0, stream, output, input, embeddings, pos_embeddings, dropout_mask, seq_len, embedding_dim, padding_idx, dropout_ratio, emb_scale, step, seed); } /** @brief: d_lookup_scale_pos_dropout backward of embedding layer in fairseq. @thread gridDim.x = batch_size gridDim.y = threads_per_seq blockDim.x = tokens_per_thread blockDim.y = min(embedding_dim, MAX_THREADS) @param input: [batch_size, seq_len] grad_output: [batch_size, seq_len, embedding_dim] dropout_mask: [batch_size, seq_len, embedding_dim] batch_size: the size of the current batch seq_len: the sequence length of the current batch embedding_dim: dim of the embeddings padding_idx: padding index of the sentences (default: 2) */ template <typename T> __global__ void zero_grads(T *grad_embeddings, int total_nums) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= total_nums) return; float4 *grad_embeddings4 = reinterpret_cast<float4 *>(grad_embeddings); float4 zero4; zero4.x = zero4.y = zero4.z = zero4.w = 0.f; grad_embeddings4[idx] = zero4; } template <typename T> __global__ void d_lookup_scale_pos_dropout( T *grad_embeddings, const T *grad_output, const int *input, const uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale); template <> __global__ void d_lookup_scale_pos_dropout<float>( float *grad_embeddings, const float *grad_output, const int *input, const uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale) { int batch_id = blockIdx.x; int seq_id = blockIdx.y * blockDim.x + threadIdx.x; if (seq_id >= seq_len) return; int target_pos = batch_id * seq_len + seq_id; int start = target_pos * embedding_dim + threadIdx.y; int end = (target_pos + 1) * embedding_dim; int tid = input[target_pos]; if (tid == padding_idx) { return; } const float scale = 1.f / (1.f - dropout_ratio); const float4 *grad_output4 = reinterpret_cast<const float4 *>(grad_output); const uint32_t *dropout_mask4 = reinterpret_cast<const uint32_t *>(dropout_mask); for (uint i = start; i < end; i += blockDim.y) { float4 go4 = grad_output4[i]; uint32_t m4 = dropout_mask4[i]; uint8_t *m4_ptr = reinterpret_cast<uint8_t *>(&m4); float4 res4; res4.x = emb_scale * go4.x * m4_ptr[0] * scale; res4.y = emb_scale * go4.y * m4_ptr[1] * scale; res4.z = emb_scale * go4.z * m4_ptr[2] * scale; res4.w = emb_scale * go4.w * m4_ptr[3] * scale; int offset = i - target_pos * embedding_dim; int idx = (tid * (embedding_dim) + offset) << 2; atomicAdd(grad_embeddings + idx, res4.x); atomicAdd(grad_embeddings + idx + 1, res4.y); atomicAdd(grad_embeddings + idx + 2, res4.z); atomicAdd(grad_embeddings + idx + 3, res4.w); } } template <> __global__ void d_lookup_scale_pos_dropout<__half>( __half *grad_embeddings, const __half *grad_output, const int *input, const uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale) { int batch_id = blockIdx.x; int seq_id = blockIdx.y * blockDim.x + threadIdx.x; if (seq_id >= seq_len) return; int target_pos = batch_id * seq_len + seq_id; int start = target_pos * embedding_dim + threadIdx.y; int end = (target_pos + 
1) * embedding_dim; int tid = input[target_pos]; if (tid == padding_idx) { return; } const float scale = 1.f / (1.f - dropout_ratio); const float4 *grad_output4 = reinterpret_cast<const float4 *>(grad_output); const uint64_t *dropout_mask4 = reinterpret_cast<const uint64_t *>(dropout_mask); __half2 *grad_embeddings_h2 = reinterpret_cast<__half2 *>(grad_embeddings); for (uint i = start; i < end; i += blockDim.y) { float4 go4 = grad_output4[i]; uint64_t m4 = dropout_mask4[i]; uint8_t *m4_ptr = reinterpret_cast<uint8_t *>(&m4); float4 res4; __half2 *go_h2 = reinterpret_cast<__half2 *>(&go4); __half2 *res_h2 = reinterpret_cast<__half2 *>(&res4); __half2 scale_mask_h2[4]; #pragma unroll for (uint j = 0; j < 4; ++j) { scale_mask_h2[j] = __floats2half2_rn(scale * m4_ptr[j << 1], scale * m4_ptr[(j << 1) | 1]); } __half2 emb_scale_h2 = __floats2half2_rn(emb_scale, emb_scale); #pragma unroll for (uint j = 0; j < 4; ++j) { res_h2[j] = __hmul2(emb_scale_h2, go_h2[j]); res_h2[j] = __hmul2(scale_mask_h2[j], res_h2[j]); } int offset = i - target_pos * embedding_dim; int idx = (tid * (embedding_dim) + offset) << 2; #pragma unroll for (uint j = 0; j < 4; ++j) { atomicAdd(grad_embeddings_h2 + idx + j, res_h2[j]); } } } template <> void launch_d_lookup_scale_pos_dropout<float>( float *grad_embeddings, const float *grad_output, const int *input, const uint8_t *dropout_mask, int batch_size, int seq_len, int embedding_dim, int vocab_size, int padding_idx, float dropout_ratio, hipStream_t &stream) { float emb_scale = sqrt(embedding_dim); embedding_dim >>= 2; int total_nums = vocab_size * embedding_dim; dim3 zg_grid_dim((total_nums + MAX_THREADS - 1) / MAX_THREADS); dim3 zg_block_dim(MAX_THREADS); hipLaunchKernelGGL(( zero_grads<float>) , dim3(zg_grid_dim), dim3(zg_block_dim), 0, stream, grad_embeddings, total_nums); int tokens_per_thread = (MAX_THREADS + embedding_dim - 1) / embedding_dim; int threads_per_seq = (seq_len + tokens_per_thread - 1) / tokens_per_thread; dim3 grid_dim(batch_size, threads_per_seq); dim3 block_dim(tokens_per_thread, min(embedding_dim, MAX_THREADS)); hipLaunchKernelGGL(( d_lookup_scale_pos_dropout<float>), dim3(grid_dim), dim3(block_dim), 0, stream, grad_embeddings, grad_output, input, dropout_mask, seq_len, embedding_dim, padding_idx, dropout_ratio, emb_scale); } template <> void launch_d_lookup_scale_pos_dropout<__half>( __half *grad_embeddings, const __half *grad_output, const int *input, const uint8_t *dropout_mask, int batch_size, int seq_len, int embedding_dim, int vocab_size, int padding_idx, float dropout_ratio, hipStream_t &stream) { float emb_scale = sqrt(embedding_dim); embedding_dim >>= 3; int total_nums = vocab_size * embedding_dim; dim3 zg_grid_dim((total_nums + MAX_THREADS - 1) / MAX_THREADS); dim3 zg_block_dim(MAX_THREADS); hipLaunchKernelGGL(( zero_grads<__half>) , dim3(zg_grid_dim), dim3(zg_block_dim), 0, stream, grad_embeddings, total_nums); int tokens_per_thread = (MAX_THREADS + embedding_dim - 1) / embedding_dim; int threads_per_seq = (seq_len + tokens_per_thread - 1) / tokens_per_thread; dim3 grid_dim(batch_size, threads_per_seq); dim3 block_dim(tokens_per_thread, min(embedding_dim, MAX_THREADS)); hipLaunchKernelGGL(( d_lookup_scale_pos_dropout<__half>), dim3(grid_dim), dim3(block_dim), 0, stream, grad_embeddings, grad_output, input, dropout_mask, seq_len, embedding_dim, padding_idx, dropout_ratio, emb_scale); }
0ba5fe48b23f1f3d6bc1716120176e21cc9167aa.cu
#include <chrono> #include <ctime> #include "kernels.h" /** @brief: lookup_scale_pos_dropout forward of embedding layer in fairseq, including lookup table, scale, add position embedding and dropout. @thread gridDim.x = batch_size gridDim.y = threads_per_seq blockDim.x = tokens_per_thread blockDim.y = min(embedding_dim, MAX_THREADS) @param input: [batch_size, seq_len] output: [batch_size, seq_len, embedding_dim] embeddings: [vocab_size, embedding_dim] pos_embeddings: [max_seq_len, embedding_dim] dropout_mask: [batch_size, seq_len, embedding_dim] batch_size: the size of the current batch seq_len: the sequence length of the current batch embedding_dim: dim of the embeddings padding_idx: padding index of the sentences (default: 2) step: only used to calculate correct position in inference (default: 0 in training and valid) */ template <typename T> __global__ void lookup_scale_pos_dropout( T *output, const int *input, const T *embeddings, const T *pos_embeddings, uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale, int step, int seed); template <> __global__ void lookup_scale_pos_dropout<float>( float *output, const int *input, const float *embeddings, const float *pos_embeddings, uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale, int step, int seed) { int batch_id = blockIdx.x; int seq_id = blockIdx.y * blockDim.x + threadIdx.x; if (seq_id >= seq_len) return; int target_pos = batch_id * seq_len + seq_id; int start = target_pos * embedding_dim + threadIdx.y; int end = (target_pos + 1) * embedding_dim; int tid = input[target_pos]; float4 *output4 = reinterpret_cast<float4 *>(output); const float4 *embeddings4 = reinterpret_cast<const float4 *>(embeddings); const float4 *pos_embeddings4 = reinterpret_cast<const float4 *>(pos_embeddings); uint32_t *dropout_mask4 = reinterpret_cast<uint32_t *>(dropout_mask); // no need to calculate dropout_mask if (tid == padding_idx) { float4 zero4; zero4.x = zero4.y = zero4.z = zero4.w = 0.f; for (uint i = start; i < end; i += blockDim.y) { output4[i] = zero4; } return; } const float scale = 1.f / (1.f - dropout_ratio); curandStatePhilox4_32_10_t state; for (uint i = start; i < end; i += blockDim.y) { curand_init(seed, i, 0, &state); float4 rand4 = curand_uniform4(&state); uint8_t m[4]; m[0] = (uint8_t)(rand4.x > dropout_ratio); m[1] = (uint8_t)(rand4.y > dropout_ratio); m[2] = (uint8_t)(rand4.z > dropout_ratio); m[3] = (uint8_t)(rand4.w > dropout_ratio); uint32_t *m4 = reinterpret_cast<uint32_t *>(m); dropout_mask4[i] = m4[0]; int offset = i - target_pos * embedding_dim; float4 e4 = embeddings4[tid * embedding_dim + offset]; // step is non-zero only in inference float4 pe4 = pos_embeddings4[(seq_id + step) * embedding_dim + offset]; float4 res4; res4.x = (emb_scale * e4.x + pe4.x) * scale * m[0]; res4.y = (emb_scale * e4.y + pe4.y) * scale * m[1]; res4.z = (emb_scale * e4.z + pe4.z) * scale * m[2]; res4.w = (emb_scale * e4.w + pe4.w) * scale * m[3]; output4[i] = res4; } } template <> __global__ void lookup_scale_pos_dropout<__half>( __half *output, const int *input, const __half *embeddings, const __half *pos_embeddings, uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale, int step, int seed) { int batch_id = blockIdx.x; int seq_id = blockIdx.y * blockDim.x + threadIdx.x; if (seq_id >= seq_len) return; int target_pos = batch_id * seq_len + seq_id; int start = target_pos * embedding_dim + 
threadIdx.y; int end = (target_pos + 1) * embedding_dim; int tid = input[target_pos]; float4 *output4 = reinterpret_cast<float4 *>(output); const float4 *embeddings4 = reinterpret_cast<const float4 *>(embeddings); const float4 *pos_embeddings4 = reinterpret_cast<const float4 *>(pos_embeddings); uint64_t *dropout_mask8 = reinterpret_cast<uint64_t *>(dropout_mask); // no need to calculate dropout_mask if (tid == padding_idx) { float4 zero4; zero4.x = zero4.y = zero4.z = zero4.w = 0.f; for (uint i = start; i < end; i += blockDim.y) { output4[i] = zero4; } return; } const float scale = 1.f / (1.f - dropout_ratio); curandStatePhilox4_32_10_t state; for (uint i = start; i < end; i += blockDim.y) { curand_init(seed, i, 0, &state); float4 rand4 = curand_uniform4(&state); uint8_t m[8]; m[0] = (uint8_t)(rand4.x > dropout_ratio); m[1] = (uint8_t)(rand4.y > dropout_ratio); m[2] = (uint8_t)(rand4.z > dropout_ratio); m[3] = (uint8_t)(rand4.w > dropout_ratio); rand4 = curand_uniform4(&state); m[4] = (uint8_t)(rand4.x > dropout_ratio); m[5] = (uint8_t)(rand4.y > dropout_ratio); m[6] = (uint8_t)(rand4.z > dropout_ratio); m[7] = (uint8_t)(rand4.w > dropout_ratio); uint64_t *m8 = reinterpret_cast<uint64_t *>(m); dropout_mask8[i] = m8[0]; int offset = i - target_pos * embedding_dim; float4 e4 = embeddings4[tid * embedding_dim + offset]; // step is non-zero only in inference float4 pe4 = pos_embeddings4[(seq_id + step) * embedding_dim + offset]; float4 res4; __half2 *e_h2 = reinterpret_cast<__half2 *>(&e4); __half2 *pe_h2 = reinterpret_cast<__half2 *>(&pe4); __half2 *res_h2 = reinterpret_cast<__half2 *>(&res4); __half2 scale_mask_h2[4]; #pragma unroll for (uint j = 0; j < 4; ++j) { scale_mask_h2[j] = __floats2half2_rn(scale * m[j << 1], scale * m[(j << 1) | 1]); } __half2 emb_scale_h2 = __floats2half2_rn(emb_scale, emb_scale); #pragma unroll for (uint j = 0; j < 4; ++j) { res_h2[j] = __hmul2(e_h2[j], emb_scale_h2); res_h2[j] = __hadd2(res_h2[j], pe_h2[j]); res_h2[j] = __hmul2(res_h2[j], scale_mask_h2[j]); } output4[i] = res4; } } template <> void launch_lookup_scale_pos_dropout<float>( float *output, const int *input, const float *embeddings, const float *pos_embeddings, uint8_t *dropout_mask, int batch_size, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, int step, cudaStream_t &stream) { float emb_scale = sqrt(embedding_dim); embedding_dim >>= 2; int tokens_per_thread = (MAX_THREADS + embedding_dim - 1) / embedding_dim; int threads_per_seq = (seq_len + tokens_per_thread - 1) / tokens_per_thread; dim3 grid_dim(batch_size, threads_per_seq); dim3 block_dim(tokens_per_thread, min(embedding_dim, MAX_THREADS)); int seed = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count(); lookup_scale_pos_dropout<float><<<grid_dim, block_dim, 0, stream>>>( output, input, embeddings, pos_embeddings, dropout_mask, seq_len, embedding_dim, padding_idx, dropout_ratio, emb_scale, step, seed); } template <> void launch_lookup_scale_pos_dropout<__half>( __half *output, const int *input, const __half *embeddings, const __half *pos_embeddings, uint8_t *dropout_mask, int batch_size, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, int step, cudaStream_t &stream) { float emb_scale = sqrt(embedding_dim); embedding_dim >>= 3; int tokens_per_thread = (MAX_THREADS + embedding_dim - 1) / embedding_dim; int threads_per_seq = (seq_len + tokens_per_thread - 1) / tokens_per_thread; dim3 grid_dim(batch_size, threads_per_seq); dim3 
block_dim(tokens_per_thread, min(embedding_dim, MAX_THREADS)); int seed = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count(); lookup_scale_pos_dropout<__half><<<grid_dim, block_dim, 0, stream>>>( output, input, embeddings, pos_embeddings, dropout_mask, seq_len, embedding_dim, padding_idx, dropout_ratio, emb_scale, step, seed); } /** @brief: d_lookup_scale_pos_dropout backward of embedding layer in fairseq. @thread gridDim.x = batch_size gridDim.y = threads_per_seq blockDim.x = tokens_per_thread blockDim.y = min(embedding_dim, MAX_THREADS) @param input: [batch_size, seq_len] grad_output: [batch_size, seq_len, embedding_dim] dropout_mask: [batch_size, seq_len, embedding_dim] batch_size: the size of the current batch seq_len: the sequence length of the current batch embedding_dim: dim of the embeddings padding_idx: padding index of the sentences (default: 2) */ template <typename T> __global__ void zero_grads(T *grad_embeddings, int total_nums) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= total_nums) return; float4 *grad_embeddings4 = reinterpret_cast<float4 *>(grad_embeddings); float4 zero4; zero4.x = zero4.y = zero4.z = zero4.w = 0.f; grad_embeddings4[idx] = zero4; } template <typename T> __global__ void d_lookup_scale_pos_dropout( T *grad_embeddings, const T *grad_output, const int *input, const uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale); template <> __global__ void d_lookup_scale_pos_dropout<float>( float *grad_embeddings, const float *grad_output, const int *input, const uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale) { int batch_id = blockIdx.x; int seq_id = blockIdx.y * blockDim.x + threadIdx.x; if (seq_id >= seq_len) return; int target_pos = batch_id * seq_len + seq_id; int start = target_pos * embedding_dim + threadIdx.y; int end = (target_pos + 1) * embedding_dim; int tid = input[target_pos]; if (tid == padding_idx) { return; } const float scale = 1.f / (1.f - dropout_ratio); const float4 *grad_output4 = reinterpret_cast<const float4 *>(grad_output); const uint32_t *dropout_mask4 = reinterpret_cast<const uint32_t *>(dropout_mask); for (uint i = start; i < end; i += blockDim.y) { float4 go4 = grad_output4[i]; uint32_t m4 = dropout_mask4[i]; uint8_t *m4_ptr = reinterpret_cast<uint8_t *>(&m4); float4 res4; res4.x = emb_scale * go4.x * m4_ptr[0] * scale; res4.y = emb_scale * go4.y * m4_ptr[1] * scale; res4.z = emb_scale * go4.z * m4_ptr[2] * scale; res4.w = emb_scale * go4.w * m4_ptr[3] * scale; int offset = i - target_pos * embedding_dim; int idx = (tid * (embedding_dim) + offset) << 2; atomicAdd(grad_embeddings + idx, res4.x); atomicAdd(grad_embeddings + idx + 1, res4.y); atomicAdd(grad_embeddings + idx + 2, res4.z); atomicAdd(grad_embeddings + idx + 3, res4.w); } } template <> __global__ void d_lookup_scale_pos_dropout<__half>( __half *grad_embeddings, const __half *grad_output, const int *input, const uint8_t *dropout_mask, int seq_len, int embedding_dim, int padding_idx, float dropout_ratio, float emb_scale) { int batch_id = blockIdx.x; int seq_id = blockIdx.y * blockDim.x + threadIdx.x; if (seq_id >= seq_len) return; int target_pos = batch_id * seq_len + seq_id; int start = target_pos * embedding_dim + threadIdx.y; int end = (target_pos + 1) * embedding_dim; int tid = input[target_pos]; if (tid == padding_idx) { return; } const float scale = 1.f / (1.f - dropout_ratio); 
const float4 *grad_output4 = reinterpret_cast<const float4 *>(grad_output); const uint64_t *dropout_mask4 = reinterpret_cast<const uint64_t *>(dropout_mask); __half2 *grad_embeddings_h2 = reinterpret_cast<__half2 *>(grad_embeddings); for (uint i = start; i < end; i += blockDim.y) { float4 go4 = grad_output4[i]; uint64_t m4 = dropout_mask4[i]; uint8_t *m4_ptr = reinterpret_cast<uint8_t *>(&m4); float4 res4; __half2 *go_h2 = reinterpret_cast<__half2 *>(&go4); __half2 *res_h2 = reinterpret_cast<__half2 *>(&res4); __half2 scale_mask_h2[4]; #pragma unroll for (uint j = 0; j < 4; ++j) { scale_mask_h2[j] = __floats2half2_rn(scale * m4_ptr[j << 1], scale * m4_ptr[(j << 1) | 1]); } __half2 emb_scale_h2 = __floats2half2_rn(emb_scale, emb_scale); #pragma unroll for (uint j = 0; j < 4; ++j) { res_h2[j] = __hmul2(emb_scale_h2, go_h2[j]); res_h2[j] = __hmul2(scale_mask_h2[j], res_h2[j]); } int offset = i - target_pos * embedding_dim; int idx = (tid * (embedding_dim) + offset) << 2; #pragma unroll for (uint j = 0; j < 4; ++j) { atomicAdd(grad_embeddings_h2 + idx + j, res_h2[j]); } } } template <> void launch_d_lookup_scale_pos_dropout<float>( float *grad_embeddings, const float *grad_output, const int *input, const uint8_t *dropout_mask, int batch_size, int seq_len, int embedding_dim, int vocab_size, int padding_idx, float dropout_ratio, cudaStream_t &stream) { float emb_scale = sqrt(embedding_dim); embedding_dim >>= 2; int total_nums = vocab_size * embedding_dim; dim3 zg_grid_dim((total_nums + MAX_THREADS - 1) / MAX_THREADS); dim3 zg_block_dim(MAX_THREADS); zero_grads<float> <<<zg_grid_dim, zg_block_dim, 0, stream>>>(grad_embeddings, total_nums); int tokens_per_thread = (MAX_THREADS + embedding_dim - 1) / embedding_dim; int threads_per_seq = (seq_len + tokens_per_thread - 1) / tokens_per_thread; dim3 grid_dim(batch_size, threads_per_seq); dim3 block_dim(tokens_per_thread, min(embedding_dim, MAX_THREADS)); d_lookup_scale_pos_dropout<float><<<grid_dim, block_dim, 0, stream>>>( grad_embeddings, grad_output, input, dropout_mask, seq_len, embedding_dim, padding_idx, dropout_ratio, emb_scale); } template <> void launch_d_lookup_scale_pos_dropout<__half>( __half *grad_embeddings, const __half *grad_output, const int *input, const uint8_t *dropout_mask, int batch_size, int seq_len, int embedding_dim, int vocab_size, int padding_idx, float dropout_ratio, cudaStream_t &stream) { float emb_scale = sqrt(embedding_dim); embedding_dim >>= 3; int total_nums = vocab_size * embedding_dim; dim3 zg_grid_dim((total_nums + MAX_THREADS - 1) / MAX_THREADS); dim3 zg_block_dim(MAX_THREADS); zero_grads<__half> <<<zg_grid_dim, zg_block_dim, 0, stream>>>(grad_embeddings, total_nums); int tokens_per_thread = (MAX_THREADS + embedding_dim - 1) / embedding_dim; int threads_per_seq = (seq_len + tokens_per_thread - 1) / tokens_per_thread; dim3 grid_dim(batch_size, threads_per_seq); dim3 block_dim(tokens_per_thread, min(embedding_dim, MAX_THREADS)); d_lookup_scale_pos_dropout<__half><<<grid_dim, block_dim, 0, stream>>>( grad_embeddings, grad_output, input, dropout_mask, seq_len, embedding_dim, padding_idx, dropout_ratio, emb_scale); }
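// ---------------------------------------------------------------------------
// An illustrative, self-contained sketch that only reproduces the
// launch-geometry arithmetic used by launch_lookup_scale_pos_dropout above
// for one made-up configuration; it is not part of the original kernels.
// MAX_THREADS comes from kernels.h in the real code, so the value 1024 below
// is an assumed stand-in (named kMaxThreads to avoid clashing with the macro).
#include <algorithm>
#include <cstdio>

int main()
{
  const int kMaxThreads = 1024;          // assumed stand-in for MAX_THREADS
  const int batch_size  = 8;
  const int seq_len     = 256;
  int embedding_dim     = 1024;          // hypothetical model dimension

  // float path: the kernel reads float4, i.e. 4 embedding elements per access
  // (the __half specialization shifts by 3 instead, 8 elements per access)
  embedding_dim >>= 2;                                                        // 256
  int tokens_per_thread = (kMaxThreads + embedding_dim - 1) / embedding_dim;  // 4
  int threads_per_seq   = (seq_len + tokens_per_thread - 1) / tokens_per_thread;  // 64

  // grid = (batch_size, threads_per_seq), block = (tokens_per_thread, min(dim, kMaxThreads))
  std::printf("grid  = (%d, %d)\n", batch_size, threads_per_seq);
  std::printf("block = (%d, %d) -> %d threads per block\n",
              tokens_per_thread, std::min(embedding_dim, kMaxThreads),
              tokens_per_thread * std::min(embedding_dim, kMaxThreads));
  return 0;
}
// ---------------------------------------------------------------------------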
2698a1746091132e5480074c057bf8f4cfecef08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file multibox_detection.cu * \brief MultiBoxDetection op * \author Joshua Zhang */ #include "./multibox_detection-inl.h" #include <mshadow/cuda/tensor_gpu-inl.cuh> #define MULTIBOX_DETECTION_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { template<typename DType> __device__ void Clip(DType *value, const DType lower, const DType upper) { if ((*value) < lower) *value = lower; if ((*value) > upper) *value = upper; } template<typename DType> __device__ void CalculateOverlap(const DType *a, const DType *b, DType *iou) { DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0])); DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1])); DType i = w * h; DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i; (*iou) = u <= 0.f ? static_cast<DType>(0) : static_cast<DType>(i / u); } template<typename DType> __global__ __launch_bounds__(cuda::kMaxThreadsPerBlock) void DetectionForwardKernel(DType *out, const DType *cls_prob, const DType *loc_pred, const DType *anchors, DType *temp_space, const int num_classes, const int num_anchors, const float threshold, const bool clip, const float vx, const float vy, const float vw, const float vh, const float nms_threshold, const bool force_suppress, const int nms_topk) { const int nbatch = blockIdx.x; // each block for each batch int index = threadIdx.x; __shared__ int valid_count; out += nbatch * num_anchors * 6; cls_prob += nbatch * num_anchors * num_classes; loc_pred += nbatch * num_anchors * 4; if (index == 0) { valid_count = 0; } __syncthreads(); // apply prediction to anchors for (int i = index; i < num_anchors; i += blockDim.x) { DType score = -1; int id = 0; for (int j = 1; j < num_classes; ++j) { DType temp = cls_prob[j * num_anchors + i]; if (temp > score) { score = temp; id = j; } } if (id > 0 && score < threshold) { id = 0; } if (id > 0) { // valid class int pos = atomicAdd(&valid_count, 1); out[pos * 6] = id - 1; // restore original class id out[pos * 6 + 1] = (id == 0 ? 
DType(-1) : score); int offset = i * 4; DType al = anchors[offset]; DType at = anchors[offset + 1]; DType ar = anchors[offset + 2]; DType ab = anchors[offset + 3]; DType aw = ar - al; DType ah = ab - at; DType ax = (al + ar) / 2.f; DType ay = (at + ab) / 2.f; DType ox = loc_pred[offset] * vx * aw + ax; DType oy = loc_pred[offset + 1] * vy * ah + ay; DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2; DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2; DType xmin = ox - ow; DType ymin = oy - oh; DType xmax = ox + ow; DType ymax = oy + oh; if (clip) { Clip(&xmin, DType(0), DType(1)); Clip(&ymin, DType(0), DType(1)); Clip(&xmax, DType(0), DType(1)); Clip(&ymax, DType(0), DType(1)); } out[pos * 6 + 2] = xmin; out[pos * 6 + 3] = ymin; out[pos * 6 + 4] = xmax; out[pos * 6 + 5] = ymax; } } __syncthreads(); if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return; // if (index == 0) printf("%d\n", valid_count); // descent sort according to scores const int size = valid_count; temp_space += nbatch * num_anchors * 6; DType *src = out; DType *dst = temp_space; for (int width = 2; width < (size << 1); width <<= 1) { int slices = (size - 1) / (blockDim.x * width) + 1; int start = width * index * slices; for (int slice = 0; slice < slices; ++slice) { if (start >= size) break; int middle = start + (width >> 1); if (middle > size) middle = size; int end = start + width; if (end > size) end = size; int i = start; int j = middle; for (int k = start; k < end; ++k) { DType score_i = i < size ? src[i * 6 + 1] : DType(-1); DType score_j = j < size ? src[j * 6 + 1] : DType(-1); if (i < middle && (j >= end || score_i > score_j)) { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[i * 6 + n]; } ++i; } else { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[j * 6 + n]; } ++j; } } start += width; } __syncthreads(); src = src == out? temp_space : out; dst = dst == out? 
temp_space : out; } __syncthreads(); if (src == temp_space) { // copy from temp to out for (int i = index; i < size * 6; i += blockDim.x) { out[i] = temp_space[i]; } __syncthreads(); } // keep top k detections int ntop = size; if (nms_topk > 0 && nms_topk < ntop) { ntop = nms_topk; for (int i = ntop + index; i < size; i += blockDim.x) { out[i * 6] = -1; } __syncthreads(); } // apply NMS for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) { DType compare_id = out[compare_pos * 6]; if (compare_id < 0) continue; // not a valid positive detection, skip DType *compare_loc_ptr = out + compare_pos * 6 + 2; for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) { DType class_id = out[i * 6]; if (class_id < 0) continue; if (force_suppress || (class_id == compare_id)) { DType iou; CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou); if (iou >= nms_threshold) { out[i * 6] = -1; } } } __syncthreads(); } } } // namespace cuda template<typename DType> inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType> &out, const Tensor<gpu, 3, DType> &cls_prob, const Tensor<gpu, 2, DType> &loc_pred, const Tensor<gpu, 2, DType> &anchors, const Tensor<gpu, 3, DType> &temp_space, const float threshold, const bool clip, const nnvm::Tuple<float> &variances, const float nms_threshold, const bool force_suppress, const int nms_topk) { CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4"; const int num_classes = cls_prob.size(1); const int num_anchors = cls_prob.size(2); const int num_batches = cls_prob.size(0); const int num_threads = cuda::kMaxThreadsPerBlock; int num_blocks = num_batches; cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward"); hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipLaunchKernelGGL(( cuda::DetectionForwardKernel), dim3(num_blocks), dim3(num_threads), 0, stream, out.dptr_, cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_, num_classes, num_anchors, threshold, clip, variances[0], variances[1], variances[2], variances[3], nms_threshold, force_suppress, nms_topk); MULTIBOX_DETECTION_CUDA_CHECK(hipPeekAtLastError()); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new MultiBoxDetectionOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
2698a1746091132e5480074c057bf8f4cfecef08.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file multibox_detection.cu * \brief MultiBoxDetection op * \author Joshua Zhang */ #include "./multibox_detection-inl.h" #include <mshadow/cuda/tensor_gpu-inl.cuh> #define MULTIBOX_DETECTION_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { template<typename DType> __device__ void Clip(DType *value, const DType lower, const DType upper) { if ((*value) < lower) *value = lower; if ((*value) > upper) *value = upper; } template<typename DType> __device__ void CalculateOverlap(const DType *a, const DType *b, DType *iou) { DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0])); DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1])); DType i = w * h; DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i; (*iou) = u <= 0.f ? static_cast<DType>(0) : static_cast<DType>(i / u); } template<typename DType> __global__ __launch_bounds__(cuda::kMaxThreadsPerBlock) void DetectionForwardKernel(DType *out, const DType *cls_prob, const DType *loc_pred, const DType *anchors, DType *temp_space, const int num_classes, const int num_anchors, const float threshold, const bool clip, const float vx, const float vy, const float vw, const float vh, const float nms_threshold, const bool force_suppress, const int nms_topk) { const int nbatch = blockIdx.x; // each block for each batch int index = threadIdx.x; __shared__ int valid_count; out += nbatch * num_anchors * 6; cls_prob += nbatch * num_anchors * num_classes; loc_pred += nbatch * num_anchors * 4; if (index == 0) { valid_count = 0; } __syncthreads(); // apply prediction to anchors for (int i = index; i < num_anchors; i += blockDim.x) { DType score = -1; int id = 0; for (int j = 1; j < num_classes; ++j) { DType temp = cls_prob[j * num_anchors + i]; if (temp > score) { score = temp; id = j; } } if (id > 0 && score < threshold) { id = 0; } if (id > 0) { // valid class int pos = atomicAdd(&valid_count, 1); out[pos * 6] = id - 1; // restore original class id out[pos * 6 + 1] = (id == 0 ? 
DType(-1) : score); int offset = i * 4; DType al = anchors[offset]; DType at = anchors[offset + 1]; DType ar = anchors[offset + 2]; DType ab = anchors[offset + 3]; DType aw = ar - al; DType ah = ab - at; DType ax = (al + ar) / 2.f; DType ay = (at + ab) / 2.f; DType ox = loc_pred[offset] * vx * aw + ax; DType oy = loc_pred[offset + 1] * vy * ah + ay; DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2; DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2; DType xmin = ox - ow; DType ymin = oy - oh; DType xmax = ox + ow; DType ymax = oy + oh; if (clip) { Clip(&xmin, DType(0), DType(1)); Clip(&ymin, DType(0), DType(1)); Clip(&xmax, DType(0), DType(1)); Clip(&ymax, DType(0), DType(1)); } out[pos * 6 + 2] = xmin; out[pos * 6 + 3] = ymin; out[pos * 6 + 4] = xmax; out[pos * 6 + 5] = ymax; } } __syncthreads(); if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return; // if (index == 0) printf("%d\n", valid_count); // descent sort according to scores const int size = valid_count; temp_space += nbatch * num_anchors * 6; DType *src = out; DType *dst = temp_space; for (int width = 2; width < (size << 1); width <<= 1) { int slices = (size - 1) / (blockDim.x * width) + 1; int start = width * index * slices; for (int slice = 0; slice < slices; ++slice) { if (start >= size) break; int middle = start + (width >> 1); if (middle > size) middle = size; int end = start + width; if (end > size) end = size; int i = start; int j = middle; for (int k = start; k < end; ++k) { DType score_i = i < size ? src[i * 6 + 1] : DType(-1); DType score_j = j < size ? src[j * 6 + 1] : DType(-1); if (i < middle && (j >= end || score_i > score_j)) { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[i * 6 + n]; } ++i; } else { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[j * 6 + n]; } ++j; } } start += width; } __syncthreads(); src = src == out? temp_space : out; dst = dst == out? 
temp_space : out; } __syncthreads(); if (src == temp_space) { // copy from temp to out for (int i = index; i < size * 6; i += blockDim.x) { out[i] = temp_space[i]; } __syncthreads(); } // keep top k detections int ntop = size; if (nms_topk > 0 && nms_topk < ntop) { ntop = nms_topk; for (int i = ntop + index; i < size; i += blockDim.x) { out[i * 6] = -1; } __syncthreads(); } // apply NMS for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) { DType compare_id = out[compare_pos * 6]; if (compare_id < 0) continue; // not a valid positive detection, skip DType *compare_loc_ptr = out + compare_pos * 6 + 2; for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) { DType class_id = out[i * 6]; if (class_id < 0) continue; if (force_suppress || (class_id == compare_id)) { DType iou; CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou); if (iou >= nms_threshold) { out[i * 6] = -1; } } } __syncthreads(); } } } // namespace cuda template<typename DType> inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType> &out, const Tensor<gpu, 3, DType> &cls_prob, const Tensor<gpu, 2, DType> &loc_pred, const Tensor<gpu, 2, DType> &anchors, const Tensor<gpu, 3, DType> &temp_space, const float threshold, const bool clip, const nnvm::Tuple<float> &variances, const float nms_threshold, const bool force_suppress, const int nms_topk) { CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4"; const int num_classes = cls_prob.size(1); const int num_anchors = cls_prob.size(2); const int num_batches = cls_prob.size(0); const int num_threads = cuda::kMaxThreadsPerBlock; int num_blocks = num_batches; cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward"); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); cuda::DetectionForwardKernel<<<num_blocks, num_threads, 0, stream>>>(out.dptr_, cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_, num_classes, num_anchors, threshold, clip, variances[0], variances[1], variances[2], variances[3], nms_threshold, force_suppress, nms_topk); MULTIBOX_DETECTION_CUDA_CHECK(cudaPeekAtLastError()); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new MultiBoxDetectionOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
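// ---------------------------------------------------------------------------
// A minimal CPU sketch (for illustration only, not part of the operator) of
// the anchor decoding performed inside DetectionForwardKernel above: anchor
// corners plus variance-scaled regression outputs are converted back to a
// clipped (xmin, ymin, xmax, ymax) box.  The anchor, prediction and variance
// values below are made up.
#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
  // anchor corners and one hypothetical location prediction
  const float al = 0.20f, at = 0.30f, ar = 0.60f, ab = 0.70f;
  const float loc[4] = {0.10f, -0.20f, 0.05f, 0.10f};
  const float vx = 0.1f, vy = 0.1f, vw = 0.2f, vh = 0.2f;   // example variances

  const float aw = ar - al, ah = ab - at;                   // anchor width / height
  const float ax = (al + ar) / 2.f, ay = (at + ab) / 2.f;   // anchor center
  const float ox = loc[0] * vx * aw + ax;                   // decoded center x
  const float oy = loc[1] * vy * ah + ay;                   // decoded center y
  const float ow = std::exp(loc[2] * vw) * aw / 2.f;        // decoded half-width
  const float oh = std::exp(loc[3] * vh) * ah / 2.f;        // decoded half-height

  auto clip = [](float v) { return std::min(1.f, std::max(0.f, v)); };  // same role as Clip(&v, 0, 1)
  std::printf("decoded box = (%.4f, %.4f, %.4f, %.4f)\n",
              clip(ox - ow), clip(oy - oh), clip(ox + ow), clip(oy + oh));
  return 0;
}
// ---------------------------------------------------------------------------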
8ba0028593368d4e17af4b6005832b717fe70346.hip
// !!! This is a file automatically generated by hipify!!! #include "../../../header/enum.h" #include "../../../header/struct.h" #include "../../header/bridge.h" #include "../../header/cuda_common.h" #include "../../header/cuda_functions.h" #include "../../header/cuda_texture.h" #include "../../header/next_event_estimation_kernel.h" #include <assert.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <float.h> #include <stdio.h> __global__ void nee_global_memory_kernel( rtxFaceVertexIndex* global_serialized_face_vertex_indices_array, rtxVertex* global_serialized_vertex_array, rtxObject* global_serialized_object_array, rtxMaterialAttributeByte* global_serialized_material_attribute_byte_array, rtxThreadedBVH* global_serialized_threaded_bvh_array, rtxThreadedBVHNode* global_serialized_threaded_bvh_node_array, rtxRGBAColor* global_serialized_color_mapping_array, rtxUVCoordinate* global_serialized_uv_coordinate_array, hipTextureObject_t* global_serialized_mapping_texture_object_array, int* global_light_sampling_table, rtxRGBAPixel* global_serialized_render_array, rtxNEEKernelArguments args) { extern __shared__ char shared_memory[]; hiprandStatePhilox4_32_10_t curand_state; hiprand_init(args.curand_seed, blockIdx.x * blockDim.x + threadIdx.x, 0, &curand_state); __xorshift_init(args.curand_seed); // int offset = 0; rtxObject* shared_serialized_object_array = (rtxObject*)&shared_memory[offset]; offset += sizeof(rtxObject) * args.object_array_size; rtxMaterialAttributeByte* shared_serialized_material_attribute_byte_array = (rtxMaterialAttributeByte*)&shared_memory[offset]; offset += sizeof(rtxMaterialAttributeByte) * args.material_attribute_byte_array_size; rtxThreadedBVH* shared_serialized_threaded_bvh_array = (rtxThreadedBVH*)&shared_memory[offset]; offset += sizeof(rtxThreadedBVH) * args.threaded_bvh_array_size; rtxRGBAColor* shared_serialized_color_mapping_array = (rtxRGBAColor*)&shared_memory[offset]; offset += sizeof(rtxRGBAColor) * args.color_mapping_array_size; // cudaTextureObject_t8 offset += sizeof(hipTextureObject_t) - offset % sizeof(hipTextureObject_t); hipTextureObject_t* shared_serialized_texture_object_array = (hipTextureObject_t*)&shared_memory[offset]; offset += sizeof(hipTextureObject_t) * args.num_active_texture_units; int* shared_light_sampling_table = (int*)&shared_memory[offset]; offset += sizeof(int) * args.light_sampling_table_size; // 1 if (threadIdx.x == 0) { for (int m = 0; m < args.object_array_size; m++) { shared_serialized_object_array[m] = global_serialized_object_array[m]; } for (int m = 0; m < args.material_attribute_byte_array_size; m++) { shared_serialized_material_attribute_byte_array[m] = global_serialized_material_attribute_byte_array[m]; } for (int m = 0; m < args.threaded_bvh_array_size; m++) { shared_serialized_threaded_bvh_array[m] = global_serialized_threaded_bvh_array[m]; } for (int m = 0; m < args.color_mapping_array_size; m++) { shared_serialized_color_mapping_array[m] = global_serialized_color_mapping_array[m]; } for (int m = 0; m < args.num_active_texture_units; m++) { shared_serialized_texture_object_array[m] = global_serialized_mapping_texture_object_array[m]; } for (int m = 0; m < args.light_sampling_table_size; m++) { shared_light_sampling_table[m] = global_light_sampling_table[m]; } } __syncthreads(); int ray_index_offset = (blockIdx.x * blockDim.x + threadIdx.x) * args.num_rays_per_thread; int num_generated_rays_per_pixel = args.num_rays_per_thread * int(ceilf(float(args.num_rays_per_pixel) / 
float(args.num_rays_per_thread))); int target_pixel_index = ray_index_offset / num_generated_rays_per_pixel; if (target_pixel_index >= args.screen_width * args.screen_height) { return; } int target_pixel_x = target_pixel_index % args.screen_width; int target_pixel_y = target_pixel_index / args.screen_width; float aspect_ratio = float(args.screen_width) / float(args.screen_height); int render_buffer_index = ray_index_offset / args.num_rays_per_thread; // rtxRGBAPixel pixel = { 0.0f, 0.0f, 0.0f, 0.0f }; for (int n = 0; n < args.num_rays_per_thread; n++) { int ray_index = ray_index_offset + n; int ray_index_in_pixel = ray_index % num_generated_rays_per_pixel; if (ray_index_in_pixel >= args.num_rays_per_pixel) { return; } rtxCUDARay* ray; rtxCUDARay shadow_ray; rtxCUDARay primary_ray; float3 hit_point; float3 unit_hit_face_normal; rtxVertex hit_va; rtxVertex hit_vb; rtxVertex hit_vc; rtxFaceVertexIndex hit_face; rtxObject hit_object; rtxRGBAColor hit_object_color; float g_term; float shadow_ray_brdf; // __rtx_generate_ray(primary_ray, args, aspect_ratio); float3 ray_direction_inv; ray = &primary_ray; // rtxRGBAColor path_weight = { 1.0f, 1.0f, 1.0f }; rtxRGBAColor next_path_weight; // 2 int total_rays = args.max_bounce * 2; for (int iter = 0; iter < total_rays; iter++) { bool is_shadow_ray = (iter & 1) == 1; // iter % 2 float min_distance = FLT_MAX; bool did_hit_object = false; ray_direction_inv.x = 1.0f / ray->direction.x; ray_direction_inv.y = 1.0f / ray->direction.y; ray_direction_inv.z = 1.0f / ray->direction.z; // for (int object_index = 0; object_index < args.object_array_size; object_index++) { rtxObject object = shared_serialized_object_array[object_index]; // Threaded BVH rtxThreadedBVH bvh = shared_serialized_threaded_bvh_array[object_index]; // BVH int bvh_current_node_index = 0; for (int traversal = 0; traversal < bvh.num_nodes; traversal++) { if (bvh_current_node_index == THREADED_BVH_TERMINAL_NODE) { // break; } // BVH // rtxCUDAThreadedBVHNode48float416x3 // int4reinterpret_castint4 int serialized_node_index = bvh.serial_node_index_offset + bvh_current_node_index; rtxThreadedBVHNode node = global_serialized_threaded_bvh_node_array[serialized_node_index]; bool is_inner_node = node.assigned_face_index_start == -1; if (is_inner_node) { // AABB // // An Efficient and Robust RayBox Intersection Algorithm // http://www.cs.utah.edu/~awilliam/box/box.pdf __rtx_bvh_traversal_one_step_or_continue((*ray), node, ray_direction_inv, bvh_current_node_index); } else { // // // // Fast Minimum Storage Ray/Triangle Intersectio // https://cadxfem.org/inf/Fast%20MinimumStorage%20RayTriangle%20Intersection.pdf int num_assigned_faces = node.assigned_face_index_end - node.assigned_face_index_start + 1; if (object.geometry_type == RTXGeometryTypeStandard) { for (int m = 0; m < num_assigned_faces; m++) { const int serialized_face_index = node.assigned_face_index_start + m + object.serialized_face_index_offset; const rtxFaceVertexIndex face = global_serialized_face_vertex_indices_array[serialized_face_index]; const rtxVertex va = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex vb = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex vc = global_serialized_vertex_array[face.c + object.serialized_vertex_index_offset]; float3 face_normal; float distance; __rtx_intersect_triangle_or_continue((*ray), va, vb, vc, face_normal, distance, min_distance); min_distance = distance; hit_point.x = ray->origin.x + distance * 
ray->direction.x; hit_point.y = ray->origin.y + distance * ray->direction.y; hit_point.z = ray->origin.z + distance * ray->direction.z; unit_hit_face_normal.x = face_normal.x; unit_hit_face_normal.y = face_normal.y; unit_hit_face_normal.z = face_normal.z; hit_va = va; hit_vb = vb; hit_vc = vc; hit_face.a = face.a; hit_face.b = face.b; hit_face.c = face.c; did_hit_object = true; hit_object = object; } } else if (object.geometry_type == RTXGeometryTypeSphere) { int serialized_array_index = node.assigned_face_index_start + object.serialized_face_index_offset; const rtxFaceVertexIndex face = global_serialized_face_vertex_indices_array[serialized_array_index]; const rtxVertex center = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex radius = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; float distance; __rtx_intersect_sphere_or_continue((*ray), center, radius, distance, min_distance); min_distance = distance; hit_point.x = ray->origin.x + distance * ray->direction.x; hit_point.y = ray->origin.y + distance * ray->direction.y; hit_point.z = ray->origin.z + distance * ray->direction.z; const float3 normal = { hit_point.x - center.x, hit_point.y - center.y, hit_point.z - center.z, }; const float norm = sqrtf(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z); unit_hit_face_normal.x = normal.x / norm; unit_hit_face_normal.y = normal.y / norm; unit_hit_face_normal.z = normal.z / norm; did_hit_object = true; hit_object = object; } else if (object.geometry_type == RTXGeometryTypeCylinder) { rtxFaceVertexIndex face; int offset = node.assigned_face_index_start + object.serialized_face_index_offset; // Load cylinder parameters face = global_serialized_face_vertex_indices_array[offset + 0]; const rtxVertex params = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const float radius = params.x; const float y_max = params.y; const float y_min = params.z; // Load transformation matrix face = global_serialized_face_vertex_indices_array[offset + 1]; const rtxVertex trans_a = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex trans_b = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex trans_c = global_serialized_vertex_array[face.c + object.serialized_vertex_index_offset]; // Load inverse transformation matrix face = global_serialized_face_vertex_indices_array[offset + 2]; const rtxVertex inv_trans_a = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex inv_trans_b = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex inv_trans_c = global_serialized_vertex_array[face.c + object.serialized_vertex_index_offset]; float distance; __rtx_intersect_cylinder_or_continue( (*ray), trans_a, trans_b, trans_c, inv_trans_a, inv_trans_b, inv_trans_c, unit_hit_face_normal, distance, min_distance); min_distance = distance; // hit point in view space hit_point.x = ray->origin.x + distance * ray->direction.x; hit_point.y = ray->origin.y + distance * ray->direction.y; hit_point.z = ray->origin.z + distance * ray->direction.z; did_hit_object = true; hit_object = object; } else if (object.geometry_type == RTXGeometryTypeCone) { rtxFaceVertexIndex face; int offset = node.assigned_face_index_start + object.serialized_face_index_offset; // Load cylinder parameters face = global_serialized_face_vertex_indices_array[offset + 0]; const 
rtxVertex params = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const float radius = params.x; const float height = params.y; // Load transformation matrix face = global_serialized_face_vertex_indices_array[offset + 1]; const rtxVertex trans_a = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex trans_b = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex trans_c = global_serialized_vertex_array[face.c + object.serialized_vertex_index_offset]; // Load inverse transformation matrix face = global_serialized_face_vertex_indices_array[offset + 2]; const rtxVertex inv_trans_a = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex inv_trans_b = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex inv_trans_c = global_serialized_vertex_array[face.c + object.serialized_vertex_index_offset]; float distance; __rtx_intersect_cone_or_continue( (*ray), trans_a, trans_b, trans_c, inv_trans_a, inv_trans_b, inv_trans_c, unit_hit_face_normal, distance, min_distance); min_distance = distance; // hit point in view space hit_point.x = ray->origin.x + distance * ray->direction.x; hit_point.y = ray->origin.y + distance * ray->direction.y; hit_point.z = ray->origin.z + distance * ray->direction.z; did_hit_object = true; hit_object = object; } } if (node.hit_node_index == THREADED_BVH_TERMINAL_NODE) { bvh_current_node_index = node.miss_node_index; } else { bvh_current_node_index = node.hit_node_index; } } } if (did_hit_object == false) { if (iter == 0){ pixel.r += args.ambient_color.r; pixel.g += args.ambient_color.g; pixel.b += args.ambient_color.b; } break; } if (is_shadow_ray) { // rtxRGBAColor hit_light_color; int material_type = hit_object.layerd_material_types.outside; if (material_type == RTXMaterialTypeEmissive) { __rtx_fetch_color_in_linear_memory( hit_point, hit_object, hit_face, hit_light_color, shared_serialized_material_attribute_byte_array, shared_serialized_color_mapping_array, shared_serialized_texture_object_array, global_serialized_uv_coordinate_array); rtxEmissiveMaterialAttribute attr = ((rtxEmissiveMaterialAttribute*)&shared_serialized_material_attribute_byte_array[hit_object.material_attribute_byte_array_offset])[0]; float emission = attr.intensity; float inv_pdf = args.total_light_face_area; pixel.r += path_weight.r * emission * shadow_ray_brdf * hit_light_color.r * hit_object_color.r * inv_pdf * g_term; pixel.g += path_weight.g * emission * shadow_ray_brdf * hit_light_color.g * hit_object_color.g * inv_pdf * g_term; pixel.b += path_weight.b * emission * shadow_ray_brdf * hit_light_color.b * hit_object_color.b * inv_pdf * g_term; } path_weight.r = next_path_weight.r; path_weight.g = next_path_weight.g; path_weight.b = next_path_weight.b; ray = &primary_ray; } else { int material_type = hit_object.layerd_material_types.outside; bool did_hit_light = material_type == RTXMaterialTypeEmissive; __rtx_fetch_color_in_linear_memory( hit_point, hit_object, hit_face, hit_object_color, shared_serialized_material_attribute_byte_array, shared_serialized_color_mapping_array, shared_serialized_texture_object_array, global_serialized_uv_coordinate_array); // if (did_hit_light) { if (iter > 0) { break; } // rtxEmissiveMaterialAttribute attr = ((rtxEmissiveMaterialAttribute*)&shared_serialized_material_attribute_byte_array[hit_object.material_attribute_byte_array_offset])[0]; if (attr.visible) { 
pixel.r += hit_object_color.r * path_weight.r * attr.intensity; pixel.g += hit_object_color.g * path_weight.g * attr.intensity; pixel.b += hit_object_color.b * path_weight.b * attr.intensity; } else { pixel.r += args.ambient_color.r; pixel.g += args.ambient_color.g; pixel.b += args.ambient_color.b; } break; } else { if (iter == 0) { pixel.r += hit_object_color.r * args.ambient_light_intensity; pixel.g += hit_object_color.g * args.ambient_light_intensity; pixel.b += hit_object_color.b * args.ambient_light_intensity; } } // float3 unit_next_path_direction; float cosine_term; __rtx_sample_ray_direction( unit_hit_face_normal, unit_next_path_direction, cosine_term, curand_state); float input_ray_brdf = 0.0f; __rtx_compute_brdf( unit_hit_face_normal, hit_object, hit_face, ray->direction, unit_next_path_direction, shared_serialized_material_attribute_byte_array, input_ray_brdf); // ray->origin.x = hit_point.x; ray->origin.y = hit_point.y; ray->origin.z = hit_point.z; ray->direction.x = unit_next_path_direction.x; ray->direction.y = unit_next_path_direction.y; ray->direction.z = unit_next_path_direction.z; float inv_pdf = 2.0f * M_PI; next_path_weight.r = path_weight.r * input_ray_brdf * hit_object_color.r * cosine_term * inv_pdf; next_path_weight.g = path_weight.g * input_ray_brdf * hit_object_color.g * cosine_term * inv_pdf; next_path_weight.b = path_weight.b * input_ray_brdf * hit_object_color.b * cosine_term * inv_pdf; float4 random_uniform4 = hiprand_uniform4(&curand_state); // const int table_index = min(int(floorf(random_uniform4.x * float(args.light_sampling_table_size))), args.light_sampling_table_size - 1); const int object_index = shared_light_sampling_table[table_index]; rtxObject object = shared_serialized_object_array[object_index]; float light_distance; float3 unit_light_normal; if (object.geometry_type == RTXGeometryTypeStandard) { const int face_index = min(int(floorf(random_uniform4.y * float(object.num_faces))), object.num_faces - 1); const int serialized_face_index = face_index + object.serialized_face_index_offset; const rtxFaceVertexIndex face = global_serialized_face_vertex_indices_array[serialized_face_index]; const rtxVertex va = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex vb = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex vc = global_serialized_vertex_array[face.c + object.serialized_vertex_index_offset]; __rtx_nee_sample_point_in_triangle(random_uniform4, va, vb, vc, shadow_ray, light_distance, unit_light_normal); } else if (object.geometry_type == RTXGeometryTypeSphere) { const int serialized_array_index = object.serialized_face_index_offset; const rtxFaceVertexIndex face = global_serialized_face_vertex_indices_array[serialized_array_index]; const rtxVertex center = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex radius = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; __rtx_nee_sample_point_in_sphere(curand_state, unit_light_normal, shadow_ray, light_distance); } const float dot_ray_face = shadow_ray.direction.x * unit_hit_face_normal.x + shadow_ray.direction.y * unit_hit_face_normal.y + shadow_ray.direction.z * unit_hit_face_normal.z; if (dot_ray_face <= 0.0f) { ray = &primary_ray; iter += 1; path_weight.r = next_path_weight.r; path_weight.g = next_path_weight.g; path_weight.b = next_path_weight.b; continue; } shadow_ray.origin.x = hit_point.x; shadow_ray.origin.y = hit_point.y; 
shadow_ray.origin.z = hit_point.z; shadow_ray_brdf = 0.0f; __rtx_compute_brdf( unit_hit_face_normal, hit_object, hit_face, primary_ray.direction, shadow_ray.direction, shared_serialized_material_attribute_byte_array, shadow_ray_brdf); // primary_ray.origin.x = hit_point.x; primary_ray.origin.y = hit_point.y; primary_ray.origin.z = hit_point.z; primary_ray.direction.x = unit_next_path_direction.x; primary_ray.direction.y = unit_next_path_direction.y; primary_ray.direction.z = unit_next_path_direction.z; const float dot_ray_light = fabsf(shadow_ray.direction.x * unit_light_normal.x + shadow_ray.direction.y * unit_light_normal.y + shadow_ray.direction.z * unit_light_normal.z); // const float r = max(light_distance, 0.5f); g_term = dot_ray_face * dot_ray_light / (r * r); ray = &shadow_ray; } } } global_serialized_render_array[render_buffer_index] = pixel; } void rtx_cuda_launch_nee_global_memory_kernel( rtxFaceVertexIndex* gpu_serialized_face_vertex_index_array, rtxVertex* gpu_serialized_vertex_array, rtxObject* gpu_serialized_object_array, rtxMaterialAttributeByte* gpu_serialized_material_attribute_byte_array, rtxThreadedBVH* gpu_serialized_threaded_bvh_array, rtxThreadedBVHNode* gpu_serialized_threaded_bvh_node_array, rtxRGBAColor* gpu_serialized_color_mapping_array, rtxUVCoordinate* gpu_serialized_uv_coordinate_array, int* gpu_light_sampling_table, rtxRGBAPixel* gpu_serialized_render_array, rtxNEEKernelArguments& args, int num_threads, int num_blocks, size_t shared_memory_bytes) { __check_kernel_arguments(); hipLaunchKernelGGL(( nee_global_memory_kernel), dim3(num_blocks), dim3(num_threads), shared_memory_bytes, 0, gpu_serialized_face_vertex_index_array, gpu_serialized_vertex_array, gpu_serialized_object_array, gpu_serialized_material_attribute_byte_array, gpu_serialized_threaded_bvh_array, gpu_serialized_threaded_bvh_node_array, gpu_serialized_color_mapping_array, gpu_serialized_uv_coordinate_array, g_gpu_serialized_mapping_texture_object_array, gpu_light_sampling_table, gpu_serialized_render_array, args); cudaCheckError(hipDeviceSynchronize()); }
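The traversal loop above walks a threaded (stackless) BVH: every node carries a hit link and a miss link, and THREADED_BVH_TERMINAL_NODE ends the walk for the current object. Below is a minimal host-side C++ sketch of that control flow only; the node layout, helper names, and predicates are illustrative stand-ins (not the rtx structures or the __rtx_* macros), and the real kernel replaces the lambdas with ray/AABB and ray/primitive tests.

#include <cstdio>
#include <functional>
#include <vector>

struct ThreadedNodeSketch {
    int assigned_face_index_start; // -1 marks an inner node, as in rtxThreadedBVHNode
    int assigned_face_index_end;
    int hit_node_index;            // next node when the AABB test succeeds
    int miss_node_index;           // next node when it fails
};

static const int kTerminal = -1;   // plays the role of THREADED_BVH_TERMINAL_NODE

void traverse_sketch(const std::vector<ThreadedNodeSketch>& nodes,
                     const std::function<bool(int)>& ray_hits_aabb,
                     const std::function<void(int, int)>& test_faces)
{
    int current = 0;
    for (size_t step = 0; step < nodes.size(); ++step) {
        if (current == kTerminal) {
            break; // terminal node reached: this object was not hit (same exit as the kernel)
        }
        const ThreadedNodeSketch& node = nodes[current];
        if (node.assigned_face_index_start == -1) {
            // Inner node: on an AABB miss, jump to the miss link and keep walking,
            // which is presumably what __rtx_bvh_traversal_one_step_or_continue does.
            if (!ray_hits_aabb(current)) {
                current = node.miss_node_index;
                continue;
            }
        } else {
            // Leaf node: intersect the assigned primitive range.
            test_faces(node.assigned_face_index_start, node.assigned_face_index_end);
        }
        // Same tail rule as the kernel: prefer the hit link, fall back to the miss link.
        current = (node.hit_node_index == kTerminal) ? node.miss_node_index : node.hit_node_index;
    }
}

int main()
{
    // Tiny hand-built tree: inner root, two leaves, then terminate.
    std::vector<ThreadedNodeSketch> nodes = {
        { -1, -1, 1, kTerminal },
        {  0,  1, 2, 2 },
        {  2,  2, kTerminal, kTerminal },
    };
    traverse_sketch(nodes,
                    [](int) { return true; }, // pretend every AABB test passes
                    [](int a, int b) { std::printf("test faces %d..%d\n", a, b); });
    return 0;
}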
8ba0028593368d4e17af4b6005832b717fe70346.cu
#include "../../../header/enum.h" #include "../../../header/struct.h" #include "../../header/bridge.h" #include "../../header/cuda_common.h" #include "../../header/cuda_functions.h" #include "../../header/cuda_texture.h" #include "../../header/next_event_estimation_kernel.h" #include <assert.h> #include <cuda_runtime.h> #include <curand_kernel.h> #include <float.h> #include <stdio.h> __global__ void nee_global_memory_kernel( rtxFaceVertexIndex* global_serialized_face_vertex_indices_array, rtxVertex* global_serialized_vertex_array, rtxObject* global_serialized_object_array, rtxMaterialAttributeByte* global_serialized_material_attribute_byte_array, rtxThreadedBVH* global_serialized_threaded_bvh_array, rtxThreadedBVHNode* global_serialized_threaded_bvh_node_array, rtxRGBAColor* global_serialized_color_mapping_array, rtxUVCoordinate* global_serialized_uv_coordinate_array, cudaTextureObject_t* global_serialized_mapping_texture_object_array, int* global_light_sampling_table, rtxRGBAPixel* global_serialized_render_array, rtxNEEKernelArguments args) { extern __shared__ char shared_memory[]; curandStatePhilox4_32_10_t curand_state; curand_init(args.curand_seed, blockIdx.x * blockDim.x + threadIdx.x, 0, &curand_state); __xorshift_init(args.curand_seed); // グローバルメモリの直列データを共有メモリにコピーする int offset = 0; rtxObject* shared_serialized_object_array = (rtxObject*)&shared_memory[offset]; offset += sizeof(rtxObject) * args.object_array_size; rtxMaterialAttributeByte* shared_serialized_material_attribute_byte_array = (rtxMaterialAttributeByte*)&shared_memory[offset]; offset += sizeof(rtxMaterialAttributeByte) * args.material_attribute_byte_array_size; rtxThreadedBVH* shared_serialized_threaded_bvh_array = (rtxThreadedBVH*)&shared_memory[offset]; offset += sizeof(rtxThreadedBVH) * args.threaded_bvh_array_size; rtxRGBAColor* shared_serialized_color_mapping_array = (rtxRGBAColor*)&shared_memory[offset]; offset += sizeof(rtxRGBAColor) * args.color_mapping_array_size; // cudaTextureObject_tは8バイトなのでアライメントに気をつける offset += sizeof(cudaTextureObject_t) - offset % sizeof(cudaTextureObject_t); cudaTextureObject_t* shared_serialized_texture_object_array = (cudaTextureObject_t*)&shared_memory[offset]; offset += sizeof(cudaTextureObject_t) * args.num_active_texture_units; int* shared_light_sampling_table = (int*)&shared_memory[offset]; offset += sizeof(int) * args.light_sampling_table_size; // ブロック内のどれか1スレッドが代表して共有メモリに内容をコピー if (threadIdx.x == 0) { for (int m = 0; m < args.object_array_size; m++) { shared_serialized_object_array[m] = global_serialized_object_array[m]; } for (int m = 0; m < args.material_attribute_byte_array_size; m++) { shared_serialized_material_attribute_byte_array[m] = global_serialized_material_attribute_byte_array[m]; } for (int m = 0; m < args.threaded_bvh_array_size; m++) { shared_serialized_threaded_bvh_array[m] = global_serialized_threaded_bvh_array[m]; } for (int m = 0; m < args.color_mapping_array_size; m++) { shared_serialized_color_mapping_array[m] = global_serialized_color_mapping_array[m]; } for (int m = 0; m < args.num_active_texture_units; m++) { shared_serialized_texture_object_array[m] = global_serialized_mapping_texture_object_array[m]; } for (int m = 0; m < args.light_sampling_table_size; m++) { shared_light_sampling_table[m] = global_light_sampling_table[m]; } } __syncthreads(); int ray_index_offset = (blockIdx.x * blockDim.x + threadIdx.x) * args.num_rays_per_thread; int num_generated_rays_per_pixel = args.num_rays_per_thread * int(ceilf(float(args.num_rays_per_pixel) / 
float(args.num_rays_per_thread))); int target_pixel_index = ray_index_offset / num_generated_rays_per_pixel; if (target_pixel_index >= args.screen_width * args.screen_height) { return; } int target_pixel_x = target_pixel_index % args.screen_width; int target_pixel_y = target_pixel_index / args.screen_width; float aspect_ratio = float(args.screen_width) / float(args.screen_height); int render_buffer_index = ray_index_offset / args.num_rays_per_thread; // Output pixel rtxRGBAPixel pixel = { 0.0f, 0.0f, 0.0f, 0.0f }; for (int n = 0; n < args.num_rays_per_thread; n++) { int ray_index = ray_index_offset + n; int ray_index_in_pixel = ray_index % num_generated_rays_per_pixel; if (ray_index_in_pixel >= args.num_rays_per_pixel) { return; } rtxCUDARay* ray; rtxCUDARay shadow_ray; rtxCUDARay primary_ray; float3 hit_point; float3 unit_hit_face_normal; rtxVertex hit_va; rtxVertex hit_vb; rtxVertex hit_vc; rtxFaceVertexIndex hit_face; rtxObject hit_object; rtxRGBAColor hit_object_color; float g_term; float shadow_ray_brdf; // Generate the ray __rtx_generate_ray(primary_ray, args, aspect_ratio); float3 ray_direction_inv; ray = &primary_ray; // Weight of the light transport path rtxRGBAColor path_weight = { 1.0f, 1.0f, 1.0f }; rtxRGBAColor next_path_weight; // A shadow ray is cast at every hit, so the loop runs twice per bounce int total_rays = args.max_bounce * 2; for (int iter = 0; iter < total_rays; iter++) { bool is_shadow_ray = (iter & 1) == 1; // iter % 2 float min_distance = FLT_MAX; bool did_hit_object = false; ray_direction_inv.x = 1.0f / ray->direction.x; ray_direction_inv.y = 1.0f / ray->direction.y; ray_direction_inv.z = 1.0f / ray->direction.z; // For every object in the scene for (int object_index = 0; object_index < args.object_array_size; object_index++) { rtxObject object = shared_serialized_object_array[object_index]; // Threaded BVH of each geometry rtxThreadedBVH bvh = shared_serialized_threaded_bvh_array[object_index]; // Walk through the nodes of the BVH int bvh_current_node_index = 0; for (int traversal = 0; traversal < bvh.num_nodes; traversal++) { if (bvh_current_node_index == THREADED_BVH_TERMINAL_NODE) { // Terminal node: the ray did not hit this object break; } // Load the BVH node // rtxCUDAThreadedBVHNode is 48 bytes, so it is split into float4 (16 bytes) x 3 // the four int members are reinterpreted as an int4 via reinterpret_cast int serialized_node_index = bvh.serial_node_index_offset + bvh_current_node_index; rtxThreadedBVHNode node = global_serialized_threaded_bvh_node_array[serialized_node_index]; bool is_inner_node = node.assigned_face_index_start == -1; if (is_inner_node) { // Inner node: test the ray against the AABB // See the following for details // An Efficient and Robust Ray–Box Intersection Algorithm // http://www.cs.utah.edu/~awilliam/box/box.pdf __rtx_bvh_traversal_one_step_or_continue((*ray), node, ray_direction_inv, bvh_current_node_index); } else { // Leaf node // Test the ray against each face assigned to this node // Details of the algorithm: // Fast Minimum Storage Ray/Triangle Intersection // https://cadxfem.org/inf/Fast%20MinimumStorage%20RayTriangle%20Intersection.pdf int num_assigned_faces = node.assigned_face_index_end - node.assigned_face_index_start + 1; if (object.geometry_type == RTXGeometryTypeStandard) { for (int m = 0; m < num_assigned_faces; m++) { const int serialized_face_index = node.assigned_face_index_start + m + object.serialized_face_index_offset; const rtxFaceVertexIndex face = global_serialized_face_vertex_indices_array[serialized_face_index]; const rtxVertex va = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex vb = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex vc = global_serialized_vertex_array[face.c +
object.serialized_vertex_index_offset]; float3 face_normal; float distance; __rtx_intersect_triangle_or_continue((*ray), va, vb, vc, face_normal, distance, min_distance); min_distance = distance; hit_point.x = ray->origin.x + distance * ray->direction.x; hit_point.y = ray->origin.y + distance * ray->direction.y; hit_point.z = ray->origin.z + distance * ray->direction.z; unit_hit_face_normal.x = face_normal.x; unit_hit_face_normal.y = face_normal.y; unit_hit_face_normal.z = face_normal.z; hit_va = va; hit_vb = vb; hit_vc = vc; hit_face.a = face.a; hit_face.b = face.b; hit_face.c = face.c; did_hit_object = true; hit_object = object; } } else if (object.geometry_type == RTXGeometryTypeSphere) { int serialized_array_index = node.assigned_face_index_start + object.serialized_face_index_offset; const rtxFaceVertexIndex face = global_serialized_face_vertex_indices_array[serialized_array_index]; const rtxVertex center = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex radius = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; float distance; __rtx_intersect_sphere_or_continue((*ray), center, radius, distance, min_distance); min_distance = distance; hit_point.x = ray->origin.x + distance * ray->direction.x; hit_point.y = ray->origin.y + distance * ray->direction.y; hit_point.z = ray->origin.z + distance * ray->direction.z; const float3 normal = { hit_point.x - center.x, hit_point.y - center.y, hit_point.z - center.z, }; const float norm = sqrtf(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z); unit_hit_face_normal.x = normal.x / norm; unit_hit_face_normal.y = normal.y / norm; unit_hit_face_normal.z = normal.z / norm; did_hit_object = true; hit_object = object; } else if (object.geometry_type == RTXGeometryTypeCylinder) { rtxFaceVertexIndex face; int offset = node.assigned_face_index_start + object.serialized_face_index_offset; // Load cylinder parameters face = global_serialized_face_vertex_indices_array[offset + 0]; const rtxVertex params = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const float radius = params.x; const float y_max = params.y; const float y_min = params.z; // Load transformation matrix face = global_serialized_face_vertex_indices_array[offset + 1]; const rtxVertex trans_a = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex trans_b = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex trans_c = global_serialized_vertex_array[face.c + object.serialized_vertex_index_offset]; // Load inverse transformation matrix face = global_serialized_face_vertex_indices_array[offset + 2]; const rtxVertex inv_trans_a = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex inv_trans_b = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex inv_trans_c = global_serialized_vertex_array[face.c + object.serialized_vertex_index_offset]; float distance; __rtx_intersect_cylinder_or_continue( (*ray), trans_a, trans_b, trans_c, inv_trans_a, inv_trans_b, inv_trans_c, unit_hit_face_normal, distance, min_distance); min_distance = distance; // hit point in view space hit_point.x = ray->origin.x + distance * ray->direction.x; hit_point.y = ray->origin.y + distance * ray->direction.y; hit_point.z = ray->origin.z + distance * ray->direction.z; did_hit_object = true; hit_object = object; } else if (object.geometry_type 
== RTXGeometryTypeCone) { rtxFaceVertexIndex face; int offset = node.assigned_face_index_start + object.serialized_face_index_offset; // Load cylinder parameters face = global_serialized_face_vertex_indices_array[offset + 0]; const rtxVertex params = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const float radius = params.x; const float height = params.y; // Load transformation matrix face = global_serialized_face_vertex_indices_array[offset + 1]; const rtxVertex trans_a = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex trans_b = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex trans_c = global_serialized_vertex_array[face.c + object.serialized_vertex_index_offset]; // Load inverse transformation matrix face = global_serialized_face_vertex_indices_array[offset + 2]; const rtxVertex inv_trans_a = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex inv_trans_b = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex inv_trans_c = global_serialized_vertex_array[face.c + object.serialized_vertex_index_offset]; float distance; __rtx_intersect_cone_or_continue( (*ray), trans_a, trans_b, trans_c, inv_trans_a, inv_trans_b, inv_trans_c, unit_hit_face_normal, distance, min_distance); min_distance = distance; // hit point in view space hit_point.x = ray->origin.x + distance * ray->direction.x; hit_point.y = ray->origin.y + distance * ray->direction.y; hit_point.z = ray->origin.z + distance * ray->direction.z; did_hit_object = true; hit_object = object; } } if (node.hit_node_index == THREADED_BVH_TERMINAL_NODE) { bvh_current_node_index = node.miss_node_index; } else { bvh_current_node_index = node.hit_node_index; } } } if (did_hit_object == false) { if (iter == 0){ pixel.r += args.ambient_color.r; pixel.g += args.ambient_color.g; pixel.b += args.ambient_color.b; } break; } if (is_shadow_ray) { // 光源に当たった場合寄与を加算 rtxRGBAColor hit_light_color; int material_type = hit_object.layerd_material_types.outside; if (material_type == RTXMaterialTypeEmissive) { __rtx_fetch_color_in_linear_memory( hit_point, hit_object, hit_face, hit_light_color, shared_serialized_material_attribute_byte_array, shared_serialized_color_mapping_array, shared_serialized_texture_object_array, global_serialized_uv_coordinate_array); rtxEmissiveMaterialAttribute attr = ((rtxEmissiveMaterialAttribute*)&shared_serialized_material_attribute_byte_array[hit_object.material_attribute_byte_array_offset])[0]; float emission = attr.intensity; float inv_pdf = args.total_light_face_area; pixel.r += path_weight.r * emission * shadow_ray_brdf * hit_light_color.r * hit_object_color.r * inv_pdf * g_term; pixel.g += path_weight.g * emission * shadow_ray_brdf * hit_light_color.g * hit_object_color.g * inv_pdf * g_term; pixel.b += path_weight.b * emission * shadow_ray_brdf * hit_light_color.b * hit_object_color.b * inv_pdf * g_term; } path_weight.r = next_path_weight.r; path_weight.g = next_path_weight.g; path_weight.b = next_path_weight.b; ray = &primary_ray; } else { int material_type = hit_object.layerd_material_types.outside; bool did_hit_light = material_type == RTXMaterialTypeEmissive; __rtx_fetch_color_in_linear_memory( hit_point, hit_object, hit_face, hit_object_color, shared_serialized_material_attribute_byte_array, shared_serialized_color_mapping_array, shared_serialized_texture_object_array, global_serialized_uv_coordinate_array); // 
Terminate the trace when a light source is hit if (did_hit_light) { if (iter > 0) { break; } // Add the contribution only when a light source is hit on the first pass rtxEmissiveMaterialAttribute attr = ((rtxEmissiveMaterialAttribute*)&shared_serialized_material_attribute_byte_array[hit_object.material_attribute_byte_array_offset])[0]; if (attr.visible) { pixel.r += hit_object_color.r * path_weight.r * attr.intensity; pixel.g += hit_object_color.g * path_weight.g * attr.intensity; pixel.b += hit_object_color.b * path_weight.b * attr.intensity; } else { pixel.r += args.ambient_color.r; pixel.g += args.ambient_color.g; pixel.b += args.ambient_color.b; } break; } else { if (iter == 0) { pixel.r += hit_object_color.r * args.ambient_light_intensity; pixel.g += hit_object_color.g * args.ambient_light_intensity; pixel.b += hit_object_color.b * args.ambient_light_intensity; } } // Sample the reflection direction float3 unit_next_path_direction; float cosine_term; __rtx_sample_ray_direction( unit_hit_face_normal, unit_next_path_direction, cosine_term, curand_state); float input_ray_brdf = 0.0f; __rtx_compute_brdf( unit_hit_face_normal, hit_object, hit_face, ray->direction, unit_next_path_direction, shared_serialized_material_attribute_byte_array, input_ray_brdf); // Sample the incident direction ray->origin.x = hit_point.x; ray->origin.y = hit_point.y; ray->origin.z = hit_point.z; ray->direction.x = unit_next_path_direction.x; ray->direction.y = unit_next_path_direction.y; ray->direction.z = unit_next_path_direction.z; float inv_pdf = 2.0f * M_PI; next_path_weight.r = path_weight.r * input_ray_brdf * hit_object_color.r * cosine_term * inv_pdf; next_path_weight.g = path_weight.g * input_ray_brdf * hit_object_color.g * cosine_term * inv_pdf; next_path_weight.b = path_weight.b * input_ray_brdf * hit_object_color.b * cosine_term * inv_pdf; float4 random_uniform4 = curand_uniform4(&curand_state); // Sample a light source const int table_index = min(int(floorf(random_uniform4.x * float(args.light_sampling_table_size))), args.light_sampling_table_size - 1); const int object_index = shared_light_sampling_table[table_index]; rtxObject object = shared_serialized_object_array[object_index]; float light_distance; float3 unit_light_normal; if (object.geometry_type == RTXGeometryTypeStandard) { const int face_index = min(int(floorf(random_uniform4.y * float(object.num_faces))), object.num_faces - 1); const int serialized_face_index = face_index + object.serialized_face_index_offset; const rtxFaceVertexIndex face = global_serialized_face_vertex_indices_array[serialized_face_index]; const rtxVertex va = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex vb = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; const rtxVertex vc = global_serialized_vertex_array[face.c + object.serialized_vertex_index_offset]; __rtx_nee_sample_point_in_triangle(random_uniform4, va, vb, vc, shadow_ray, light_distance, unit_light_normal); } else if (object.geometry_type == RTXGeometryTypeSphere) { const int serialized_array_index = object.serialized_face_index_offset; const rtxFaceVertexIndex face = global_serialized_face_vertex_indices_array[serialized_array_index]; const rtxVertex center = global_serialized_vertex_array[face.a + object.serialized_vertex_index_offset]; const rtxVertex radius = global_serialized_vertex_array[face.b + object.serialized_vertex_index_offset]; __rtx_nee_sample_point_in_sphere(curand_state, unit_light_normal, shadow_ray, light_distance); } const float dot_ray_face = shadow_ray.direction.x * unit_hit_face_normal.x + shadow_ray.direction.y *
unit_hit_face_normal.y + shadow_ray.direction.z * unit_hit_face_normal.z; if (dot_ray_face <= 0.0f) { ray = &primary_ray; iter += 1; path_weight.r = next_path_weight.r; path_weight.g = next_path_weight.g; path_weight.b = next_path_weight.b; continue; } shadow_ray.origin.x = hit_point.x; shadow_ray.origin.y = hit_point.y; shadow_ray.origin.z = hit_point.z; shadow_ray_brdf = 0.0f; __rtx_compute_brdf( unit_hit_face_normal, hit_object, hit_face, primary_ray.direction, shadow_ray.direction, shared_serialized_material_attribute_byte_array, shadow_ray_brdf); // 次のパス primary_ray.origin.x = hit_point.x; primary_ray.origin.y = hit_point.y; primary_ray.origin.z = hit_point.z; primary_ray.direction.x = unit_next_path_direction.x; primary_ray.direction.y = unit_next_path_direction.y; primary_ray.direction.z = unit_next_path_direction.z; const float dot_ray_light = fabsf(shadow_ray.direction.x * unit_light_normal.x + shadow_ray.direction.y * unit_light_normal.y + shadow_ray.direction.z * unit_light_normal.z); // ハック const float r = max(light_distance, 0.5f); g_term = dot_ray_face * dot_ray_light / (r * r); ray = &shadow_ray; } } } global_serialized_render_array[render_buffer_index] = pixel; } void rtx_cuda_launch_nee_global_memory_kernel( rtxFaceVertexIndex* gpu_serialized_face_vertex_index_array, rtxVertex* gpu_serialized_vertex_array, rtxObject* gpu_serialized_object_array, rtxMaterialAttributeByte* gpu_serialized_material_attribute_byte_array, rtxThreadedBVH* gpu_serialized_threaded_bvh_array, rtxThreadedBVHNode* gpu_serialized_threaded_bvh_node_array, rtxRGBAColor* gpu_serialized_color_mapping_array, rtxUVCoordinate* gpu_serialized_uv_coordinate_array, int* gpu_light_sampling_table, rtxRGBAPixel* gpu_serialized_render_array, rtxNEEKernelArguments& args, int num_threads, int num_blocks, size_t shared_memory_bytes) { __check_kernel_arguments(); nee_global_memory_kernel<<<num_blocks, num_threads, shared_memory_bytes>>>( gpu_serialized_face_vertex_index_array, gpu_serialized_vertex_array, gpu_serialized_object_array, gpu_serialized_material_attribute_byte_array, gpu_serialized_threaded_bvh_array, gpu_serialized_threaded_bvh_node_array, gpu_serialized_color_mapping_array, gpu_serialized_uv_coordinate_array, g_gpu_serialized_mapping_texture_object_array, gpu_light_sampling_table, gpu_serialized_render_array, args); cudaCheckError(cudaThreadSynchronize()); }
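For reference, the shared_memory_bytes argument taken by the launcher above has to cover every array the kernel stages into shared memory, including the 8-byte alignment step applied before the texture-object array. The helper below is a hedged host-side sketch of that size computation (the function name is illustrative and the rtx argument types are assumed to come from the project headers included at the top of the file); it simply mirrors the offsets the kernel itself advances.

#include <cuda_runtime.h>
#include <cstddef>
// Assumes the project headers defining the rtx* types are also included,
// as at the top of the kernel file above.

size_t nee_shared_memory_bytes(const rtxNEEKernelArguments& args) // illustrative name
{
    size_t offset = 0;
    offset += sizeof(rtxObject) * args.object_array_size;
    offset += sizeof(rtxMaterialAttributeByte) * args.material_attribute_byte_array_size;
    offset += sizeof(rtxThreadedBVH) * args.threaded_bvh_array_size;
    offset += sizeof(rtxRGBAColor) * args.color_mapping_array_size;
    // Same alignment correction the kernel applies before the cudaTextureObject_t array.
    offset += sizeof(cudaTextureObject_t) - offset % sizeof(cudaTextureObject_t);
    offset += sizeof(cudaTextureObject_t) * args.num_active_texture_units;
    offset += sizeof(int) * args.light_sampling_table_size;
    return offset;
}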
5335598b9e622d76157b2a051a55edd4c716c17c.hip
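The cuNDArray_permute_kernel in the file below can fold an FFTSHIFT or IFFTSHIFT into the permutation via shift_mode, remapping each coordinate per dimension before applying the output strides. The snippet here is a minimal, self-contained sketch of just that coordinate remapping (the helper name is illustrative, not Gadgetron API), using the same two formulas as the kernel.

#include <cstdio>

// Shifted coordinate for one dimension of size n, matching the kernel's branches:
// shift_mode < 0 -> IFFTSHIFT, shift_mode > 0 -> FFTSHIFT, 0 -> plain permute.
unsigned int shift_coordinate(unsigned int c, unsigned int n, int shift_mode)
{
    if (shift_mode < 0) return (c + (n >> 1)) % n;
    if (shift_mode > 0) return (c + ((n + 1) >> 1)) % n;
    return c;
}

int main()
{
    const unsigned int n = 5;
    for (unsigned int c = 0; c < n; ++c)
        std::printf("%u -> fftshift %u, ifftshift %u\n",
                    c, shift_coordinate(c, n, +1), shift_coordinate(c, n, -1));
    return 0;
}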
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuNDArray_utils.h" #include "vector_td_utilities.h" #include "cudaDeviceManager.h" #include "setup_grid.h" #include <math_functions.h> #include <cmath> namespace Gadgetron { template <class T> __global__ void cuNDArray_permute_kernel(const T* __restrict__ in, T* __restrict__ out, unsigned int ndim, const unsigned int* __restrict__ dims, const unsigned int* __restrict__ strides_out, unsigned int elements, int shift_mode) { unsigned int idx_in = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; unsigned int idx_out = 0; unsigned int idx_in_tmp = idx_in; if (idx_in < elements) { unsigned int cur_index; for (unsigned int i = 0; i < ndim; i++) { unsigned int idx_in_remainder = idx_in_tmp / dims[i]; cur_index = idx_in_tmp-(idx_in_remainder*dims[i]); //cur_index = idx_in_tmp%dims[i]; if (shift_mode < 0) { //IFFTSHIFT idx_out += ((cur_index+(dims[i]>>1))%dims[i])*strides_out[i]; } else if (shift_mode > 0) { //FFTSHIFT idx_out += ((cur_index+((dims[i]+1)>>1))%dims[i])*strides_out[i]; } else { idx_out += cur_index*strides_out[i]; } idx_in_tmp = idx_in_remainder; } out[idx_in] = in[idx_out]; } } template <class T> void cuNDArray_permute( cuNDArray<T>* in, cuNDArray<T>* out, std::vector<size_t> *order, int shift_mode) { if( out == 0x0 ){ throw cuda_error("cuNDArray_permute(internal): 0x0 output");; } hipError_t err; T* in_ptr = in->get_data_ptr(); T* out_ptr = 0; if (out) { out_ptr = out->get_data_ptr(); } else { if (hipMalloc((void**) &out_ptr, in->get_number_of_elements()*sizeof(T)) != hipSuccess) { throw cuda_error("cuNDArray_permute : Error allocating CUDA memory");; } } unsigned int* dims = new unsigned int[in->get_number_of_dimensions()]; unsigned int* strides_out = new unsigned int[in->get_number_of_dimensions()]; if (!dims || !strides_out) { throw cuda_error("cuNDArray_permute: failed to allocate temporary storage for arrays");; } for (unsigned int i = 0; i < in->get_number_of_dimensions(); i++) { dims[i] = (*in->get_dimensions())[(*order)[i]]; strides_out[i] = 1; for (unsigned int j = 0; j < (*order)[i]; j++) { strides_out[i] *= (*in->get_dimensions())[j]; } } unsigned int* dims_dev = 0; unsigned int* strides_out_dev = 0; if (hipMalloc((void**) &dims_dev, in->get_number_of_dimensions()*sizeof(unsigned int)) != hipSuccess) { throw cuda_error("cuNDArray_permute : Error allocating CUDA dims memory");; } if (hipMalloc((void**) &strides_out_dev, in->get_number_of_dimensions()*sizeof(unsigned int)) != hipSuccess) { throw cuda_error("cuNDArray_permute : Error allocating CUDA strides_out memory");; } if (hipMemcpy(dims_dev, dims, in->get_number_of_dimensions()*sizeof(unsigned int), hipMemcpyHostToDevice) != hipSuccess) { err = hipGetLastError(); std::stringstream ss; ss << "cuNDArray_permute : Error uploading dimensions to device, " << hipGetErrorString(err); throw cuda_error(ss.str());; } if (hipMemcpy(strides_out_dev, strides_out, in->get_number_of_dimensions()*sizeof(unsigned int), hipMemcpyHostToDevice) != hipSuccess) { throw cuda_error("cuNDArray_permute : Error uploading strides to device");; } dim3 blockDim(512,1,1); dim3 gridDim; if( in->get_number_of_dimensions() > 2 ){ gridDim = dim3((unsigned int) ::ceil((double)in->get_size(0)*in->get_size(1)/blockDim.x), 1, 1 ); for( unsigned int d=2; d<in->get_number_of_dimensions(); d++ ) gridDim.y *= in->get_size(d); } else gridDim = dim3((unsigned int) ::ceil((double)in->get_number_of_elements()/blockDim.x), 1, 1 ); 
hipLaunchKernelGGL(( cuNDArray_permute_kernel), dim3(gridDim), dim3(blockDim) , 0, 0, in_ptr, out_ptr, in->get_number_of_dimensions(), dims_dev, strides_out_dev, in->get_number_of_elements(), shift_mode); err = hipGetLastError(); if( err != hipSuccess ){ std::stringstream ss; ss <<"cuNDArray_permute : Error during kernel call: " << hipGetErrorString(err); throw cuda_error(ss.str());; } if (hipFree(dims_dev) != hipSuccess) { err = hipGetLastError(); std::stringstream ss; ss << "cuNDArray_permute: failed to delete device memory (dims_dev) " << hipGetErrorString(err); throw cuda_error(ss.str());; } if (hipFree(strides_out_dev) != hipSuccess) { err = hipGetLastError(); std::stringstream ss; ss << "cuNDArray_permute: failed to delete device memory (strides_out_dev) "<< hipGetErrorString(err); throw cuda_error(ss.str());; } delete [] dims; delete [] strides_out; } template <class T> boost::shared_ptr< cuNDArray<T> > permute( cuNDArray<T> *in, std::vector<size_t> *dim_order, int shift_mode ) { if( in == 0x0 || dim_order == 0x0 ) { throw std::runtime_error("permute(): invalid pointer provided");; } std::vector<size_t> dims; for (unsigned int i = 0; i < dim_order->size(); i++) dims.push_back(in->get_dimensions()->at(dim_order->at(i))); boost::shared_ptr< cuNDArray<T> > out( new cuNDArray<T>() ); out->create(&dims); permute( in, out.get(), dim_order, shift_mode ); return out; } template <class T> void permute( cuNDArray<T> *in, cuNDArray<T> *out, std::vector<size_t> *dim_order, int shift_mode ) { if( in == 0x0 || out == 0x0 || dim_order == 0x0 ) { throw std::runtime_error("permute(): invalid pointer provided");; } //Check ordering array if (dim_order->size() > in->get_number_of_dimensions()) { throw std::runtime_error("permute(): invalid length of dimension ordering array");; } std::vector<size_t> dim_count(in->get_number_of_dimensions(),0); for (unsigned int i = 0; i < dim_order->size(); i++) { if ((*dim_order)[i] >= in->get_number_of_dimensions()) { throw std::runtime_error("permute(): invalid dimension order array");; } dim_count[(*dim_order)[i]]++; } //Create an internal array to store the dimensions std::vector<size_t> dim_order_int; //Check that there are no duplicate dimensions for (unsigned int i = 0; i < dim_order->size(); i++) { if (dim_count[(*dim_order)[i]] != 1) { throw std::runtime_error("permute(): invalid dimension order array (duplicates)");; } dim_order_int.push_back((*dim_order)[i]); } for (unsigned int i = 0; i < dim_order_int.size(); i++) { if ((*in->get_dimensions())[dim_order_int[i]] != out->get_size(i)) { throw std::runtime_error("permute(): dimensions of output array do not match the input array");; } } //Pad dimension order array with dimension not mentioned in order array if (dim_order_int.size() < in->get_number_of_dimensions()) { for (unsigned int i = 0; i < dim_count.size(); i++) { if (dim_count[i] == 0) { dim_order_int.push_back(i); } } } cuNDArray_permute(in, out, &dim_order_int, shift_mode); } template<class T> boost::shared_ptr< cuNDArray<T> > shift_dim( cuNDArray<T> *in, int shift ) { if( in == 0x0 ) { throw std::runtime_error("shift_dim(): invalid input pointer provided");; } std::vector<size_t> order; for (int i = 0; i < in->get_number_of_dimensions(); i++) { order.push_back(static_cast<unsigned int>((i+shift)%in->get_number_of_dimensions())); } return permute(in, &order); } template<class T> void shift_dim( cuNDArray<T> *in, cuNDArray<T> *out, int shift ) { if( in == 0x0 || out == 0x0 ) { throw std::runtime_error("shift_dim(): invalid pointer provided");; } 
std::vector<size_t> order; for (int i = 0; i < in->get_number_of_dimensions(); i++) { order.push_back(static_cast<unsigned int>((i+shift)%in->get_number_of_dimensions())); } permute(in,out,&order); } // Expand // template<class T> __global__ void expand_kernel( const T * __restrict__ in, T * __restrict__ out, unsigned int number_of_elements_in, unsigned int number_of_elements_out, unsigned int new_dim_size ) { const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; if( idx < number_of_elements_out ){ out[idx] = in[idx%number_of_elements_in]; } } // Expand // template<class T> boost::shared_ptr< cuNDArray<T> > expand( cuNDArray<T> *in, size_t new_dim_size ) { unsigned int number_of_elements_out = in->get_number_of_elements()*new_dim_size; // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( number_of_elements_out, &blockDim, &gridDim ); // Find element stride std::vector<size_t> dims = *in->get_dimensions(); dims.push_back(new_dim_size); // Invoke kernel boost::shared_ptr< cuNDArray<T> > out( new cuNDArray<T>()); out->create(&dims); hipLaunchKernelGGL(( expand_kernel<T>), dim3(gridDim), dim3(blockDim) , 0, 0, in->get_data_ptr(), out->get_data_ptr(), in->get_number_of_elements(), number_of_elements_out, new_dim_size ); CHECK_FOR_CUDA_ERROR(); return out; } // Crop template<class T, unsigned int D> __global__ void crop_kernel ( vector_td<unsigned int,D> offset, vector_td<unsigned int,D> matrix_size_in, vector_td<unsigned int,D> matrix_size_out, const T * __restrict__ in, T * __restrict__ out, unsigned int num_batches, unsigned int num_elements ) { const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; const unsigned int frame_offset = idx/num_elements; if( idx < num_elements*num_batches ){ const typename uintd<D>::Type co = idx_to_co<D>( idx-frame_offset*num_elements, matrix_size_out ); const typename uintd<D>::Type co_os = offset + co; const unsigned int in_idx = co_to_idx<D>(co_os, matrix_size_in)+frame_offset*prod(matrix_size_in); out[idx] = in[in_idx]; } } // Crop template<class T, unsigned int D> void crop( typename uint64d<D>::Type offset, cuNDArray<T> *in, cuNDArray<T> *out ) { if( in == 0x0 || out == 0x0 ){ throw std::runtime_error("crop: 0x0 ndarray provided");; } if( in->get_number_of_dimensions() != out->get_number_of_dimensions() ){ throw std::runtime_error("crop: image dimensions mismatch");; } if( in->get_number_of_dimensions() < D ){ std::stringstream ss; ss << "crop: number of image dimensions should be at least " << D; throw std::runtime_error(ss.str());; } typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in->get_dimensions() ); typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *out->get_dimensions() ); unsigned int number_of_batches = 1; for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ ){ number_of_batches *= in->get_size(d); } if( weak_greater(offset+matrix_size_out, matrix_size_in) ){ throw std::runtime_error( "crop: cropping size mismatch"); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim, number_of_batches ); // Invoke kernel hipLaunchKernelGGL(( crop_kernel<T,D>), dim3(gridDim), dim3(blockDim) , 0, 0, vector_td<unsigned int,D>(offset), vector_td<unsigned int,D>(matrix_size_in), vector_td<unsigned int,D>(matrix_size_out), in->get_data_ptr(), out->get_data_ptr(), number_of_batches, prod(matrix_size_out) ); CHECK_FOR_CUDA_ERROR(); } template<class T, 
unsigned int D> boost::shared_ptr< cuNDArray<T> > crop( typename uint64d<D>::Type offset, typename uint64d<D>::Type size, cuNDArray<T> *in ) { if( in == 0x0 ){ throw std::runtime_error("crop: 0x0 array provided");; } std::vector<size_t> dims = to_std_vector(size); for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ ){ dims.push_back(in->get_size(d)); } boost::shared_ptr< cuNDArray<T> > result( new cuNDArray<T>(&dims) ); crop<T,D>(offset, in, result.get()); return result; } // Expand and zero fill template<class T, unsigned int D> __global__ void pad_kernel( vector_td<unsigned int,D> matrix_size_in, vector_td<unsigned int,D> matrix_size_out, const T * __restrict__ in, T * __restrict__ out, unsigned int number_of_batches, unsigned int num_elements, T val ) { const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; const unsigned int frame_offset = idx/num_elements; if( idx < num_elements*number_of_batches ){ const typename uintd<D>::Type co_out = idx_to_co<D>( idx-frame_offset*num_elements, matrix_size_out ); const typename uintd<D>::Type offset = (matrix_size_out-matrix_size_in)>>1; T _out; bool inside = (co_out>=offset) && (co_out<(matrix_size_in+offset)); if( inside ) _out = in[co_to_idx<D>(co_out-offset, matrix_size_in)+frame_offset*prod(matrix_size_in)]; else{ _out = val; } out[idx] = _out; } } template<class T, unsigned int D> void pad( cuNDArray<T> *in, cuNDArray<T> *out, T val ) { if( in == 0x0 || out == 0x0 ){ throw std::runtime_error("pad: 0x0 ndarray provided");; } if( in->get_number_of_dimensions() != out->get_number_of_dimensions() ){ throw std::runtime_error("pad: image dimensions mismatch");; } if( in->get_number_of_dimensions() < D ){ std::stringstream ss; ss << "pad: number of image dimensions should be at least " << D; throw std::runtime_error(ss.str());; } typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in->get_dimensions() ); typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *out->get_dimensions() ); unsigned int number_of_batches = 1; for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ ){ number_of_batches *= in->get_size(d); } if( weak_greater(matrix_size_in,matrix_size_out) ){ throw std::runtime_error("pad: size mismatch, cannot expand"); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim, number_of_batches ); // Invoke kernel hipLaunchKernelGGL(( pad_kernel<T,D>), dim3(gridDim), dim3(blockDim) , 0, 0, vector_td<unsigned int,D>(matrix_size_in), vector_td<unsigned int,D>(matrix_size_out), in->get_data_ptr(), out->get_data_ptr(), number_of_batches, prod(matrix_size_out), val ); CHECK_FOR_CUDA_ERROR(); } template<class T, unsigned int D> boost::shared_ptr< cuNDArray<T> > pad( typename uint64d<D>::Type size, cuNDArray<T> *in, T val ) { if( in == 0x0 ){ throw std::runtime_error("pad: 0x0 array provided");; } std::vector<size_t> dims = to_std_vector(size); for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ ){ dims.push_back(in->get_size(d)); } boost::shared_ptr< cuNDArray<T> > result( new cuNDArray<T>(&dims) ); pad<T,D>(in, result.get(), val); return result; } template<class T, unsigned int D> __global__ void fill_border_kernel( vector_td<unsigned int,D> matrix_size_in, vector_td<unsigned int,D> matrix_size_out, T *image, unsigned int number_of_batches, unsigned int number_of_elements, T val ) { const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; if( idx < 
number_of_elements ){ const vector_td<unsigned int,D> co_out = idx_to_co<D>( idx, matrix_size_out ); const vector_td<unsigned int,D> offset = (matrix_size_out-matrix_size_in)>>1; if( weak_less( co_out, offset ) || weak_greater_equal( co_out, matrix_size_in+offset ) ){ for( unsigned int batch=0; batch<number_of_batches; batch++ ){ image[idx+batch*number_of_elements] = val; } } else ; // do nothing } } // Zero fill border (rectangular) template<class T, unsigned int D> void fill_border( typename uint64d<D>::Type matrix_size_in, cuNDArray<T> *in_out, T val ) { typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *in_out->get_dimensions() ); if( weak_greater(matrix_size_in, matrix_size_out) ){ throw std::runtime_error("fill_border: size mismatch, cannot zero fill");; } unsigned int number_of_batches = 1; for( unsigned int d=D; d<in_out->get_number_of_dimensions(); d++ ){ number_of_batches *= in_out->get_size(d); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim ); // Invoke kernel hipLaunchKernelGGL(( fill_border_kernel<T,D>), dim3(gridDim), dim3(blockDim) , 0, 0, vector_td<unsigned int,D>(matrix_size_in), vector_td<unsigned int,D>(matrix_size_out), in_out->get_data_ptr(), number_of_batches, prod(matrix_size_out), val ); CHECK_FOR_CUDA_ERROR(); } template<class T, unsigned int D> __global__ void fill_border_kernel( typename realType<T>::Type radius, vector_td<int,D> matrix_size, T *image, unsigned int number_of_batches, unsigned int number_of_elements, T val ) { const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; if( idx < number_of_elements ){ const vector_td<typename realType<T>::Type,D> co_out( (matrix_size>>1) - idx_to_co<D>( idx, matrix_size )); if( norm(co_out) > radius ){ for( unsigned int batch=0; batch<number_of_batches; batch++ ){ image[idx+batch*number_of_elements] = val; } } else ; // do nothing } } // Zero fill border (radial) template<class T, unsigned int D> void fill_border( typename realType<T>::Type radius, cuNDArray<T> *in_out, T val ) { typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *in_out->get_dimensions() ); unsigned int number_of_batches = 1; for( unsigned int d=D; d<in_out->get_number_of_dimensions(); d++ ){ number_of_batches *= in_out->get_size(d); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim ); // Invoke kernel hipLaunchKernelGGL(( fill_border_kernel<T,D>), dim3(gridDim), dim3(blockDim) , 0, 0, radius, vector_td<int,D>(matrix_size_out), in_out->get_data_ptr(), number_of_batches, prod(matrix_size_out), val ); CHECK_FOR_CUDA_ERROR(); } template<class T, unsigned int D> __global__ void upsample_kernel( typename uintd<D>::Type matrix_size_in, typename uintd<D>::Type matrix_size_out, unsigned int num_batches, const T * __restrict__ image_in, T * __restrict__ image_out ) { typedef typename realType<T>::Type REAL; const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; const unsigned int num_elements_out = prod(matrix_size_out); if( idx < num_elements_out*num_batches ){ const unsigned int batch = idx/num_elements_out; const unsigned int batch_offset_in = batch*prod(matrix_size_in); const typename uintd<D>::Type co_out = idx_to_co<D>( idx-batch*num_elements_out, matrix_size_out ); const typename uintd<D>::Type co_in = co_out >> 1; const typename uintd<D>::Type ones(1); const typename uintd<D>::Type twos(2); const typename 
uintd<D>::Type offset = co_out%twos; const unsigned int num_cells = 1 << D; T cellsum(0); unsigned int count = 0; for( unsigned int i=0; i<num_cells; i++ ){ const typename uintd<D>::Type stride = idx_to_co<D>( i, twos ); if( offset >= stride ){ cellsum += image_in[batch_offset_in+co_to_idx(amin(co_in+stride, matrix_size_in-ones), matrix_size_in)]; count++; } } image_out[idx] = cellsum / REAL(count); } } // // Linear upsampling by a factor of two (on a D-dimensional grid) // Note that this operator is the transpose of the downsampling operator below by design // - based on Briggs et al, A Multigrid Tutorial 2nd edition, pp. 34-35 // template<class T, unsigned int D> boost::shared_ptr< cuNDArray<T> > upsample( cuNDArray<T>* in ) { if( in == 0x0 ) throw std::runtime_error("upsample: illegal input pointer"); std::vector<size_t> dims_out = *in->get_dimensions(); for( unsigned int i=0; i<D; i++ ) dims_out[i] <<= 1; boost::shared_ptr< cuNDArray<T> > out(new cuNDArray<T>(&dims_out)); upsample<T,D>( in, out.get() ); return out; } template<class T, unsigned int D> void upsample( cuNDArray<T> *in, cuNDArray<T> *out ) { if( in == 0x0 || out == 0x0 ) throw std::runtime_error("upsample: illegal input pointer"); typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in->get_dimensions() ); typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *out->get_dimensions() ); if( (matrix_size_in<<1) != matrix_size_out ){ throw std::runtime_error("upsample: arrays do not correspond to upsampling by a factor of two"); } unsigned int number_of_batches = 1; for( unsigned int d=D; d<out->get_number_of_dimensions(); d++ ){ number_of_batches *= out->get_size(d); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim, number_of_batches ); // Invoke kernel hipLaunchKernelGGL(( upsample_kernel<T,D>), dim3(gridDim), dim3(blockDim) , 0, 0, vector_td<unsigned int,D>(matrix_size_in), vector_td<unsigned int,D>(matrix_size_out), number_of_batches, in->get_data_ptr(), out->get_data_ptr() ); CHECK_FOR_CUDA_ERROR(); } // // Linear downsampling by a factor of two (on a D-dimensional grid) // Note that this operator is the transpose of the upsampling operator above by design // - based on Briggs et al, A Multigrid Tutorial 2nd edition, pp. 36. 
// template<class T, unsigned int D> __global__ void downsample_kernel( typename intd<D>::Type matrix_size_in, typename intd<D>::Type matrix_size_out, int num_batches, const T * __restrict__ image_in, T * __restrict__ image_out ) { typedef typename realType<T>::Type REAL; const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; const int num_elements_out = prod(matrix_size_out); if( idx < num_elements_out*num_batches ){ const int batch = idx/num_elements_out; const int batch_offset_in = batch*prod(matrix_size_in); const typename intd<D>::Type co_out = idx_to_co<D>( idx-batch*num_elements_out, matrix_size_out ); const typename intd<D>::Type co_in = co_out << 1; T cellsum[D+1]; for( unsigned int d=0; d<D+1; d++ ){ cellsum[d] = T(0); } //const int num_cells = pow(3,D); // no pow for integers on device int num_cells = 1; for( int i=0; i<D; i++ ) num_cells *=3; const REAL denominator = pow(REAL(4),REAL(D)); for( int i=0; i<num_cells; i++ ){ const typename intd<D>::Type zeros(0); const typename intd<D>::Type ones(1); const typename intd<D>::Type threes(3); const typename intd<D>::Type stride = idx_to_co<D>(i,threes)-ones; // in the range [-1;1]^D int distance = 0; for( int d=0; d<D; d++ ){ if( abs(stride[d])>0 ) distance++; } cellsum[distance] += image_in[batch_offset_in+co_to_idx(amax(zeros, amin(matrix_size_in-ones,co_in+stride)), matrix_size_in)]; } T res = T(0); for( unsigned int d=0; d<D+1; d++ ){ res += (REAL(1<<(D-d))*cellsum[d]); } image_out[idx] = res / denominator; } } template<class T, unsigned int D> boost::shared_ptr< cuNDArray<T> > downsample( cuNDArray<T>* in ) { if( in == 0x0 ) throw std::runtime_error("downsample: illegal input pointer"); std::vector<size_t> dims_out = *in->get_dimensions(); for( unsigned int i=0; i<D; i++ ) dims_out[i] >>= 1; boost::shared_ptr< cuNDArray<T> > out(new cuNDArray<T>(&dims_out)); downsample<T,D>( in, out.get() ); return out; } template<class T, unsigned int D> void downsample( cuNDArray<T> *in, cuNDArray<T> *out ) { if( in == 0x0 || out == 0x0 ) throw std::runtime_error("downsample: illegal input pointer"); typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in->get_dimensions() ); typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *out->get_dimensions() ); if( (matrix_size_in>>1) != matrix_size_out ){ throw std::runtime_error("downsample: arrays do not correspond to downsampling by a factor of two"); } unsigned int number_of_batches = 1; for( unsigned int d=D; d<out->get_number_of_dimensions(); d++ ){ number_of_batches *= out->get_size(d); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim, number_of_batches ); // Invoke kernel hipLaunchKernelGGL(( downsample_kernel<T,D>), dim3(gridDim), dim3(blockDim) , 0, 0, vector_td<int,D>(matrix_size_in), vector_td<int,D>(matrix_size_out), (int)number_of_batches, in->get_data_ptr(), out->get_data_ptr() ); CHECK_FOR_CUDA_ERROR(); } // // Instantiation // template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > permute( cuNDArray<float>*, std::vector<size_t>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > permute( cuNDArray<double>*, std::vector<size_t>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > permute( cuNDArray<float_complext>*, std::vector<size_t>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > permute( cuNDArray<double_complext>*, std::vector<size_t>*, int ); template EXPORTGPUCORE 
void permute( cuNDArray<float>*, cuNDArray<float>*, std::vector<size_t>*, int); template EXPORTGPUCORE void permute( cuNDArray<double>*, cuNDArray<double>*, std::vector<size_t>*, int); template EXPORTGPUCORE void permute( cuNDArray<float_complext>*, cuNDArray<float_complext>*, std::vector<size_t>*, int); template EXPORTGPUCORE void permute( cuNDArray<double_complext>*, cuNDArray<double_complext>*, std::vector<size_t>*, int); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > shift_dim( cuNDArray<float>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > shift_dim( cuNDArray<double>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > shift_dim( cuNDArray<float_complext>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > shift_dim( cuNDArray<double_complext>*, int ); template EXPORTGPUCORE void shift_dim( cuNDArray<float>*, cuNDArray<float>*, int shift ); template EXPORTGPUCORE void shift_dim( cuNDArray<double>*, cuNDArray<double>*, int shift ); template EXPORTGPUCORE void shift_dim( cuNDArray<float_complext>*, cuNDArray<float_complext>*, int shift ); template EXPORTGPUCORE void shift_dim( cuNDArray<double_complext>*, cuNDArray<double_complext>*, int shift ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > expand<float>( cuNDArray<float>*, size_t); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > expand<double>( cuNDArray<double>*, size_t); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > expand<float_complext>( cuNDArray<float_complext>*, size_t); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > expand<double_complext>( cuNDArray<double_complext>*, size_t); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > crop<float,1>( typename uint64d<1>::Type, typename uint64d<1>::Type, cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > crop<float,2>( typename uint64d<2>::Type, typename uint64d<2>::Type, cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > crop<float,3>( typename uint64d<3>::Type, typename uint64d<3>::Type, cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > crop<float,4>( typename uint64d<4>::Type, typename uint64d<4>::Type, cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > crop<float_complext,1>( typename uint64d<1>::Type, typename uint64d<1>::Type, cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > crop<float_complext,2>( typename uint64d<2>::Type, typename uint64d<2>::Type, cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > crop<float_complext,3>( typename uint64d<3>::Type, typename uint64d<3>::Type, cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > crop<float_complext,4>( typename uint64d<4>::Type, typename uint64d<4>::Type, cuNDArray<float_complext>*); template EXPORTGPUCORE void crop<float,1>( uint64d1, cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void crop<float,2>( uint64d2, cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void crop<float,3>( uint64d3, cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void crop<float,4>( uint64d4, cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void crop<complext<float>,1>( uint64d1, cuNDArray<complext<float> >*, cuNDArray< complext<float> >*); template 
EXPORTGPUCORE void crop<complext<float>,2>( uint64d2, cuNDArray<complext<float> >*, cuNDArray< complext<float> >*); template EXPORTGPUCORE void crop<complext<float>,3>( uint64d3, cuNDArray<complext<float> >*, cuNDArray< complext<float> >*); template EXPORTGPUCORE void crop<complext<float>,4>( uint64d4, cuNDArray<complext<float> >*, cuNDArray< complext<float> >*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > pad<float,1>( typename uint64d<1>::Type, cuNDArray<float>*, float ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > pad<float,2>( typename uint64d<2>::Type, cuNDArray<float>*, float ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > pad<float,3>( typename uint64d<3>::Type, cuNDArray<float>*, float ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > pad<float,4>( typename uint64d<4>::Type, cuNDArray<float>*, float ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > pad<float_complext,1>( typename uint64d<1>::Type, cuNDArray<float_complext>*, float_complext ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > pad<float_complext,2>( typename uint64d<2>::Type, cuNDArray<float_complext>*, float_complext ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > pad<float_complext,3>( typename uint64d<3>::Type, cuNDArray<float_complext>*, float_complext ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > pad<float_complext,4>( typename uint64d<4>::Type, cuNDArray<float_complext>*, float_complext ); template EXPORTGPUCORE void pad<float,1>( cuNDArray<float>*, cuNDArray<float>*, float); template EXPORTGPUCORE void pad<float,2>( cuNDArray<float>*, cuNDArray<float>*, float); template EXPORTGPUCORE void pad<float,3>( cuNDArray<float>*, cuNDArray<float>*, float); template EXPORTGPUCORE void pad<float,4>( cuNDArray<float>*, cuNDArray<float>*, float); template EXPORTGPUCORE void pad<float_complext,1>( cuNDArray<float_complext>*, cuNDArray<float_complext>*, float_complext); template EXPORTGPUCORE void pad<float_complext,2>( cuNDArray<float_complext>*, cuNDArray<float_complext>*, float_complext); template EXPORTGPUCORE void pad<float_complext,3>( cuNDArray<float_complext>*, cuNDArray<float_complext>*, float_complext); template EXPORTGPUCORE void pad<float_complext,4>( cuNDArray<float_complext>*, cuNDArray<float_complext>*, float_complext); template EXPORTGPUCORE void fill_border<float,1>(uint64d1, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,2>(uint64d2, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,3>(uint64d3, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,4>(uint64d4, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,1>(float, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,2>(float, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,3>(float, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,4>(float, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float_complext,1>(uint64d1, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,2>(uint64d2, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,3>(uint64d3, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,4>(uint64d4, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void 
fill_border<float_complext,1>(float, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,2>(float, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,3>(float, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,4>(float, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > crop<double,1>( typename uint64d<1>::Type, typename uint64d<1>::Type, cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > crop<double,2>( typename uint64d<2>::Type, typename uint64d<2>::Type, cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > crop<double,3>( typename uint64d<3>::Type, typename uint64d<3>::Type, cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > crop<double,4>( typename uint64d<4>::Type, typename uint64d<4>::Type, cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > crop<double_complext,1>( typename uint64d<1>::Type, typename uint64d<1>::Type, cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > crop<double_complext,2>( typename uint64d<2>::Type, typename uint64d<2>::Type, cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > crop<double_complext,3>( typename uint64d<3>::Type, typename uint64d<3>::Type, cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > crop<double_complext,4>( typename uint64d<4>::Type, typename uint64d<4>::Type, cuNDArray<double_complext>*); template EXPORTGPUCORE void crop<double,1>( uint64d1, cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void crop<double,2>( uint64d2, cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void crop<double,3>( uint64d3, cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void crop<double,4>( uint64d4, cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void crop<complext<double>,1>( uint64d1, cuNDArray<complext<double> >*, cuNDArray< complext<double> >*); template EXPORTGPUCORE void crop<complext<double>,2>( uint64d2, cuNDArray<complext<double> >*, cuNDArray< complext<double> >*); template EXPORTGPUCORE void crop<complext<double>,3>( uint64d3, cuNDArray<complext<double> >*, cuNDArray< complext<double> >*); template EXPORTGPUCORE void crop<complext<double>,4>( uint64d4, cuNDArray<complext<double> >*, cuNDArray< complext<double> >*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > pad<double,1>( typename uint64d<1>::Type, cuNDArray<double>*, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > pad<double,2>( typename uint64d<2>::Type, cuNDArray<double>*, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > pad<double,3>( typename uint64d<3>::Type, cuNDArray<double>*, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > pad<double,4>( typename uint64d<4>::Type, cuNDArray<double>*, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > pad<double_complext,1>( typename uint64d<1>::Type, cuNDArray<double_complext>*, double_complext ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > pad<double_complext,2>( typename uint64d<2>::Type, cuNDArray<double_complext>*, double_complext ); template EXPORTGPUCORE boost::shared_ptr< 
cuNDArray<double_complext> > pad<double_complext,3>( typename uint64d<3>::Type, cuNDArray<double_complext>*, double_complext ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > pad<double_complext,4>( typename uint64d<4>::Type, cuNDArray<double_complext>*, double_complext ); template EXPORTGPUCORE void pad<double,1>( cuNDArray<double>*, cuNDArray<double>*, double); template EXPORTGPUCORE void pad<double,2>( cuNDArray<double>*, cuNDArray<double>*, double); template EXPORTGPUCORE void pad<double,3>( cuNDArray<double>*, cuNDArray<double>*, double); template EXPORTGPUCORE void pad<double,4>( cuNDArray<double>*, cuNDArray<double>*, double); template EXPORTGPUCORE void pad<double_complext,1>( cuNDArray<double_complext>*, cuNDArray<double_complext>*, double_complext); template EXPORTGPUCORE void pad<double_complext,2>( cuNDArray<double_complext>*, cuNDArray<double_complext>*, double_complext); template EXPORTGPUCORE void pad<double_complext,3>( cuNDArray<double_complext>*, cuNDArray<double_complext>*, double_complext); template EXPORTGPUCORE void pad<double_complext,4>( cuNDArray<double_complext>*, cuNDArray<double_complext>*, double_complext); template EXPORTGPUCORE void fill_border<double,1>(uint64d1, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,2>(uint64d2, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,3>(uint64d3, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,4>(uint64d4, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,1>(double, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,2>(double, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,3>(double, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,4>(double, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double_complext,1>(uint64d1, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,2>(uint64d2, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,3>(uint64d3, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,4>(uint64d4, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,1>(double, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,2>(double, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,3>(double, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,4>(double, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > upsample<float,1>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > upsample<float,2>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > upsample<float,3>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > upsample<float,4>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > upsample<float_complext,1>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > upsample<float_complext,2>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > 
upsample<float_complext,3>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > upsample<float_complext,4>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > upsample<double,1>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > upsample<double,2>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > upsample<double,3>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > upsample<double,4>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > upsample<double_complext,1>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > upsample<double_complext,2>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > upsample<double_complext,3>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > upsample<double_complext,4>(cuNDArray<double_complext>*); template EXPORTGPUCORE void upsample<float,1>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void upsample<float,2>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void upsample<float,3>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void upsample<float,4>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void upsample<float_complext,1>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void upsample<float_complext,2>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void upsample<float_complext,3>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void upsample<float_complext,4>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void upsample<double,1>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void upsample<double,2>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void upsample<double,3>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void upsample<double,4>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void upsample<double_complext,1>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void upsample<double_complext,2>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void upsample<double_complext,3>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void upsample<double_complext,4>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > downsample<float,1>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > downsample<float,2>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > downsample<float,3>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > downsample<float,4>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > downsample<float_complext,1>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > downsample<float_complext,2>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > downsample<float_complext,3>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< 
cuNDArray<float_complext> > downsample<float_complext,4>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > downsample<double,1>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > downsample<double,2>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > downsample<double,3>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > downsample<double,4>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > downsample<double_complext,1>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > downsample<double_complext,2>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > downsample<double_complext,3>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > downsample<double_complext,4>(cuNDArray<double_complext>*); template EXPORTGPUCORE void downsample<float,1>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void downsample<float,2>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void downsample<float,3>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void downsample<float,4>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void downsample<float_complext,1>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void downsample<float_complext,2>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void downsample<float_complext,3>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void downsample<float_complext,4>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void downsample<double,1>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void downsample<double,2>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void downsample<double,3>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void downsample<double,4>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void downsample<double_complext,1>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void downsample<double_complext,2>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void downsample<double_complext,3>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void downsample<double_complext,4>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); // We can probably instantiate the functions below functionsfor many more types? E.g. arrays of floatd2. // For now we just introduce what we have needed... // template EXPORTGPUCORE boost::shared_ptr< cuNDArray<floatd2> > expand<floatd2>( cuNDArray<floatd2>*, size_t); }
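The hipified cuNDArray_utils file above implements crop/pad/fill_border plus linear upsampling and downsampling by a factor of two, and closes with the explicit template instantiations that expose those operators to other translation units. As a quick orientation, here is a minimal host-side usage sketch; it is not part of either source file, the "cuNDArray.h" header name is an assumption about the usual Gadgetron gpucore setup, and the array contents are deliberately left uninitialized because only the geometry of the resampling round trip is demonstrated.

#include "cuNDArray.h"        // assumed header providing Gadgetron::cuNDArray<T>
#include "cuNDArray_utils.h"  // declares upsample<T,D> / downsample<T,D>, instantiated above
#include <boost/shared_ptr.hpp>
#include <iostream>
#include <vector>

using namespace Gadgetron;

int main()
{
  // 128x128 single-precision device array; values are uninitialized in this sketch.
  std::vector<size_t> dims(2, 128);
  cuNDArray<float> image(&dims);

  // Halve the resolution in both dimensions, then interpolate back up.
  boost::shared_ptr< cuNDArray<float> > half = downsample<float,2>(&image);
  boost::shared_ptr< cuNDArray<float> > full = upsample<float,2>(half.get());

  std::cout << "original: " << image.get_size(0) << " x " << image.get_size(1) << std::endl;
  std::cout << "half:     " << half->get_size(0) << " x " << half->get_size(1) << std::endl;
  std::cout << "restored: " << full->get_size(0) << " x " << full->get_size(1) << std::endl;
  return 0;
}

Note that the two-argument downsample/upsample overloads throw std::runtime_error when the output geometry is not a factor-of-two match of the input, so the shared_ptr-returning versions shown here are the most convenient entry points.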
5335598b9e622d76157b2a051a55edd4c716c17c.cu
#include "cuNDArray_utils.h" #include "vector_td_utilities.h" #include "cudaDeviceManager.h" #include "setup_grid.h" #include <math_functions.h> #include <cmath> namespace Gadgetron { template <class T> __global__ void cuNDArray_permute_kernel(const T* __restrict__ in, T* __restrict__ out, unsigned int ndim, const unsigned int* __restrict__ dims, const unsigned int* __restrict__ strides_out, unsigned int elements, int shift_mode) { unsigned int idx_in = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; unsigned int idx_out = 0; unsigned int idx_in_tmp = idx_in; if (idx_in < elements) { unsigned int cur_index; for (unsigned int i = 0; i < ndim; i++) { unsigned int idx_in_remainder = idx_in_tmp / dims[i]; cur_index = idx_in_tmp-(idx_in_remainder*dims[i]); //cur_index = idx_in_tmp%dims[i]; if (shift_mode < 0) { //IFFTSHIFT idx_out += ((cur_index+(dims[i]>>1))%dims[i])*strides_out[i]; } else if (shift_mode > 0) { //FFTSHIFT idx_out += ((cur_index+((dims[i]+1)>>1))%dims[i])*strides_out[i]; } else { idx_out += cur_index*strides_out[i]; } idx_in_tmp = idx_in_remainder; } out[idx_in] = in[idx_out]; } } template <class T> void cuNDArray_permute( cuNDArray<T>* in, cuNDArray<T>* out, std::vector<size_t> *order, int shift_mode) { if( out == 0x0 ){ throw cuda_error("cuNDArray_permute(internal): 0x0 output");; } cudaError_t err; T* in_ptr = in->get_data_ptr(); T* out_ptr = 0; if (out) { out_ptr = out->get_data_ptr(); } else { if (cudaMalloc((void**) &out_ptr, in->get_number_of_elements()*sizeof(T)) != cudaSuccess) { throw cuda_error("cuNDArray_permute : Error allocating CUDA memory");; } } unsigned int* dims = new unsigned int[in->get_number_of_dimensions()]; unsigned int* strides_out = new unsigned int[in->get_number_of_dimensions()]; if (!dims || !strides_out) { throw cuda_error("cuNDArray_permute: failed to allocate temporary storage for arrays");; } for (unsigned int i = 0; i < in->get_number_of_dimensions(); i++) { dims[i] = (*in->get_dimensions())[(*order)[i]]; strides_out[i] = 1; for (unsigned int j = 0; j < (*order)[i]; j++) { strides_out[i] *= (*in->get_dimensions())[j]; } } unsigned int* dims_dev = 0; unsigned int* strides_out_dev = 0; if (cudaMalloc((void**) &dims_dev, in->get_number_of_dimensions()*sizeof(unsigned int)) != cudaSuccess) { throw cuda_error("cuNDArray_permute : Error allocating CUDA dims memory");; } if (cudaMalloc((void**) &strides_out_dev, in->get_number_of_dimensions()*sizeof(unsigned int)) != cudaSuccess) { throw cuda_error("cuNDArray_permute : Error allocating CUDA strides_out memory");; } if (cudaMemcpy(dims_dev, dims, in->get_number_of_dimensions()*sizeof(unsigned int), cudaMemcpyHostToDevice) != cudaSuccess) { err = cudaGetLastError(); std::stringstream ss; ss << "cuNDArray_permute : Error uploading dimensions to device, " << cudaGetErrorString(err); throw cuda_error(ss.str());; } if (cudaMemcpy(strides_out_dev, strides_out, in->get_number_of_dimensions()*sizeof(unsigned int), cudaMemcpyHostToDevice) != cudaSuccess) { throw cuda_error("cuNDArray_permute : Error uploading strides to device");; } dim3 blockDim(512,1,1); dim3 gridDim; if( in->get_number_of_dimensions() > 2 ){ gridDim = dim3((unsigned int) std::ceil((double)in->get_size(0)*in->get_size(1)/blockDim.x), 1, 1 ); for( unsigned int d=2; d<in->get_number_of_dimensions(); d++ ) gridDim.y *= in->get_size(d); } else gridDim = dim3((unsigned int) std::ceil((double)in->get_number_of_elements()/blockDim.x), 1, 1 ); cuNDArray_permute_kernel<<< gridDim, blockDim >>>( in_ptr, out_ptr, 
in->get_number_of_dimensions(), dims_dev, strides_out_dev, in->get_number_of_elements(), shift_mode); err = cudaGetLastError(); if( err != cudaSuccess ){ std::stringstream ss; ss <<"cuNDArray_permute : Error during kernel call: " << cudaGetErrorString(err); throw cuda_error(ss.str());; } if (cudaFree(dims_dev) != cudaSuccess) { err = cudaGetLastError(); std::stringstream ss; ss << "cuNDArray_permute: failed to delete device memory (dims_dev) " << cudaGetErrorString(err); throw cuda_error(ss.str());; } if (cudaFree(strides_out_dev) != cudaSuccess) { err = cudaGetLastError(); std::stringstream ss; ss << "cuNDArray_permute: failed to delete device memory (strides_out_dev) "<< cudaGetErrorString(err); throw cuda_error(ss.str());; } delete [] dims; delete [] strides_out; } template <class T> boost::shared_ptr< cuNDArray<T> > permute( cuNDArray<T> *in, std::vector<size_t> *dim_order, int shift_mode ) { if( in == 0x0 || dim_order == 0x0 ) { throw std::runtime_error("permute(): invalid pointer provided");; } std::vector<size_t> dims; for (unsigned int i = 0; i < dim_order->size(); i++) dims.push_back(in->get_dimensions()->at(dim_order->at(i))); boost::shared_ptr< cuNDArray<T> > out( new cuNDArray<T>() ); out->create(&dims); permute( in, out.get(), dim_order, shift_mode ); return out; } template <class T> void permute( cuNDArray<T> *in, cuNDArray<T> *out, std::vector<size_t> *dim_order, int shift_mode ) { if( in == 0x0 || out == 0x0 || dim_order == 0x0 ) { throw std::runtime_error("permute(): invalid pointer provided");; } //Check ordering array if (dim_order->size() > in->get_number_of_dimensions()) { throw std::runtime_error("permute(): invalid length of dimension ordering array");; } std::vector<size_t> dim_count(in->get_number_of_dimensions(),0); for (unsigned int i = 0; i < dim_order->size(); i++) { if ((*dim_order)[i] >= in->get_number_of_dimensions()) { throw std::runtime_error("permute(): invalid dimension order array");; } dim_count[(*dim_order)[i]]++; } //Create an internal array to store the dimensions std::vector<size_t> dim_order_int; //Check that there are no duplicate dimensions for (unsigned int i = 0; i < dim_order->size(); i++) { if (dim_count[(*dim_order)[i]] != 1) { throw std::runtime_error("permute(): invalid dimension order array (duplicates)");; } dim_order_int.push_back((*dim_order)[i]); } for (unsigned int i = 0; i < dim_order_int.size(); i++) { if ((*in->get_dimensions())[dim_order_int[i]] != out->get_size(i)) { throw std::runtime_error("permute(): dimensions of output array do not match the input array");; } } //Pad dimension order array with dimension not mentioned in order array if (dim_order_int.size() < in->get_number_of_dimensions()) { for (unsigned int i = 0; i < dim_count.size(); i++) { if (dim_count[i] == 0) { dim_order_int.push_back(i); } } } cuNDArray_permute(in, out, &dim_order_int, shift_mode); } template<class T> boost::shared_ptr< cuNDArray<T> > shift_dim( cuNDArray<T> *in, int shift ) { if( in == 0x0 ) { throw std::runtime_error("shift_dim(): invalid input pointer provided");; } std::vector<size_t> order; for (int i = 0; i < in->get_number_of_dimensions(); i++) { order.push_back(static_cast<unsigned int>((i+shift)%in->get_number_of_dimensions())); } return permute(in, &order); } template<class T> void shift_dim( cuNDArray<T> *in, cuNDArray<T> *out, int shift ) { if( in == 0x0 || out == 0x0 ) { throw std::runtime_error("shift_dim(): invalid pointer provided");; } std::vector<size_t> order; for (int i = 0; i < in->get_number_of_dimensions(); i++) { 
order.push_back(static_cast<unsigned int>((i+shift)%in->get_number_of_dimensions())); } permute(in,out,&order); } // Expand // template<class T> __global__ void expand_kernel( const T * __restrict__ in, T * __restrict__ out, unsigned int number_of_elements_in, unsigned int number_of_elements_out, unsigned int new_dim_size ) { const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; if( idx < number_of_elements_out ){ out[idx] = in[idx%number_of_elements_in]; } } // Expand // template<class T> boost::shared_ptr< cuNDArray<T> > expand( cuNDArray<T> *in, size_t new_dim_size ) { unsigned int number_of_elements_out = in->get_number_of_elements()*new_dim_size; // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( number_of_elements_out, &blockDim, &gridDim ); // Find element stride std::vector<size_t> dims = *in->get_dimensions(); dims.push_back(new_dim_size); // Invoke kernel boost::shared_ptr< cuNDArray<T> > out( new cuNDArray<T>()); out->create(&dims); expand_kernel<T><<< gridDim, blockDim >>>( in->get_data_ptr(), out->get_data_ptr(), in->get_number_of_elements(), number_of_elements_out, new_dim_size ); CHECK_FOR_CUDA_ERROR(); return out; } // Crop template<class T, unsigned int D> __global__ void crop_kernel ( vector_td<unsigned int,D> offset, vector_td<unsigned int,D> matrix_size_in, vector_td<unsigned int,D> matrix_size_out, const T * __restrict__ in, T * __restrict__ out, unsigned int num_batches, unsigned int num_elements ) { const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; const unsigned int frame_offset = idx/num_elements; if( idx < num_elements*num_batches ){ const typename uintd<D>::Type co = idx_to_co<D>( idx-frame_offset*num_elements, matrix_size_out ); const typename uintd<D>::Type co_os = offset + co; const unsigned int in_idx = co_to_idx<D>(co_os, matrix_size_in)+frame_offset*prod(matrix_size_in); out[idx] = in[in_idx]; } } // Crop template<class T, unsigned int D> void crop( typename uint64d<D>::Type offset, cuNDArray<T> *in, cuNDArray<T> *out ) { if( in == 0x0 || out == 0x0 ){ throw std::runtime_error("crop: 0x0 ndarray provided");; } if( in->get_number_of_dimensions() != out->get_number_of_dimensions() ){ throw std::runtime_error("crop: image dimensions mismatch");; } if( in->get_number_of_dimensions() < D ){ std::stringstream ss; ss << "crop: number of image dimensions should be at least " << D; throw std::runtime_error(ss.str());; } typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in->get_dimensions() ); typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *out->get_dimensions() ); unsigned int number_of_batches = 1; for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ ){ number_of_batches *= in->get_size(d); } if( weak_greater(offset+matrix_size_out, matrix_size_in) ){ throw std::runtime_error( "crop: cropping size mismatch"); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim, number_of_batches ); // Invoke kernel crop_kernel<T,D><<< gridDim, blockDim >>> ( vector_td<unsigned int,D>(offset), vector_td<unsigned int,D>(matrix_size_in), vector_td<unsigned int,D>(matrix_size_out), in->get_data_ptr(), out->get_data_ptr(), number_of_batches, prod(matrix_size_out) ); CHECK_FOR_CUDA_ERROR(); } template<class T, unsigned int D> boost::shared_ptr< cuNDArray<T> > crop( typename uint64d<D>::Type offset, typename uint64d<D>::Type size, cuNDArray<T> *in ) { if( in == 0x0 ){ 
throw std::runtime_error("crop: 0x0 array provided");; } std::vector<size_t> dims = to_std_vector(size); for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ ){ dims.push_back(in->get_size(d)); } boost::shared_ptr< cuNDArray<T> > result( new cuNDArray<T>(&dims) ); crop<T,D>(offset, in, result.get()); return result; } // Expand and zero fill template<class T, unsigned int D> __global__ void pad_kernel( vector_td<unsigned int,D> matrix_size_in, vector_td<unsigned int,D> matrix_size_out, const T * __restrict__ in, T * __restrict__ out, unsigned int number_of_batches, unsigned int num_elements, T val ) { const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; const unsigned int frame_offset = idx/num_elements; if( idx < num_elements*number_of_batches ){ const typename uintd<D>::Type co_out = idx_to_co<D>( idx-frame_offset*num_elements, matrix_size_out ); const typename uintd<D>::Type offset = (matrix_size_out-matrix_size_in)>>1; T _out; bool inside = (co_out>=offset) && (co_out<(matrix_size_in+offset)); if( inside ) _out = in[co_to_idx<D>(co_out-offset, matrix_size_in)+frame_offset*prod(matrix_size_in)]; else{ _out = val; } out[idx] = _out; } } template<class T, unsigned int D> void pad( cuNDArray<T> *in, cuNDArray<T> *out, T val ) { if( in == 0x0 || out == 0x0 ){ throw std::runtime_error("pad: 0x0 ndarray provided");; } if( in->get_number_of_dimensions() != out->get_number_of_dimensions() ){ throw std::runtime_error("pad: image dimensions mismatch");; } if( in->get_number_of_dimensions() < D ){ std::stringstream ss; ss << "pad: number of image dimensions should be at least " << D; throw std::runtime_error(ss.str());; } typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in->get_dimensions() ); typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *out->get_dimensions() ); unsigned int number_of_batches = 1; for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ ){ number_of_batches *= in->get_size(d); } if( weak_greater(matrix_size_in,matrix_size_out) ){ throw std::runtime_error("pad: size mismatch, cannot expand"); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim, number_of_batches ); // Invoke kernel pad_kernel<T,D><<< gridDim, blockDim >>> ( vector_td<unsigned int,D>(matrix_size_in), vector_td<unsigned int,D>(matrix_size_out), in->get_data_ptr(), out->get_data_ptr(), number_of_batches, prod(matrix_size_out), val ); CHECK_FOR_CUDA_ERROR(); } template<class T, unsigned int D> boost::shared_ptr< cuNDArray<T> > pad( typename uint64d<D>::Type size, cuNDArray<T> *in, T val ) { if( in == 0x0 ){ throw std::runtime_error("pad: 0x0 array provided");; } std::vector<size_t> dims = to_std_vector(size); for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ ){ dims.push_back(in->get_size(d)); } boost::shared_ptr< cuNDArray<T> > result( new cuNDArray<T>(&dims) ); pad<T,D>(in, result.get(), val); return result; } template<class T, unsigned int D> __global__ void fill_border_kernel( vector_td<unsigned int,D> matrix_size_in, vector_td<unsigned int,D> matrix_size_out, T *image, unsigned int number_of_batches, unsigned int number_of_elements, T val ) { const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; if( idx < number_of_elements ){ const vector_td<unsigned int,D> co_out = idx_to_co<D>( idx, matrix_size_out ); const vector_td<unsigned int,D> offset = (matrix_size_out-matrix_size_in)>>1; if( weak_less( 
co_out, offset ) || weak_greater_equal( co_out, matrix_size_in+offset ) ){ for( unsigned int batch=0; batch<number_of_batches; batch++ ){ image[idx+batch*number_of_elements] = val; } } else ; // do nothing } } // Zero fill border (rectangular) template<class T, unsigned int D> void fill_border( typename uint64d<D>::Type matrix_size_in, cuNDArray<T> *in_out, T val ) { typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *in_out->get_dimensions() ); if( weak_greater(matrix_size_in, matrix_size_out) ){ throw std::runtime_error("fill_border: size mismatch, cannot zero fill");; } unsigned int number_of_batches = 1; for( unsigned int d=D; d<in_out->get_number_of_dimensions(); d++ ){ number_of_batches *= in_out->get_size(d); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim ); // Invoke kernel fill_border_kernel<T,D><<< gridDim, blockDim >>> ( vector_td<unsigned int,D>(matrix_size_in), vector_td<unsigned int,D>(matrix_size_out), in_out->get_data_ptr(), number_of_batches, prod(matrix_size_out), val ); CHECK_FOR_CUDA_ERROR(); } template<class T, unsigned int D> __global__ void fill_border_kernel( typename realType<T>::Type radius, vector_td<int,D> matrix_size, T *image, unsigned int number_of_batches, unsigned int number_of_elements, T val ) { const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; if( idx < number_of_elements ){ const vector_td<typename realType<T>::Type,D> co_out( (matrix_size>>1) - idx_to_co<D>( idx, matrix_size )); if( norm(co_out) > radius ){ for( unsigned int batch=0; batch<number_of_batches; batch++ ){ image[idx+batch*number_of_elements] = val; } } else ; // do nothing } } // Zero fill border (radial) template<class T, unsigned int D> void fill_border( typename realType<T>::Type radius, cuNDArray<T> *in_out, T val ) { typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *in_out->get_dimensions() ); unsigned int number_of_batches = 1; for( unsigned int d=D; d<in_out->get_number_of_dimensions(); d++ ){ number_of_batches *= in_out->get_size(d); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim ); // Invoke kernel fill_border_kernel<T,D><<< gridDim, blockDim >>> (radius, vector_td<int,D>(matrix_size_out), in_out->get_data_ptr(), number_of_batches, prod(matrix_size_out), val ); CHECK_FOR_CUDA_ERROR(); } template<class T, unsigned int D> __global__ void upsample_kernel( typename uintd<D>::Type matrix_size_in, typename uintd<D>::Type matrix_size_out, unsigned int num_batches, const T * __restrict__ image_in, T * __restrict__ image_out ) { typedef typename realType<T>::Type REAL; const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; const unsigned int num_elements_out = prod(matrix_size_out); if( idx < num_elements_out*num_batches ){ const unsigned int batch = idx/num_elements_out; const unsigned int batch_offset_in = batch*prod(matrix_size_in); const typename uintd<D>::Type co_out = idx_to_co<D>( idx-batch*num_elements_out, matrix_size_out ); const typename uintd<D>::Type co_in = co_out >> 1; const typename uintd<D>::Type ones(1); const typename uintd<D>::Type twos(2); const typename uintd<D>::Type offset = co_out%twos; const unsigned int num_cells = 1 << D; T cellsum(0); unsigned int count = 0; for( unsigned int i=0; i<num_cells; i++ ){ const typename uintd<D>::Type stride = idx_to_co<D>( i, twos ); if( offset >= stride ){ cellsum += 
image_in[batch_offset_in+co_to_idx(amin(co_in+stride, matrix_size_in-ones), matrix_size_in)]; count++; } } image_out[idx] = cellsum / REAL(count); } } // // Linear upsampling by a factor of two (on a D-dimensional grid) // Note that this operator is the transpose of the downsampling operator below by design // - based on Briggs et al, A Multigrid Tutorial 2nd edition, pp. 34-35 // template<class T, unsigned int D> boost::shared_ptr< cuNDArray<T> > upsample( cuNDArray<T>* in ) { if( in == 0x0 ) throw std::runtime_error("upsample: illegal input pointer"); std::vector<size_t> dims_out = *in->get_dimensions(); for( unsigned int i=0; i<D; i++ ) dims_out[i] <<= 1; boost::shared_ptr< cuNDArray<T> > out(new cuNDArray<T>(&dims_out)); upsample<T,D>( in, out.get() ); return out; } template<class T, unsigned int D> void upsample( cuNDArray<T> *in, cuNDArray<T> *out ) { if( in == 0x0 || out == 0x0 ) throw std::runtime_error("upsample: illegal input pointer"); typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in->get_dimensions() ); typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *out->get_dimensions() ); if( (matrix_size_in<<1) != matrix_size_out ){ throw std::runtime_error("upsample: arrays do not correspond to upsampling by a factor of two"); } unsigned int number_of_batches = 1; for( unsigned int d=D; d<out->get_number_of_dimensions(); d++ ){ number_of_batches *= out->get_size(d); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim, number_of_batches ); // Invoke kernel upsample_kernel<T,D><<< gridDim, blockDim >>> ( vector_td<unsigned int,D>(matrix_size_in), vector_td<unsigned int,D>(matrix_size_out), number_of_batches, in->get_data_ptr(), out->get_data_ptr() ); CHECK_FOR_CUDA_ERROR(); } // // Linear downsampling by a factor of two (on a D-dimensional grid) // Note that this operator is the transpose of the upsampling operator above by design // - based on Briggs et al, A Multigrid Tutorial 2nd edition, pp. 36. 
// template<class T, unsigned int D> __global__ void downsample_kernel( typename intd<D>::Type matrix_size_in, typename intd<D>::Type matrix_size_out, int num_batches, const T * __restrict__ image_in, T * __restrict__ image_out ) { typedef typename realType<T>::Type REAL; const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x; const int num_elements_out = prod(matrix_size_out); if( idx < num_elements_out*num_batches ){ const int batch = idx/num_elements_out; const int batch_offset_in = batch*prod(matrix_size_in); const typename intd<D>::Type co_out = idx_to_co<D>( idx-batch*num_elements_out, matrix_size_out ); const typename intd<D>::Type co_in = co_out << 1; T cellsum[D+1]; for( unsigned int d=0; d<D+1; d++ ){ cellsum[d] = T(0); } //const int num_cells = pow(3,D); // no pow for integers on device int num_cells = 1; for( int i=0; i<D; i++ ) num_cells *=3; const REAL denominator = pow(REAL(4),REAL(D)); for( int i=0; i<num_cells; i++ ){ const typename intd<D>::Type zeros(0); const typename intd<D>::Type ones(1); const typename intd<D>::Type threes(3); const typename intd<D>::Type stride = idx_to_co<D>(i,threes)-ones; // in the range [-1;1]^D int distance = 0; for( int d=0; d<D; d++ ){ if( abs(stride[d])>0 ) distance++; } cellsum[distance] += image_in[batch_offset_in+co_to_idx(amax(zeros, amin(matrix_size_in-ones,co_in+stride)), matrix_size_in)]; } T res = T(0); for( unsigned int d=0; d<D+1; d++ ){ res += (REAL(1<<(D-d))*cellsum[d]); } image_out[idx] = res / denominator; } } template<class T, unsigned int D> boost::shared_ptr< cuNDArray<T> > downsample( cuNDArray<T>* in ) { if( in == 0x0 ) throw std::runtime_error("downsample: illegal input pointer"); std::vector<size_t> dims_out = *in->get_dimensions(); for( unsigned int i=0; i<D; i++ ) dims_out[i] >>= 1; boost::shared_ptr< cuNDArray<T> > out(new cuNDArray<T>(&dims_out)); downsample<T,D>( in, out.get() ); return out; } template<class T, unsigned int D> void downsample( cuNDArray<T> *in, cuNDArray<T> *out ) { if( in == 0x0 || out == 0x0 ) throw std::runtime_error("downsample: illegal input pointer"); typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in->get_dimensions() ); typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *out->get_dimensions() ); if( (matrix_size_in>>1) != matrix_size_out ){ throw std::runtime_error("downsample: arrays do not correspond to downsampling by a factor of two"); } unsigned int number_of_batches = 1; for( unsigned int d=D; d<out->get_number_of_dimensions(); d++ ){ number_of_batches *= out->get_size(d); } // Setup block/grid dimensions dim3 blockDim; dim3 gridDim; setup_grid( prod(matrix_size_out), &blockDim, &gridDim, number_of_batches ); // Invoke kernel downsample_kernel<T,D><<< gridDim, blockDim >>> ( vector_td<int,D>(matrix_size_in), vector_td<int,D>(matrix_size_out), (int)number_of_batches, in->get_data_ptr(), out->get_data_ptr() ); CHECK_FOR_CUDA_ERROR(); } // // Instantiation // template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > permute( cuNDArray<float>*, std::vector<size_t>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > permute( cuNDArray<double>*, std::vector<size_t>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > permute( cuNDArray<float_complext>*, std::vector<size_t>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > permute( cuNDArray<double_complext>*, std::vector<size_t>*, int ); template EXPORTGPUCORE void permute( cuNDArray<float>*, 
cuNDArray<float>*, std::vector<size_t>*, int); template EXPORTGPUCORE void permute( cuNDArray<double>*, cuNDArray<double>*, std::vector<size_t>*, int); template EXPORTGPUCORE void permute( cuNDArray<float_complext>*, cuNDArray<float_complext>*, std::vector<size_t>*, int); template EXPORTGPUCORE void permute( cuNDArray<double_complext>*, cuNDArray<double_complext>*, std::vector<size_t>*, int); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > shift_dim( cuNDArray<float>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > shift_dim( cuNDArray<double>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > shift_dim( cuNDArray<float_complext>*, int ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > shift_dim( cuNDArray<double_complext>*, int ); template EXPORTGPUCORE void shift_dim( cuNDArray<float>*, cuNDArray<float>*, int shift ); template EXPORTGPUCORE void shift_dim( cuNDArray<double>*, cuNDArray<double>*, int shift ); template EXPORTGPUCORE void shift_dim( cuNDArray<float_complext>*, cuNDArray<float_complext>*, int shift ); template EXPORTGPUCORE void shift_dim( cuNDArray<double_complext>*, cuNDArray<double_complext>*, int shift ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > expand<float>( cuNDArray<float>*, size_t); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > expand<double>( cuNDArray<double>*, size_t); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > expand<float_complext>( cuNDArray<float_complext>*, size_t); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > expand<double_complext>( cuNDArray<double_complext>*, size_t); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > crop<float,1>( typename uint64d<1>::Type, typename uint64d<1>::Type, cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > crop<float,2>( typename uint64d<2>::Type, typename uint64d<2>::Type, cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > crop<float,3>( typename uint64d<3>::Type, typename uint64d<3>::Type, cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > crop<float,4>( typename uint64d<4>::Type, typename uint64d<4>::Type, cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > crop<float_complext,1>( typename uint64d<1>::Type, typename uint64d<1>::Type, cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > crop<float_complext,2>( typename uint64d<2>::Type, typename uint64d<2>::Type, cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > crop<float_complext,3>( typename uint64d<3>::Type, typename uint64d<3>::Type, cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > crop<float_complext,4>( typename uint64d<4>::Type, typename uint64d<4>::Type, cuNDArray<float_complext>*); template EXPORTGPUCORE void crop<float,1>( uint64d1, cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void crop<float,2>( uint64d2, cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void crop<float,3>( uint64d3, cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void crop<float,4>( uint64d4, cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void crop<complext<float>,1>( uint64d1, cuNDArray<complext<float> >*, cuNDArray< complext<float> >*); template EXPORTGPUCORE void 
crop<complext<float>,2>( uint64d2, cuNDArray<complext<float> >*, cuNDArray< complext<float> >*); template EXPORTGPUCORE void crop<complext<float>,3>( uint64d3, cuNDArray<complext<float> >*, cuNDArray< complext<float> >*); template EXPORTGPUCORE void crop<complext<float>,4>( uint64d4, cuNDArray<complext<float> >*, cuNDArray< complext<float> >*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > pad<float,1>( typename uint64d<1>::Type, cuNDArray<float>*, float ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > pad<float,2>( typename uint64d<2>::Type, cuNDArray<float>*, float ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > pad<float,3>( typename uint64d<3>::Type, cuNDArray<float>*, float ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > pad<float,4>( typename uint64d<4>::Type, cuNDArray<float>*, float ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > pad<float_complext,1>( typename uint64d<1>::Type, cuNDArray<float_complext>*, float_complext ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > pad<float_complext,2>( typename uint64d<2>::Type, cuNDArray<float_complext>*, float_complext ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > pad<float_complext,3>( typename uint64d<3>::Type, cuNDArray<float_complext>*, float_complext ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > pad<float_complext,4>( typename uint64d<4>::Type, cuNDArray<float_complext>*, float_complext ); template EXPORTGPUCORE void pad<float,1>( cuNDArray<float>*, cuNDArray<float>*, float); template EXPORTGPUCORE void pad<float,2>( cuNDArray<float>*, cuNDArray<float>*, float); template EXPORTGPUCORE void pad<float,3>( cuNDArray<float>*, cuNDArray<float>*, float); template EXPORTGPUCORE void pad<float,4>( cuNDArray<float>*, cuNDArray<float>*, float); template EXPORTGPUCORE void pad<float_complext,1>( cuNDArray<float_complext>*, cuNDArray<float_complext>*, float_complext); template EXPORTGPUCORE void pad<float_complext,2>( cuNDArray<float_complext>*, cuNDArray<float_complext>*, float_complext); template EXPORTGPUCORE void pad<float_complext,3>( cuNDArray<float_complext>*, cuNDArray<float_complext>*, float_complext); template EXPORTGPUCORE void pad<float_complext,4>( cuNDArray<float_complext>*, cuNDArray<float_complext>*, float_complext); template EXPORTGPUCORE void fill_border<float,1>(uint64d1, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,2>(uint64d2, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,3>(uint64d3, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,4>(uint64d4, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,1>(float, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,2>(float, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,3>(float, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float,4>(float, cuNDArray<float>*,float); template EXPORTGPUCORE void fill_border<float_complext,1>(uint64d1, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,2>(uint64d2, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,3>(uint64d3, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,4>(uint64d4, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void 
fill_border<float_complext,1>(float, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,2>(float, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,3>(float, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE void fill_border<float_complext,4>(float, cuNDArray<float_complext>*,float_complext); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > crop<double,1>( typename uint64d<1>::Type, typename uint64d<1>::Type, cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > crop<double,2>( typename uint64d<2>::Type, typename uint64d<2>::Type, cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > crop<double,3>( typename uint64d<3>::Type, typename uint64d<3>::Type, cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > crop<double,4>( typename uint64d<4>::Type, typename uint64d<4>::Type, cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > crop<double_complext,1>( typename uint64d<1>::Type, typename uint64d<1>::Type, cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > crop<double_complext,2>( typename uint64d<2>::Type, typename uint64d<2>::Type, cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > crop<double_complext,3>( typename uint64d<3>::Type, typename uint64d<3>::Type, cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > crop<double_complext,4>( typename uint64d<4>::Type, typename uint64d<4>::Type, cuNDArray<double_complext>*); template EXPORTGPUCORE void crop<double,1>( uint64d1, cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void crop<double,2>( uint64d2, cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void crop<double,3>( uint64d3, cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void crop<double,4>( uint64d4, cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void crop<complext<double>,1>( uint64d1, cuNDArray<complext<double> >*, cuNDArray< complext<double> >*); template EXPORTGPUCORE void crop<complext<double>,2>( uint64d2, cuNDArray<complext<double> >*, cuNDArray< complext<double> >*); template EXPORTGPUCORE void crop<complext<double>,3>( uint64d3, cuNDArray<complext<double> >*, cuNDArray< complext<double> >*); template EXPORTGPUCORE void crop<complext<double>,4>( uint64d4, cuNDArray<complext<double> >*, cuNDArray< complext<double> >*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > pad<double,1>( typename uint64d<1>::Type, cuNDArray<double>*, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > pad<double,2>( typename uint64d<2>::Type, cuNDArray<double>*, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > pad<double,3>( typename uint64d<3>::Type, cuNDArray<double>*, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > pad<double,4>( typename uint64d<4>::Type, cuNDArray<double>*, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > pad<double_complext,1>( typename uint64d<1>::Type, cuNDArray<double_complext>*, double_complext ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > pad<double_complext,2>( typename uint64d<2>::Type, cuNDArray<double_complext>*, double_complext ); template EXPORTGPUCORE boost::shared_ptr< 
cuNDArray<double_complext> > pad<double_complext,3>( typename uint64d<3>::Type, cuNDArray<double_complext>*, double_complext ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > pad<double_complext,4>( typename uint64d<4>::Type, cuNDArray<double_complext>*, double_complext ); template EXPORTGPUCORE void pad<double,1>( cuNDArray<double>*, cuNDArray<double>*, double); template EXPORTGPUCORE void pad<double,2>( cuNDArray<double>*, cuNDArray<double>*, double); template EXPORTGPUCORE void pad<double,3>( cuNDArray<double>*, cuNDArray<double>*, double); template EXPORTGPUCORE void pad<double,4>( cuNDArray<double>*, cuNDArray<double>*, double); template EXPORTGPUCORE void pad<double_complext,1>( cuNDArray<double_complext>*, cuNDArray<double_complext>*, double_complext); template EXPORTGPUCORE void pad<double_complext,2>( cuNDArray<double_complext>*, cuNDArray<double_complext>*, double_complext); template EXPORTGPUCORE void pad<double_complext,3>( cuNDArray<double_complext>*, cuNDArray<double_complext>*, double_complext); template EXPORTGPUCORE void pad<double_complext,4>( cuNDArray<double_complext>*, cuNDArray<double_complext>*, double_complext); template EXPORTGPUCORE void fill_border<double,1>(uint64d1, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,2>(uint64d2, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,3>(uint64d3, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,4>(uint64d4, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,1>(double, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,2>(double, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,3>(double, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double,4>(double, cuNDArray<double>*,double); template EXPORTGPUCORE void fill_border<double_complext,1>(uint64d1, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,2>(uint64d2, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,3>(uint64d3, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,4>(uint64d4, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,1>(double, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,2>(double, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,3>(double, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE void fill_border<double_complext,4>(double, cuNDArray<double_complext>*,double_complext); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > upsample<float,1>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > upsample<float,2>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > upsample<float,3>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > upsample<float,4>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > upsample<float_complext,1>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > upsample<float_complext,2>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > 
upsample<float_complext,3>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > upsample<float_complext,4>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > upsample<double,1>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > upsample<double,2>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > upsample<double,3>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > upsample<double,4>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > upsample<double_complext,1>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > upsample<double_complext,2>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > upsample<double_complext,3>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > upsample<double_complext,4>(cuNDArray<double_complext>*); template EXPORTGPUCORE void upsample<float,1>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void upsample<float,2>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void upsample<float,3>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void upsample<float,4>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void upsample<float_complext,1>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void upsample<float_complext,2>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void upsample<float_complext,3>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void upsample<float_complext,4>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void upsample<double,1>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void upsample<double,2>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void upsample<double,3>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void upsample<double,4>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void upsample<double_complext,1>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void upsample<double_complext,2>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void upsample<double_complext,3>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void upsample<double_complext,4>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > downsample<float,1>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > downsample<float,2>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > downsample<float,3>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > downsample<float,4>(cuNDArray<float>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > downsample<float_complext,1>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > downsample<float_complext,2>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > downsample<float_complext,3>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< 
cuNDArray<float_complext> > downsample<float_complext,4>(cuNDArray<float_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > downsample<double,1>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > downsample<double,2>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > downsample<double,3>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > downsample<double,4>(cuNDArray<double>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > downsample<double_complext,1>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > downsample<double_complext,2>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > downsample<double_complext,3>(cuNDArray<double_complext>*); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > downsample<double_complext,4>(cuNDArray<double_complext>*); template EXPORTGPUCORE void downsample<float,1>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void downsample<float,2>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void downsample<float,3>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void downsample<float,4>(cuNDArray<float>*, cuNDArray<float>*); template EXPORTGPUCORE void downsample<float_complext,1>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void downsample<float_complext,2>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void downsample<float_complext,3>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void downsample<float_complext,4>(cuNDArray<float_complext>*, cuNDArray<float_complext>*); template EXPORTGPUCORE void downsample<double,1>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void downsample<double,2>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void downsample<double,3>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void downsample<double,4>(cuNDArray<double>*, cuNDArray<double>*); template EXPORTGPUCORE void downsample<double_complext,1>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void downsample<double_complext,2>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void downsample<double_complext,3>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); template EXPORTGPUCORE void downsample<double_complext,4>(cuNDArray<double_complext>*, cuNDArray<double_complext>*); // We can probably instantiate the functions below functionsfor many more types? E.g. arrays of floatd2. // For now we just introduce what we have needed... // template EXPORTGPUCORE boost::shared_ptr< cuNDArray<floatd2> > expand<floatd2>( cuNDArray<floatd2>*, size_t); }
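// Editorial sketch (not one of the dataset rows above): the long list of
// "template EXPORTGPUCORE ..." lines is the standard explicit-instantiation pattern for
// templated CUDA code: the kernels and their templated host wrappers are defined in a .cu
// file, and every (type, dimension) combination that callers may link against is
// instantiated there explicitly. A minimal illustration of the same pattern, with
// hypothetical names (fill_kernel/fill are not part of the library above):
#include <cstddef>
#include <cuda_runtime.h>

template <typename T>
__global__ void fill_kernel(T *data, size_t n, T value)
{
  size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
  if (i < n) data[i] = value;   // one thread per element
}

template <typename T>
void fill(T *d_data, size_t n, T value)   // host wrapper declared in a header elsewhere
{
  fill_kernel<T><<<(unsigned)((n + 255) / 256), 256>>>(d_data, n, value);
}

// Explicit instantiations: only these combinations are emitted and exported, mirroring
// the float/double/complext-by-dimension lists in the file above.
template void fill<float>(float *, size_t, float);
template void fill<double>(double *, size_t, double);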
e5ffad6347217c7f718ccfde9e64c2bc9462f5ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //naive matrix multiply //Global memory only //use 1-D A, B, C on both GPU and CPU #include <stdlib.h> #include <unistd.h> #include <stdio.h> #include <string.h> #include <omp.h> #include <iostream> #include <complex.h> #include <math.h> #define N 10//max 2500 (for time)// try N=10, 100, 1000, 2000 using namespace std; void warmUpGPU(); void compareMatrices(double * C_GPU, double * C_CPU); __global__ void matrixmulti(double *A, double *B, double *C); int main(int argc, char *argv[]) { warmUpGPU(); //change OpenMP settings: omp_set_num_threads(1); int i,j; //seed random number generator srand(time(NULL)); double * A; double * B; double * C; double * C_CPU; A=(double *)malloc(sizeof(double)*N*N); B=(double *)malloc(sizeof(double)*N*N); C=(double *)calloc(N*N,sizeof(double)); C_CPU=(double *)calloc(N*N,sizeof(double)); //init matrices for (i=0; i<N*N; i++){ A[i]=i; B[i]=i; } printf("\nMemory requested for 3x NxN matrices (GiB) %f", (3.0*N*N*sizeof(double)/(1024.0*1024.0*1024.0))); /////////////////////////// //CPU version: /////////////////////////// double tstartcpu=omp_get_wtime(); int ROW=0; int COL=0; for (ROW=0; ROW<N; ROW++) for (COL=0; COL<N; COL++) for (int k=0; k<N; k++) { C_CPU[(ROW*N)+COL]+=A[ROW*N+k]*B[COL+(k*N)]; } double tendcpu=omp_get_wtime(); printf("\nTime CPU: %f",tendcpu - tstartcpu); //print matrix if N is less than 10x10 int cnt=0; if (N<=10) { printf("\n CPU matrix is: \n"); for (i=0; i<N; i++){ for (j=0; j<N; j++){ printf("%.2f, ",C_CPU[cnt]); cnt++; } printf("\n"); } } ///////////////////////////// //GPU //////////////////////////// double tstart=omp_get_wtime(); hipError_t errCode=hipSuccess; if(errCode != hipSuccess) { cout << "\nLast error: " << errCode << endl; } double * dev_A; double * dev_B; double * dev_C; unsigned int * debug; debug=(unsigned int *)malloc(sizeof(unsigned int)); *debug=0; //allocate on the device: A, B, C errCode=hipMalloc((double**)&dev_A, sizeof(double)*N*N); if(errCode != hipSuccess) { cout << "\nError: A error with code " << errCode << endl; } errCode=hipMalloc((double**)&dev_B, sizeof(double)*N*N); if(errCode != hipSuccess) { cout << "\nError: B error with code " << errCode << endl; } errCode=hipMalloc((double**)&dev_C, sizeof(double)*N*N); if(errCode != hipSuccess) { cout << "\nError: C error with code " << errCode << endl; } //copy A to device errCode=hipMemcpy( dev_A, A, sizeof(double)*N*N, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: A memcpy error with code " << errCode << endl; } //copy B to device errCode=hipMemcpy( dev_B, B, sizeof(double)*N*N, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: B memcpy error with code " << errCode << endl; } //copy C to device (initialized to 0) errCode=hipMemcpy( dev_C, C, sizeof(double)*N*N, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: C memcpy error with code " << errCode << endl; } //setup blocks const unsigned int totalBlocks=ceil(N*N*1.0/1024.0); printf("\ntotal blocks: %d",totalBlocks); //execute kernel hipLaunchKernelGGL(( matrixmulti), dim3(totalBlocks),dim3(1024), 0, 0, dev_A, dev_B, dev_C); if(errCode != hipSuccess){ cout<<"Error afrer kernel launch "<<errCode<<endl; } // copy C from the GPU errCode=hipMemcpy( C, dev_C, sizeof(double)*N*N, hipMemcpyDeviceToHost); if(errCode != hipSuccess) { cout << "\nError: getting result form GPU error with code " << errCode << endl; } double tend=omp_get_wtime(); //print matrix if N is less than 
10 // int cnt=0; cnt=0; if (N<=10) { printf("\nGPU Matrix: \n"); for (i=0; i<N; i++){ for (j=0; j<N; j++) { printf("%.2f, ",C[cnt]); cnt++; } printf("\n"); } } printf("\nTotal time GPU (s): %f",tend-tstart); compareMatrices(C, C_CPU); printf("\n"); return 0; } void compareMatrices(double * C_GPU, double * C_CPU) { double delta=0; for (int i=0; i<N*N; i++) { delta+=fabs(C_CPU[i]-C_GPU[i]); } printf("\nDelta between matrices: %f",delta); } //matrix multiply //each thread computes a single element of C using a row of A and column of B __global__ void matrixmulti(double *A, double *B, double *C) { unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x); int ROW = tid/N; //row int COL = tid%N; //col if ((ROW < N) && (COL < N)){ double tmp_sum = 0; for (unsigned int k = 0; k < N; k++) { double a = A[ROW * N + k]; double b = B[k * N + COL]; tmp_sum += a * b; } C[ROW * N + COL] = tmp_sum; } return; } __global__ void warmup(unsigned int * tmp) { if (threadIdx.x==0) *tmp=555; return; } void warmUpGPU(){ printf("\nWarming up GPU for time trialing...\n"); unsigned int * dev_tmp; unsigned int * tmp; tmp=(unsigned int*)malloc(sizeof(unsigned int)); *tmp=0; hipError_t errCode=hipSuccess; errCode=hipMalloc((unsigned int**)&dev_tmp, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: dev_tmp error with code " << errCode << endl; } hipLaunchKernelGGL(( warmup), dim3(1),dim3(256), 0, 0, dev_tmp); //copy data from device to host errCode=hipMemcpy( tmp, dev_tmp, sizeof(unsigned int), hipMemcpyDeviceToHost); if(errCode != hipSuccess) { cout << "\nError: getting tmp result form GPU error with code " << errCode << endl; } printf("\ntmp (changed to 555 on GPU): %d",*tmp); hipFree(dev_tmp); return; }
e5ffad6347217c7f718ccfde9e64c2bc9462f5ab.cu
//naive matrix multiply //Global memory only //use 1-D A, B, C on both GPU and CPU #include <stdlib.h> #include <unistd.h> #include <stdio.h> #include <string.h> #include <omp.h> #include <iostream> #include <complex.h> #include <math.h> #define N 10//max 2500 (for time)// try N=10, 100, 1000, 2000 using namespace std; void warmUpGPU(); void compareMatrices(double * C_GPU, double * C_CPU); __global__ void matrixmulti(double *A, double *B, double *C); int main(int argc, char *argv[]) { warmUpGPU(); //change OpenMP settings: omp_set_num_threads(1); int i,j; //seed random number generator srand(time(NULL)); double * A; double * B; double * C; double * C_CPU; A=(double *)malloc(sizeof(double)*N*N); B=(double *)malloc(sizeof(double)*N*N); C=(double *)calloc(N*N,sizeof(double)); C_CPU=(double *)calloc(N*N,sizeof(double)); //init matrices for (i=0; i<N*N; i++){ A[i]=i; B[i]=i; } printf("\nMemory requested for 3x NxN matrices (GiB) %f", (3.0*N*N*sizeof(double)/(1024.0*1024.0*1024.0))); /////////////////////////// //CPU version: /////////////////////////// double tstartcpu=omp_get_wtime(); int ROW=0; int COL=0; for (ROW=0; ROW<N; ROW++) for (COL=0; COL<N; COL++) for (int k=0; k<N; k++) { C_CPU[(ROW*N)+COL]+=A[ROW*N+k]*B[COL+(k*N)]; } double tendcpu=omp_get_wtime(); printf("\nTime CPU: %f",tendcpu - tstartcpu); //print matrix if N is less than 10x10 int cnt=0; if (N<=10) { printf("\n CPU matrix is: \n"); for (i=0; i<N; i++){ for (j=0; j<N; j++){ printf("%.2f, ",C_CPU[cnt]); cnt++; } printf("\n"); } } ///////////////////////////// //GPU //////////////////////////// double tstart=omp_get_wtime(); cudaError_t errCode=cudaSuccess; if(errCode != cudaSuccess) { cout << "\nLast error: " << errCode << endl; } double * dev_A; double * dev_B; double * dev_C; unsigned int * debug; debug=(unsigned int *)malloc(sizeof(unsigned int)); *debug=0; //allocate on the device: A, B, C errCode=cudaMalloc((double**)&dev_A, sizeof(double)*N*N); if(errCode != cudaSuccess) { cout << "\nError: A error with code " << errCode << endl; } errCode=cudaMalloc((double**)&dev_B, sizeof(double)*N*N); if(errCode != cudaSuccess) { cout << "\nError: B error with code " << errCode << endl; } errCode=cudaMalloc((double**)&dev_C, sizeof(double)*N*N); if(errCode != cudaSuccess) { cout << "\nError: C error with code " << errCode << endl; } //copy A to device errCode=cudaMemcpy( dev_A, A, sizeof(double)*N*N, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: A memcpy error with code " << errCode << endl; } //copy B to device errCode=cudaMemcpy( dev_B, B, sizeof(double)*N*N, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: B memcpy error with code " << errCode << endl; } //copy C to device (initialized to 0) errCode=cudaMemcpy( dev_C, C, sizeof(double)*N*N, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: C memcpy error with code " << errCode << endl; } //setup blocks const unsigned int totalBlocks=ceil(N*N*1.0/1024.0); printf("\ntotal blocks: %d",totalBlocks); //execute kernel matrixmulti<<<totalBlocks,1024>>>(dev_A, dev_B, dev_C); if(errCode != cudaSuccess){ cout<<"Error afrer kernel launch "<<errCode<<endl; } // copy C from the GPU errCode=cudaMemcpy( C, dev_C, sizeof(double)*N*N, cudaMemcpyDeviceToHost); if(errCode != cudaSuccess) { cout << "\nError: getting result form GPU error with code " << errCode << endl; } double tend=omp_get_wtime(); //print matrix if N is less than 10 // int cnt=0; cnt=0; if (N<=10) { printf("\nGPU Matrix: \n"); for (i=0; i<N; i++){ for (j=0; j<N; 
j++) { printf("%.2f, ",C[cnt]); cnt++; } printf("\n"); } } printf("\nTotal time GPU (s): %f",tend-tstart); compareMatrices(C, C_CPU); printf("\n"); return 0; } void compareMatrices(double * C_GPU, double * C_CPU) { double delta=0; for (int i=0; i<N*N; i++) { delta+=fabs(C_CPU[i]-C_GPU[i]); } printf("\nDelta between matrices: %f",delta); } //matrix multiply //each thread computes a single element of C using a row of A and column of B __global__ void matrixmulti(double *A, double *B, double *C) { unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x); int ROW = tid/N; //row int COL = tid%N; //col if ((ROW < N) && (COL < N)){ double tmp_sum = 0; for (unsigned int k = 0; k < N; k++) { double a = A[ROW * N + k]; double b = B[k * N + COL]; tmp_sum += a * b; } C[ROW * N + COL] = tmp_sum; } return; } __global__ void warmup(unsigned int * tmp) { if (threadIdx.x==0) *tmp=555; return; } void warmUpGPU(){ printf("\nWarming up GPU for time trialing...\n"); unsigned int * dev_tmp; unsigned int * tmp; tmp=(unsigned int*)malloc(sizeof(unsigned int)); *tmp=0; cudaError_t errCode=cudaSuccess; errCode=cudaMalloc((unsigned int**)&dev_tmp, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: dev_tmp error with code " << errCode << endl; } warmup<<<1,256>>>(dev_tmp); //copy data from device to host errCode=cudaMemcpy( tmp, dev_tmp, sizeof(unsigned int), cudaMemcpyDeviceToHost); if(errCode != cudaSuccess) { cout << "\nError: getting tmp result form GPU error with code " << errCode << endl; } printf("\ntmp (changed to 555 on GPU): %d",*tmp); cudaFree(dev_tmp); return; }
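// Editorial sketch (not part of the matrix-multiply pair above): in that example, errCode
// is never refreshed after the kernel launch, so the "after kernel launch" check can only
// see the stale result of the preceding cudaMemcpy. A minimal, self-contained post-launch
// checking helper, assuming a standard CUDA toolkit; the helper name is illustrative only.
#include <cstdio>
#include <cuda_runtime.h>

// Report launch-time and execution-time errors for the most recent kernel launch.
static inline bool checkLastKernel(const char *label)
{
  cudaError_t launchErr = cudaGetLastError();      // error from the launch itself (bad config, etc.)
  if (launchErr != cudaSuccess) {
    std::fprintf(stderr, "%s: launch error: %s\n", label, cudaGetErrorString(launchErr));
    return false;
  }
  cudaError_t syncErr = cudaDeviceSynchronize();   // errors raised while the kernel executed
  if (syncErr != cudaSuccess) {
    std::fprintf(stderr, "%s: execution error: %s\n", label, cudaGetErrorString(syncErr));
    return false;
  }
  return true;
}
// Usage in the example above would be: matrixmulti<<<totalBlocks,1024>>>(dev_A, dev_B, dev_C);
// followed by checkLastKernel("matrixmulti").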
7d03d8e4e482e9adbb3603e1bbcf86f3e1604f97.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
    int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
    int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t,
    cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
    ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
    cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
        cutlass::convolution::ConvType::kConvolution>,
    2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
    const typename Convolution::ElementSrc* d_src,
    const typename Convolution::ElementFilter* d_filter,
    const typename Convolution::ElementBias* d_bias,
    const typename Convolution::ElementDst* d_z,
    typename Convolution::ElementDst* d_dst, int* workspace,
    typename Convolution::ConvolutionParameter const& conv_param,
    typename Convolution::EpilogueOutputOp::Params const& epilogue,
    hipStream_t stream);
#pragma GCC diagnostic pop
#endif
7d03d8e4e482e9adbb3603e1bbcf86f3e1604f97.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
    int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
    int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t,
    cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
    ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
    cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
        cutlass::convolution::ConvType::kConvolution>,
    2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
    const typename Convolution::ElementSrc* d_src,
    const typename Convolution::ElementFilter* d_filter,
    const typename Convolution::ElementBias* d_bias,
    const typename Convolution::ElementDst* d_z,
    typename Convolution::ElementDst* d_dst, int* workspace,
    typename Convolution::ConvolutionParameter const& conv_param,
    typename Convolution::EpilogueOutputOp::Params const& epilogue,
    cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
c0cf782e1a5141a9c89344fe5fb6096e28f2b91e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author AbdelRauf // #include <helpers/ConstantTadHelper.h> #include <helpers/LoopsCoordsHelper.h> #include <helpers/PointersManager.h> #include <ops/declarable/helpers/adjust_hue.h> #include <ops/declarable/helpers/imagesHelpers.h> #include <ops/declarable/helpers/transforms.h> #include <system/op_boilerplate.h> #include "execution/cuda/LaunchDims.h" namespace sd { namespace ops { namespace helpers { template <typename X> SD_HOST_DEVICE static uint8_t pack(const X* buff, const X& threshold) { uint8_t res; res = (buff[0] > threshold) << 7; res = res | ((buff[1] > threshold) << 6); res = res | ((buff[2] > threshold) << 5); res = res | ((buff[3] > threshold) << 4); res = res | ((buff[4] > threshold) << 3); res = res | ((buff[5] > threshold) << 2); res = res | ((buff[6] > threshold) << 1); res = res | (buff[7] > threshold); return res; } template <> SD_HOST_DEVICE uint8_t pack<bool>(const bool* buff, const bool& threshold) { // ignore threshold uint8_t res; res = buff[0] << 7; res = res | (buff[1] << 6); res = res | (buff[2] << 5); res = res | (buff[3] << 4); res = res | (buff[4] << 3); res = res | (buff[5] << 2); res = res | (buff[6] << 1); res = res | buff[7]; return res; } template <typename X> SD_HOST_DEVICE static uint8_t pack(const X* buff, int stride, const X& threshold) { uint8_t res; res = (buff[0] > threshold) << 7; res = res | ((buff[1 * stride] > threshold) << 6); res = res | ((buff[2 * stride] > threshold) << 5); res = res | ((buff[3 * stride] > threshold) << 4); res = res | ((buff[4 * stride] > threshold) << 3); res = res | ((buff[5 * stride] > threshold) << 2); res = res | ((buff[6 * stride] > threshold) << 1); res = res | (buff[7 * stride] > threshold); return res; } template <> SD_HOST_DEVICE uint8_t pack<bool>(const bool* buff, int stride, const bool& threshold) { // ignore threshold uint8_t res; res = buff[0] << 7; res = res | (buff[1 * stride] << 6); res = res | (buff[2 * stride] << 5); res = res | (buff[3 * stride] << 4); res = res | (buff[4 * stride] << 3); res = res | (buff[5 * stride] << 2); res = res | (buff[6 * stride] << 1); res = res | buff[7 * stride]; return res; } /////////////////////////////////////////////////////////////////// template <typename T> static void SD_KERNEL cmpBitpack(const void* vx, void* vz, int rank, int len, const sd::LongType* xStridesExtended, const sd::LongType* outPutShapeInfo, T threshold) { const T* x = reinterpret_cast<const T*>(vx); uint8_t* z = reinterpret_cast<uint8_t*>(vz); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto shapes = shape::shapeOf(outPutShapeInfo); auto zStrides = 
shape::stride(outPutShapeInfo); sd::LongType coords[SD_MAX_RANK] = {}; sd::LongType* ptr_coords = (sd::LongType*)&coords; // its extended as {rank+1} so xStridesExtended[rank] is valid auto inLastStride = xStridesExtended[rank]; for (auto k = tid; k < len; k += gridDim.x * blockDim.x) { sd::index2coords_C(k, rank, shapes, ptr_coords); auto offset = sd::offset_from_coords(xStridesExtended, zStrides, ptr_coords, rank); auto buffPart = &(x[offset.first]); auto outBuffPart = &(z[offset.second]); *outBuffPart = pack<T>(buffPart, inLastStride, threshold); } } template <typename T> static void SD_KERNEL cmpBitpackEws(const void* vx, void* vz, int len, const sd::LongType xStride, const sd::LongType yStride, T threshold) { const T* x = reinterpret_cast<const T*>(vx); uint8_t* z = reinterpret_cast<uint8_t*>(vz); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; if (xStride == 1) { for (auto k = tid; k < len; k += gridDim.x * blockDim.x) { auto buffPart = &(x[k * 8]); auto outBuffPart = &(z[k * yStride]); *outBuffPart = pack<T>(buffPart, threshold); } } else { for (auto k = tid; k < len; k += gridDim.x * blockDim.x) { auto buffPart = &(x[k * 8 * xStride]); auto outBuffPart = &(z[k * yStride]); *outBuffPart = pack<T>(buffPart, xStride, threshold); } } } /////////////////////////////////////////////////////////////////// template <typename T> static SD_HOST void cmpBitpackCudaLauncher(sd::graph::Context& block, const NDArray& input, const NDArray& thresholdScalar, NDArray& output) { T threshold = thresholdScalar.e<T>(0); auto inStrides = input.stridesOf(); auto rank = output.rankOf(); // threadblock size // grid size auto stream = block.launchContext()->getCudaStream(); dim3 compareAndBitpackDims = getCompareAndBitpackDims(output.lengthOf()); PointersManager manager(block.launchContext(), "compare_and_bitpack"); NDArray::prepareSpecialUse({&output}, {&input}); if (input.ews() > 0 && output.ews() > 0 && input.ordering() == 'c' && output.ordering() == 'c') { hipLaunchKernelGGL(( cmpBitpackEws<T>), dim3(compareAndBitpackDims.y), dim3(compareAndBitpackDims.x),compareAndBitpackDims.z, 0, input.specialBuffer(), output.specialBuffer(), output.lengthOf(), inStrides[rank - 1], output.stridesOf()[rank - 1], threshold); } else { // if output shape is {n1, n2, n3} then input shape is { n1. n2, n3 * 8} // therefore we can split input shape {n1, n2, n3 , 8} and correct its stride // as we do not need last shape info. lets just extend and correct its stride sd::LongType extendedStrides[SD_MAX_RANK]; for (int i = 0; i < rank; i++) { extendedStrides[i] = inStrides[i]; } // lets correct new stride extendedStrides[rank - 1] = 8 * inStrides[rank - 1]; extendedStrides[rank] = inStrides[rank - 1]; auto strideSize = (rank + 1) * sizeof(sd::LongType); sd::LongType* extendedStridesDevPtr = reinterpret_cast<sd::LongType*>(manager.replicatePointer(extendedStrides, strideSize)); hipLaunchKernelGGL(( cmpBitpack<T>), dim3(compareAndBitpackDims.y), dim3(compareAndBitpackDims.x),compareAndBitpackDims.z, 0, input.specialBuffer(), output.specialBuffer(), rank, output.lengthOf(), extendedStridesDevPtr, output.specialShapeInfo(), threshold); } NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } void compareAndBitpack(sd::graph::Context& block, const NDArray& input, const NDArray& threshold, NDArray& output) { BUILD_SINGLE_SELECTOR(input.dataType(), cmpBitpackCudaLauncher, (block, input, threshold, output), SD_COMMON_TYPES); } } // namespace helpers } // namespace ops } // namespace sd
c0cf782e1a5141a9c89344fe5fb6096e28f2b91e.cu
/* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author AbdelRauf // #include <helpers/ConstantTadHelper.h> #include <helpers/LoopsCoordsHelper.h> #include <helpers/PointersManager.h> #include <ops/declarable/helpers/adjust_hue.h> #include <ops/declarable/helpers/imagesHelpers.h> #include <ops/declarable/helpers/transforms.h> #include <system/op_boilerplate.h> #include "execution/cuda/LaunchDims.h" namespace sd { namespace ops { namespace helpers { template <typename X> SD_HOST_DEVICE static uint8_t pack(const X* buff, const X& threshold) { uint8_t res; res = (buff[0] > threshold) << 7; res = res | ((buff[1] > threshold) << 6); res = res | ((buff[2] > threshold) << 5); res = res | ((buff[3] > threshold) << 4); res = res | ((buff[4] > threshold) << 3); res = res | ((buff[5] > threshold) << 2); res = res | ((buff[6] > threshold) << 1); res = res | (buff[7] > threshold); return res; } template <> SD_HOST_DEVICE uint8_t pack<bool>(const bool* buff, const bool& threshold) { // ignore threshold uint8_t res; res = buff[0] << 7; res = res | (buff[1] << 6); res = res | (buff[2] << 5); res = res | (buff[3] << 4); res = res | (buff[4] << 3); res = res | (buff[5] << 2); res = res | (buff[6] << 1); res = res | buff[7]; return res; } template <typename X> SD_HOST_DEVICE static uint8_t pack(const X* buff, int stride, const X& threshold) { uint8_t res; res = (buff[0] > threshold) << 7; res = res | ((buff[1 * stride] > threshold) << 6); res = res | ((buff[2 * stride] > threshold) << 5); res = res | ((buff[3 * stride] > threshold) << 4); res = res | ((buff[4 * stride] > threshold) << 3); res = res | ((buff[5 * stride] > threshold) << 2); res = res | ((buff[6 * stride] > threshold) << 1); res = res | (buff[7 * stride] > threshold); return res; } template <> SD_HOST_DEVICE uint8_t pack<bool>(const bool* buff, int stride, const bool& threshold) { // ignore threshold uint8_t res; res = buff[0] << 7; res = res | (buff[1 * stride] << 6); res = res | (buff[2 * stride] << 5); res = res | (buff[3 * stride] << 4); res = res | (buff[4 * stride] << 3); res = res | (buff[5 * stride] << 2); res = res | (buff[6 * stride] << 1); res = res | buff[7 * stride]; return res; } /////////////////////////////////////////////////////////////////// template <typename T> static void SD_KERNEL cmpBitpack(const void* vx, void* vz, int rank, int len, const sd::LongType* xStridesExtended, const sd::LongType* outPutShapeInfo, T threshold) { const T* x = reinterpret_cast<const T*>(vx); uint8_t* z = reinterpret_cast<uint8_t*>(vz); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto shapes = shape::shapeOf(outPutShapeInfo); auto zStrides = shape::stride(outPutShapeInfo); sd::LongType coords[SD_MAX_RANK] = {}; sd::LongType* ptr_coords = 
(sd::LongType*)&coords; // its extended as {rank+1} so xStridesExtended[rank] is valid auto inLastStride = xStridesExtended[rank]; for (auto k = tid; k < len; k += gridDim.x * blockDim.x) { sd::index2coords_C(k, rank, shapes, ptr_coords); auto offset = sd::offset_from_coords(xStridesExtended, zStrides, ptr_coords, rank); auto buffPart = &(x[offset.first]); auto outBuffPart = &(z[offset.second]); *outBuffPart = pack<T>(buffPart, inLastStride, threshold); } } template <typename T> static void SD_KERNEL cmpBitpackEws(const void* vx, void* vz, int len, const sd::LongType xStride, const sd::LongType yStride, T threshold) { const T* x = reinterpret_cast<const T*>(vx); uint8_t* z = reinterpret_cast<uint8_t*>(vz); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; if (xStride == 1) { for (auto k = tid; k < len; k += gridDim.x * blockDim.x) { auto buffPart = &(x[k * 8]); auto outBuffPart = &(z[k * yStride]); *outBuffPart = pack<T>(buffPart, threshold); } } else { for (auto k = tid; k < len; k += gridDim.x * blockDim.x) { auto buffPart = &(x[k * 8 * xStride]); auto outBuffPart = &(z[k * yStride]); *outBuffPart = pack<T>(buffPart, xStride, threshold); } } } /////////////////////////////////////////////////////////////////// template <typename T> static SD_HOST void cmpBitpackCudaLauncher(sd::graph::Context& block, const NDArray& input, const NDArray& thresholdScalar, NDArray& output) { T threshold = thresholdScalar.e<T>(0); auto inStrides = input.stridesOf(); auto rank = output.rankOf(); // threadblock size // grid size auto stream = block.launchContext()->getCudaStream(); dim3 compareAndBitpackDims = getCompareAndBitpackDims(output.lengthOf()); PointersManager manager(block.launchContext(), "compare_and_bitpack"); NDArray::prepareSpecialUse({&output}, {&input}); if (input.ews() > 0 && output.ews() > 0 && input.ordering() == 'c' && output.ordering() == 'c') { cmpBitpackEws<T><<<compareAndBitpackDims.y, compareAndBitpackDims.x,compareAndBitpackDims.z>>>(input.specialBuffer(), output.specialBuffer(), output.lengthOf(), inStrides[rank - 1], output.stridesOf()[rank - 1], threshold); } else { // if output shape is {n1, n2, n3} then input shape is { n1. n2, n3 * 8} // therefore we can split input shape {n1, n2, n3 , 8} and correct its stride // as we do not need last shape info. lets just extend and correct its stride sd::LongType extendedStrides[SD_MAX_RANK]; for (int i = 0; i < rank; i++) { extendedStrides[i] = inStrides[i]; } // lets correct new stride extendedStrides[rank - 1] = 8 * inStrides[rank - 1]; extendedStrides[rank] = inStrides[rank - 1]; auto strideSize = (rank + 1) * sizeof(sd::LongType); sd::LongType* extendedStridesDevPtr = reinterpret_cast<sd::LongType*>(manager.replicatePointer(extendedStrides, strideSize)); cmpBitpack<T><<<compareAndBitpackDims.y, compareAndBitpackDims.x,compareAndBitpackDims.z>>>(input.specialBuffer(), output.specialBuffer(), rank, output.lengthOf(), extendedStridesDevPtr, output.specialShapeInfo(), threshold); } NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } void compareAndBitpack(sd::graph::Context& block, const NDArray& input, const NDArray& threshold, NDArray& output) { BUILD_SINGLE_SELECTOR(input.dataType(), cmpBitpackCudaLauncher, (block, input, threshold, output), SD_COMMON_TYPES); } } // namespace helpers } // namespace ops } // namespace sd
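// Editorial sketch (not part of the compare-and-bitpack pair above): host-side restatement
// of the pack<T>() bit layout used by the kernels. Element 0 lands in the most significant
// bit and element 7 in the least significant bit, matching the device code; pack_host is an
// illustrative name and is not part of the library above.
#include <cstdint>

static inline uint8_t pack_host(const float *buff, float threshold)
{
  uint8_t res = 0;
  for (int bit = 0; bit < 8; ++bit) {
    // (buff[0] > threshold) ends up in bit 7, buff[7] in bit 0, as in the CUDA kernel.
    res = static_cast<uint8_t>(res | ((buff[bit] > threshold) << (7 - bit)));
  }
  return res;
}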
827ab389132e937206a3f66dbab6ae53b8053359.hip
// !!! This is a file automatically generated by hipify!!! #include <hipfft.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "debug.h" #include "timer.h" #include "utils_cuda.h" #include "utils_file.h" #include "params.h" //#define REORDER #define WARP 32 int device=0; __device__ __inline__ float2 Get_W_value(int N, int m){ float2 ctemp; ctemp.x=-cosf( 6.283185f*fdividef( (float) m, (float) N ) - 3.141592654f ); ctemp.y=sinf( 6.283185f*fdividef( (float) m, (float) N ) - 3.141592654f ); return(ctemp); } __device__ __inline__ float2 Get_W_value_float(float N, float m){ float2 ctemp; ctemp.x=-cosf( 6.283185f*fdividef( m, N) - 3.141592654f ); ctemp.y=sinf( 6.283185f*fdividef( m, N) - 3.141592654f ); return(ctemp); } __device__ void do_FFT(float2 *s_input){ // in-place float2 A_DFT_value, B_DFT_value, ftemp2, ftemp; float2 WB; int r, j, k, PoTm1, A_read_index, B_read_index, A_write_index, B_write_index, Nhalf; int An, A_load_id; Nhalf=FFT_LENGTH>>1; //----------------------------------------------- //----- First N-1 iteration PoTm1=1; A_read_index=threadIdx.x; B_read_index=threadIdx.x + Nhalf; A_write_index=2*threadIdx.x; B_write_index=2*threadIdx.x+1; A_load_id = 2*threadIdx.x; An=2*threadIdx.x; for(r=1;r<FFT_EXP;r++){ An >>= 1; A_load_id <<= 1; A_load_id |= An & 1; j=(threadIdx.x)>>(r-1); k=PoTm1*j; ftemp = s_input[A_read_index]; ftemp2 = s_input[B_read_index]; A_DFT_value.x=ftemp.x + ftemp2.x; A_DFT_value.y=ftemp.y + ftemp2.y; WB = Get_W_value(FFT_LENGTH,k); B_DFT_value.x=WB.x*(ftemp.x - ftemp2.x) - WB.y*(ftemp.y - ftemp2.y); B_DFT_value.y=WB.x*(ftemp.y - ftemp2.y) + WB.y*(ftemp.x - ftemp2.x); PoTm1=PoTm1<<1; __syncthreads(); s_input[A_write_index]=A_DFT_value; s_input[B_write_index]=B_DFT_value; __syncthreads(); } A_load_id &= FFT_LENGTH-1; //----- First N-1 iteration //----------------------------------------------- //----------------------------------------------- //----- Last exchange ftemp = s_input[A_read_index]; ftemp2 = s_input[B_read_index]; A_DFT_value.x = ftemp.x + ftemp2.x; A_DFT_value.y = ftemp.y + ftemp2.y; B_DFT_value.x = ftemp.x - ftemp2.x; B_DFT_value.y = ftemp.y - ftemp2.y; __syncthreads(); s_input[A_write_index]=A_DFT_value; s_input[B_write_index]=B_DFT_value; __syncthreads(); //----- Last exchange //----------------------------------------------- #ifdef REORDER //----------------------------------------------- //----- De-shuffle ftemp=s_input[A_load_id]; ftemp2=s_input[A_load_id+Nhalf]; __syncthreads(); s_input[2*threadIdx.x]=ftemp; s_input[2*threadIdx.x+1]=ftemp2; //----- De-shuffle //----------------------------------------------- #endif //-------> END } __global__ void FFT_GPU_external(float2 *d_input, float2* d_output) { extern __shared__ float2 s_input[]; s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH]; s_input[threadIdx.x + FFT_LENGTH/2]=d_input[threadIdx.x + FFT_LENGTH/2 + blockIdx.x*FFT_LENGTH]; __syncthreads(); do_FFT(s_input); __syncthreads(); d_output[threadIdx.x + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x]; d_output[threadIdx.x + FFT_LENGTH/2 + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x + FFT_LENGTH/2]; } __global__ void FFT_GPU_multiple(float2 *d_input, float2* d_output) { extern __shared__ float2 s_input[]; s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH]; s_input[threadIdx.x + FFT_LENGTH/2]=d_input[threadIdx.x + FFT_LENGTH/2 + blockIdx.x*FFT_LENGTH]; __syncthreads(); for(int f=0;f<100;f++){ do_FFT(s_input); } __syncthreads(); d_output[threadIdx.x + 
blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x]; d_output[threadIdx.x + FFT_LENGTH/2 + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x + FFT_LENGTH/2]; } //***************************************************************************** //***************************************************************************** //***************************************************************************** int Max_columns_in_memory_shared(int nSamples, int nSpectra) { long int nColumns,maxgrid_x; size_t free_mem,total_mem; hipDeviceProp_t devProp; checkCudaErrors(hipSetDevice(device)); checkCudaErrors(hipGetDeviceProperties(&devProp,device)); maxgrid_x = devProp.maxGridSize[0]; hipMemGetInfo(&free_mem,&total_mem); nColumns=((long int) free_mem)/(2.0*sizeof(float2)*nSamples); if(nColumns>maxgrid_x) nColumns=maxgrid_x; nColumns=(int) nColumns*0.9; return(nColumns); } void FFT_init(){ //---------> Specific nVidia stuff hipDeviceSetCacheConfig(hipFuncCachePreferEqual); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); } void FFT_external_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){ GpuTimer timer; //---------> CUDA block and CUDA grid parameters int nCUDAblocks_x=nSpectra; int nCUDAblocks_y=1; //Head size dim3 gridSize(nCUDAblocks_x, nCUDAblocks_y, 1); dim3 blockSize(nSamples/2, 1, 1); //---------> FIR filter part timer.Start(); hipLaunchKernelGGL(( FFT_GPU_external), dim3(gridSize), dim3(blockSize),nSamples*8, 0, d_input, d_output); timer.Stop(); *FFT_time += timer.Elapsed(); } void FFT_multiple_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){ GpuTimer timer; //---------> CUDA block and CUDA grid parameters dim3 gridSize_multiple(1000, 1, 1); dim3 blockSize(nSamples/2, 1, 1); //---------> FIR filter part timer.Start(); hipLaunchKernelGGL(( FFT_GPU_multiple), dim3(gridSize_multiple), dim3(blockSize),nSamples*8, 0, d_input, d_output); timer.Stop(); *FFT_time += timer.Elapsed(); } //***************************************************************************** //***************************************************************************** //***************************************************************************** int GPU_FFT(float2 *h_input, float2 *h_output, int nSamples, int nSpectra, int inverse){ //---------> Initial nVidia stuff int devCount; size_t free_mem,total_mem; checkCudaErrors(hipGetDeviceCount(&devCount)); checkCudaErrors(hipSetDevice(device)); hipMemGetInfo(&free_mem,&total_mem); if(DEBUG) printf("\nDevice has %ld MB of total memory, which %ld MB is available.\n", (long int) total_mem/(1000*1000), (long int) free_mem/(1000*1000)); //---------> Checking memory int nElements=nSamples*nSpectra; int input_size=nElements; int output_size=nElements; float free_memory = (float) free_mem/(1024.0*1024.0); float memory_required=((2*input_size + 2*output_size)*sizeof(float))/(1024.0*1024.0); if(DEBUG) printf("DEBUG: Device has %0.3f MB of total memory, which %0.3f MB is available. Memory required %0.3f MB\n", (float) total_mem/(1024.0*1024.0), free_memory ,memory_required); if(memory_required>free_memory) {printf("\n \n Array is too big for the device! 
\n \n"); return(-3);} //---------> Measurements double transfer_in, transfer_out, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time,cuFFT_time,FFT_multiple_reuse_registers_time; GpuTimer timer; // if set before set device getting errors - invalid handle //------------------------------------------------------------------------------ //---------> Shared memory kernel transfer_in=0.0; transfer_out=0.0; FFT_time=0.0; FFT_external_time=0.0; FFT_multiple_time=0.0; FFT_multiple_reuse_time=0.0; cuFFT_time=0.0; FFT_multiple_reuse_registers_time=0.0; //---------> Memory allocation if (DEBUG) printf("Device memory allocation...: \t\t"); float2 *d_output; float2 *d_input; timer.Start(); checkCudaErrors(hipMalloc((void **) &d_input, sizeof(float2)*input_size)); checkCudaErrors(hipMalloc((void **) &d_output, sizeof(float2)*output_size)); timer.Stop(); if (DEBUG) printf("done in %g ms.\n", timer.Elapsed()); //---------> FFT calculation if (DEBUG) printf("Transferring data to device...: \t"); timer.Start(); checkCudaErrors(hipMemcpy(d_input, h_input, input_size*sizeof(float2), hipMemcpyHostToDevice)); timer.Stop(); transfer_in+=timer.Elapsed(); if (DEBUG) printf("done in %g ms.\n", timer.Elapsed()); //-----> Compute FFT on the chunk if(CUFFT){ //---------> FFT hipfftHandle plan; hipfftResult error; error = hipfftPlan1d(&plan, nSamples, HIPFFT_C2C, nSpectra); if (HIPFFT_SUCCESS != error){ printf("CUFFT error: %d", error); } timer.Start(); hipfftExecC2C(plan, (hipfftComplex *)d_input, (hipfftComplex *)d_output, HIPFFT_FORWARD); timer.Stop(); cuFFT_time += timer.Elapsed(); hipfftDestroy(plan); } if(MULTIPLE){ if (DEBUG) printf("Multiple FFT...: \t\t\t"); FFT_init(); FFT_multiple_benchmark(d_input, d_output, nSamples, nSpectra, &FFT_multiple_time); if (DEBUG) printf("done in %g ms.\n", FFT_multiple_time); } if(EXTERNAL){ if (DEBUG) printf("FFT...: \t\t\t\t"); FFT_init(); FFT_external_benchmark(d_input, d_output, nSamples, nSpectra, &FFT_external_time); if (DEBUG) printf("done in %g ms.\n", FFT_external_time); } //-----> Copy chunk of output data to host if (DEBUG) printf("Transferring data to host...: \t\t"); timer.Start(); checkCudaErrors(hipMemcpy( h_output, d_output, output_size*sizeof(float2), hipMemcpyDeviceToHost)); timer.Stop(); transfer_out+=timer.Elapsed(); if (DEBUG) printf("done in %g ms.\n", timer.Elapsed()); //---------> error check ----- checkCudaErrors(hipGetLastError()); //---------> Feeing allocated resources checkCudaErrors(hipFree(d_input)); checkCudaErrors(hipFree(d_output)); if (DEBUG || WRITE) printf("nSpectra:%d; nSamples:%d cuFFT:%0.3f ms; FFT:%0.3f ms; FFT external:%0.3f ms; FFT multiple:%0.3f ms; FFT multiple reuse:%0.3f ms; FFT_multiple_reuse_registers_time:%0.3fms; HostToDevice:%0.3f ms; DeviceToHost:%0.3f ms\n",nSpectra,nSamples,cuFFT_time, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time, FFT_multiple_reuse_registers_time, transfer_in, transfer_out); if (WRITE){ char str[200]; sprintf(str,"GPU-FFT-Pease.dat"); if (DEBUG) printf("\n Write results into file...\t"); save_time(str, nSpectra,nSamples, cuFFT_time, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time, FFT_multiple_reuse_registers_time, transfer_in, transfer_out); if (DEBUG) printf("\t done.\n-------------------------------------\n"); } return(1); }
827ab389132e937206a3f66dbab6ae53b8053359.cu
#include <cufft.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include "debug.h" #include "timer.h" #include "utils_cuda.h" #include "utils_file.h" #include "params.h" //#define REORDER #define WARP 32 int device=0; __device__ __inline__ float2 Get_W_value(int N, int m){ float2 ctemp; ctemp.x=-cosf( 6.283185f*fdividef( (float) m, (float) N ) - 3.141592654f ); ctemp.y=sinf( 6.283185f*fdividef( (float) m, (float) N ) - 3.141592654f ); return(ctemp); } __device__ __inline__ float2 Get_W_value_float(float N, float m){ float2 ctemp; ctemp.x=-cosf( 6.283185f*fdividef( m, N) - 3.141592654f ); ctemp.y=sinf( 6.283185f*fdividef( m, N) - 3.141592654f ); return(ctemp); } __device__ void do_FFT(float2 *s_input){ // in-place float2 A_DFT_value, B_DFT_value, ftemp2, ftemp; float2 WB; int r, j, k, PoTm1, A_read_index, B_read_index, A_write_index, B_write_index, Nhalf; int An, A_load_id; Nhalf=FFT_LENGTH>>1; //----------------------------------------------- //----- First N-1 iteration PoTm1=1; A_read_index=threadIdx.x; B_read_index=threadIdx.x + Nhalf; A_write_index=2*threadIdx.x; B_write_index=2*threadIdx.x+1; A_load_id = 2*threadIdx.x; An=2*threadIdx.x; for(r=1;r<FFT_EXP;r++){ An >>= 1; A_load_id <<= 1; A_load_id |= An & 1; j=(threadIdx.x)>>(r-1); k=PoTm1*j; ftemp = s_input[A_read_index]; ftemp2 = s_input[B_read_index]; A_DFT_value.x=ftemp.x + ftemp2.x; A_DFT_value.y=ftemp.y + ftemp2.y; WB = Get_W_value(FFT_LENGTH,k); B_DFT_value.x=WB.x*(ftemp.x - ftemp2.x) - WB.y*(ftemp.y - ftemp2.y); B_DFT_value.y=WB.x*(ftemp.y - ftemp2.y) + WB.y*(ftemp.x - ftemp2.x); PoTm1=PoTm1<<1; __syncthreads(); s_input[A_write_index]=A_DFT_value; s_input[B_write_index]=B_DFT_value; __syncthreads(); } A_load_id &= FFT_LENGTH-1; //----- First N-1 iteration //----------------------------------------------- //----------------------------------------------- //----- Last exchange ftemp = s_input[A_read_index]; ftemp2 = s_input[B_read_index]; A_DFT_value.x = ftemp.x + ftemp2.x; A_DFT_value.y = ftemp.y + ftemp2.y; B_DFT_value.x = ftemp.x - ftemp2.x; B_DFT_value.y = ftemp.y - ftemp2.y; __syncthreads(); s_input[A_write_index]=A_DFT_value; s_input[B_write_index]=B_DFT_value; __syncthreads(); //----- Last exchange //----------------------------------------------- #ifdef REORDER //----------------------------------------------- //----- De-shuffle ftemp=s_input[A_load_id]; ftemp2=s_input[A_load_id+Nhalf]; __syncthreads(); s_input[2*threadIdx.x]=ftemp; s_input[2*threadIdx.x+1]=ftemp2; //----- De-shuffle //----------------------------------------------- #endif //-------> END } __global__ void FFT_GPU_external(float2 *d_input, float2* d_output) { extern __shared__ float2 s_input[]; s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH]; s_input[threadIdx.x + FFT_LENGTH/2]=d_input[threadIdx.x + FFT_LENGTH/2 + blockIdx.x*FFT_LENGTH]; __syncthreads(); do_FFT(s_input); __syncthreads(); d_output[threadIdx.x + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x]; d_output[threadIdx.x + FFT_LENGTH/2 + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x + FFT_LENGTH/2]; } __global__ void FFT_GPU_multiple(float2 *d_input, float2* d_output) { extern __shared__ float2 s_input[]; s_input[threadIdx.x]=d_input[threadIdx.x + blockIdx.x*FFT_LENGTH]; s_input[threadIdx.x + FFT_LENGTH/2]=d_input[threadIdx.x + FFT_LENGTH/2 + blockIdx.x*FFT_LENGTH]; __syncthreads(); for(int f=0;f<100;f++){ do_FFT(s_input); } __syncthreads(); d_output[threadIdx.x + blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x]; d_output[threadIdx.x + FFT_LENGTH/2 + 
blockIdx.x*FFT_LENGTH]=s_input[threadIdx.x + FFT_LENGTH/2]; } //***************************************************************************** //***************************************************************************** //***************************************************************************** int Max_columns_in_memory_shared(int nSamples, int nSpectra) { long int nColumns,maxgrid_x; size_t free_mem,total_mem; cudaDeviceProp devProp; checkCudaErrors(cudaSetDevice(device)); checkCudaErrors(cudaGetDeviceProperties(&devProp,device)); maxgrid_x = devProp.maxGridSize[0]; cudaMemGetInfo(&free_mem,&total_mem); nColumns=((long int) free_mem)/(2.0*sizeof(float2)*nSamples); if(nColumns>maxgrid_x) nColumns=maxgrid_x; nColumns=(int) nColumns*0.9; return(nColumns); } void FFT_init(){ //---------> Specific nVidia stuff cudaDeviceSetCacheConfig(cudaFuncCachePreferEqual); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); } void FFT_external_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){ GpuTimer timer; //---------> CUDA block and CUDA grid parameters int nCUDAblocks_x=nSpectra; int nCUDAblocks_y=1; //Head size dim3 gridSize(nCUDAblocks_x, nCUDAblocks_y, 1); dim3 blockSize(nSamples/2, 1, 1); //---------> FIR filter part timer.Start(); FFT_GPU_external<<<gridSize, blockSize,nSamples*8>>>( d_input, d_output); timer.Stop(); *FFT_time += timer.Elapsed(); } void FFT_multiple_benchmark(float2 *d_input, float2 *d_output, int nSamples, int nSpectra, double *FFT_time){ GpuTimer timer; //---------> CUDA block and CUDA grid parameters dim3 gridSize_multiple(1000, 1, 1); dim3 blockSize(nSamples/2, 1, 1); //---------> FIR filter part timer.Start(); FFT_GPU_multiple<<<gridSize_multiple, blockSize,nSamples*8>>>( d_input, d_output); timer.Stop(); *FFT_time += timer.Elapsed(); } //***************************************************************************** //***************************************************************************** //***************************************************************************** int GPU_FFT(float2 *h_input, float2 *h_output, int nSamples, int nSpectra, int inverse){ //---------> Initial nVidia stuff int devCount; size_t free_mem,total_mem; checkCudaErrors(cudaGetDeviceCount(&devCount)); checkCudaErrors(cudaSetDevice(device)); cudaMemGetInfo(&free_mem,&total_mem); if(DEBUG) printf("\nDevice has %ld MB of total memory, which %ld MB is available.\n", (long int) total_mem/(1000*1000), (long int) free_mem/(1000*1000)); //---------> Checking memory int nElements=nSamples*nSpectra; int input_size=nElements; int output_size=nElements; float free_memory = (float) free_mem/(1024.0*1024.0); float memory_required=((2*input_size + 2*output_size)*sizeof(float))/(1024.0*1024.0); if(DEBUG) printf("DEBUG: Device has %0.3f MB of total memory, which %0.3f MB is available. Memory required %0.3f MB\n", (float) total_mem/(1024.0*1024.0), free_memory ,memory_required); if(memory_required>free_memory) {printf("\n \n Array is too big for the device! 
\n \n"); return(-3);} //---------> Measurements double transfer_in, transfer_out, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time,cuFFT_time,FFT_multiple_reuse_registers_time; GpuTimer timer; // if set before set device getting errors - invalid handle //------------------------------------------------------------------------------ //---------> Shared memory kernel transfer_in=0.0; transfer_out=0.0; FFT_time=0.0; FFT_external_time=0.0; FFT_multiple_time=0.0; FFT_multiple_reuse_time=0.0; cuFFT_time=0.0; FFT_multiple_reuse_registers_time=0.0; //---------> Memory allocation if (DEBUG) printf("Device memory allocation...: \t\t"); float2 *d_output; float2 *d_input; timer.Start(); checkCudaErrors(cudaMalloc((void **) &d_input, sizeof(float2)*input_size)); checkCudaErrors(cudaMalloc((void **) &d_output, sizeof(float2)*output_size)); timer.Stop(); if (DEBUG) printf("done in %g ms.\n", timer.Elapsed()); //---------> FFT calculation if (DEBUG) printf("Transferring data to device...: \t"); timer.Start(); checkCudaErrors(cudaMemcpy(d_input, h_input, input_size*sizeof(float2), cudaMemcpyHostToDevice)); timer.Stop(); transfer_in+=timer.Elapsed(); if (DEBUG) printf("done in %g ms.\n", timer.Elapsed()); //-----> Compute FFT on the chunk if(CUFFT){ //---------> FFT cufftHandle plan; cufftResult error; error = cufftPlan1d(&plan, nSamples, CUFFT_C2C, nSpectra); if (CUFFT_SUCCESS != error){ printf("CUFFT error: %d", error); } timer.Start(); cufftExecC2C(plan, (cufftComplex *)d_input, (cufftComplex *)d_output, CUFFT_FORWARD); timer.Stop(); cuFFT_time += timer.Elapsed(); cufftDestroy(plan); } if(MULTIPLE){ if (DEBUG) printf("Multiple FFT...: \t\t\t"); FFT_init(); FFT_multiple_benchmark(d_input, d_output, nSamples, nSpectra, &FFT_multiple_time); if (DEBUG) printf("done in %g ms.\n", FFT_multiple_time); } if(EXTERNAL){ if (DEBUG) printf("FFT...: \t\t\t\t"); FFT_init(); FFT_external_benchmark(d_input, d_output, nSamples, nSpectra, &FFT_external_time); if (DEBUG) printf("done in %g ms.\n", FFT_external_time); } //-----> Copy chunk of output data to host if (DEBUG) printf("Transferring data to host...: \t\t"); timer.Start(); checkCudaErrors(cudaMemcpy( h_output, d_output, output_size*sizeof(float2), cudaMemcpyDeviceToHost)); timer.Stop(); transfer_out+=timer.Elapsed(); if (DEBUG) printf("done in %g ms.\n", timer.Elapsed()); //---------> error check ----- checkCudaErrors(cudaGetLastError()); //---------> Feeing allocated resources checkCudaErrors(cudaFree(d_input)); checkCudaErrors(cudaFree(d_output)); if (DEBUG || WRITE) printf("nSpectra:%d; nSamples:%d cuFFT:%0.3f ms; FFT:%0.3f ms; FFT external:%0.3f ms; FFT multiple:%0.3f ms; FFT multiple reuse:%0.3f ms; FFT_multiple_reuse_registers_time:%0.3fms; HostToDevice:%0.3f ms; DeviceToHost:%0.3f ms\n",nSpectra,nSamples,cuFFT_time, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time, FFT_multiple_reuse_registers_time, transfer_in, transfer_out); if (WRITE){ char str[200]; sprintf(str,"GPU-FFT-Pease.dat"); if (DEBUG) printf("\n Write results into file...\t"); save_time(str, nSpectra,nSamples, cuFFT_time, FFT_time, FFT_external_time, FFT_multiple_time, FFT_multiple_reuse_time, FFT_multiple_reuse_registers_time, transfer_in, transfer_out); if (DEBUG) printf("\t done.\n-------------------------------------\n"); } return(1); }
791feffa2522f282c73d970a32dc7766f3df5961.hip
// !!! This is a file automatically generated by hipify!!! #include "activations.h" #include "hip/hip_runtime.h" #include "blas.h" __device__ float lhtan_activate_kernel(float x) { if(x < 0) return .001f*x; if(x > 1) return .001f*(x-1.f) + 1.f; return x; } __device__ float hardtan_activate_kernel(float x) { if (x < -1) return -1; if (x > 1) return 1; return x; } __device__ float linear_activate_kernel(float x){return x;} __device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));} __device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;} __device__ float relu_activate_kernel(float x){return x*(x>0);} __device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);} __device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;} __device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;} __device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;} __device__ float tanh_activate_kernel(float x){return (2.f/(1 + expf(-2*x)) - 1);} __device__ float plse_activate_kernel(float x) { if(x < -4) return .01f * (x + 4); if(x > 4) return .01f * (x - 4) + 1; return .125f*x + .5f; } __device__ float stair_activate_kernel(float x) { int n = floorf(x); if (n%2 == 0) return floorf(x/2); else return (x - n) + floorf(x/2); } __device__ float activate_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_activate_kernel(x); case LOGISTIC: return logistic_activate_kernel(x); case LOGGY: return loggy_activate_kernel(x); case RELU: return relu_activate_kernel(x); case ELU: return elu_activate_kernel(x); case RELIE: return relie_activate_kernel(x); case RAMP: return ramp_activate_kernel(x); case LEAKY: return leaky_activate_kernel(x); case TANH: return tanh_activate_kernel(x); case PLSE: return plse_activate_kernel(x); case STAIR: return stair_activate_kernel(x); case HARDTAN: return hardtan_activate_kernel(x); case LHTAN: return lhtan_activate_kernel(x); } return 0; } __global__ void activate_array_kernel(float *x, int n, ACTIVATION a) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) x[i] = activate_kernel(x[i], a); } void activate_array_gpu(float *x, int n, ACTIVATION a) { hipLaunchKernelGGL(( activate_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, a); check_error(hipPeekAtLastError()); }
791feffa2522f282c73d970a32dc7766f3df5961.cu
#include "activations.h" #include "cuda.h" #include "blas.h" __device__ float lhtan_activate_kernel(float x) { if(x < 0) return .001f*x; if(x > 1) return .001f*(x-1.f) + 1.f; return x; } __device__ float hardtan_activate_kernel(float x) { if (x < -1) return -1; if (x > 1) return 1; return x; } __device__ float linear_activate_kernel(float x){return x;} __device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));} __device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;} __device__ float relu_activate_kernel(float x){return x*(x>0);} __device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);} __device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;} __device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;} __device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;} __device__ float tanh_activate_kernel(float x){return (2.f/(1 + expf(-2*x)) - 1);} __device__ float plse_activate_kernel(float x) { if(x < -4) return .01f * (x + 4); if(x > 4) return .01f * (x - 4) + 1; return .125f*x + .5f; } __device__ float stair_activate_kernel(float x) { int n = floorf(x); if (n%2 == 0) return floorf(x/2); else return (x - n) + floorf(x/2); } __device__ float activate_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_activate_kernel(x); case LOGISTIC: return logistic_activate_kernel(x); case LOGGY: return loggy_activate_kernel(x); case RELU: return relu_activate_kernel(x); case ELU: return elu_activate_kernel(x); case RELIE: return relie_activate_kernel(x); case RAMP: return ramp_activate_kernel(x); case LEAKY: return leaky_activate_kernel(x); case TANH: return tanh_activate_kernel(x); case PLSE: return plse_activate_kernel(x); case STAIR: return stair_activate_kernel(x); case HARDTAN: return hardtan_activate_kernel(x); case LHTAN: return lhtan_activate_kernel(x); } return 0; } __global__ void activate_array_kernel(float *x, int n, ACTIVATION a) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) x[i] = activate_kernel(x[i], a); } void activate_array_gpu(float *x, int n, ACTIVATION a) { activate_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a); check_error(cudaPeekAtLastError()); }
34936028d62f7be54f9b2db9727cb41a0a163a4c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Borrowed from https://github.com/s4lesman, with slight modifications
#include <wb.h>

#define HISTOGRAM_LENGTH 256

__global__ void convertToChar(float * input, unsigned char * ucharInput, int width, int height) {
    int bx = blockIdx.x;  int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;

    int row = by*blockDim.y+ty;
    int col = bx*blockDim.x+tx;
    int index = row*width + col;

    if(row < height && col < width) {
        ucharInput[index*3]   = (unsigned char) (255 * input[index*3]);   //r
        ucharInput[index*3+1] = (unsigned char) (255 * input[index*3+1]); //g
        ucharInput[index*3+2] = (unsigned char) (255 * input[index*3+2]); //b
    }
}

__global__ void convertToGrayScale(unsigned char * ucharImg, unsigned char * grayImg, int width, int height) {
    int bx = blockIdx.x;  int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;

    int row = by*blockDim.y+ty;
    int col = bx*blockDim.x+tx;
    int index = row*width + col;

    if(row < height && col < width) {
        grayImg[index] = (unsigned char) (0.21*ucharImg[index*3] + 0.71*ucharImg[index*3 + 1] + 0.07*ucharImg[index*3 + 2]);
    }
}

__global__ void hist_eq(unsigned char * deviceCharImg, float * output, float* cdf, float cdfmin, int size) {
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int i = tx+blockDim.x*bx;

    if(i < size) {
        deviceCharImg[i] = min(max(255*(cdf[deviceCharImg[i]] - cdfmin)/(1 - cdfmin),0.0),255.0);
        output[i] = (float) (deviceCharImg[i]/255.0);
    }
}

__global__ void histo_kernel(unsigned char * buffer, unsigned int * histo, long size) {
    // compute histogram with a private version in each block
    __shared__ unsigned int histo_private[HISTOGRAM_LENGTH];

    int bx = blockIdx.x;
    int tx = threadIdx.x;
    // index of current pixel
    int index = tx+bx*blockDim.x;

    // set initial values of histogram to zero
    if (tx < HISTOGRAM_LENGTH)
        histo_private[tx] = 0;
    __syncthreads();

    // only threads that map to a valid pixel contribute (the grid is rounded up)
    if (index < size) {
        atomicAdd(&(histo_private[buffer[index]]), 1);
    }
    __syncthreads();

    //copy private histogram to device histogram
    if(tx < 256) {
        atomicAdd(&(histo[tx]), histo_private[tx]);
    }
}

float prob(int x, int width, int height) {
    return 1.0 * x / (width * height);
}

int main(int argc, char ** argv) {
    wbArg_t args;
    int imageWidth;
    int imageHeight;
    int imageChannels;
    wbImage_t inputImage;
    wbImage_t outputImage;
    float * hostInputImageData;
    float * hostOutputImageData;
    const char * inputImageFile;

    //@@ Insert more code here
    // device variables
    float * deviceInputImageData;
    float * deviceOutputImageData;
    unsigned char * deviceUCharImage;
    unsigned char * deviceGrayImg;

    args = wbArg_read(argc, argv); /* parse the input arguments */

    inputImageFile = wbArg_getInputFile(args, 0);

    wbTime_start(Generic, "Importing data and creating memory on host");
    inputImage = wbImport(inputImageFile);
    imageWidth = wbImage_getWidth(inputImage);
    imageHeight = wbImage_getHeight(inputImage);
    imageChannels = wbImage_getChannels(inputImage);
    outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
    wbTime_stop(Generic, "Importing data and creating memory on host");

    //@@ insert code here
    hostInputImageData = wbImage_getData(inputImage);
    hostOutputImageData = wbImage_getData(outputImage);

    //allocate memory for device variables
    hipMalloc((void **) &deviceInputImageData, imageWidth*imageHeight*imageChannels*sizeof(float));
    hipMalloc((void **) &deviceOutputImageData, imageWidth*imageHeight*imageChannels*sizeof(float));
    hipMalloc((void **) &deviceUCharImage, imageWidth*imageHeight*imageChannels*sizeof(unsigned char));
    hipMalloc((void **) &deviceGrayImg, imageWidth*imageHeight*sizeof(unsigned char));

    hipMemcpy(deviceInputImageData, hostInputImageData, imageWidth*imageHeight*imageChannels*sizeof(float), hipMemcpyHostToDevice);

    wbLog(TRACE, "image width: ",imageWidth,", image height: ",imageHeight);

    //@@ insert code here
    dim3 dimBlock(12, 12, 1);
    dim3 dimGrid((imageWidth - 1)/12 + 1, (imageHeight - 1)/12 + 1, 1);

    //convert the image to unsigned char
    hipLaunchKernelGGL(( convertToChar), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInputImageData, deviceUCharImage, imageWidth, imageHeight);

    // need to convert image to grayscale
    hipLaunchKernelGGL(( convertToGrayScale), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceUCharImage, deviceGrayImg, imageWidth, imageHeight);

    // allocate histogram host and set initial values of array to zero.
    unsigned int * hostHistogram;
    hostHistogram = (unsigned int *) malloc(HISTOGRAM_LENGTH*sizeof(unsigned int));
    for(int i = 0; i < HISTOGRAM_LENGTH; i++) {
        hostHistogram[i] = 0;
    }

    // allocation for histogram from host to device
    unsigned int * deviceHistogram;
    hipMalloc((void **) &deviceHistogram,HISTOGRAM_LENGTH*sizeof(unsigned int));
    hipMemcpy(deviceHistogram, hostHistogram, HISTOGRAM_LENGTH*sizeof(unsigned int), hipMemcpyHostToDevice);

    // size in 1D, gray image should only have one channel
    dim3 histoGrid((imageWidth*imageHeight-1)/HISTOGRAM_LENGTH + 1, 1, 1);
    dim3 histoBlock(HISTOGRAM_LENGTH,1,1);

    //compute the histogram
    hipLaunchKernelGGL(( histo_kernel), dim3(histoGrid), dim3(histoBlock), 0, 0, deviceGrayImg, deviceHistogram, imageWidth*imageHeight);

    //copy result back to host histogram
    hipMemcpy(hostHistogram, deviceHistogram, HISTOGRAM_LENGTH*sizeof(unsigned int), hipMemcpyDeviceToHost);

    // compute scan operation for histogram
    float * hostCDF;
    hostCDF = (float *)malloc(HISTOGRAM_LENGTH*sizeof(float));
    hostCDF[0] = prob(hostHistogram[0], imageWidth, imageHeight);
    for(int i = 1; i < HISTOGRAM_LENGTH; i++) {
        hostCDF[i] = hostCDF[i-1]+prob(hostHistogram[i],imageWidth,imageHeight);
    }

    // compute cdfmin
    float cdfmin = hostCDF[0];

    // copy host cdf to device
    float *deviceCDF;
    hipMalloc((void **) &deviceCDF, HISTOGRAM_LENGTH*sizeof(float));
    hipMemcpy(deviceCDF, hostCDF, HISTOGRAM_LENGTH*sizeof(float), hipMemcpyHostToDevice);

    // histogram equalization function
    dim3 dimGrid2((imageWidth*imageHeight*imageChannels - 1)/HISTOGRAM_LENGTH + 1, 1, 1);
    dim3 dimBlock2(HISTOGRAM_LENGTH, 1, 1);
    hipLaunchKernelGGL(( hist_eq), dim3(dimGrid2), dim3(dimBlock2), 0, 0, deviceUCharImage, deviceOutputImageData, deviceCDF, cdfmin, imageWidth*imageHeight*imageChannels);

    // copy results back to host
    hipMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth*imageHeight*imageChannels*sizeof(float), hipMemcpyDeviceToHost);

    wbSolution(args, outputImage);

    hipFree(deviceUCharImage);
    hipFree(deviceGrayImg);
    hipFree(deviceInputImageData);
    hipFree(deviceOutputImageData);

    free(hostInputImageData);
    free(hostOutputImageData);

    wbImage_delete(outputImage);
    wbImage_delete(inputImage);

    return 0;
}
34936028d62f7be54f9b2db9727cb41a0a163a4c.cu
// Borrowed from https://github.com/s4lesman, with slight modifications
#include <wb.h>

#define HISTOGRAM_LENGTH 256

__global__ void convertToChar(float * input, unsigned char * ucharInput, int width, int height) {
    int bx = blockIdx.x;  int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;

    int row = by*blockDim.y+ty;
    int col = bx*blockDim.x+tx;
    int index = row*width + col;

    if(row < height && col < width) {
        ucharInput[index*3]   = (unsigned char) (255 * input[index*3]);   //r
        ucharInput[index*3+1] = (unsigned char) (255 * input[index*3+1]); //g
        ucharInput[index*3+2] = (unsigned char) (255 * input[index*3+2]); //b
    }
}

__global__ void convertToGrayScale(unsigned char * ucharImg, unsigned char * grayImg, int width, int height) {
    int bx = blockIdx.x;  int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;

    int row = by*blockDim.y+ty;
    int col = bx*blockDim.x+tx;
    int index = row*width + col;

    if(row < height && col < width) {
        grayImg[index] = (unsigned char) (0.21*ucharImg[index*3] + 0.71*ucharImg[index*3 + 1] + 0.07*ucharImg[index*3 + 2]);
    }
}

__global__ void hist_eq(unsigned char * deviceCharImg, float * output, float* cdf, float cdfmin, int size) {
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int i = tx+blockDim.x*bx;

    if(i < size) {
        deviceCharImg[i] = min(max(255*(cdf[deviceCharImg[i]] - cdfmin)/(1 - cdfmin),0.0),255.0);
        output[i] = (float) (deviceCharImg[i]/255.0);
    }
}

__global__ void histo_kernel(unsigned char * buffer, unsigned int * histo, long size) {
    // compute histogram with a private version in each block
    __shared__ unsigned int histo_private[HISTOGRAM_LENGTH];

    int bx = blockIdx.x;
    int tx = threadIdx.x;
    // index of current pixel
    int index = tx+bx*blockDim.x;

    // set initial values of histogram to zero
    if (tx < HISTOGRAM_LENGTH)
        histo_private[tx] = 0;
    __syncthreads();

    // only threads that map to a valid pixel contribute (the grid is rounded up)
    if (index < size) {
        atomicAdd(&(histo_private[buffer[index]]), 1);
    }
    __syncthreads();

    //copy private histogram to device histogram
    if(tx < 256) {
        atomicAdd(&(histo[tx]), histo_private[tx]);
    }
}

float prob(int x, int width, int height) {
    return 1.0 * x / (width * height);
}

int main(int argc, char ** argv) {
    wbArg_t args;
    int imageWidth;
    int imageHeight;
    int imageChannels;
    wbImage_t inputImage;
    wbImage_t outputImage;
    float * hostInputImageData;
    float * hostOutputImageData;
    const char * inputImageFile;

    //@@ Insert more code here
    // device variables
    float * deviceInputImageData;
    float * deviceOutputImageData;
    unsigned char * deviceUCharImage;
    unsigned char * deviceGrayImg;

    args = wbArg_read(argc, argv); /* parse the input arguments */

    inputImageFile = wbArg_getInputFile(args, 0);

    wbTime_start(Generic, "Importing data and creating memory on host");
    inputImage = wbImport(inputImageFile);
    imageWidth = wbImage_getWidth(inputImage);
    imageHeight = wbImage_getHeight(inputImage);
    imageChannels = wbImage_getChannels(inputImage);
    outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
    wbTime_stop(Generic, "Importing data and creating memory on host");

    //@@ insert code here
    hostInputImageData = wbImage_getData(inputImage);
    hostOutputImageData = wbImage_getData(outputImage);

    //allocate memory for device variables
    cudaMalloc((void **) &deviceInputImageData, imageWidth*imageHeight*imageChannels*sizeof(float));
    cudaMalloc((void **) &deviceOutputImageData, imageWidth*imageHeight*imageChannels*sizeof(float));
    cudaMalloc((void **) &deviceUCharImage, imageWidth*imageHeight*imageChannels*sizeof(unsigned char));
    cudaMalloc((void **) &deviceGrayImg, imageWidth*imageHeight*sizeof(unsigned char));

    cudaMemcpy(deviceInputImageData, hostInputImageData, imageWidth*imageHeight*imageChannels*sizeof(float), cudaMemcpyHostToDevice);

    wbLog(TRACE, "image width: ",imageWidth,", image height: ",imageHeight);

    //@@ insert code here
    dim3 dimBlock(12, 12, 1);
    dim3 dimGrid((imageWidth - 1)/12 + 1, (imageHeight - 1)/12 + 1, 1);

    //convert the image to unsigned char
    convertToChar<<<dimGrid, dimBlock>>>(deviceInputImageData, deviceUCharImage, imageWidth, imageHeight);

    // need to convert image to grayscale
    convertToGrayScale<<<dimGrid, dimBlock>>>(deviceUCharImage, deviceGrayImg, imageWidth, imageHeight);

    // allocate histogram host and set initial values of array to zero.
    unsigned int * hostHistogram;
    hostHistogram = (unsigned int *) malloc(HISTOGRAM_LENGTH*sizeof(unsigned int));
    for(int i = 0; i < HISTOGRAM_LENGTH; i++) {
        hostHistogram[i] = 0;
    }

    // allocation for histogram from host to device
    unsigned int * deviceHistogram;
    cudaMalloc((void **) &deviceHistogram,HISTOGRAM_LENGTH*sizeof(unsigned int));
    cudaMemcpy(deviceHistogram, hostHistogram, HISTOGRAM_LENGTH*sizeof(unsigned int), cudaMemcpyHostToDevice);

    // size in 1D, gray image should only have one channel
    dim3 histoGrid((imageWidth*imageHeight-1)/HISTOGRAM_LENGTH + 1, 1, 1);
    dim3 histoBlock(HISTOGRAM_LENGTH,1,1);

    //compute the histogram
    histo_kernel<<<histoGrid, histoBlock>>>(deviceGrayImg, deviceHistogram, imageWidth*imageHeight);

    //copy result back to host histogram
    cudaMemcpy(hostHistogram, deviceHistogram, HISTOGRAM_LENGTH*sizeof(unsigned int), cudaMemcpyDeviceToHost);

    // compute scan operation for histogram
    float * hostCDF;
    hostCDF = (float *)malloc(HISTOGRAM_LENGTH*sizeof(float));
    hostCDF[0] = prob(hostHistogram[0], imageWidth, imageHeight);
    for(int i = 1; i < HISTOGRAM_LENGTH; i++) {
        hostCDF[i] = hostCDF[i-1]+prob(hostHistogram[i],imageWidth,imageHeight);
    }

    // compute cdfmin
    float cdfmin = hostCDF[0];

    // copy host cdf to device
    float *deviceCDF;
    cudaMalloc((void **) &deviceCDF, HISTOGRAM_LENGTH*sizeof(float));
    cudaMemcpy(deviceCDF, hostCDF, HISTOGRAM_LENGTH*sizeof(float), cudaMemcpyHostToDevice);

    // histogram equalization function
    dim3 dimGrid2((imageWidth*imageHeight*imageChannels - 1)/HISTOGRAM_LENGTH + 1, 1, 1);
    dim3 dimBlock2(HISTOGRAM_LENGTH, 1, 1);
    hist_eq<<<dimGrid2, dimBlock2>>>(deviceUCharImage, deviceOutputImageData, deviceCDF, cdfmin, imageWidth*imageHeight*imageChannels);

    // copy results back to host
    cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth*imageHeight*imageChannels*sizeof(float), cudaMemcpyDeviceToHost);

    wbSolution(args, outputImage);

    cudaFree(deviceUCharImage);
    cudaFree(deviceGrayImg);
    cudaFree(deviceInputImageData);
    cudaFree(deviceOutputImageData);

    free(hostInputImageData);
    free(hostOutputImageData);

    wbImage_delete(outputImage);
    wbImage_delete(inputImage);

    return 0;
}
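For reference, the per-value mapping that hist_eq applies above is the usual histogram-equalization transfer function: value -> 255*(cdf(value) - cdfmin)/(1 - cdfmin), clamped to [0, 255]. A minimal host-side sketch of that same mapping (a hypothetical helper, not part of either file) looks like this:

// Hypothetical host-side equivalent of the per-value mapping in hist_eq, shown for reference.
// cdf[] is the normalized cumulative histogram computed in main(), cdfmin its first entry.
static unsigned char correct_color(unsigned char v, const float *cdf, float cdfmin) {
    float mapped = 255.0f * (cdf[v] - cdfmin) / (1.0f - cdfmin);
    if (mapped < 0.0f)   mapped = 0.0f;
    if (mapped > 255.0f) mapped = 255.0f;
    return (unsigned char) mapped;
}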
f2aa7ec8488b01f905f9490a0394db489a6a8321.hip
// !!! This is a file automatically generated by hipify!!!
/*
Copyright 2017 the arraydiff authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include <hip/hip_runtime_api.h>
#include <stdint.h>
#include <stdlib.h>

__global__ void rect_fwd_kernel_f32(
    uint32_t dim,
    const float *x,
    float *y)
{
  uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < dim) {
    float x_i = x[idx];
    y[idx] = x_i * (x_i > 0.0f);
  }
}

extern "C" void arraydiff_cuda_kernel_rect_fwd_f32(
    size_t dim,
    const float *x,
    float *y,
    hipStream_t stream)
{
  hipLaunchKernelGGL(( rect_fwd_kernel_f32), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
      dim, x, y);
}

__global__ void rect_bwd_kernel_f32(
    uint32_t dim,
    const float *x,
    const float *dy,
    float *dx)
{
  uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < dim) {
    dx[idx] += dy[idx] * (x[idx] > 0.0f);
  }
}

extern "C" void arraydiff_cuda_kernel_rect_bwd_f32(
    size_t dim,
    const float *x,
    const float *dy,
    float *dx,
    hipStream_t stream)
{
  hipLaunchKernelGGL(( rect_bwd_kernel_f32), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
      dim, x, dy, dx);
}
f2aa7ec8488b01f905f9490a0394db489a6a8321.cu
/*
Copyright 2017 the arraydiff authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include <cuda_runtime_api.h>
#include <stdint.h>
#include <stdlib.h>

__global__ void rect_fwd_kernel_f32(
    uint32_t dim,
    const float *x,
    float *y)
{
  uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < dim) {
    float x_i = x[idx];
    y[idx] = x_i * (x_i > 0.0f);
  }
}

extern "C" void arraydiff_cuda_kernel_rect_fwd_f32(
    size_t dim,
    const float *x,
    float *y,
    cudaStream_t stream)
{
  rect_fwd_kernel_f32<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
      dim, x, y);
}

__global__ void rect_bwd_kernel_f32(
    uint32_t dim,
    const float *x,
    const float *dy,
    float *dx)
{
  uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < dim) {
    dx[idx] += dy[idx] * (x[idx] > 0.0f);
  }
}

extern "C" void arraydiff_cuda_kernel_rect_bwd_f32(
    size_t dim,
    const float *x,
    const float *dy,
    float *dx,
    cudaStream_t stream)
{
  rect_bwd_kernel_f32<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
      dim, x, dy, dx);
}
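The extern "C" wrappers above launch one thread per element in blocks of 1024 on a caller-supplied stream, computing a forward ReLU (y = max(x, 0)) and its masked backward accumulation. A minimal usage sketch for the forward kernel, assuming the buffers already live in device memory (names and sizes here are illustrative, not from the original source):

#include <cuda_runtime.h>

// Declaration as exported by the file above.
extern "C" void arraydiff_cuda_kernel_rect_fwd_f32(size_t dim, const float *x, float *y, cudaStream_t stream);

void example_rect_fwd() {
    const size_t n = 1024;                 // illustrative element count
    float *d_x = nullptr, *d_y = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMalloc(&d_y, n * sizeof(float));
    // ... fill d_x, e.g. with cudaMemcpy from a host buffer ...
    arraydiff_cuda_kernel_rect_fwd_f32(n, d_x, d_y, /*stream=*/0);  // y[i] = max(x[i], 0)
    cudaDeviceSynchronize();
    cudaFree(d_x);
    cudaFree(d_y);
}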
11c4556d4f18a2a5bf49e1b849e838ec88feb0cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "include/mpz.h" // // Nearly minimal CUDA example. // Compile with: // // nvcc -o example example.cu // #define N 1000 // // A function marked __global__ // runs on the GPU but can be called from // the CPU. // // This function multiplies the elements of an array // of ints by 2. // // The entire computation can be thought of as running // with one thread per array element with blockIdx.x // identifying the thread. // // The comparison i<N is because often it isn't convenient // to have an exact 1-1 correspondence between threads // and array elements. Not strictly necessary here. // // Note how we're mixing GPU and CPU code in the same source // file. An alternative way to use CUDA is to keep // C/C++ code separate from CUDA code and dynamically // compile and load the CUDA code at runtime, a little // like how you compile and load OpenGL shaders from // C/C++ code. // __global__ void add(mpz_t* a, mpz_t* b) { mpz_t tmp1; mpz_init(a); mpz_init(b); mpz_init(&tmp1); mpz_set_i(a,2); mpz_set_i(b,3); mpz_mult(&tmp1, a, b); mpz_set(a, &tmp1); //mpz_get_str(&am, addr, N); //int i = blockIdx.x; //if (i<N) { // b[i] = 2*a[i]; //} } __global__ void assign(int* a) { *a = 777; } int main() { // mpz_t a, d; // mpz_init(&a); // mpz_init(&d); // mpz_set_i(a,2); // mpz_set_i(b,3); // // Create int arrays on the CPU. // ('h' stands for "host".) // //int hb[N]; // // Create corresponding int arrays on the GPU. // ('d' stands for "device".) // //char *db; //hipMalloc((void **)&da, N*sizeof(int)); //hipMallocManaged((void **)&db, N*sizeof(char)); mpz_t* a, *b; hipMallocManaged(&a, sizeof(mpz_t)); hipMallocManaged(&b, sizeof(mpz_t)); // int * i; // hipMallocManaged(&i, sizeof(int)); // printf("i: %d\n", *i); // assign<<<1, 1>>>(i); // hipDeviceSynchronize(); // printf("i: %d\n", *i); // // Initialise the input data on the CPU. // // for (int i = 0; i<N; ++i) { // ha[i] = i; // } // // Copy input data to array on GPU. // //hipMemcpy(da, ha, N*sizeof(int), hipMemcpyHostToDevice); // // Launch GPU code with N threads, one per // array element. // hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, a, b); hipDeviceSynchronize(); char buf[N]; //mpz_init(a); //mpz_set_i(a,2); mpz_get_str(a, buf, N); //sprintf(buf, "%d", 100); printf("Hello: %s\n", buf); // // Copy output array from GPU back to CPU. // //hipMemcpy(hb, db, N*sizeof(char), hipMemcpyDeviceToHost); //char buf [N]; //mpz_get_str(a, buf, N); //printf("%s\n", buf); mpz_set_i(a,2); mpz_print(a); /*for (int i = 0; i<1; ++i) { printf("%hhx\n", hb[i]); }*/ // // Free up the arrays on the GPU. // //hipFree(da); hipFree(&a); hipFree(&b); return 0; }
11c4556d4f18a2a5bf49e1b849e838ec88feb0cc.cu
#include <stdio.h> #include "include/mpz.h" // // Nearly minimal CUDA example. // Compile with: // // nvcc -o example example.cu // #define N 1000 // // A function marked __global__ // runs on the GPU but can be called from // the CPU. // // This function multiplies the elements of an array // of ints by 2. // // The entire computation can be thought of as running // with one thread per array element with blockIdx.x // identifying the thread. // // The comparison i<N is because often it isn't convenient // to have an exact 1-1 correspondence between threads // and array elements. Not strictly necessary here. // // Note how we're mixing GPU and CPU code in the same source // file. An alternative way to use CUDA is to keep // C/C++ code separate from CUDA code and dynamically // compile and load the CUDA code at runtime, a little // like how you compile and load OpenGL shaders from // C/C++ code. // __global__ void add(mpz_t* a, mpz_t* b) { mpz_t tmp1; mpz_init(a); mpz_init(b); mpz_init(&tmp1); mpz_set_i(a,2); mpz_set_i(b,3); mpz_mult(&tmp1, a, b); mpz_set(a, &tmp1); //mpz_get_str(&am, addr, N); //int i = blockIdx.x; //if (i<N) { // b[i] = 2*a[i]; //} } __global__ void assign(int* a) { *a = 777; } int main() { // mpz_t a, d; // mpz_init(&a); // mpz_init(&d); // mpz_set_i(a,2); // mpz_set_i(b,3); // // Create int arrays on the CPU. // ('h' stands for "host".) // //int hb[N]; // // Create corresponding int arrays on the GPU. // ('d' stands for "device".) // //char *db; //cudaMalloc((void **)&da, N*sizeof(int)); //cudaMallocManaged((void **)&db, N*sizeof(char)); mpz_t* a, *b; cudaMallocManaged(&a, sizeof(mpz_t)); cudaMallocManaged(&b, sizeof(mpz_t)); // int * i; // cudaMallocManaged(&i, sizeof(int)); // printf("i: %d\n", *i); // assign<<<1, 1>>>(i); // cudaDeviceSynchronize(); // printf("i: %d\n", *i); // // Initialise the input data on the CPU. // // for (int i = 0; i<N; ++i) { // ha[i] = i; // } // // Copy input data to array on GPU. // //cudaMemcpy(da, ha, N*sizeof(int), cudaMemcpyHostToDevice); // // Launch GPU code with N threads, one per // array element. // add<<<1, 1>>>(a, b); cudaDeviceSynchronize(); char buf[N]; //mpz_init(a); //mpz_set_i(a,2); mpz_get_str(a, buf, N); //sprintf(buf, "%d", 100); printf("Hello: %s\n", buf); // // Copy output array from GPU back to CPU. // //cudaMemcpy(hb, db, N*sizeof(char), cudaMemcpyDeviceToHost); //char buf [N]; //mpz_get_str(a, buf, N); //printf("%s\n", buf); mpz_set_i(a,2); mpz_print(a); /*for (int i = 0; i<1; ++i) { printf("%hhx\n", hb[i]); }*/ // // Free up the arrays on the GPU. // //cudaFree(da); cudaFree(&a); cudaFree(&b); return 0; }
df821bbf19cce05cc370d247bd335b191667d62e.hip
// !!! This is a file automatically generated by hipify!!! #include "warpPerPointTasks.cuh" #include "warpPerPointKernel.cuh" #include "atomicKernels.cuh" #include "helpers.h" #include <time.h> #include <stdio.h> #include <iostream> hipError_t countKMeansWarpPerPoint(const uint32_t iterations, const uint32_t dataSize_u32, const value_t* data, const uint32_t meansSize_u32, value_t* means, uint32_t* assignedClusters, uint64_t dimension_u64, std::string version) { value_t* dev_means = 0, *dev_data = 0, *dev_meansSums = 0;//, *dev_temp = 0; uint32_t* dev_assignedClusters = 0, *dev_counts = 0; const my_size_t dataSize = static_cast<my_size_t>(dataSize_u32); const my_size_t meansSize = static_cast<my_size_t>(meansSize_u32); const my_size_t dimension = static_cast<my_size_t>(dimension_u64); hipError_t cudaStatus = hipSuccess; // Launch a kernel on the GPU with one thread for each element. int pointsPerBlock= BLOCK_SIZE > meansSize ? WARP_SIZE / meansSize : 1; dim3 blockSizeN(meansSize, pointsPerBlock); int nBlocksN = (dataSize - 1) / pointsPerBlock + 1; auto findNearestClusterKernel = &findNearestWarpPerPointKernel; int sharedMemomrySize = sizeof(value_t)* (/*dimension * pointsPerWarp + */blockSizeN.x * blockSizeN.y); if (version == "--sharedMemory") { findNearestClusterKernel = &findNearestWarpPerPointSMKernel; sharedMemomrySize = sizeof(value_t)* (dimension * pointsPerBlock + blockSizeN.x * blockSizeN.y); std::cout << "Shared memory" << std::endl; } #if __CUDA_ARCH__ >= 300 if (version == "--shuffle") { findNearestClusterKernel = &findNearestWarpPerPointShuffleKernel; sharedMemomrySize = 0; std::cout << "Shuffle" << std::endl; } #endif // for DivMeansKernel int meansPerBlock = BLOCK_SIZE > dimension ? BLOCK_SIZE / dimension : 1; int meansBlocks = (meansSize - 1) / meansPerBlock + 1; clock_t start, end; start = clock(); //std::vector<uint32_t> testVector(meansSize); try { // Choose which GPU to run on, change this on a multi-GPU system. setDevice(DEVICE_ID); // Allocate GPU buffers for three vectors (two input, one output) . allocateMemory((void**)&dev_means, meansSize * dimension * sizeof(value_t)); allocateAndSetMemory((void**)&dev_meansSums, meansSize * dimension * sizeof(value_t), 0); allocateMemory((void**)&dev_data, dataSize * dimension * sizeof(value_t)); allocateMemory((void**)&dev_assignedClusters, dataSize * sizeof(uint32_t)); allocateAndSetMemory((void**)&dev_counts, meansSize * sizeof(uint32_t), 0); // Copy input vectors from host memory to GPU buffers. 
copyMemory(dev_means, means, meansSize * dimension * sizeof(value_t), hipMemcpyHostToDevice); copyMemory(dev_data, data, dataSize * dimension * sizeof(value_t), hipMemcpyHostToDevice); //uint32_t* test = (uint32_t*)calloc(meansSize, sizeof(uint32_t)); //value_t* testMeans = (value_t*)calloc(meansSize * dimension , sizeof(value_t)); //int blockSizeM = 16; //int nBlocksM = (meansSize - 1) / blockSizeM + 1; std::cout << "Starting execution" << std::endl; for (int32_t i = 0; i < iterations; ++i) { findNearestClusterKernel << <nBlocksN, blockSizeN, sharedMemomrySize >> >(dev_means, dev_meansSums, dev_data, dev_counts, dimension); synchronizeDevice(); countDivMeansKernel << <meansBlocks, meansPerBlock * dimension >> >(dev_counts, dev_means, dev_meansSums, dimension, meansPerBlock); synchronizeDevice(); hipMemset(dev_meansSums, 0, meansSize * dimension * sizeof(value_t)); hipMemset(dev_counts, 0, meansSize * sizeof(uint32_t)); } // Check for any errors launching the kernel checkErrors(); copyMemory(means, dev_means, meansSize * dimension * sizeof(value_t), hipMemcpyDeviceToHost); copyMemory(assignedClusters, dev_assignedClusters, dataSize * sizeof(uint32_t), hipMemcpyDeviceToHost); } catch (ICUDAException &e) { fprintf(stderr, "CUDA exception: %s\n", e.what()); cudaStatus = e.getError(); } catch (std::exception &e) { fprintf(stderr, "STD exception: %s\n", e.what()); cudaStatus = hipGetLastError(); } hipFree(dev_data); hipFree(dev_means); hipFree(dev_meansSums); hipFree(dev_assignedClusters); hipFree(dev_counts); end = clock(); std::cout << "Time required for execution: " << (double)(end - start) / CLOCKS_PER_SEC << " seconds." << "\n\n"; return cudaStatus; }
df821bbf19cce05cc370d247bd335b191667d62e.cu
#include "warpPerPointTasks.cuh" #include "warpPerPointKernel.cuh" #include "atomicKernels.cuh" #include "helpers.h" #include <time.h> #include <stdio.h> #include <iostream> cudaError_t countKMeansWarpPerPoint(const uint32_t iterations, const uint32_t dataSize_u32, const value_t* data, const uint32_t meansSize_u32, value_t* means, uint32_t* assignedClusters, uint64_t dimension_u64, std::string version) { value_t* dev_means = 0, *dev_data = 0, *dev_meansSums = 0;//, *dev_temp = 0; uint32_t* dev_assignedClusters = 0, *dev_counts = 0; const my_size_t dataSize = static_cast<my_size_t>(dataSize_u32); const my_size_t meansSize = static_cast<my_size_t>(meansSize_u32); const my_size_t dimension = static_cast<my_size_t>(dimension_u64); cudaError_t cudaStatus = cudaSuccess; // Launch a kernel on the GPU with one thread for each element. int pointsPerBlock= BLOCK_SIZE > meansSize ? WARP_SIZE / meansSize : 1; dim3 blockSizeN(meansSize, pointsPerBlock); int nBlocksN = (dataSize - 1) / pointsPerBlock + 1; auto findNearestClusterKernel = &findNearestWarpPerPointKernel; int sharedMemomrySize = sizeof(value_t)* (/*dimension * pointsPerWarp + */blockSizeN.x * blockSizeN.y); if (version == "--sharedMemory") { findNearestClusterKernel = &findNearestWarpPerPointSMKernel; sharedMemomrySize = sizeof(value_t)* (dimension * pointsPerBlock + blockSizeN.x * blockSizeN.y); std::cout << "Shared memory" << std::endl; } #if __CUDA_ARCH__ >= 300 if (version == "--shuffle") { findNearestClusterKernel = &findNearestWarpPerPointShuffleKernel; sharedMemomrySize = 0; std::cout << "Shuffle" << std::endl; } #endif // for DivMeansKernel int meansPerBlock = BLOCK_SIZE > dimension ? BLOCK_SIZE / dimension : 1; int meansBlocks = (meansSize - 1) / meansPerBlock + 1; clock_t start, end; start = clock(); //std::vector<uint32_t> testVector(meansSize); try { // Choose which GPU to run on, change this on a multi-GPU system. setDevice(DEVICE_ID); // Allocate GPU buffers for three vectors (two input, one output) . allocateMemory((void**)&dev_means, meansSize * dimension * sizeof(value_t)); allocateAndSetMemory((void**)&dev_meansSums, meansSize * dimension * sizeof(value_t), 0); allocateMemory((void**)&dev_data, dataSize * dimension * sizeof(value_t)); allocateMemory((void**)&dev_assignedClusters, dataSize * sizeof(uint32_t)); allocateAndSetMemory((void**)&dev_counts, meansSize * sizeof(uint32_t), 0); // Copy input vectors from host memory to GPU buffers. 
copyMemory(dev_means, means, meansSize * dimension * sizeof(value_t), cudaMemcpyHostToDevice); copyMemory(dev_data, data, dataSize * dimension * sizeof(value_t), cudaMemcpyHostToDevice); //uint32_t* test = (uint32_t*)calloc(meansSize, sizeof(uint32_t)); //value_t* testMeans = (value_t*)calloc(meansSize * dimension , sizeof(value_t)); //int blockSizeM = 16; //int nBlocksM = (meansSize - 1) / blockSizeM + 1; std::cout << "Starting execution" << std::endl; for (int32_t i = 0; i < iterations; ++i) { findNearestClusterKernel << <nBlocksN, blockSizeN, sharedMemomrySize >> >(dev_means, dev_meansSums, dev_data, dev_counts, dimension); synchronizeDevice(); countDivMeansKernel << <meansBlocks, meansPerBlock * dimension >> >(dev_counts, dev_means, dev_meansSums, dimension, meansPerBlock); synchronizeDevice(); cudaMemset(dev_meansSums, 0, meansSize * dimension * sizeof(value_t)); cudaMemset(dev_counts, 0, meansSize * sizeof(uint32_t)); } // Check for any errors launching the kernel checkErrors(); copyMemory(means, dev_means, meansSize * dimension * sizeof(value_t), cudaMemcpyDeviceToHost); copyMemory(assignedClusters, dev_assignedClusters, dataSize * sizeof(uint32_t), cudaMemcpyDeviceToHost); } catch (ICUDAException &e) { fprintf(stderr, "CUDA exception: %s\n", e.what()); cudaStatus = e.getError(); } catch (std::exception &e) { fprintf(stderr, "STD exception: %s\n", e.what()); cudaStatus = cudaGetLastError(); } cudaFree(dev_data); cudaFree(dev_means); cudaFree(dev_meansSums); cudaFree(dev_assignedClusters); cudaFree(dev_counts); end = clock(); std::cout << "Time required for execution: " << (double)(end - start) / CLOCKS_PER_SEC << " seconds." << "\n\n"; return cudaStatus; }
8233307941658139766a1d764848edf722c242db.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <iostream> #include "ped_crowd.h" #include "tick_cuda.h" #include <hip/hip_runtime.h> using namespace std; __global__ void mAdd(float *A, float *B,float *C, float *D){ int id = (blockIdx.x * blockDim.x) + threadIdx.x; A[id] = llroundf(A[id] + B[id]); C[id] = llroundf(C[id] + D[id]); } __global__ void mComputeDirection(float *mDestX, float *mDestY, float *mDestR, float *mAgentsX, float *mAgentsY, float *mMoveForceX, float *mMoveForceY, int *mCurrWay, float *mWaypointX, float *mWaypointY, float *mWaypointR, int *mNumberOfWaypoints) { int Offset = (blockIdx.x * blockDim.x) + threadIdx.x; bool reached = false; mSetForce(&mDestX[Offset], &mDestY[Offset], &mDestR[Offset], &mAgentsX[Offset], &mAgentsY[Offset], &mMoveForceX[Offset], &mMoveForceY[Offset], &reached); //Check which vectors have arrived at dest if (reached == true) { mSetNextDestination(&mCurrWay[Offset], &mDestX[Offset], &mDestY[Offset], &mDestR[Offset], mWaypointX, mWaypointY, mWaypointR, *mNumberOfWaypoints); } float length = mVectorLength(mMoveForceX[Offset], mMoveForceY[Offset]); mVectorNormalized(&mMoveForceX[Offset], &mMoveForceY[Offset], length); } __device__ void mSetNextDestination(int *mCurrWay, float *mDestX, float *mDestY, float *mDestR, float *mWaypointX,float *mWaypointY, float *mWaypointR, int mNumberOfWaypoints) { *mCurrWay+=1; int NextWaypoint = fmodf(*mCurrWay, mNumberOfWaypoints); *mDestX = mWaypointX[NextWaypoint]; *mDestY = mWaypointY[NextWaypoint]; *mDestR = mWaypointR[NextWaypoint]; } __device__ void mSetForce(float *mTempX, float *mTempY, float *mTempZ, float *mX, float *mY, float *mMoveForceX, float *mMoveForceY, bool *reached){ float diffX = *mTempX - *mX; float diffY = *mTempY - *mY; float length = mVectorLength(diffX, diffY); if(length < *mTempZ) *reached = true; else *reached = false; mVectorNormalized(&diffX, &diffY, length); //Update MoveForce *mMoveForceX = diffX; *mMoveForceY = diffY; } __device__ float mVectorLength(float mX, float mY){ return sqrt(mX*mX + mY*mY); } __device__ void mVectorNormalized(float *mX, float *mY, float length){ if(length != 0){ *mX = *mX/length; *mY = *mY/length; }else{ *mX = 0; *mY = 0; } } void kernel_go(int blocksPerGrid, int threadsPerBlock, float *d_AgentsX, float *d_MoveForceX, float *d_AgentsY, float *d_MoveForceY){ hipLaunchKernelGGL(( mAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_AgentsX, d_MoveForceX, d_AgentsY, d_MoveForceY); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); } void kernel_wtg(int blocksPerGrid, int threadsPerBlock, float *d_DestX, float *d_DestY, float *d_DestR, float *d_AgentsX, float *d_AgentsY, float *d_MoveForceX, float *d_MoveForceY, int *d_CurrWay, float *d_WaypointX, float *d_WaypointY, float *d_WaypointR, int *d_NumberOfWaypoints){ hipLaunchKernelGGL(( mComputeDirection), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_DestX, d_DestY, d_DestR, d_AgentsX, d_AgentsY, d_MoveForceX, d_MoveForceY, d_CurrWay, d_WaypointX, d_WaypointY, d_WaypointR, d_NumberOfWaypoints); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); }
8233307941658139766a1d764848edf722c242db.cu
#include <cstdio> #include <iostream> #include "ped_crowd.h" #include "tick_cuda.h" #include <cuda_runtime.h> using namespace std; __global__ void mAdd(float *A, float *B,float *C, float *D){ int id = (blockIdx.x * blockDim.x) + threadIdx.x; A[id] = llroundf(A[id] + B[id]); C[id] = llroundf(C[id] + D[id]); } __global__ void mComputeDirection(float *mDestX, float *mDestY, float *mDestR, float *mAgentsX, float *mAgentsY, float *mMoveForceX, float *mMoveForceY, int *mCurrWay, float *mWaypointX, float *mWaypointY, float *mWaypointR, int *mNumberOfWaypoints) { int Offset = (blockIdx.x * blockDim.x) + threadIdx.x; bool reached = false; mSetForce(&mDestX[Offset], &mDestY[Offset], &mDestR[Offset], &mAgentsX[Offset], &mAgentsY[Offset], &mMoveForceX[Offset], &mMoveForceY[Offset], &reached); //Check which vectors have arrived at dest if (reached == true) { mSetNextDestination(&mCurrWay[Offset], &mDestX[Offset], &mDestY[Offset], &mDestR[Offset], mWaypointX, mWaypointY, mWaypointR, *mNumberOfWaypoints); } float length = mVectorLength(mMoveForceX[Offset], mMoveForceY[Offset]); mVectorNormalized(&mMoveForceX[Offset], &mMoveForceY[Offset], length); } __device__ void mSetNextDestination(int *mCurrWay, float *mDestX, float *mDestY, float *mDestR, float *mWaypointX,float *mWaypointY, float *mWaypointR, int mNumberOfWaypoints) { *mCurrWay+=1; int NextWaypoint = fmodf(*mCurrWay, mNumberOfWaypoints); *mDestX = mWaypointX[NextWaypoint]; *mDestY = mWaypointY[NextWaypoint]; *mDestR = mWaypointR[NextWaypoint]; } __device__ void mSetForce(float *mTempX, float *mTempY, float *mTempZ, float *mX, float *mY, float *mMoveForceX, float *mMoveForceY, bool *reached){ float diffX = *mTempX - *mX; float diffY = *mTempY - *mY; float length = mVectorLength(diffX, diffY); if(length < *mTempZ) *reached = true; else *reached = false; mVectorNormalized(&diffX, &diffY, length); //Update MoveForce *mMoveForceX = diffX; *mMoveForceY = diffY; } __device__ float mVectorLength(float mX, float mY){ return sqrt(mX*mX + mY*mY); } __device__ void mVectorNormalized(float *mX, float *mY, float length){ if(length != 0){ *mX = *mX/length; *mY = *mY/length; }else{ *mX = 0; *mY = 0; } } void kernel_go(int blocksPerGrid, int threadsPerBlock, float *d_AgentsX, float *d_MoveForceX, float *d_AgentsY, float *d_MoveForceY){ mAdd<<<blocksPerGrid, threadsPerBlock>>> (d_AgentsX, d_MoveForceX, d_AgentsY, d_MoveForceY); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); } void kernel_wtg(int blocksPerGrid, int threadsPerBlock, float *d_DestX, float *d_DestY, float *d_DestR, float *d_AgentsX, float *d_AgentsY, float *d_MoveForceX, float *d_MoveForceY, int *d_CurrWay, float *d_WaypointX, float *d_WaypointY, float *d_WaypointR, int *d_NumberOfWaypoints){ mComputeDirection<<<blocksPerGrid, threadsPerBlock>>>(d_DestX, d_DestY, d_DestR, d_AgentsX, d_AgentsY, d_MoveForceX, d_MoveForceY, d_CurrWay, d_WaypointX, d_WaypointY, d_WaypointR, d_NumberOfWaypoints); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); }
f48239f246ade68104b139dd12830a8050196a10.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <iostream>

#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

bool allocate (void **ptr, size_t size){
    // since hipMalloc accepts double pointer, casting is required.
    hipError_t stat = hipMalloc(ptr, size);
    if (stat == hipSuccess) return true;
    // if no success, print the error
    std::cout << "allocation stat: " << stat << std::endl;
    return false;
}

/*
enum hipMemcpyKind:
    hipMemcpyHostToHost = 0
    hipMemcpyHostToDevice = 1
    hipMemcpyDeviceToHost = 2
    hipMemcpyDeviceToDevice = 3
    hipMemcpyDefault = 4
*/
bool copy(void *dst, void *src, size_t size, int cudaCpyKind){
    hipMemcpyKind dir;
    switch (cudaCpyKind) {
        case 0:  dir = hipMemcpyHostToHost;     break;
        case 1:  dir = hipMemcpyHostToDevice;   break;
        case 2:  dir = hipMemcpyDeviceToHost;   break;
        case 3:  dir = hipMemcpyDeviceToDevice; break;
        default: dir = hipMemcpyHostToHost;     break;
    }
    hipError_t stat = hipMemcpy(dst, src, size, dir);
    if (stat == hipSuccess) return true;
    // if no success, print the error
    std::cout << "copy stat: " << stat << std::endl;
    return false;
}

void release(void *ptr){ hipFree(ptr);}
f48239f246ade68104b139dd12830a8050196a10.cu
#include <cstdlib>
#include <cstdio>
#include <iostream>

#include <cuda.h>
#include <cuda_runtime.h>

bool allocate (void **ptr, size_t size){
    // since cudaMalloc accepts double pointer, casting is required.
    cudaError_t stat = cudaMalloc(ptr, size);
    if (stat == cudaSuccess) return true;
    // if no success, print the error
    std::cout << "allocation stat: " << stat << std::endl;
    return false;
}

/*
enum cudaMemcpyKind:
    cudaMemcpyHostToHost = 0
    cudaMemcpyHostToDevice = 1
    cudaMemcpyDeviceToHost = 2
    cudaMemcpyDeviceToDevice = 3
    cudaMemcpyDefault = 4
*/
bool copy(void *dst, void *src, size_t size, int cudaCpyKind){
    cudaMemcpyKind dir;
    switch (cudaCpyKind) {
        case 0:  dir = cudaMemcpyHostToHost;     break;
        case 1:  dir = cudaMemcpyHostToDevice;   break;
        case 2:  dir = cudaMemcpyDeviceToHost;   break;
        case 3:  dir = cudaMemcpyDeviceToDevice; break;
        default: dir = cudaMemcpyHostToHost;     break;
    }
    cudaError_t stat = cudaMemcpy(dst, src, size, dir);
    if (stat == cudaSuccess) return true;
    // if no success, print the error
    std::cout << "copy stat: " << stat << std::endl;
    return false;
}

void release(void *ptr){ cudaFree(ptr);}
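A short usage sketch for these helpers (a hypothetical caller, not part of the original file), assuming it is compiled in the same translation unit; the integer copy kinds follow the enum listed in the comment above (1 = host-to-device, 2 = device-to-host):

// Illustrative only: round-trip a small host buffer through device memory.
bool roundtrip_example() {
    const size_t n = 256;
    float host[256] = {0};
    void *dev = nullptr;
    if (!allocate(&dev, n * sizeof(float))) return false;
    bool ok = copy(dev, host, n * sizeof(float), 1)    // host -> device
           && copy(host, dev, n * sizeof(float), 2);   // device -> host
    release(dev);
    return ok;
}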
e63591bf1f5cba7d38fc0900fd8cb31d8dafa25e.hip
// !!! This is a file automatically generated by hipify!!! #define CODE_START "2015.12.20" #define CODE_AUTOHOR "Sli ron" #define CODE_NAME "regnb" #define CODE_VERSION "0.1" // includes system #include <cmath> #include <ctime> #include <iomanip> #include <iostream> #include <fstream> #include <memory> // includes CUDA #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" // includes project #include "integrator.h" #include "int_rungekutta8.h" #include "nbody.h" #include "options.h" #include "red_type.h" #include "red_constants.h" #include "redutilcu.h" using namespace std; using namespace redutilcu; string create_prefix(const options& opt) { static const char* integrator_type_short_name[] = { "E", "RK2", "RK4", "RK5", "RKF8", "RKN" }; string prefix; if (opt.ef) { char sep = '_'; string config; #ifdef _DEBUG config = "D"; #else config = "R"; #endif string dev = (opt.comp_dev == COMPUTING_DEVICE_CPU ? "cpu" : "gpu"); // as: adaptive step-size, fs: fix step-size string adapt = (opt.param->adaptive == true ? "as" : "fs"); // collision detection model string cdm; switch (opt.param->cdm) { case COLLISION_DETECTION_MODEL_STEP: // bs: between step cdm = "bs"; break; case COLLISION_DETECTION_MODEL_SUB_STEP: // bs: sub-step cdm = "ss"; break; case COLLISION_DETECTION_MODEL_INTERPOLATION: throw string("COLLISION_DETECTION_MODEL_INTERPOLATION is not implemented."); default: throw string("Parameter 'cdm' is out of range."); } string int_name(integrator_type_short_name[opt.param->int_type]); prefix += config + sep + dev + sep + cdm + sep + adapt + sep + int_name + sep; } return prefix; } void open_streams(const options& opt, ofstream** output) { string path; string prefix = create_prefix(opt); string ext = "txt"; path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_RESULT]) + "." + ext; output[OUTPUT_NAME_RESULT] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_RESULT]) { throw string("Cannot open " + path + "."); } path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INFO]) + "." + ext; output[OUTPUT_NAME_INFO] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_INFO]) { throw string("Cannot open " + path + "."); } path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_EVENT]) + "." + ext; output[OUTPUT_NAME_EVENT] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_EVENT]) { throw string("Cannot open " + path + "."); } path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_LOG]) + "." + ext; output[OUTPUT_NAME_LOG] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_LOG]) { throw string("Cannot open " + path + "."); } path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INTEGRAL]) + "." + ext; output[OUTPUT_NAME_INTEGRAL] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_INTEGRAL]) { throw string("Cannot open " + path + "."); } path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INTEGRAL_EVENT]) + "." 
+ ext; output[OUTPUT_NAME_INTEGRAL_EVENT] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_INTEGRAL_EVENT]) { throw string("Cannot open " + path + "."); } } void print_info(ofstream& sout, const nbody* nbd, integrator *intgr, ttt_t dt, clock_t* T_CPU, clock_t* dT_CPU) { static const string header_str = "dev,date ,time ,t [d] ,dt [d] ,dt_avg [d] ,dT [s] ,dT_avg [s] ,Nc ,Ne ,Nh ,nb_p ,nb_i,nb_r ,ns_t ,ns_p ,ns_f ,ns_a,ns_r ,ngp_a,ngp_r,nrp_a,nrp_r,npp_a,npp_r,nspl_a,nspl_r,npl_a,npl_r,ntp_a,ntp_r"; static bool first_call = true; static string cvs = ","; cout.setf(ios::right); cout.setf(ios::scientific); sout.setf(ios::right); sout.setf(ios::scientific); number_of_bodies* nb = nbd->n_bodies; string dev = (intgr->get_computing_device() == COMPUTING_DEVICE_CPU ? "CPU" : "GPU"); cout << "[" << dev << "] " << tools::get_time_stamp(false) << " t: " << setprecision(4) << setw(10) << nbd->t / constants::Gauss << ", dt: " << setprecision(4) << setw(10) << dt / constants::Gauss << " (" << setprecision(4) << setw(10) << (nbd->t / constants::Gauss)/intgr->get_n_passed_step() << ") [d]"; cout << ", dT: " << setprecision(4) << setw(10) << *dT_CPU / (double)CLOCKS_PER_SEC; cout << " (" << setprecision(4) << setw(10) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << ") [s]"; cout << ", Nc: " << setw(5) << nbd->n_collision[ EVENT_COUNTER_NAME_TOTAL] << ", Ne: " << setw(5) << nbd->n_ejection[ EVENT_COUNTER_NAME_TOTAL] << ", Nh: " << setw(5) << nbd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL] << ", N : " << setw(6) << nb->get_n_total_playing() << "(" << setw(3) << nb->get_n_total_inactive() << ", " << setw(5) << nb->get_n_total_removed() << ")" << ", nt: " << setw(11) << intgr->get_n_tried_step() << ", np: " << setw(11) << intgr->get_n_passed_step() << ", nf: " << setw(11) << intgr->get_n_failed_step() << endl; if (first_call) { first_call = false; sout << header_str << endl; } sout << dev << cvs << tools::get_time_stamp(true) << cvs << setprecision(4) << nbd->t / constants::Gauss << cvs << setprecision(4) << dt / constants::Gauss << cvs << setprecision(4) << (nbd->t / constants::Gauss)/intgr->get_n_passed_step() << cvs; sout << setprecision(4) << (*dT_CPU / (double)CLOCKS_PER_SEC) << cvs; sout << setprecision(4) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << cvs; sout << nbd->n_collision[ EVENT_COUNTER_NAME_TOTAL] << cvs << nbd->n_ejection[ EVENT_COUNTER_NAME_TOTAL] << cvs << nbd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL] << cvs << nb->get_n_total_playing() << cvs << nb->get_n_total_inactive() << cvs << nb->get_n_total_removed() << cvs << intgr->get_n_tried_step() << cvs << intgr->get_n_passed_step() << cvs << intgr->get_n_failed_step() << cvs; for (int i = 0; i < BODY_TYPE_N; i++) { sout << nb->playing[i] - nb->inactive[i] << cvs << nb->removed[i] << (i < BODY_TYPE_TESTPARTICLE ? 
cvs : ""); } sout << endl; } void run_simulation(const options& opt, nbody* nbd, integrator* intgr, ofstream** output) { ttt_t ps = 0.0; ttt_t dt = 0.0; clock_t T_CPU = 0; clock_t dT_CPU = 0; time_t time_last_info = clock(); time_t time_last_dump = clock(); integral_t integrals[2]; uint32_t n_removed = 0; uint32_t n_dump = 1; nbd->print_result(*output[OUTPUT_NAME_RESULT], DATA_REPRESENTATION_ASCII); /* main cycle */ #if 1 while (nbd->t <= opt.param->stop_time && 1 < nbd->n_bodies->get_n_total_active()) { // make the integration step, and measure the time it takes clock_t T0_CPU = clock(); dt = intgr->step(); dT_CPU = (clock() - T0_CPU); T_CPU += dT_CPU; ps += fabs(dt); if (0.0 < opt.param->threshold[THRESHOLD_RADII_ENHANCE_FACTOR]) { integral_t I; bool collision = nbd->check_for_collision(); if (collision) { integral_t I; nbd->calc_integral(false, I); nbd->print_integral_data(I, *output[OUTPUT_NAME_INTEGRAL_EVENT]); { // Restore the state before the collision nbd->swap(); nbd->t -= dt; if (COMPUTING_DEVICE_GPU == opt.comp_dev) { nbd->copy_event_data_to_host(); } nbd->populate_sp_events(); for (uint32_t i = 0; i < nbd->sp_events.size(); i++) { // Create the subsystem containing the colliding bodies nbody* reg_tbp = nbd->create_reg_tbp(nbd->sp_events[i].idx1, nbd->sp_events[i].idx2); integrator* reg_int = new rungekutta8(reg_tbp, dt, true, 1.0e-15, opt.comp_dev); cout << "Subsystem with " << nbd->sp_events[i].id1 << " and " << nbd->sp_events[i].id2 << " was created." << endl; //uint32_t ictv_idx = nbd->sp_events[i] .idx2; //handle_collision_pair(i, &sp_events[i]); //increment_event_counter(n_collision); //// Make the merged body inactive //sim_data->h_body_md[ictv_idx].id *= -1; //// Copy it up to GPU //if (COMPUTING_DEVICE_GPU == comp_dev) //{ // copy_vector_to_device((void **)&sim_data->d_body_md[ictv_idx], (void **)&sim_data->h_body_md[ictv_idx], sizeof(body_metadata_t)); //} //// Update number of inactive bodies //n_bodies->inactive[sim_data->h_body_md[ictv_idx].body_type]++; } } //nbd->handle_collision(); //nbd->calc_integral(false, I); //nbd->print_integral_data(I, *output[OUTPUT_NAME_INTEGRAL_EVENT]); //nbd->print_event_data(*output[OUTPUT_NAME_EVENT], *output[OUTPUT_NAME_LOG]); nbd->clear_event_counter(); } } if (opt.param->output_interval <= fabs(ps)) { integral_t I; ps = 0.0; nbd->print_result(*output[OUTPUT_NAME_RESULT], DATA_REPRESENTATION_ASCII); nbd->calc_integral(false, I); nbd->print_integral_data(I, *output[OUTPUT_NAME_INTEGRAL]); } if (opt.info_dt < (clock() - time_last_info) / (double)CLOCKS_PER_SEC) { time_last_info = clock(); print_info(*output[OUTPUT_NAME_INFO], nbd, intgr, dt, &T_CPU, &dT_CPU); } } /* while */ #endif print_info(*output[OUTPUT_NAME_INFO], nbd, intgr, dt, &T_CPU, &dT_CPU); // To avoid duplicate save at the end of the simulation if (0.0 < ps) { ps = 0.0; nbd->print_result(*output[OUTPUT_NAME_RESULT], DATA_REPRESENTATION_ASCII); } } int main(int argc, const char** argv, const char** env) { time_t start = time(NULL); ofstream* output[OUTPUT_NAME_N]; memset(output, 0x0, sizeof(output)); try { options opt = options(argc, argv); open_streams(opt, output); file::log_start(*output[OUTPUT_NAME_LOG], argc, argv, env, opt.param->cdm, opt.print_to_screen); nbody *nbd = opt.create_nbody(); ttt_t dt = 0.1; // [day] integrator *intgr = opt.create_integrator(nbd, dt); run_simulation(opt, nbd, intgr, output); // Needed by nvprof.exe if (COMPUTING_DEVICE_GPU == nbd->get_computing_device()) { hipDeviceReset(); } } /* try */ catch (const string& msg) { if (0x0 != 
output[OUTPUT_NAME_LOG]) { file::log_message(*output[OUTPUT_NAME_LOG], "Error: " + msg, false); } cerr << "Error: " << msg << endl; } if (0x0 != output[OUTPUT_NAME_LOG]) { file::log_message(*output[OUTPUT_NAME_LOG], "Total time: " + tools::convert_time_t(time(NULL) - start) + " s", false); } cout << "Total time: " << time(NULL) - start << " s" << endl; return (EXIT_SUCCESS); }
e63591bf1f5cba7d38fc0900fd8cb31d8dafa25e.cu
#define CODE_START "2015.12.20" #define CODE_AUTOHOR "Süli Áron" #define CODE_NAME "regnb" #define CODE_VERSION "0.1" // includes system #include <cmath> #include <ctime> #include <iomanip> #include <iostream> #include <fstream> #include <memory> // includes CUDA #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" // includes project #include "integrator.h" #include "int_rungekutta8.h" #include "nbody.h" #include "options.h" #include "red_type.h" #include "red_constants.h" #include "redutilcu.h" using namespace std; using namespace redutilcu; string create_prefix(const options& opt) { static const char* integrator_type_short_name[] = { "E", "RK2", "RK4", "RK5", "RKF8", "RKN" }; string prefix; if (opt.ef) { char sep = '_'; string config; #ifdef _DEBUG config = "D"; #else config = "R"; #endif string dev = (opt.comp_dev == COMPUTING_DEVICE_CPU ? "cpu" : "gpu"); // as: adaptive step-size, fs: fix step-size string adapt = (opt.param->adaptive == true ? "as" : "fs"); // collision detection model string cdm; switch (opt.param->cdm) { case COLLISION_DETECTION_MODEL_STEP: // bs: between step cdm = "bs"; break; case COLLISION_DETECTION_MODEL_SUB_STEP: // bs: sub-step cdm = "ss"; break; case COLLISION_DETECTION_MODEL_INTERPOLATION: throw string("COLLISION_DETECTION_MODEL_INTERPOLATION is not implemented."); default: throw string("Parameter 'cdm' is out of range."); } string int_name(integrator_type_short_name[opt.param->int_type]); prefix += config + sep + dev + sep + cdm + sep + adapt + sep + int_name + sep; } return prefix; } void open_streams(const options& opt, ofstream** output) { string path; string prefix = create_prefix(opt); string ext = "txt"; path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_RESULT]) + "." + ext; output[OUTPUT_NAME_RESULT] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_RESULT]) { throw string("Cannot open " + path + "."); } path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INFO]) + "." + ext; output[OUTPUT_NAME_INFO] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_INFO]) { throw string("Cannot open " + path + "."); } path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_EVENT]) + "." + ext; output[OUTPUT_NAME_EVENT] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_EVENT]) { throw string("Cannot open " + path + "."); } path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_LOG]) + "." + ext; output[OUTPUT_NAME_LOG] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_LOG]) { throw string("Cannot open " + path + "."); } path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INTEGRAL]) + "." + ext; output[OUTPUT_NAME_INTEGRAL] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_INTEGRAL]) { throw string("Cannot open " + path + "."); } path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INTEGRAL_EVENT]) + "." 
+ ext; output[OUTPUT_NAME_INTEGRAL_EVENT] = new ofstream(path.c_str(), ios::out); if (!*output[OUTPUT_NAME_INTEGRAL_EVENT]) { throw string("Cannot open " + path + "."); } } void print_info(ofstream& sout, const nbody* nbd, integrator *intgr, ttt_t dt, clock_t* T_CPU, clock_t* dT_CPU) { static const string header_str = "dev,date ,time ,t [d] ,dt [d] ,dt_avg [d] ,dT [s] ,dT_avg [s] ,Nc ,Ne ,Nh ,nb_p ,nb_i,nb_r ,ns_t ,ns_p ,ns_f ,ns_a,ns_r ,ngp_a,ngp_r,nrp_a,nrp_r,npp_a,npp_r,nspl_a,nspl_r,npl_a,npl_r,ntp_a,ntp_r"; static bool first_call = true; static string cvs = ","; cout.setf(ios::right); cout.setf(ios::scientific); sout.setf(ios::right); sout.setf(ios::scientific); number_of_bodies* nb = nbd->n_bodies; string dev = (intgr->get_computing_device() == COMPUTING_DEVICE_CPU ? "CPU" : "GPU"); cout << "[" << dev << "] " << tools::get_time_stamp(false) << " t: " << setprecision(4) << setw(10) << nbd->t / constants::Gauss << ", dt: " << setprecision(4) << setw(10) << dt / constants::Gauss << " (" << setprecision(4) << setw(10) << (nbd->t / constants::Gauss)/intgr->get_n_passed_step() << ") [d]"; cout << ", dT: " << setprecision(4) << setw(10) << *dT_CPU / (double)CLOCKS_PER_SEC; cout << " (" << setprecision(4) << setw(10) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << ") [s]"; cout << ", Nc: " << setw(5) << nbd->n_collision[ EVENT_COUNTER_NAME_TOTAL] << ", Ne: " << setw(5) << nbd->n_ejection[ EVENT_COUNTER_NAME_TOTAL] << ", Nh: " << setw(5) << nbd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL] << ", N : " << setw(6) << nb->get_n_total_playing() << "(" << setw(3) << nb->get_n_total_inactive() << ", " << setw(5) << nb->get_n_total_removed() << ")" << ", nt: " << setw(11) << intgr->get_n_tried_step() << ", np: " << setw(11) << intgr->get_n_passed_step() << ", nf: " << setw(11) << intgr->get_n_failed_step() << endl; if (first_call) { first_call = false; sout << header_str << endl; } sout << dev << cvs << tools::get_time_stamp(true) << cvs << setprecision(4) << nbd->t / constants::Gauss << cvs << setprecision(4) << dt / constants::Gauss << cvs << setprecision(4) << (nbd->t / constants::Gauss)/intgr->get_n_passed_step() << cvs; sout << setprecision(4) << (*dT_CPU / (double)CLOCKS_PER_SEC) << cvs; sout << setprecision(4) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << cvs; sout << nbd->n_collision[ EVENT_COUNTER_NAME_TOTAL] << cvs << nbd->n_ejection[ EVENT_COUNTER_NAME_TOTAL] << cvs << nbd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL] << cvs << nb->get_n_total_playing() << cvs << nb->get_n_total_inactive() << cvs << nb->get_n_total_removed() << cvs << intgr->get_n_tried_step() << cvs << intgr->get_n_passed_step() << cvs << intgr->get_n_failed_step() << cvs; for (int i = 0; i < BODY_TYPE_N; i++) { sout << nb->playing[i] - nb->inactive[i] << cvs << nb->removed[i] << (i < BODY_TYPE_TESTPARTICLE ? 
cvs : ""); } sout << endl; } void run_simulation(const options& opt, nbody* nbd, integrator* intgr, ofstream** output) { ttt_t ps = 0.0; ttt_t dt = 0.0; clock_t T_CPU = 0; clock_t dT_CPU = 0; time_t time_last_info = clock(); time_t time_last_dump = clock(); integral_t integrals[2]; uint32_t n_removed = 0; uint32_t n_dump = 1; nbd->print_result(*output[OUTPUT_NAME_RESULT], DATA_REPRESENTATION_ASCII); /* main cycle */ #if 1 while (nbd->t <= opt.param->stop_time && 1 < nbd->n_bodies->get_n_total_active()) { // make the integration step, and measure the time it takes clock_t T0_CPU = clock(); dt = intgr->step(); dT_CPU = (clock() - T0_CPU); T_CPU += dT_CPU; ps += fabs(dt); if (0.0 < opt.param->threshold[THRESHOLD_RADII_ENHANCE_FACTOR]) { integral_t I; bool collision = nbd->check_for_collision(); if (collision) { integral_t I; nbd->calc_integral(false, I); nbd->print_integral_data(I, *output[OUTPUT_NAME_INTEGRAL_EVENT]); { // Restore the state before the collision nbd->swap(); nbd->t -= dt; if (COMPUTING_DEVICE_GPU == opt.comp_dev) { nbd->copy_event_data_to_host(); } nbd->populate_sp_events(); for (uint32_t i = 0; i < nbd->sp_events.size(); i++) { // Create the subsystem containing the colliding bodies nbody* reg_tbp = nbd->create_reg_tbp(nbd->sp_events[i].idx1, nbd->sp_events[i].idx2); integrator* reg_int = new rungekutta8(reg_tbp, dt, true, 1.0e-15, opt.comp_dev); cout << "Subsystem with " << nbd->sp_events[i].id1 << " and " << nbd->sp_events[i].id2 << " was created." << endl; //uint32_t ictv_idx = nbd->sp_events[i] .idx2; //handle_collision_pair(i, &sp_events[i]); //increment_event_counter(n_collision); //// Make the merged body inactive //sim_data->h_body_md[ictv_idx].id *= -1; //// Copy it up to GPU //if (COMPUTING_DEVICE_GPU == comp_dev) //{ // copy_vector_to_device((void **)&sim_data->d_body_md[ictv_idx], (void **)&sim_data->h_body_md[ictv_idx], sizeof(body_metadata_t)); //} //// Update number of inactive bodies //n_bodies->inactive[sim_data->h_body_md[ictv_idx].body_type]++; } } //nbd->handle_collision(); //nbd->calc_integral(false, I); //nbd->print_integral_data(I, *output[OUTPUT_NAME_INTEGRAL_EVENT]); //nbd->print_event_data(*output[OUTPUT_NAME_EVENT], *output[OUTPUT_NAME_LOG]); nbd->clear_event_counter(); } } if (opt.param->output_interval <= fabs(ps)) { integral_t I; ps = 0.0; nbd->print_result(*output[OUTPUT_NAME_RESULT], DATA_REPRESENTATION_ASCII); nbd->calc_integral(false, I); nbd->print_integral_data(I, *output[OUTPUT_NAME_INTEGRAL]); } if (opt.info_dt < (clock() - time_last_info) / (double)CLOCKS_PER_SEC) { time_last_info = clock(); print_info(*output[OUTPUT_NAME_INFO], nbd, intgr, dt, &T_CPU, &dT_CPU); } } /* while */ #endif print_info(*output[OUTPUT_NAME_INFO], nbd, intgr, dt, &T_CPU, &dT_CPU); // To avoid duplicate save at the end of the simulation if (0.0 < ps) { ps = 0.0; nbd->print_result(*output[OUTPUT_NAME_RESULT], DATA_REPRESENTATION_ASCII); } } int main(int argc, const char** argv, const char** env) { time_t start = time(NULL); ofstream* output[OUTPUT_NAME_N]; memset(output, 0x0, sizeof(output)); try { options opt = options(argc, argv); open_streams(opt, output); file::log_start(*output[OUTPUT_NAME_LOG], argc, argv, env, opt.param->cdm, opt.print_to_screen); nbody *nbd = opt.create_nbody(); ttt_t dt = 0.1; // [day] integrator *intgr = opt.create_integrator(nbd, dt); run_simulation(opt, nbd, intgr, output); // Needed by nvprof.exe if (COMPUTING_DEVICE_GPU == nbd->get_computing_device()) { cudaDeviceReset(); } } /* try */ catch (const string& msg) { if (0x0 != 
output[OUTPUT_NAME_LOG]) { file::log_message(*output[OUTPUT_NAME_LOG], "Error: " + msg, false); } cerr << "Error: " << msg << endl; } if (0x0 != output[OUTPUT_NAME_LOG]) { file::log_message(*output[OUTPUT_NAME_LOG], "Total time: " + tools::convert_time_t(time(NULL) - start) + " s", false); } cout << "Total time: " << time(NULL) - start << " s" << endl; return (EXIT_SUCCESS); }
d1123e9df23c61d1fde66fea1e3e43bd3a0bb29e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<cuda.h>
using namespace std;

__global__ void add(int *a,const int *b){
    int i=blockIdx.x;
    a[i]+=b[i];
}

int main(){
    const int N=10;
    int *a,*b,*temp;
    temp=new int[N];
    hipMalloc(&a,N*sizeof(int));
    hipMalloc(&b,N*sizeof(int));
    for(int i=0;i<N;i++)
        temp[i]=i;
    hipMemcpy(a,temp,N*sizeof(int),hipMemcpyHostToDevice);
    for(int i=0;i<N;i++)
        temp[i]=2*i;
    hipMemcpy(b,temp,N*sizeof(int),hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, a,b);
    hipMemcpy(temp,a,N*sizeof(int),hipMemcpyDeviceToHost);
    for(int i=0;i<N;i++)
        cout<<temp[i]<<endl;
    delete[] temp;
    hipFree(a);
    hipFree(b);
}
d1123e9df23c61d1fde66fea1e3e43bd3a0bb29e.cu
#include<iostream>
#include<cuda.h>
using namespace std;

__global__ void add(int *a,const int *b){
    int i=blockIdx.x;
    a[i]+=b[i];
}

int main(){
    const int N=10;
    int *a,*b,*temp;
    temp=new int[N];
    cudaMalloc(&a,N*sizeof(int));
    cudaMalloc(&b,N*sizeof(int));
    for(int i=0;i<N;i++)
        temp[i]=i;
    cudaMemcpy(a,temp,N*sizeof(int),cudaMemcpyHostToDevice);
    for(int i=0;i<N;i++)
        temp[i]=2*i;
    cudaMemcpy(b,temp,N*sizeof(int),cudaMemcpyHostToDevice);
    add<<<N,1>>>(a,b);
    cudaMemcpy(temp,a,N*sizeof(int),cudaMemcpyDeviceToHost);
    for(int i=0;i<N;i++)
        cout<<temp[i]<<endl;
    delete[] temp;
    cudaFree(a);
    cudaFree(b);
}
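The launch above uses one block per element (add<<<N,1>>>), which is fine for N = 10 but wastes the hardware for larger arrays. A commonly used variant (a sketch, not taken from the original file) maps one thread per element with a bounds check:

__global__ void add_threads(int *a, const int *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) a[i] += b[i];
}
// e.g. add_threads<<<(N + 255) / 256, 256>>>(a, b, N);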
891f6390c39776cd59a78966ccc4fb2bea4a7f34.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <chrono>

void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if( hipSuccess != err)
    {
        fprintf(stderr, "CUDA Error: %s: %s.\n", msg, hipGetErrorString(err) );
        exit(EXIT_FAILURE);
    }
}

#define BLOCKSIZE 1024

__global__ void reduce(unsigned int* dVec, unsigned int* dAux, size_t N)
{
    __shared__ unsigned int sdata[BLOCKSIZE];

    size_t tid = threadIdx.x;
    size_t i = blockIdx.x*blockDim.x + threadIdx.x;
    // every thread loads one element; N is assumed to be a multiple of blockDim.x here
    sdata[tid] = dVec[i];
    __syncthreads();

    for(size_t s = 1; s < blockDim.x; s *= 2)
    {
        if (tid % (s*2) == 0)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }

    if (tid == 0)
        dAux[blockIdx.x] = sdata[0];
}

int main(int argc, char** argv)
{
    unsigned int *vec;
    unsigned int *dVec, *dAux;

    size_t N0 = 32768;
    size_t N = N0*N0;

    vec = (unsigned int*) malloc (sizeof(unsigned int)*N);
    for (size_t i = 0; i < N; i++)
        vec[i] = i;

    hipMalloc(&dVec, sizeof(unsigned int)*N);
    checkCUDAError("Error allocating dVec");
    hipMalloc(&dAux, sizeof(unsigned int)*N);
    checkCUDAError("Error allocating dAux");

    hipMemcpy(dVec, vec, sizeof(unsigned int)*N, hipMemcpyHostToDevice);
    checkCUDAError("Error copying vec");

    auto startTime = std::chrono::system_clock::now();
    for (size_t n = N; n > 1; n = n / BLOCKSIZE)
    {
        size_t bSize = BLOCKSIZE;
        if (bSize > n) bSize = n;
        size_t gSize = ceil((double)n / (double)BLOCKSIZE);
        if (bSize > n) gSize = 1;
        printf("bSize: %lu - gSize: %lu\n", bSize, gSize);

        hipLaunchKernelGGL(( reduce), dim3(gSize), dim3(bSize), 0, 0, dVec, dAux, n);
        checkCUDAError("Failed Kernel Launch");

        unsigned int *tmp = dVec;
        dVec = dAux;
        dAux = tmp;
    }
    hipDeviceSynchronize();
    auto endTime = std::chrono::system_clock::now();

    unsigned int result = 0;
    hipMemcpy(&result, dVec, sizeof(unsigned int), hipMemcpyDeviceToHost);
    checkCUDAError("Error getting result");

    printf("[GPU] Result: %u - Elapsed Time: %fs\n", result, std::chrono::duration<double>(endTime-startTime).count());
    return 0;
}
891f6390c39776cd59a78966ccc4fb2bea4a7f34.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <chrono>

void checkCUDAError(const char *msg) {
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA Error: %s: %s.\n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

#define BLOCKSIZE 1024

// Tree reduction within each block using interleaved addressing; each block
// writes its partial sum to dAux[blockIdx.x].
__global__ void reduce(unsigned int* dVec, unsigned int* dAux, size_t N) {
    __shared__ unsigned int sdata[BLOCKSIZE];
    size_t tid = threadIdx.x;
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = dVec[i];
    __syncthreads();
    for (size_t s = 1; s < blockDim.x; s *= 2) {
        if (tid % (s * 2) == 0) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) dAux[blockIdx.x] = sdata[0];
}

int main(int argc, char** argv) {
    unsigned int *vec;
    unsigned int *dVec, *dAux;
    size_t N0 = 32768;
    size_t N = N0 * N0;

    vec = (unsigned int*) malloc(sizeof(unsigned int) * N);
    for (size_t i = 0; i < N; i++) vec[i] = i;

    cudaMalloc(&dVec, sizeof(unsigned int) * N);
    checkCUDAError("Error allocating dVec");
    cudaMalloc(&dAux, sizeof(unsigned int) * N);
    checkCUDAError("Error allocating dAux");
    cudaMemcpy(dVec, vec, sizeof(unsigned int) * N, cudaMemcpyHostToDevice);
    checkCUDAError("Error copying vec");

    auto startTime = std::chrono::system_clock::now();
    // Repeatedly reduce: each pass shrinks the problem by a factor of BLOCKSIZE,
    // swapping the input and partial-sum buffers between passes.
    for (size_t n = N; n > 1; n = n / BLOCKSIZE) {
        size_t bSize = BLOCKSIZE;
        if (bSize > n) bSize = n;
        size_t gSize = ceil((double)n / (double)BLOCKSIZE);
        if (bSize > n) gSize = 1;
        printf("bSize: %lu - gSize: %lu\n", bSize, gSize);
        reduce<<<gSize, bSize>>>(dVec, dAux, n);
        checkCUDAError("Failed Kernel Launch");
        unsigned int *tmp = dVec;
        dVec = dAux;
        dAux = tmp;
    }
    cudaDeviceSynchronize();
    auto endTime = std::chrono::system_clock::now();

    unsigned int result = 0;
    cudaMemcpy(&result, dVec, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    checkCUDAError("Error getting result");

    printf("[GPU] Result: %u - Elapsed Time: %fs\n", result,
           std::chrono::duration<double>(endTime - startTime).count());
    return 0;
}
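The reduce kernel above uses interleaved addressing (tid % (s*2) == 0), which produces divergent branches and leaves most threads of a warp idle as s grows. A sequential-addressing variant is sketched below as an illustration only; like the file above it assumes the block size is a power of two, and the kernel name reduceSeq is an invented one, not a drop-in patch to the program.

#include <cstddef>
#include <cuda_runtime.h>

#define BLOCKSIZE 1024

// Sequential-addressing block reduction: the active threads 0..s-1 stay
// contiguous, so warp divergence is confined to the last few steps. Each block
// still writes its partial sum to dAux[blockIdx.x].
__global__ void reduceSeq(const unsigned int* dVec, unsigned int* dAux, size_t N) {
    __shared__ unsigned int sdata[BLOCKSIZE];
    size_t tid = threadIdx.x;
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = (i < N) ? dVec[i] : 0u;   // guard the tail instead of assuming exact fit
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) dAux[blockIdx.x] = sdata[0];
}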
f0983ea03236244be4cd8e105908351fa5acbc8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <algorithm> #include <numeric> #include <string> #include <fstream> #include <cassert> #include <vector> #include <cmath> #include "timer.hpp" typedef float value_type; typedef std::size_t size_type; static const int moment_block_x = 256; static const int diffusion_block_x = 16; static const int diffusion_block_y = 16; __global__ void diffusion_kernel(value_type * rho_out, value_type const * rho, value_type fac, int N) { __shared__ value_type rho_loc[(diffusion_block_x+2)*(diffusion_block_y+2)]; int const gj = blockIdx.x*blockDim.x + threadIdx.x; int const gi = blockIdx.y*blockDim.y + threadIdx.y; int const lN = diffusion_block_y+2; int const lj = threadIdx.x + 1; int const li = threadIdx.y + 1; if(gi < N && gj < N) { // Load the bulk rho_loc[li*lN + lj] = rho[gi*N + gj]; // Load the ghost cells if(threadIdx.y == 0) { rho_loc[(li-1)*lN + lj] = (gi == 0 ? 0 : rho[(gi-1)*N + gj]); } if(threadIdx.y == blockDim.y-1) { rho_loc[(lN-1)*lN + lj] = (gi == N-1 ? 0 : rho[(gi+1)*N + gj]); } if(threadIdx.x == 0) { rho_loc[li*lN + lj-1] = (gj == 0 ? 0 : rho[gi*N + gj-1]); } if(threadIdx.x == blockDim.x-1) { rho_loc[li*lN + lN-1] = (gj == N-1 ? 0 : rho[gi*N + gj+1]); } } __syncthreads(); if(gi < N && gj < N) { rho_out[gi*N + gj] = rho_loc[li*lN + lj] + fac * ( rho_loc[li*lN + (lj+1)] + rho_loc[li*lN + (lj-1)] + rho_loc[(li+1)*lN + lj] + rho_loc[(li-1)*lN + lj] - 4*rho_loc[li*lN + lj] ); } } __global__ void get_moment_kernel(value_type * result, value_type const * rho, value_type rmin, value_type dr, int N) { __shared__ value_type partial_sums[256]; partial_sums[threadIdx.x] = 0; int j = blockIdx.x * blockDim.x + threadIdx.x; if(j < N) { value_type x = j*dr + rmin; // Note: One could improve this loop by replacing it with another // parallel reduction along this direction. We do not bother, // because this kernel is not the bottleneck of our simulation. for(int i=0; i < N; ++i) { value_type y = i*dr + rmin; partial_sums[threadIdx.x] += rho[i*N+j] * (x*x + y*y); // The index i (instead of j) of this loop may seem funny because // it jumps in strides of N. However, contrary to locality the CPU, // locality on the GPU means neighboring threads (threadIdx.x) // access neighboring memory locations. 
} } __syncthreads(); // Simple reduction if(threadIdx.x < 128) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+128]; __syncthreads(); if(threadIdx.x < 64) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+64]; __syncthreads(); if(threadIdx.x < 32) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+32]; __syncthreads(); if(threadIdx.x < 16) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+16]; __syncthreads(); if(threadIdx.x < 8) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+8]; __syncthreads(); if(threadIdx.x < 4) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+4]; __syncthreads(); if(threadIdx.x < 2) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+2]; __syncthreads(); if(threadIdx.x < 1) { partial_sums[threadIdx.x] += partial_sums[threadIdx.x+1]; result[blockIdx.x] = dr*dr*partial_sums[threadIdx.x]; } } class Diffusion2D { public: Diffusion2D( const value_type D, const value_type rmax, const value_type rmin, const size_type N ) : D_(D) , rmax_(rmax) , rmin_(rmin) , N_(N) , N_tot(N*N) , d_rho_(0) , d_rho_tmp_(0) { N_tot = N_*N_; /// real space grid spacing dr_ = (rmax_ - rmin_) / (N_ - 1); /// dt < dx*dx / (4*D) for stability dt_ = dr_ * dr_ / (6 * D_); /// stencil factor fac_ = dt_ * D_ / (dr_ * dr_); // Allocate memory on Device hipMalloc(&d_rho_, N_tot*sizeof(value_type)); hipMalloc(&d_rho_tmp_, N_tot*sizeof(value_type)); hipMalloc(&d_moment_, (N/moment_block_x) * sizeof(value_type) ); hipMemset(d_rho_,0,N_tot); hipMemset(d_rho_tmp_,0,N_tot); InitializeSystem(); } ~Diffusion2D() { hipFree(d_moment_); hipFree(d_rho_tmp_); hipFree(d_rho_); } void PropagateDensity(int steps); value_type GetMoment() const { int const blocks = (N_+moment_block_x-1)/moment_block_x; std::vector<value_type> moment(blocks); hipLaunchKernelGGL(( get_moment_kernel), dim3(blocks),dim3(moment_block_x), 0, 0, d_moment_, d_rho_, rmin_, dr_, N_); hipMemcpy(&moment[0], d_moment_, blocks * sizeof(value_type), hipMemcpyDeviceToHost); return std::accumulate(moment.begin(),moment.end(),0.0); } value_type GetTime() const {return time_;} void WriteDensity(const std::string file_name) const; private: void InitializeSystem(); const value_type D_, rmax_, rmin_; const size_type N_; size_type N_tot; value_type dr_, dt_, fac_; value_type time_; value_type *d_rho_, *d_rho_tmp_; value_type *d_moment_; }; void Diffusion2D::WriteDensity(const std::string file_name) const { // Get data from device std::vector<value_type> rho(N_*N_); hipMemcpy(&rho[0], d_rho_, rho.size() * sizeof(value_type), hipMemcpyDeviceToHost); std::ofstream out_file; out_file.open(file_name.c_str(), std::ios::out); if(out_file.good()) { for(size_type i = 0; i < N_; ++i){ for(size_type j = 0; j < N_; ++j) out_file << (i*dr_+rmin_) << '\t' << (j*dr_+rmin_) << '\t' << rho[i*N_ + j] << "\n"; out_file << "\n"; } } out_file.close(); } void Diffusion2D::PropagateDensity(int steps) { using std::swap; /// Dirichlet boundaries; central differences in space, forward Euler /// in time dim3 block_size(diffusion_block_x,diffusion_block_y,1); dim3 grid_size((N_+diffusion_block_x-1)/diffusion_block_x,(N_+diffusion_block_y-1)/diffusion_block_y,1); // Round-up needed number of blocks (N/block_size) for(int s = 0; s < steps; ++s) { hipLaunchKernelGGL(( diffusion_kernel), dim3(grid_size), dim3(block_size), 0, 0, d_rho_tmp_, d_rho_, fac_, N_); swap(d_rho_, d_rho_tmp_); time_ += dt_; } } void Diffusion2D::InitializeSystem() { std::vector<value_type> rho(N_*N_); time_ = 0; /// initialize rho(x,y,t=0) value_type bound = 1./2; for(size_type i = 0; i < 
N_; ++i){ for(size_type j = 0; j < N_; ++j){ if(::fabs(i*dr_+rmin_) < bound && ::fabs(j*dr_+rmin_) < bound){ rho[i*N_ + j] = 1; } else{ rho[i*N_ + j] = 0; } } } hipMemcpy(d_rho_, &rho[0], rho.size() * sizeof(value_type), hipMemcpyHostToDevice); } int main(int argc, char* argv[]) { if(argc != 2) { std::cerr << "usage: " << argv[0] << " <log2(size)>" << std::endl; return 1; } const value_type D = 1; const value_type tmax = 0.01; const value_type rmax = 1; const value_type rmin = -1; const size_type N_ = 1 << std::atoi(argv[1]); const int steps_between_measurements = 100; Diffusion2D System(D, rmax, rmin, N_); value_type time = 0; timer runtime; runtime.start(); while(time < tmax){ System.PropagateDensity(steps_between_measurements); time = System.GetTime(); value_type moment = System.GetMoment(); std::cout << time << '\t' << moment << std::endl; } runtime.stop(); double elapsed = runtime.get_timing(); std::cerr << argv[0] << "\t N=" <<N_ << "\t time=" << elapsed << "s" << std::endl; std::string density_file = "Density.dat"; System.WriteDensity(density_file); return 0; }
f0983ea03236244be4cd8e105908351fa5acbc8b.cu
#include <iostream> #include <algorithm> #include <numeric> #include <string> #include <fstream> #include <cassert> #include <vector> #include <cmath> #include "timer.hpp" typedef float value_type; typedef std::size_t size_type; static const int moment_block_x = 256; static const int diffusion_block_x = 16; static const int diffusion_block_y = 16; __global__ void diffusion_kernel(value_type * rho_out, value_type const * rho, value_type fac, int N) { __shared__ value_type rho_loc[(diffusion_block_x+2)*(diffusion_block_y+2)]; int const gj = blockIdx.x*blockDim.x + threadIdx.x; int const gi = blockIdx.y*blockDim.y + threadIdx.y; int const lN = diffusion_block_y+2; int const lj = threadIdx.x + 1; int const li = threadIdx.y + 1; if(gi < N && gj < N) { // Load the bulk rho_loc[li*lN + lj] = rho[gi*N + gj]; // Load the ghost cells if(threadIdx.y == 0) { rho_loc[(li-1)*lN + lj] = (gi == 0 ? 0 : rho[(gi-1)*N + gj]); } if(threadIdx.y == blockDim.y-1) { rho_loc[(lN-1)*lN + lj] = (gi == N-1 ? 0 : rho[(gi+1)*N + gj]); } if(threadIdx.x == 0) { rho_loc[li*lN + lj-1] = (gj == 0 ? 0 : rho[gi*N + gj-1]); } if(threadIdx.x == blockDim.x-1) { rho_loc[li*lN + lN-1] = (gj == N-1 ? 0 : rho[gi*N + gj+1]); } } __syncthreads(); if(gi < N && gj < N) { rho_out[gi*N + gj] = rho_loc[li*lN + lj] + fac * ( rho_loc[li*lN + (lj+1)] + rho_loc[li*lN + (lj-1)] + rho_loc[(li+1)*lN + lj] + rho_loc[(li-1)*lN + lj] - 4*rho_loc[li*lN + lj] ); } } __global__ void get_moment_kernel(value_type * result, value_type const * rho, value_type rmin, value_type dr, int N) { __shared__ value_type partial_sums[256]; partial_sums[threadIdx.x] = 0; int j = blockIdx.x * blockDim.x + threadIdx.x; if(j < N) { value_type x = j*dr + rmin; // Note: One could improve this loop by replacing it with another // parallel reduction along this direction. We do not bother, // because this kernel is not the bottleneck of our simulation. for(int i=0; i < N; ++i) { value_type y = i*dr + rmin; partial_sums[threadIdx.x] += rho[i*N+j] * (x*x + y*y); // The index i (instead of j) of this loop may seem funny because // it jumps in strides of N. However, contrary to locality the CPU, // locality on the GPU means neighboring threads (threadIdx.x) // access neighboring memory locations. 
} } __syncthreads(); // Simple reduction if(threadIdx.x < 128) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+128]; __syncthreads(); if(threadIdx.x < 64) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+64]; __syncthreads(); if(threadIdx.x < 32) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+32]; __syncthreads(); if(threadIdx.x < 16) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+16]; __syncthreads(); if(threadIdx.x < 8) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+8]; __syncthreads(); if(threadIdx.x < 4) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+4]; __syncthreads(); if(threadIdx.x < 2) partial_sums[threadIdx.x] += partial_sums[threadIdx.x+2]; __syncthreads(); if(threadIdx.x < 1) { partial_sums[threadIdx.x] += partial_sums[threadIdx.x+1]; result[blockIdx.x] = dr*dr*partial_sums[threadIdx.x]; } } class Diffusion2D { public: Diffusion2D( const value_type D, const value_type rmax, const value_type rmin, const size_type N ) : D_(D) , rmax_(rmax) , rmin_(rmin) , N_(N) , N_tot(N*N) , d_rho_(0) , d_rho_tmp_(0) { N_tot = N_*N_; /// real space grid spacing dr_ = (rmax_ - rmin_) / (N_ - 1); /// dt < dx*dx / (4*D) for stability dt_ = dr_ * dr_ / (6 * D_); /// stencil factor fac_ = dt_ * D_ / (dr_ * dr_); // Allocate memory on Device cudaMalloc(&d_rho_, N_tot*sizeof(value_type)); cudaMalloc(&d_rho_tmp_, N_tot*sizeof(value_type)); cudaMalloc(&d_moment_, (N/moment_block_x) * sizeof(value_type) ); cudaMemset(d_rho_,0,N_tot); cudaMemset(d_rho_tmp_,0,N_tot); InitializeSystem(); } ~Diffusion2D() { cudaFree(d_moment_); cudaFree(d_rho_tmp_); cudaFree(d_rho_); } void PropagateDensity(int steps); value_type GetMoment() const { int const blocks = (N_+moment_block_x-1)/moment_block_x; std::vector<value_type> moment(blocks); get_moment_kernel<<<blocks,moment_block_x>>>(d_moment_, d_rho_, rmin_, dr_, N_); cudaMemcpy(&moment[0], d_moment_, blocks * sizeof(value_type), cudaMemcpyDeviceToHost); return std::accumulate(moment.begin(),moment.end(),0.0); } value_type GetTime() const {return time_;} void WriteDensity(const std::string file_name) const; private: void InitializeSystem(); const value_type D_, rmax_, rmin_; const size_type N_; size_type N_tot; value_type dr_, dt_, fac_; value_type time_; value_type *d_rho_, *d_rho_tmp_; value_type *d_moment_; }; void Diffusion2D::WriteDensity(const std::string file_name) const { // Get data from device std::vector<value_type> rho(N_*N_); cudaMemcpy(&rho[0], d_rho_, rho.size() * sizeof(value_type), cudaMemcpyDeviceToHost); std::ofstream out_file; out_file.open(file_name.c_str(), std::ios::out); if(out_file.good()) { for(size_type i = 0; i < N_; ++i){ for(size_type j = 0; j < N_; ++j) out_file << (i*dr_+rmin_) << '\t' << (j*dr_+rmin_) << '\t' << rho[i*N_ + j] << "\n"; out_file << "\n"; } } out_file.close(); } void Diffusion2D::PropagateDensity(int steps) { using std::swap; /// Dirichlet boundaries; central differences in space, forward Euler /// in time dim3 block_size(diffusion_block_x,diffusion_block_y,1); dim3 grid_size((N_+diffusion_block_x-1)/diffusion_block_x,(N_+diffusion_block_y-1)/diffusion_block_y,1); // Round-up needed number of blocks (N/block_size) for(int s = 0; s < steps; ++s) { diffusion_kernel<<<grid_size, block_size>>>(d_rho_tmp_, d_rho_, fac_, N_); swap(d_rho_, d_rho_tmp_); time_ += dt_; } } void Diffusion2D::InitializeSystem() { std::vector<value_type> rho(N_*N_); time_ = 0; /// initialize rho(x,y,t=0) value_type bound = 1./2; for(size_type i = 0; i < N_; ++i){ for(size_type j = 0; j < N_; ++j){ 
if(std::fabs(i*dr_+rmin_) < bound && std::fabs(j*dr_+rmin_) < bound){ rho[i*N_ + j] = 1; } else{ rho[i*N_ + j] = 0; } } } cudaMemcpy(d_rho_, &rho[0], rho.size() * sizeof(value_type), cudaMemcpyHostToDevice); } int main(int argc, char* argv[]) { if(argc != 2) { std::cerr << "usage: " << argv[0] << " <log2(size)>" << std::endl; return 1; } const value_type D = 1; const value_type tmax = 0.01; const value_type rmax = 1; const value_type rmin = -1; const size_type N_ = 1 << std::atoi(argv[1]); const int steps_between_measurements = 100; Diffusion2D System(D, rmax, rmin, N_); value_type time = 0; timer runtime; runtime.start(); while(time < tmax){ System.PropagateDensity(steps_between_measurements); time = System.GetTime(); value_type moment = System.GetMoment(); std::cout << time << '\t' << moment << std::endl; } runtime.stop(); double elapsed = runtime.get_timing(); std::cerr << argv[0] << "\t N=" <<N_ << "\t time=" << elapsed << "s" << std::endl; std::string density_file = "Density.dat"; System.WriteDensity(density_file); return 0; }
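In the Diffusion2D constructor above, cudaMalloc sizes the density buffers in bytes (N_tot*sizeof(value_type)) while cudaMemset is passed N_tot, an element count, so only the first N_tot bytes are actually cleared. A small helper along the following lines keeps the two counts in sync; allocZeroed is an illustrative name, not part of the original class, and error handling is omitted to stay close to the original style.

#include <cstddef>
#include <cuda_runtime.h>

// Illustrative helper: allocate and zero a device array of `count` elements,
// computing the byte size exactly once so the cudaMalloc and cudaMemset
// counts cannot drift apart.
template <typename T>
T* allocZeroed(std::size_t count) {
    T* ptr = nullptr;
    const std::size_t bytes = count * sizeof(T);
    cudaMalloc((void**)&ptr, bytes);
    cudaMemset(ptr, 0, bytes);   // byte count, matching the allocation
    return ptr;
}

// Hypothetical usage corresponding to the constructor above (value_type = float):
//   d_rho_     = allocZeroed<value_type>(N_tot);
//   d_rho_tmp_ = allocZeroed<value_type>(N_tot);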
0d2362f387ce8e66eb52b5bb193d08443d95296d.hip
// !!! This is a file automatically generated by hipify!!! // C++ #include <iostream> #include <string> // C #include <stdlib.h> #include <stdio.h> #include <math.h> #include <limits.h> // CUDA #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> // Force -Wall after this point, VC only (Check https://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Pragmas.html for GCC) #pragma warning(push,4) /******************************************************************************/ /*** 2-opt with random restarts ***********************************************/ /******************************************************************************/ #define dist(a, b) __float2int_rn(sqrtf((pos[a].x - pos[b].x) * (pos[a].x - pos[b].x) + (pos[a].y - pos[b].y) * (pos[a].y - pos[b].y))) #define swap(a, b) {float tmp = a; a = b; b = tmp;} static __device__ int climbs_d = 0; static __device__ int best_d = INT_MAX; static __device__ unsigned int restart_d = 0; enum ThreadBufferStatus {MORE_THREADS_THAN_BUFFER,EQUAL_SIZE,MORE_BUFFER_THAN_THREADS}; // Data structure used to hold position along path struct __align__(8) Data { float x,y; }; // // Returns a unique integer value with the intial value being 0 // // @return - Returns the next unique int static __device__ inline int nextInt() { __shared__ int next; if(threadIdx.x==0) { next = atomicAdd(&restart_d,1); }__syncthreads(); return next; } // Allocates and initializes my global memory and shared memory. // // @pos - An array that need to be initialized and will hold our path points // @weight - An array that need to be initialized and will hold our edge weights // @cities - The amount of points in our graph // // @return - Returns true if initialization was successful, false otherwise. template <int TileSize> static inline __device__ bool initMemory(const Data* &pos_d, Data* &pos, int * &weight, const int &cities) { __shared__ Data *d; __shared__ int *w; // Allocate my global memory if(threadIdx.x == 0 ) { d = new Data[cities + 1]; if(d != NULL) { w = new int[cities]; if(w == NULL) { printf("Could not allocated for weight"); delete d; d = NULL; } }else{ printf("Could not allocate for position"); } }__syncthreads(); if(d == NULL) { return false; } // Save new memory locations pos = d; weight = w; for (int i = threadIdx.x; i < cities; i += blockDim.x) pos[i] = pos_d[i]; __syncthreads(); return true; } // // Each thread gives some integer value, then the maximum of them is returned. // // @t_val - The number that the thread submits as a candidate for the maximum value // @cities - The number of cities. // // @return - The maximum value of t_val seen from all threads template <ThreadBufferStatus Status, int TileSize> static inline __device__ int maximum(int t_val, const int &cities, int* __restrict__ &w_buffer) { #if 0 // What happens if everyone writes to index 0 then atomicMin w_buffer[0] = t_val; __syncthreads(); atomicMin(w_buffer,t_val); __syncthreads(); #elif 0 // What happens if only thread 0 writes to index 0 then atomicMin if(threadIdx.x==0) w_buffer[0] = t_val; __syncthreads(); atomicMin(w_buffer,t_val); __syncthreads(); #elif 0 // What happens if everyone write to their own index then atomicMin on index 0? if(Status == MORE_THREADS_THAN_BUFFER) { w_buffer[threadIdx.x%TileSize] = t_val; // Serializes the writes }else{ w_buffer[threadIdx.x] = t_val; }__syncthreads(); atomicMin(w_buffer,t_val); __syncthreads(); #elif 0 // What happens if we atomicMin only when more threads than shared // to reduce number of values to test to fit in buffer? 
if(Status == MORE_THREADS_THAN_BUFFER) { w_buffer[threadIdx.x%TileSize] = t_val; __syncthreads(); atomicMin(threadIdx.x%TileSize + w_buffer,t_val); // serialized }else{ w_buffer[threadIdx.x] = t_val; }__syncthreads(); int upper = min(blockDim.x,min(TileSize,cities)); do { int offset = (upper + 1)/2; if( threadIdx.x < offset) { w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x + offset]); }__syncthreads(); upper = offset; }while( upper > 1); #elif 0 // What happens if we don't use atomicMin to reduce amount of values to test but // rather use a for-loop to reduce the values? if(Status == MORE_THREADS_THAN_BUFFER) { const int Index = threadIdx.x % TileSize; w_buffer[Index] = t_val; __syncthreads(); for(int i = 0 ; i <= blockDim.x/TileSize; ++i) { if(t_val < w_buffer[Index]) { w_buffer[Index] = t_val; } } }else{ w_buffer[threadIdx.x] = t_val; } int upper = min(blockDim.x,min(TileSize,cities)); do { __syncthreads(); int offset = (upper + 1)/2; if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } upper = offset; }while( upper > 1); __syncthreads(); #else // What happens if we try to template out everything and loop unroll? int upper = min(blockDim.x,min(TileSize,cities)); if(Status == MORE_THREADS_THAN_BUFFER) { const int Index = threadIdx.x % TileSize; w_buffer[Index] = t_val; __syncthreads(); for(int i = 0 ; i <= (blockDim.x/TileSize); ++i ) { if(t_val < w_buffer[Index]) { w_buffer[Index] = t_val; } } }else { w_buffer[threadIdx.x] = t_val; }__syncthreads(); // No if (TileSize > 512 && blockDim.x > 512) { int offset = (upper + 1) / 2; // 200 if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } }__syncthreads(); upper = offset; } // No if (TileSize > 256 && blockDim.x > 256) { int offset = (upper + 1) / 2; // 100 if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } }__syncthreads(); upper = offset; } // No if (TileSize > 128 && blockDim.x > 128) { int offset = (upper + 1) / 2; // 50 if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } }__syncthreads(); upper = offset; } // No if (TileSize > 64 && blockDim.x > 64) { int offset = (upper + 1) / 2; // 25 if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } }__syncthreads(); upper = offset; } // 64 and down if(threadIdx.x < 32) { // Yes. upper = 32. 
w_buffer[tid] = t_val = min(t_val,w_buffer[threadIdx.x + 16] if(TileSize > 32 && blockDim.x > 32) { int tmp = w_buffer[threadIdx.x + (upper+1)/2]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } if(threadIdx.x < 16) { int tmp = w_buffer[threadIdx.x + 16]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } if(threadIdx.x < 8) { int tmp = w_buffer[threadIdx.x + 8]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } if(threadIdx.x < 4) { int tmp = w_buffer[threadIdx.x + 4]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } if(threadIdx.x < 2) { int tmp = w_buffer[threadIdx.x + 2]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } if(threadIdx.x < 1) { int tmp = w_buffer[threadIdx.x + 1]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } }__syncthreads(); #endif return w_buffer[0]; } // // After we find the best four position to reconnect with all we need to // reverse the path between them. // // @start - The first position in the sub-path we have to swap with the end // @end - The last position in the path we have to swap with the start // @pos - The positions in our path // @weights - The edge weights between points static inline __device__ void reverse(int start, int end, Data* &pos, int* &weight) { while(start<end) { int w = weight[start]; Data d = pos[start]; weight[start] = weight[end-1]; pos[start] = pos[end]; weight[end-1] = w; pos[end] = d; start += blockDim.x; end -= blockDim.x; }__syncthreads(); } // // Perform a single iteration of Two-Opt // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @minchange - The current best change we can make // @mini - The ith city in the path that is part of the swap // @minj - The jth city in the path that is part of the swap // @cities - The number of cities along the path (excluding the end point) template <int TileSize> static __device__ void singleIter(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int &cities, float* __restrict__ x_buffer, float* __restrict__ y_buffer, int* __restrict__ w_buffer) { for (int ii = 0; ii < cities - 2; ii += blockDim.x) { int i = ii + threadIdx.x; float pxi0, pyi0, pxi1, pyi1, pxj1, pyj1; if (i < cities - 2) { minchange -= weight[i]; pxi0 = pos[i].x; pyi0 = pos[i].y; pxi1 = pos[i+1].x; pyi1 = pos[i+1].y; pxj1 = pos[0].x; pyj1 = pos[0].y; } for (int jj = cities - 1; jj >= ii + 2; jj -= TileSize) { int bound = jj - TileSize + 1; for(int k = threadIdx.x; k < TileSize; k += blockDim.x) { int index = k + bound; if (index >= (ii + 2)) { x_buffer[k] = pos[index].x; y_buffer[k] = pos[index].y; w_buffer[k] = weight[index]; } }__syncthreads(); int lower = bound; if (lower < i + 2) lower = i + 2; for (int j = jj; j >= lower; j--) { int jm = j - bound; float pxj0 = x_buffer[jm]; float pyj0 = y_buffer[jm]; int change = w_buffer[jm] + __float2int_rn(sqrtf((pxi0 - pxj0) * (pxi0 - pxj0) + (pyi0 - pyj0) * (pyi0 - pyj0))) + __float2int_rn(sqrtf((pxi1 - pxj1) * (pxi1 - pxj1) + (pyi1 - pyj1) * (pyi1 - pyj1))); pxj1 = pxj0; pyj1 = pyj0; if (minchange > change) { minchange = change; mini = i; minj = j; } }__syncthreads(); } if (i < cities - 2) { minchange += weight[i]; } } } // // Perform the swaps to the edges i and j to decrease the total length of our // path and update the weight and pos arrays appropriately. 
// // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @minchange - The current best change we can make // @mini - The ith city in the path that is part of the swap // @minj - The jth city in the path that is part of the swap // @cities - The number of cities along the path (excluding the end point) template <ThreadBufferStatus Status, int TileSize> static __device__ bool update(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int &cities, int* __restrict__ w_buffer) { maximum<Status,TileSize>(minchange, cities, w_buffer); if(w_buffer[0] >= 0) return false; while(w_buffer[0] < 0) { if (minchange == w_buffer[0]) { w_buffer[1] = threadIdx.x; }__syncthreads(); if(threadIdx.x==w_buffer[1]) { w_buffer[2] = mini; w_buffer[3] = minj; }__syncthreads(); int mi = w_buffer[2]; int mj = w_buffer[3]; if(!(minj < (mi - 1)) && !(mini > (mj + 1))) { minchange = 0; } // Fix path and weights reverse(mi+1+threadIdx.x,mj-threadIdx.x,pos,weight); // Fix connecting points weight[mi] = -dist(mi,mi+1); weight[mj] = -dist(mj,mj+1); __syncthreads(); maximum<Status,TileSize>(minchange, cities, w_buffer); } return true; } // // Given a path we randomly permute it into a new new path and then initialize the weights of the path. // // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @cities - The number of cities along the path (excluding the end point) static __device__ inline void permute(Data* &pos, int* &weight, const int &cities) { if (threadIdx.x == 0) { // serial permutation hiprandState_t rndstate; hiprand_init(blockIdx.x, 0, 0, &rndstate); for (int i = 1; i < cities; i++) { int j = hiprand(&rndstate) % (cities - 1) + 1; Data d = pos[i]; pos[i] = pos[j]; pos[j] = d; } pos[cities] = pos[0]; }__syncthreads(); for (int i = threadIdx.x; i < cities; i += blockDim.x) weight[i] = -dist(i, i + 1); __syncthreads(); } // // Releases memory and saves results // // @pos - Pointer to allocated path memory // @weight - Pointer to allocated edge weight memory // @local_climbs - The number of climbs performed by this block // @best_length - The best length this block found. static __device__ inline void cleanup(Data* &pos, int* &weight, int &local_climbs, int &best_length) { if (threadIdx.x == 0) { // Save data atomicAdd(&climbs_d,local_climbs); atomicMin(&best_d, best_length); // Release memory delete pos; delete weight; } } // // Perform iterative two-opt until there can be no more swaps to reduce the path length. // // @pos_d - The position of each point in the graph. 
// @cities - The number of vertices in the graph template <ThreadBufferStatus Status, int TileSize> static __global__ __launch_bounds__(1024, 2) void TwoOpt(const int Restarts, const Data *pos_d, const int cities) { Data *pos; int *weight; int local_climbs = 0; int best_length = INT_MAX; if( !initMemory<TileSize>(pos_d,pos,weight,cities) ) { if(threadIdx.x == 0) { printf("Memory initialization error for block %d\n", blockIdx.x); } return; } __shared__ float x_buffer[TileSize]; __shared__ float y_buffer[TileSize]; __shared__ int w_buffer[TileSize]; for(int r = nextInt() ; r < Restarts; r = nextInt()) { int mini,minj,minchange; permute(pos,weight,cities); do { ++local_climbs; minchange = mini = minj = 0; singleIter<TileSize>(pos, weight, minchange, mini, minj, cities, x_buffer, y_buffer, w_buffer); } while (update<Status,TileSize>(pos, weight, minchange, mini, minj, cities,w_buffer)); w_buffer[0] = 0; __syncthreads(); int term = 0; for (int i = threadIdx.x; i < cities; i += blockDim.x) { term += dist(i, i + 1); } atomicAdd(w_buffer,term); __syncthreads(); if(threadIdx.x==0) { if(w_buffer[0] < best_length) { best_length = w_buffer[0]; } } } cleanup(pos, weight, local_climbs, best_length); } // // Checks to see if an error occured with CUDA and if so prints out the message passed and the CUDA // error then quits the application. // // @msg - Message to print out if error occurs static void CudaTest(const char *msg) { hipError_t e; hipDeviceSynchronize(); if (hipSuccess != (e = hipGetLastError())) { fprintf(stderr, "%s: %d\n", msg, e); fprintf(stderr, "%s\n", hipGetErrorString(e)); exit(-1); } } #define mallocOnGPU(addr, size) if (hipSuccess != hipMalloc((void **)&addr, size)) fprintf(stderr, "could not allocate GPU memory\n"); CudaTest("couldn't allocate GPU memory"); #define copyToGPU(to, from, size) if (hipSuccess != hipMemcpy(to, from, size, hipMemcpyHostToDevice)) fprintf(stderr, "copying of data to device failed\n"); CudaTest("data copy to device failed"); // // Read TPS lib files into GPU memory. 
ATT and CEIL_2D edge weight types are not supported // // @fname - The name of the file to read the TSP data from // @pos_d - Pointer to the pointer that will hold data on GPU // and is modified here to be the address on the GPU // // @return - Returns the number of cities found static int readInput(const char *fname, Data **pos_d) { int ch, cnt, in1, cities; float in2, in3; FILE *f; Data *pos; char str[256]; // potential for buffer overrun f = fopen(fname, "rt"); if (f == NULL) {fprintf(stderr, "could not open file %s\n", fname); exit(-1);} ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f); if( fscanf(f, "%s\n", str) != 1 ) exit(-1); cities = atoi(str); if (cities <= 2) {fprintf(stderr, "only %d cities\n", cities); exit(-1);} pos = new Data[cities]; if (pos == NULL) {fprintf(stderr, "cannot allocate pos\n"); exit(-1);} ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); if(fscanf(f, "%s\n", str) != 1) exit(-1); if (strcmp(str, "NODE_COORD_SECTION") != 0) {fprintf(stderr, "wrong file format\n"); exit(-1);} cnt = 0; while (fscanf(f, "%d %f %f\n", &in1, &in2, &in3)) { pos[cnt].x = in2; pos[cnt].y = in3; ++cnt; if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);} if (cnt != in1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, in1); exit(-1);} } if (cnt != cities) {fprintf(stderr, "read %d instead of %d cities\n", cnt, cities); exit(-1);} if(fscanf(f, "%s", str) != 1) exit(-1); if (strcmp(str, "EOF") != 0) {fprintf(stderr, "didn't see 'EOF' at end of file\n"); exit(-1);} mallocOnGPU(*pos_d, sizeof(Data) * cities); copyToGPU(*pos_d, pos, sizeof(Data) * cities); fclose(f); delete (pos); return cities; } // // Given an enum value return it's string representation // // @status - The enum value to translate // // @return - The enums string representation in the source code static const std::string getName(const ThreadBufferStatus status) { switch(status) { case MORE_THREADS_THAN_BUFFER: return std::string("MORE_THREADS_THAN_BUFFER"); case EQUAL_SIZE: return std::string("EQUAL_SIZE"); case MORE_BUFFER_THAN_THREADS: return std::string("MORE_BUFFER_THAN_THREADS"); }; return std::string("enum value not found."); } // // Calculates the maximum number of resident blocks that the card can hold // // @Threads - Number of threads that each block will have // @Shared_Bytes - The amount of bytes each block will allocate // // @return - Returns the number of blocks the card can have resident static int getMaxBlocks(const int Shared_Bytes, const int Threads) { hipDeviceProp_t props; hipGetDeviceProperties(&props,0); if(props.major < 2) { const int Max_Shared = 16384; const int Block_Shared_Limit = (Max_Shared / Shared_Bytes); return props.multiProcessorCount * min(8,min(Block_Shared_Limit,(int)(2048/Threads))); }else if(props.major < 5) { const int Max_Shared = 32768; const int Block_Shared_Limit = (Max_Shared / Shared_Bytes); return props.multiProcessorCount * min(16,min(Block_Shared_Limit,(int)(2048/Threads))); }else { const int Max_Shared = 65536; const int Block_Shared_Limit = (Max_Shared / Shared_Bytes); return props.multiProcessorCount * min(32,min(Block_Shared_Limit,(int)(2048/Threads))); } } // // Given an integer returns the next multiple of 32 greater than or equal to it. 
// // @in - The integer to round to next multiple of 32 // // @return - Returns the next multiple of 32 that is greater than or equals to in static int next32(int in) { return ((in + 31) / 32 ) * 32; } // // Handle ThreadBufferStatus kernel selection // template <int TileSize> static float _wrapStatus(const int Restarts, const int Threads, const Data *Pos_d, const int Cities) { const int Shared_Bytes = (sizeof(int) + 2*sizeof(float)) * TileSize; const int Blocks = min(Restarts,getMaxBlocks(Shared_Bytes,Threads)); const ThreadBufferStatus Status = (Threads > TileSize) ? MORE_THREADS_THAN_BUFFER : (Threads < TileSize) ? MORE_BUFFER_THAN_THREADS : EQUAL_SIZE; float time; const int Device_Memory = (sizeof(int) + sizeof(Data)) * (Cities + 1)* 2*Blocks; hipDeviceSetLimit(hipLimitMallocHeapSize, Device_Memory); CudaTest("Change heap size"); // Output runtime configuration std::cout << "Blocks = " << Blocks << ", Threads = " << Threads << ", TileSize = " << TileSize << ", Status = " << getName(Status) << ", Shared Bytes = " << Shared_Bytes << ", Device Memory = " << Device_Memory/(1024.0f*1024.0f) << "MB" << std::endl; hipEvent_t begin,end; hipEventCreate(&begin); hipEventCreate(&end); hipDeviceSetCacheConfig(hipFuncCachePreferEqual); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeFourByte); switch(Status) { case MORE_THREADS_THAN_BUFFER: hipEventRecord(begin,0); hipLaunchKernelGGL(( TwoOpt<MORE_THREADS_THAN_BUFFER,TileSize>), dim3(Blocks),dim3(Threads), 0, 0, Restarts,Pos_d,Cities); CudaTest("Kernel Call"); hipEventRecord(end,0); hipEventSynchronize(end); break; case EQUAL_SIZE: hipEventRecord(begin,0); hipLaunchKernelGGL(( TwoOpt<EQUAL_SIZE,TileSize>), dim3(Blocks),dim3(Threads), 0, 0, Restarts,Pos_d,Cities); CudaTest("Kernel Call"); hipEventRecord(end,0); hipEventSynchronize(end); break; case MORE_BUFFER_THAN_THREADS: hipEventRecord(begin,0); hipLaunchKernelGGL(( TwoOpt<MORE_BUFFER_THAN_THREADS,TileSize>), dim3(Blocks),dim3(Threads), 0, 0, Restarts,Pos_d,Cities); CudaTest("Kernel Call"); hipEventRecord(end,0); hipEventSynchronize(end); break; }; hipEventElapsedTime(&time,begin,end); hipEventDestroy(begin); hipEventDestroy(end); return time; } // // Outer call to kernel selection // static float RunKernel(const int Cities, const Data *Pos, const int Restarts, const int Threads, const int TileSize) { switch(TileSize) { case 32: return _wrapStatus<32>(Restarts,Threads,Pos,Cities); case 64: return _wrapStatus<64>(Restarts,Threads,Pos,Cities); case 96: return _wrapStatus<96>(Restarts,Threads,Pos,Cities); case 128: return _wrapStatus<128>(Restarts,Threads,Pos,Cities); case 160: return _wrapStatus<160>(Restarts,Threads,Pos,Cities); case 192: return _wrapStatus<192>(Restarts,Threads,Pos,Cities); case 224: return _wrapStatus<224>(Restarts,Threads,Pos,Cities); case 256: return _wrapStatus<256>(Restarts,Threads,Pos,Cities); case 288: return _wrapStatus<288>(Restarts,Threads,Pos,Cities); case 320: return _wrapStatus<320>(Restarts,Threads,Pos,Cities); case 352: return _wrapStatus<352>(Restarts,Threads,Pos,Cities); case 384: return _wrapStatus<384>(Restarts,Threads,Pos,Cities); case 416: return _wrapStatus<416>(Restarts,Threads,Pos,Cities); case 448: return _wrapStatus<448>(Restarts,Threads,Pos,Cities); case 480: return _wrapStatus<480>(Restarts,Threads,Pos,Cities); case 512: return _wrapStatus<512>(Restarts,Threads,Pos,Cities); case 544: return _wrapStatus<544>(Restarts,Threads,Pos,Cities); case 576: return _wrapStatus<576>(Restarts,Threads,Pos,Cities); case 608: return 
_wrapStatus<608>(Restarts,Threads,Pos,Cities); case 640: return _wrapStatus<640>(Restarts,Threads,Pos,Cities); case 672: return _wrapStatus<672>(Restarts,Threads,Pos,Cities); case 704: return _wrapStatus<704>(Restarts,Threads,Pos,Cities); case 736: return _wrapStatus<736>(Restarts,Threads,Pos,Cities); case 768: return _wrapStatus<768>(Restarts,Threads,Pos,Cities); case 800: return _wrapStatus<800>(Restarts,Threads,Pos,Cities); case 832: return _wrapStatus<832>(Restarts,Threads,Pos,Cities); case 864: return _wrapStatus<864>(Restarts,Threads,Pos,Cities); case 896: return _wrapStatus<896>(Restarts,Threads,Pos,Cities); case 928: return _wrapStatus<928>(Restarts,Threads,Pos,Cities); case 960: return _wrapStatus<960>(Restarts,Threads,Pos,Cities); case 992: return _wrapStatus<992>(Restarts,Threads,Pos,Cities); case 1024: return _wrapStatus<1024>(Restarts,Threads,Pos,Cities); default: std::cout << "Invalid TileSize = " << TileSize << std::endl; exit(-1); }; return -1; } // // Main entry point to program. // int main(int argc, char *argv[]) { if (argc < 3 || argc > 5) {fprintf(stderr, "\narguments: input_file restart_count <threads> <tilesize> \n"); exit(-1);} const int Restarts = atoi(argv[2]); if (Restarts < 1) {fprintf(stderr, "restart_count is too small: %d\n", Restarts); exit(-1);} Data *pos_d; const int Cities = readInput(argv[1], &pos_d); printf("configuration: %d cities, %d restarts, %s input\n", Cities, Restarts, argv[1]); const int Threads = (argc >= 4) ? min(1024,next32(atoi(argv[3]))) : min(1024,next32(Cities)); const int TileSize = (argc >= 5) ? min( next32(atoi(argv[4])),1024) : Threads; const float time = RunKernel(Cities,pos_d,Restarts,Threads,TileSize); int hours = (int)(time / (3600.0f * 1000.0f)); int seconds = (int)(time/1000) % 60; int minutes = ((int)(time/1000) / 60) % 60; int climbs,best; hipMemcpyFromSymbol(&climbs,climbs_d,sizeof(int),0,hipMemcpyDeviceToHost); hipMemcpyFromSymbol(&best,best_d,sizeof(int),0,hipMemcpyDeviceToHost); long long moves = 1LL * climbs * (Cities - 2) * (Cities - 1) / 2; std::cout << moves * 0.000001 / time << "Gmoves/s" << std::endl; std::cout << "best found tour length = " << best << std::endl; std::cout << "Total Time : " << time / 1000.0f << "s" << std::endl; std::cout << "Hours = " << hours << ", Minutes = " << minutes << ", Seconds = " << seconds << ", Milliseconds = " << (int)(time) % 1000 << std::endl; hipDeviceReset(); hipFree(pos_d); return 0; }
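The device-side dist macro in the file above rounds the Euclidean distance to the nearest integer, i.e. the TSPLIB EUC_2D convention. A host-side cross-check of a finished tour could look like the sketch below; the Data struct mirrors the one in the file, hostDist and hostTourLength are invented names, and lroundf is only a close stand-in for __float2int_rn (exact .5 ties may round differently).

#include <cmath>

// Mirrors the {float x, y} Data struct used by the kernels above.
struct Data { float x, y; };

// Host-side counterpart of the device `dist` macro: nearest-integer Euclidean
// distance (TSPLIB EUC_2D style).
static int hostDist(const Data& a, const Data& b) {
    float dx = a.x - b.x, dy = a.y - b.y;
    return (int)lroundf(sqrtf(dx * dx + dy * dy));
}

// Recompute a closed tour's length on the host, assuming tour[cities] is a
// copy of tour[0], as the kernels above maintain.
static long long hostTourLength(const Data* tour, int cities) {
    long long len = 0;
    for (int i = 0; i < cities; ++i)
        len += hostDist(tour[i], tour[i + 1]);
    return len;
}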
0d2362f387ce8e66eb52b5bb193d08443d95296d.cu
// C++ #include <iostream> #include <string> // C #include <stdlib.h> #include <stdio.h> #include <math.h> #include <limits.h> // CUDA #include <cuda.h> #include <curand_kernel.h> // Force -Wall after this point, VC only (Check https://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Pragmas.html for GCC) #pragma warning(push,4) /******************************************************************************/ /*** 2-opt with random restarts ***********************************************/ /******************************************************************************/ #define dist(a, b) __float2int_rn(sqrtf((pos[a].x - pos[b].x) * (pos[a].x - pos[b].x) + (pos[a].y - pos[b].y) * (pos[a].y - pos[b].y))) #define swap(a, b) {float tmp = a; a = b; b = tmp;} static __device__ int climbs_d = 0; static __device__ int best_d = INT_MAX; static __device__ unsigned int restart_d = 0; enum ThreadBufferStatus {MORE_THREADS_THAN_BUFFER,EQUAL_SIZE,MORE_BUFFER_THAN_THREADS}; // Data structure used to hold position along path struct __align__(8) Data { float x,y; }; // // Returns a unique integer value with the intial value being 0 // // @return - Returns the next unique int static __device__ inline int nextInt() { __shared__ int next; if(threadIdx.x==0) { next = atomicAdd(&restart_d,1); }__syncthreads(); return next; } // Allocates and initializes my global memory and shared memory. // // @pos - An array that need to be initialized and will hold our path points // @weight - An array that need to be initialized and will hold our edge weights // @cities - The amount of points in our graph // // @return - Returns true if initialization was successful, false otherwise. template <int TileSize> static inline __device__ bool initMemory(const Data* &pos_d, Data* &pos, int * &weight, const int &cities) { __shared__ Data *d; __shared__ int *w; // Allocate my global memory if(threadIdx.x == 0 ) { d = new Data[cities + 1]; if(d != NULL) { w = new int[cities]; if(w == NULL) { printf("Could not allocated for weight"); delete d; d = NULL; } }else{ printf("Could not allocate for position"); } }__syncthreads(); if(d == NULL) { return false; } // Save new memory locations pos = d; weight = w; for (int i = threadIdx.x; i < cities; i += blockDim.x) pos[i] = pos_d[i]; __syncthreads(); return true; } // // Each thread gives some integer value, then the maximum of them is returned. // // @t_val - The number that the thread submits as a candidate for the maximum value // @cities - The number of cities. // // @return - The maximum value of t_val seen from all threads template <ThreadBufferStatus Status, int TileSize> static inline __device__ int maximum(int t_val, const int &cities, int* __restrict__ &w_buffer) { #if 0 // What happens if everyone writes to index 0 then atomicMin w_buffer[0] = t_val; __syncthreads(); atomicMin(w_buffer,t_val); __syncthreads(); #elif 0 // What happens if only thread 0 writes to index 0 then atomicMin if(threadIdx.x==0) w_buffer[0] = t_val; __syncthreads(); atomicMin(w_buffer,t_val); __syncthreads(); #elif 0 // What happens if everyone write to their own index then atomicMin on index 0? if(Status == MORE_THREADS_THAN_BUFFER) { w_buffer[threadIdx.x%TileSize] = t_val; // Serializes the writes }else{ w_buffer[threadIdx.x] = t_val; }__syncthreads(); atomicMin(w_buffer,t_val); __syncthreads(); #elif 0 // What happens if we atomicMin only when more threads than shared // to reduce number of values to test to fit in buffer? 
if(Status == MORE_THREADS_THAN_BUFFER) { w_buffer[threadIdx.x%TileSize] = t_val; __syncthreads(); atomicMin(threadIdx.x%TileSize + w_buffer,t_val); // serialized }else{ w_buffer[threadIdx.x] = t_val; }__syncthreads(); int upper = min(blockDim.x,min(TileSize,cities)); do { int offset = (upper + 1)/2; if( threadIdx.x < offset) { w_buffer[threadIdx.x] = t_val = min(t_val,w_buffer[threadIdx.x + offset]); }__syncthreads(); upper = offset; }while( upper > 1); #elif 0 // What happens if we don't use atomicMin to reduce amount of values to test but // rather use a for-loop to reduce the values? if(Status == MORE_THREADS_THAN_BUFFER) { const int Index = threadIdx.x % TileSize; w_buffer[Index] = t_val; __syncthreads(); for(int i = 0 ; i <= blockDim.x/TileSize; ++i) { if(t_val < w_buffer[Index]) { w_buffer[Index] = t_val; } } }else{ w_buffer[threadIdx.x] = t_val; } int upper = min(blockDim.x,min(TileSize,cities)); do { __syncthreads(); int offset = (upper + 1)/2; if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } upper = offset; }while( upper > 1); __syncthreads(); #else // What happens if we try to template out everything and loop unroll? int upper = min(blockDim.x,min(TileSize,cities)); if(Status == MORE_THREADS_THAN_BUFFER) { const int Index = threadIdx.x % TileSize; w_buffer[Index] = t_val; __syncthreads(); for(int i = 0 ; i <= (blockDim.x/TileSize); ++i ) { if(t_val < w_buffer[Index]) { w_buffer[Index] = t_val; } } }else { w_buffer[threadIdx.x] = t_val; }__syncthreads(); // No if (TileSize > 512 && blockDim.x > 512) { int offset = (upper + 1) / 2; // 200 if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } }__syncthreads(); upper = offset; } // No if (TileSize > 256 && blockDim.x > 256) { int offset = (upper + 1) / 2; // 100 if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } }__syncthreads(); upper = offset; } // No if (TileSize > 128 && blockDim.x > 128) { int offset = (upper + 1) / 2; // 50 if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } }__syncthreads(); upper = offset; } // No if (TileSize > 64 && blockDim.x > 64) { int offset = (upper + 1) / 2; // 25 if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } }__syncthreads(); upper = offset; } // 64 and down if(threadIdx.x < 32) { // Yes. upper = 32. 
w_buffer[tid] = t_val = min(t_val,w_buffer[threadIdx.x + 16] if(TileSize > 32 && blockDim.x > 32) { int tmp = w_buffer[threadIdx.x + (upper+1)/2]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } if(threadIdx.x < 16) { int tmp = w_buffer[threadIdx.x + 16]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } if(threadIdx.x < 8) { int tmp = w_buffer[threadIdx.x + 8]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } if(threadIdx.x < 4) { int tmp = w_buffer[threadIdx.x + 4]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } if(threadIdx.x < 2) { int tmp = w_buffer[threadIdx.x + 2]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } if(threadIdx.x < 1) { int tmp = w_buffer[threadIdx.x + 1]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } }__syncthreads(); #endif return w_buffer[0]; } // // After we find the best four position to reconnect with all we need to // reverse the path between them. // // @start - The first position in the sub-path we have to swap with the end // @end - The last position in the path we have to swap with the start // @pos - The positions in our path // @weights - The edge weights between points static inline __device__ void reverse(int start, int end, Data* &pos, int* &weight) { while(start<end) { int w = weight[start]; Data d = pos[start]; weight[start] = weight[end-1]; pos[start] = pos[end]; weight[end-1] = w; pos[end] = d; start += blockDim.x; end -= blockDim.x; }__syncthreads(); } // // Perform a single iteration of Two-Opt // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @minchange - The current best change we can make // @mini - The ith city in the path that is part of the swap // @minj - The jth city in the path that is part of the swap // @cities - The number of cities along the path (excluding the end point) template <int TileSize> static __device__ void singleIter(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int &cities, float* __restrict__ x_buffer, float* __restrict__ y_buffer, int* __restrict__ w_buffer) { for (int ii = 0; ii < cities - 2; ii += blockDim.x) { int i = ii + threadIdx.x; float pxi0, pyi0, pxi1, pyi1, pxj1, pyj1; if (i < cities - 2) { minchange -= weight[i]; pxi0 = pos[i].x; pyi0 = pos[i].y; pxi1 = pos[i+1].x; pyi1 = pos[i+1].y; pxj1 = pos[0].x; pyj1 = pos[0].y; } for (int jj = cities - 1; jj >= ii + 2; jj -= TileSize) { int bound = jj - TileSize + 1; for(int k = threadIdx.x; k < TileSize; k += blockDim.x) { int index = k + bound; if (index >= (ii + 2)) { x_buffer[k] = pos[index].x; y_buffer[k] = pos[index].y; w_buffer[k] = weight[index]; } }__syncthreads(); int lower = bound; if (lower < i + 2) lower = i + 2; for (int j = jj; j >= lower; j--) { int jm = j - bound; float pxj0 = x_buffer[jm]; float pyj0 = y_buffer[jm]; int change = w_buffer[jm] + __float2int_rn(sqrtf((pxi0 - pxj0) * (pxi0 - pxj0) + (pyi0 - pyj0) * (pyi0 - pyj0))) + __float2int_rn(sqrtf((pxi1 - pxj1) * (pxi1 - pxj1) + (pyi1 - pyj1) * (pyi1 - pyj1))); pxj1 = pxj0; pyj1 = pyj0; if (minchange > change) { minchange = change; mini = i; minj = j; } }__syncthreads(); } if (i < cities - 2) { minchange += weight[i]; } } } // // Perform the swaps to the edges i and j to decrease the total length of our // path and update the weight and pos arrays appropriately. 
// // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @minchange - The current best change we can make // @mini - The ith city in the path that is part of the swap // @minj - The jth city in the path that is part of the swap // @cities - The number of cities along the path (excluding the end point) template <ThreadBufferStatus Status, int TileSize> static __device__ bool update(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int &cities, int* __restrict__ w_buffer) { maximum<Status,TileSize>(minchange, cities, w_buffer); if(w_buffer[0] >= 0) return false; while(w_buffer[0] < 0) { if (minchange == w_buffer[0]) { w_buffer[1] = threadIdx.x; }__syncthreads(); if(threadIdx.x==w_buffer[1]) { w_buffer[2] = mini; w_buffer[3] = minj; }__syncthreads(); int mi = w_buffer[2]; int mj = w_buffer[3]; if(!(minj < (mi - 1)) && !(mini > (mj + 1))) { minchange = 0; } // Fix path and weights reverse(mi+1+threadIdx.x,mj-threadIdx.x,pos,weight); // Fix connecting points weight[mi] = -dist(mi,mi+1); weight[mj] = -dist(mj,mj+1); __syncthreads(); maximum<Status,TileSize>(minchange, cities, w_buffer); } return true; } // // Given a path we randomly permute it into a new new path and then initialize the weights of the path. // // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @cities - The number of cities along the path (excluding the end point) static __device__ inline void permute(Data* &pos, int* &weight, const int &cities) { if (threadIdx.x == 0) { // serial permutation curandState rndstate; curand_init(blockIdx.x, 0, 0, &rndstate); for (int i = 1; i < cities; i++) { int j = curand(&rndstate) % (cities - 1) + 1; Data d = pos[i]; pos[i] = pos[j]; pos[j] = d; } pos[cities] = pos[0]; }__syncthreads(); for (int i = threadIdx.x; i < cities; i += blockDim.x) weight[i] = -dist(i, i + 1); __syncthreads(); } // // Releases memory and saves results // // @pos - Pointer to allocated path memory // @weight - Pointer to allocated edge weight memory // @local_climbs - The number of climbs performed by this block // @best_length - The best length this block found. static __device__ inline void cleanup(Data* &pos, int* &weight, int &local_climbs, int &best_length) { if (threadIdx.x == 0) { // Save data atomicAdd(&climbs_d,local_climbs); atomicMin(&best_d, best_length); // Release memory delete pos; delete weight; } } // // Perform iterative two-opt until there can be no more swaps to reduce the path length. // // @pos_d - The position of each point in the graph. 
// @cities - The number of vertices in the graph template <ThreadBufferStatus Status, int TileSize> static __global__ __launch_bounds__(1024, 2) void TwoOpt(const int Restarts, const Data *pos_d, const int cities) { Data *pos; int *weight; int local_climbs = 0; int best_length = INT_MAX; if( !initMemory<TileSize>(pos_d,pos,weight,cities) ) { if(threadIdx.x == 0) { printf("Memory initialization error for block %d\n", blockIdx.x); } return; } __shared__ float x_buffer[TileSize]; __shared__ float y_buffer[TileSize]; __shared__ int w_buffer[TileSize]; for(int r = nextInt() ; r < Restarts; r = nextInt()) { int mini,minj,minchange; permute(pos,weight,cities); do { ++local_climbs; minchange = mini = minj = 0; singleIter<TileSize>(pos, weight, minchange, mini, minj, cities, x_buffer, y_buffer, w_buffer); } while (update<Status,TileSize>(pos, weight, minchange, mini, minj, cities,w_buffer)); w_buffer[0] = 0; __syncthreads(); int term = 0; for (int i = threadIdx.x; i < cities; i += blockDim.x) { term += dist(i, i + 1); } atomicAdd(w_buffer,term); __syncthreads(); if(threadIdx.x==0) { if(w_buffer[0] < best_length) { best_length = w_buffer[0]; } } } cleanup(pos, weight, local_climbs, best_length); } // // Checks to see if an error occured with CUDA and if so prints out the message passed and the CUDA // error then quits the application. // // @msg - Message to print out if error occurs static void CudaTest(const char *msg) { cudaError_t e; cudaThreadSynchronize(); if (cudaSuccess != (e = cudaGetLastError())) { fprintf(stderr, "%s: %d\n", msg, e); fprintf(stderr, "%s\n", cudaGetErrorString(e)); exit(-1); } } #define mallocOnGPU(addr, size) if (cudaSuccess != cudaMalloc((void **)&addr, size)) fprintf(stderr, "could not allocate GPU memory\n"); CudaTest("couldn't allocate GPU memory"); #define copyToGPU(to, from, size) if (cudaSuccess != cudaMemcpy(to, from, size, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of data to device failed\n"); CudaTest("data copy to device failed"); // // Read TPS lib files into GPU memory. 
ATT and CEIL_2D edge weight types are not supported // // @fname - The name of the file to read the TSP data from // @pos_d - Pointer to the pointer that will hold data on GPU // and is modified here to be the address on the GPU // // @return - Returns the number of cities found static int readInput(const char *fname, Data **pos_d) { int ch, cnt, in1, cities; float in2, in3; FILE *f; Data *pos; char str[256]; // potential for buffer overrun f = fopen(fname, "rt"); if (f == NULL) {fprintf(stderr, "could not open file %s\n", fname); exit(-1);} ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f); if( fscanf(f, "%s\n", str) != 1 ) exit(-1); cities = atoi(str); if (cities <= 2) {fprintf(stderr, "only %d cities\n", cities); exit(-1);} pos = new Data[cities]; if (pos == NULL) {fprintf(stderr, "cannot allocate pos\n"); exit(-1);} ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f); if(fscanf(f, "%s\n", str) != 1) exit(-1); if (strcmp(str, "NODE_COORD_SECTION") != 0) {fprintf(stderr, "wrong file format\n"); exit(-1);} cnt = 0; while (fscanf(f, "%d %f %f\n", &in1, &in2, &in3)) { pos[cnt].x = in2; pos[cnt].y = in3; ++cnt; if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);} if (cnt != in1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, in1); exit(-1);} } if (cnt != cities) {fprintf(stderr, "read %d instead of %d cities\n", cnt, cities); exit(-1);} if(fscanf(f, "%s", str) != 1) exit(-1); if (strcmp(str, "EOF") != 0) {fprintf(stderr, "didn't see 'EOF' at end of file\n"); exit(-1);} mallocOnGPU(*pos_d, sizeof(Data) * cities); copyToGPU(*pos_d, pos, sizeof(Data) * cities); fclose(f); delete (pos); return cities; } // // Given an enum value return it's string representation // // @status - The enum value to translate // // @return - The enums string representation in the source code static const std::string getName(const ThreadBufferStatus status) { switch(status) { case MORE_THREADS_THAN_BUFFER: return std::string("MORE_THREADS_THAN_BUFFER"); case EQUAL_SIZE: return std::string("EQUAL_SIZE"); case MORE_BUFFER_THAN_THREADS: return std::string("MORE_BUFFER_THAN_THREADS"); }; return std::string("enum value not found."); } // // Calculates the maximum number of resident blocks that the card can hold // // @Threads - Number of threads that each block will have // @Shared_Bytes - The amount of bytes each block will allocate // // @return - Returns the number of blocks the card can have resident static int getMaxBlocks(const int Shared_Bytes, const int Threads) { cudaDeviceProp props; cudaGetDeviceProperties(&props,0); if(props.major < 2) { const int Max_Shared = 16384; const int Block_Shared_Limit = (Max_Shared / Shared_Bytes); return props.multiProcessorCount * min(8,min(Block_Shared_Limit,(int)(2048/Threads))); }else if(props.major < 5) { const int Max_Shared = 32768; const int Block_Shared_Limit = (Max_Shared / Shared_Bytes); return props.multiProcessorCount * min(16,min(Block_Shared_Limit,(int)(2048/Threads))); }else { const int Max_Shared = 65536; const int Block_Shared_Limit = (Max_Shared / Shared_Bytes); return props.multiProcessorCount * min(32,min(Block_Shared_Limit,(int)(2048/Threads))); } } // // Given an integer returns the next multiple of 32 greater than or equal to it. 
// // @in - The integer to round to next multiple of 32 // // @return - Returns the next multiple of 32 that is greater than or equals to in static int next32(int in) { return ((in + 31) / 32 ) * 32; } // // Handle ThreadBufferStatus kernel selection // template <int TileSize> static float _wrapStatus(const int Restarts, const int Threads, const Data *Pos_d, const int Cities) { const int Shared_Bytes = (sizeof(int) + 2*sizeof(float)) * TileSize; const int Blocks = min(Restarts,getMaxBlocks(Shared_Bytes,Threads)); const ThreadBufferStatus Status = (Threads > TileSize) ? MORE_THREADS_THAN_BUFFER : (Threads < TileSize) ? MORE_BUFFER_THAN_THREADS : EQUAL_SIZE; float time; const int Device_Memory = (sizeof(int) + sizeof(Data)) * (Cities + 1)* 2*Blocks; cudaDeviceSetLimit(cudaLimitMallocHeapSize, Device_Memory); CudaTest("Change heap size"); // Output runtime configuration std::cout << "Blocks = " << Blocks << ", Threads = " << Threads << ", TileSize = " << TileSize << ", Status = " << getName(Status) << ", Shared Bytes = " << Shared_Bytes << ", Device Memory = " << Device_Memory/(1024.0f*1024.0f) << "MB" << std::endl; cudaEvent_t begin,end; cudaEventCreate(&begin); cudaEventCreate(&end); cudaDeviceSetCacheConfig(cudaFuncCachePreferEqual); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte); switch(Status) { case MORE_THREADS_THAN_BUFFER: cudaEventRecord(begin,0); TwoOpt<MORE_THREADS_THAN_BUFFER,TileSize><<<Blocks,Threads>>>(Restarts,Pos_d,Cities); CudaTest("Kernel Call"); cudaEventRecord(end,0); cudaEventSynchronize(end); break; case EQUAL_SIZE: cudaEventRecord(begin,0); TwoOpt<EQUAL_SIZE,TileSize><<<Blocks,Threads>>>(Restarts,Pos_d,Cities); CudaTest("Kernel Call"); cudaEventRecord(end,0); cudaEventSynchronize(end); break; case MORE_BUFFER_THAN_THREADS: cudaEventRecord(begin,0); TwoOpt<MORE_BUFFER_THAN_THREADS,TileSize><<<Blocks,Threads>>>(Restarts,Pos_d,Cities); CudaTest("Kernel Call"); cudaEventRecord(end,0); cudaEventSynchronize(end); break; }; cudaEventElapsedTime(&time,begin,end); cudaEventDestroy(begin); cudaEventDestroy(end); return time; } // // Outer call to kernel selection // static float RunKernel(const int Cities, const Data *Pos, const int Restarts, const int Threads, const int TileSize) { switch(TileSize) { case 32: return _wrapStatus<32>(Restarts,Threads,Pos,Cities); case 64: return _wrapStatus<64>(Restarts,Threads,Pos,Cities); case 96: return _wrapStatus<96>(Restarts,Threads,Pos,Cities); case 128: return _wrapStatus<128>(Restarts,Threads,Pos,Cities); case 160: return _wrapStatus<160>(Restarts,Threads,Pos,Cities); case 192: return _wrapStatus<192>(Restarts,Threads,Pos,Cities); case 224: return _wrapStatus<224>(Restarts,Threads,Pos,Cities); case 256: return _wrapStatus<256>(Restarts,Threads,Pos,Cities); case 288: return _wrapStatus<288>(Restarts,Threads,Pos,Cities); case 320: return _wrapStatus<320>(Restarts,Threads,Pos,Cities); case 352: return _wrapStatus<352>(Restarts,Threads,Pos,Cities); case 384: return _wrapStatus<384>(Restarts,Threads,Pos,Cities); case 416: return _wrapStatus<416>(Restarts,Threads,Pos,Cities); case 448: return _wrapStatus<448>(Restarts,Threads,Pos,Cities); case 480: return _wrapStatus<480>(Restarts,Threads,Pos,Cities); case 512: return _wrapStatus<512>(Restarts,Threads,Pos,Cities); case 544: return _wrapStatus<544>(Restarts,Threads,Pos,Cities); case 576: return _wrapStatus<576>(Restarts,Threads,Pos,Cities); case 608: return _wrapStatus<608>(Restarts,Threads,Pos,Cities); case 640: return _wrapStatus<640>(Restarts,Threads,Pos,Cities); case 672: 
return _wrapStatus<672>(Restarts,Threads,Pos,Cities); case 704: return _wrapStatus<704>(Restarts,Threads,Pos,Cities); case 736: return _wrapStatus<736>(Restarts,Threads,Pos,Cities); case 768: return _wrapStatus<768>(Restarts,Threads,Pos,Cities); case 800: return _wrapStatus<800>(Restarts,Threads,Pos,Cities); case 832: return _wrapStatus<832>(Restarts,Threads,Pos,Cities); case 864: return _wrapStatus<864>(Restarts,Threads,Pos,Cities); case 896: return _wrapStatus<896>(Restarts,Threads,Pos,Cities); case 928: return _wrapStatus<928>(Restarts,Threads,Pos,Cities); case 960: return _wrapStatus<960>(Restarts,Threads,Pos,Cities); case 992: return _wrapStatus<992>(Restarts,Threads,Pos,Cities); case 1024: return _wrapStatus<1024>(Restarts,Threads,Pos,Cities); default: std::cout << "Invalid TileSize = " << TileSize << std::endl; exit(-1); }; return -1; } // // Main entry point to program. // int main(int argc, char *argv[]) { if (argc < 3 || argc > 5) {fprintf(stderr, "\narguments: input_file restart_count <threads> <tilesize> \n"); exit(-1);} const int Restarts = atoi(argv[2]); if (Restarts < 1) {fprintf(stderr, "restart_count is too small: %d\n", Restarts); exit(-1);} Data *pos_d; const int Cities = readInput(argv[1], &pos_d); printf("configuration: %d cities, %d restarts, %s input\n", Cities, Restarts, argv[1]); const int Threads = (argc >= 4) ? min(1024,next32(atoi(argv[3]))) : min(1024,next32(Cities)); const int TileSize = (argc >= 5) ? min( next32(atoi(argv[4])),1024) : Threads; const float time = RunKernel(Cities,pos_d,Restarts,Threads,TileSize); int hours = (int)(time / (3600.0f * 1000.0f)); int seconds = (int)(time/1000) % 60; int minutes = ((int)(time/1000) / 60) % 60; int climbs,best; cudaMemcpyFromSymbol(&climbs,climbs_d,sizeof(int),0,cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(&best,best_d,sizeof(int),0,cudaMemcpyDeviceToHost); long long moves = 1LL * climbs * (Cities - 2) * (Cities - 1) / 2; std::cout << moves * 0.000001 / time << "Gmoves/s" << std::endl; std::cout << "best found tour length = " << best << std::endl; std::cout << "Total Time : " << time / 1000.0f << "s" << std::endl; std::cout << "Hours = " << hours << ", Minutes = " << minutes << ", Seconds = " << seconds << ", Milliseconds = " << (int)(time) % 1000 << std::endl; cudaDeviceReset(); cudaFree(pos_d); return 0; }
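The CudaTest helper in the program above synchronizes with cudaThreadSynchronize(), which is deprecated in current CUDA toolkits in favor of cudaDeviceSynchronize(). The snippet below is a minimal sketch of an equivalent checker using the non-deprecated call; the name checkCuda is invented for this illustration and is not part of the original file.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Synchronize the device and abort with a readable message if any earlier
// kernel launch or API call failed (same role as CudaTest above).
static void checkCuda(const char *msg)
{
  cudaError_t e = cudaDeviceSynchronize();      // non-deprecated replacement for cudaThreadSynchronize()
  if (e == cudaSuccess) e = cudaGetLastError(); // pick up asynchronous launch errors
  if (e != cudaSuccess) {
    fprintf(stderr, "%s: %s (%d)\n", msg, cudaGetErrorString(e), (int)e);
    exit(-1);
  }
}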
de04ab5bf631f339396e3777d82723e319ad5bc9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/layers/slice_layer.hpp" #include "HugeCTR/include/common.hpp" #include "HugeCTR/include/tensor.hpp" #include "HugeCTR/include/utils.cuh" #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { template <size_t length, typename T> __device__ int array_length(T (&arr)[length]) { return length; } template <typename T, typename... Args> __global__ void slice_kernel(bool forward, T* in, const int h, const int in_w, const int virt_w, const Args... args) { const typename SliceLayer<T>::OutParam out_params[] = {args...}; const int n_outs = array_length(out_params); for (int row = blockIdx.x; row < h; row += gridDim.x) { for (int k = 0; k < n_outs; k++) { int st = out_params[k].st; int ed = out_params[k].ed; int out_w = ed - st; for (int out_col = threadIdx.x; out_col < out_w; out_col += blockDim.x) { int in_col = out_col + st; int in_idx = row * in_w + in_col; int out_idx = row * out_w + out_col; T* out = out_params[k].out; if (forward) { out[out_idx] = in[in_idx]; } else { in[in_idx] += out[out_idx]; } } __syncthreads(); } } } } // anonymous namespace template <typename T> SliceLayer<T>::SliceLayer(const std::shared_ptr<Tensor<T>>& in_tensor, Tensors<T>& out_tensors, const std::shared_ptr<GeneralBuffer<T>>& blobs_buff, std::vector<std::pair<int, int>>& ranges, int device_id) : Layer(device_id), n_sms_(0), virt_w_(0) { try { CudaDeviceContext context(device_id); if (ranges.empty()) { CK_THROW_(Error_t::WrongInput, "Empty slice ranges is not allowed"); } if (!out_tensors.empty()) { CK_THROW_(Error_t::WrongInput, "output tensor vector must be empty"); } auto in_dims = in_tensor->get_dims(); if (in_dims.size() != 2) { CK_THROW_(Error_t::WrongInput, "Only 2D tensors can be concatenated"); } if (in_tensor->get_format() != TensorFormat_t::HW) { CK_THROW_(Error_t::WrongInput, "Only TensorFormat_t::HW is allowed"); } size_t height = in_dims[0]; int in_w = in_dims[1]; int prev_min = -1; int prev_max = 0; for (auto& range : ranges) { int cur_min = range.first; int cur_max = range.second; if (cur_min >= cur_max) { CK_THROW_(Error_t::WrongInput, "Reverse range is not allowed"); } if (cur_min < 0 || cur_max < 0) { CK_THROW_(Error_t::WrongInput, "Negative ranges cannot be allowed"); } if (!(prev_min <= cur_min && prev_max <= cur_max)) { CK_THROW_(Error_t::WrongInput, "A range cannot be out-order nor included in another"); } if (cur_min >= in_w || cur_max > in_w) { CK_THROW_(Error_t::WrongInput, "Ranges cannot be bigger than the input width"); } size_t out_w = cur_max - cur_min; std::vector<size_t> out_dims = {height, out_w}; out_tensors.emplace_back(new Tensor<T>(out_dims, blobs_buff, TensorFormat_t::HW)); sts_.push_back(cur_min); virt_w_ += out_w; prev_min = cur_min; prev_max = cur_max; } in_tensors_.emplace_back(in_tensor); for (auto& out_tensor : out_tensors) { out_tensors_.emplace_back(out_tensor); 
} int device = get_device_id(); CK_CUDA_THROW_(hipDeviceGetAttribute(&n_sms_, hipDeviceAttributeMultiprocessorCount, device)); assert(n_sms_ > 0); } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } template <typename T> void SliceLayer<T>::fprop(hipStream_t stream) { prop_common(true, stream); } template <typename T> void SliceLayer<T>::bprop(hipStream_t stream) { prop_common(false, stream); } template <typename T> void SliceLayer<T>::prop_common(bool forward, hipStream_t stream) { CudaDeviceContext context(get_device_id()); int n_out_tensors = out_tensors_.size(); if (n_out_tensors == 2) { std::vector<OutParam> out_params = set_out_params(2); kernel_launch(forward, stream, out_params[0], out_params[1]); } else if (n_out_tensors == 3) { std::vector<OutParam> out_params = set_out_params(3); kernel_launch(forward, stream, out_params[0], out_params[1], out_params[2]); } else if (n_out_tensors == 4) { std::vector<OutParam> out_params = set_out_params(4); kernel_launch(forward, stream, out_params[0], out_params[1], out_params[2], out_params[3]); } else if (n_out_tensors == 5) { std::vector<OutParam> out_params = set_out_params(5); kernel_launch(forward, stream, out_params[0], out_params[1], out_params[2], out_params[3], out_params[4]); } else { CK_THROW_(Error_t::UnSupportedFormat, "Slicing into > 5 layers is not supported"); } #ifndef NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } template <typename T> std::vector<typename SliceLayer<T>::OutParam> SliceLayer<T>::set_out_params(int n) { std::vector<OutParam> out_params; for (int i = 0; i < n; i++) { const auto& out_tensor = out_tensors_[i]; T* out = out_tensor->get_ptr(); int st = sts_[i]; int w = out_tensor->get_dims()[1]; out_params.push_back({out, st, st + w}); } return std::move(out_params); } template <typename T> template <typename... Args> void SliceLayer<T>::kernel_launch(bool forward, hipStream_t stream, Args&... args) { int block_size = 512; int n_blocks = n_sms_ * 4; const auto& in_tensor = in_tensors_[0]; T* in = in_tensor->get_ptr(); int h = in_tensor->get_dims()[0]; int in_w = in_tensor->get_dims()[1]; if (!forward) { hipLaunchKernelGGL(( initialize_array), dim3(n_blocks), dim3(block_size), 0, stream, in, h * in_w, T(0)); } hipLaunchKernelGGL(( slice_kernel), dim3(n_blocks), dim3(block_size), 0, stream, forward, in, h, in_w, virt_w_, args...); } template class SliceLayer<float>; template class SliceLayer<__half>; } // namespace HugeCTR
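The HIP file above and the CUDA file that follows differ almost entirely in mechanical API renames, which is what the hipify banner at the top of the file refers to. The snippet below is a small stand-alone illustration of the kind of renames seen in this pair (launch syntax, stream type, device-attribute query); the kernel name scale and its arguments are invented for the example, and the HIP forms appear only in comments.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float *v, int n, float a)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] *= a;
}

int main()
{
  int n_sms = 0;
  // HIP spelling used above: hipDeviceGetAttribute(&n_sms, hipDeviceAttributeMultiprocessorCount, 0);
  cudaDeviceGetAttribute(&n_sms, cudaDevAttrMultiProcessorCount, 0);

  int n = 1 << 10;
  float *d = nullptr;
  cudaMalloc(&d, n * sizeof(float));    // HIP: hipMalloc
  cudaMemset(d, 0, n * sizeof(float));  // HIP: hipMemset

  cudaStream_t stream;                  // HIP: hipStream_t, hipStreamCreate
  cudaStreamCreate(&stream);

  // HIP spelling used above:
  //   hipLaunchKernelGGL(scale, dim3(n_sms), dim3(256), 0, stream, d, n, 2.0f);
  scale<<<n_sms, 256, 0, stream>>>(d, n, 2.0f);

  cudaStreamSynchronize(stream);        // HIP: hipStreamSynchronize
  printf("launched on %d multiprocessors\n", n_sms);
  cudaStreamDestroy(stream);
  cudaFree(d);
  return 0;
}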
de04ab5bf631f339396e3777d82723e319ad5bc9.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/layers/slice_layer.hpp" #include "HugeCTR/include/common.hpp" #include "HugeCTR/include/tensor.hpp" #include "HugeCTR/include/utils.cuh" #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { template <size_t length, typename T> __device__ int array_length(T (&arr)[length]) { return length; } template <typename T, typename... Args> __global__ void slice_kernel(bool forward, T* in, const int h, const int in_w, const int virt_w, const Args... args) { const typename SliceLayer<T>::OutParam out_params[] = {args...}; const int n_outs = array_length(out_params); for (int row = blockIdx.x; row < h; row += gridDim.x) { for (int k = 0; k < n_outs; k++) { int st = out_params[k].st; int ed = out_params[k].ed; int out_w = ed - st; for (int out_col = threadIdx.x; out_col < out_w; out_col += blockDim.x) { int in_col = out_col + st; int in_idx = row * in_w + in_col; int out_idx = row * out_w + out_col; T* out = out_params[k].out; if (forward) { out[out_idx] = in[in_idx]; } else { in[in_idx] += out[out_idx]; } } __syncthreads(); } } } } // anonymous namespace template <typename T> SliceLayer<T>::SliceLayer(const std::shared_ptr<Tensor<T>>& in_tensor, Tensors<T>& out_tensors, const std::shared_ptr<GeneralBuffer<T>>& blobs_buff, std::vector<std::pair<int, int>>& ranges, int device_id) : Layer(device_id), n_sms_(0), virt_w_(0) { try { CudaDeviceContext context(device_id); if (ranges.empty()) { CK_THROW_(Error_t::WrongInput, "Empty slice ranges is not allowed"); } if (!out_tensors.empty()) { CK_THROW_(Error_t::WrongInput, "output tensor vector must be empty"); } auto in_dims = in_tensor->get_dims(); if (in_dims.size() != 2) { CK_THROW_(Error_t::WrongInput, "Only 2D tensors can be concatenated"); } if (in_tensor->get_format() != TensorFormat_t::HW) { CK_THROW_(Error_t::WrongInput, "Only TensorFormat_t::HW is allowed"); } size_t height = in_dims[0]; int in_w = in_dims[1]; int prev_min = -1; int prev_max = 0; for (auto& range : ranges) { int cur_min = range.first; int cur_max = range.second; if (cur_min >= cur_max) { CK_THROW_(Error_t::WrongInput, "Reverse range is not allowed"); } if (cur_min < 0 || cur_max < 0) { CK_THROW_(Error_t::WrongInput, "Negative ranges cannot be allowed"); } if (!(prev_min <= cur_min && prev_max <= cur_max)) { CK_THROW_(Error_t::WrongInput, "A range cannot be out-order nor included in another"); } if (cur_min >= in_w || cur_max > in_w) { CK_THROW_(Error_t::WrongInput, "Ranges cannot be bigger than the input width"); } size_t out_w = cur_max - cur_min; std::vector<size_t> out_dims = {height, out_w}; out_tensors.emplace_back(new Tensor<T>(out_dims, blobs_buff, TensorFormat_t::HW)); sts_.push_back(cur_min); virt_w_ += out_w; prev_min = cur_min; prev_max = cur_max; } in_tensors_.emplace_back(in_tensor); for (auto& out_tensor : out_tensors) { out_tensors_.emplace_back(out_tensor); } int device = get_device_id(); CK_CUDA_THROW_(cudaDeviceGetAttribute(&n_sms_, 
cudaDevAttrMultiProcessorCount, device)); assert(n_sms_ > 0); } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } template <typename T> void SliceLayer<T>::fprop(cudaStream_t stream) { prop_common(true, stream); } template <typename T> void SliceLayer<T>::bprop(cudaStream_t stream) { prop_common(false, stream); } template <typename T> void SliceLayer<T>::prop_common(bool forward, cudaStream_t stream) { CudaDeviceContext context(get_device_id()); int n_out_tensors = out_tensors_.size(); if (n_out_tensors == 2) { std::vector<OutParam> out_params = set_out_params(2); kernel_launch(forward, stream, out_params[0], out_params[1]); } else if (n_out_tensors == 3) { std::vector<OutParam> out_params = set_out_params(3); kernel_launch(forward, stream, out_params[0], out_params[1], out_params[2]); } else if (n_out_tensors == 4) { std::vector<OutParam> out_params = set_out_params(4); kernel_launch(forward, stream, out_params[0], out_params[1], out_params[2], out_params[3]); } else if (n_out_tensors == 5) { std::vector<OutParam> out_params = set_out_params(5); kernel_launch(forward, stream, out_params[0], out_params[1], out_params[2], out_params[3], out_params[4]); } else { CK_THROW_(Error_t::UnSupportedFormat, "Slicing into > 5 layers is not supported"); } #ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } template <typename T> std::vector<typename SliceLayer<T>::OutParam> SliceLayer<T>::set_out_params(int n) { std::vector<OutParam> out_params; for (int i = 0; i < n; i++) { const auto& out_tensor = out_tensors_[i]; T* out = out_tensor->get_ptr(); int st = sts_[i]; int w = out_tensor->get_dims()[1]; out_params.push_back({out, st, st + w}); } return std::move(out_params); } template <typename T> template <typename... Args> void SliceLayer<T>::kernel_launch(bool forward, cudaStream_t stream, Args&... args) { int block_size = 512; int n_blocks = n_sms_ * 4; const auto& in_tensor = in_tensors_[0]; T* in = in_tensor->get_ptr(); int h = in_tensor->get_dims()[0]; int in_w = in_tensor->get_dims()[1]; if (!forward) { initialize_array<<<n_blocks, block_size, 0, stream>>>(in, h * in_w, T(0)); } slice_kernel<<<n_blocks, block_size, 0, stream>>>(forward, in, h, in_w, virt_w_, args...); } template class SliceLayer<float>; template class SliceLayer<__half>; } // namespace HugeCTR
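slice_kernel in this pair receives its per-output descriptors as a variadic parameter pack and materializes them into a fixed-size local array (const OutParam out_params[] = {args...}), which is why prop_common has an explicit branch for each output count from 2 to 5. Below is a minimal sketch of that pattern with a made-up Span struct; it illustrates the technique only and is not HugeCTR code.

#include <cstdio>
#include <cuda_runtime.h>

struct Span { float *out; int st; int ed; };  // stand-in for SliceLayer<T>::OutParam

template <size_t N, typename T>
__device__ int array_length(T (&)[N]) { return N; }

// Each variadic argument becomes one element of a kernel-local array, so the
// number of outputs is fixed at compile time for every instantiation.
template <typename... Args>
__global__ void fill_slices(float value, Args... args)
{
  const Span spans[] = {args...};
  const int n = array_length(spans);
  for (int k = 0; k < n; k++)
    for (int i = spans[k].st + threadIdx.x; i < spans[k].ed; i += blockDim.x)
      spans[k].out[i] = value + k;
}

int main()
{
  float *buf = nullptr;
  cudaMalloc(&buf, 8 * sizeof(float));
  // Host-side dispatch mirrors prop_common: one explicit call per output count.
  fill_slices<<<1, 32>>>(1.0f, Span{buf, 0, 4}, Span{buf, 4, 8});
  cudaDeviceSynchronize();
  float h[8];
  cudaMemcpy(h, buf, sizeof(h), cudaMemcpyDeviceToHost);
  printf("%f %f\n", h[0], h[4]);  // expect 1.000000 and 2.000000
  cudaFree(buf);
  return 0;
}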
6f25b1e3b2f6ef7387d6481b6a6897bd9184ffea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THCS_GENERIC_FILE #define THCS_GENERIC_FILE "generic/THCSTensorMath.cu" #else #include "THHThrustAllocator.cuh" #include "THHNumerics.cuh" #include <thrust/device_ptr.h> #include <thrust/sequence.h> #define ROW_PTR2(t, r) (THCTensor_(data)(THCState *state, t) + (r) * (t)->stride[0]) #define COL_PTR2(t, c) (THCTensor_(data)(THCState *state, t) + (c) * (t)->stride[1]) #define I_INFO(tensor) getTensorInfo<int64_t, THCIndexTensor, uint64_t>(state, tensor) #define V_INFO(tensor) getTensorInfo<real, THCTensor, uint64_t>(state, tensor) THCudaIntTensor *THCSTensor_(toCSR)(THCState *state, THCIndexTensor *rowIndices, int64_t dim, int64_t nnz) { THCudaIntTensor *csr = THCudaIntTensor_newWithSize1d(state, dim + 1); THCudaIntTensor *rowIndicesInt = THCudaIntTensor_newWithSize1d(state, rowIndices->size[0]); THCudaIntTensor_copyCudaLong(state, rowIndicesInt, rowIndices); THCudaSparse_Xcoo2csr( state, THCudaIntTensor_data(state, rowIndicesInt), nnz, dim, THCudaIntTensor_data(state, csr)); THCudaIntTensor_free(state, rowIndicesInt); return csr; } void THCSTensor_(zero)(THCState *state, THCSTensor *self) { if (self->indices->nDimension) { THCIndexTensor_(resizeNd)(state, self->indices, 0, NULL, NULL); } if (self->values->nDimension) { THCTensor_(resizeNd)(state, self->values, 0, NULL, NULL); } self->nnz = 0; } void THCSTensor_(zeros)(THCState *state, THCSTensor *r_, THLongStorage *size) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 1, r_)); THCSTensor_(resize)(state, r_, size); THCSTensor_(zero)(state, r_); } void THCSTensor_(zerosLike)(THCState *state, THCSTensor *r_, THCSTensor *input) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 2, r_, input)); THCSTensor_(resizeAs)(state, r_, input); THCSTensor_(zero)(state, r_); } void THCTensor_(spaddcmul)(THCState *state, THCTensor *r_, THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) { THError("WARNING: Sparse Cuda Tensor op spaddcmul is not implemented"); } void THCTensor_(spaddcdiv)(THCState *state, THCTensor *r_, THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) { THError("WARNING: Sparse Cuda Tensor op spaddcdiv is not implemented"); } void THCSTensor_(spaddmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCSTensor *sparse_, THCTensor *dense) { #if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE) THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 4, sparse_, r_, t, dense)); THCudaIntTensor *csr; THCIndexTensor *indices; THCTensor *values, *r__, *dense_; THArgCheck(sparse_->nDimensionI == 2, 2, "matrices expected, got %dD tensor", sparse_->nDimensionI); THArgCheck(sparse_->nDimensionV == 0, 2, "scalar values expected, got %dD values", sparse_->nDimensionV); THArgCheck(dense->nDimension == 2, 2, "matrices expected, got %dD tensor", dense->nDimension); int64_t m = THCSTensor_(size)(state, sparse_, 0); int64_t k = THCSTensor_(size)(state, sparse_, 1); int64_t n = THCTensor_(size)(state, dense, 1); THCTensor_(resize2d)(state, r_, m, n); THArgCheck(THCTensor_(size)(state, t, 0) == m, 1, "Expected dim 0 size %d, got %d", m, THCTensor_(size)(state, t, 0)); THArgCheck(THCTensor_(size)(state, t, 1) == n, 1, "Expected dim 1 size %d, got %d", n, THCTensor_(size)(state, t, 1)); THArgCheck(THCTensor_(size)(state, dense, 0) == k, 3, "Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0)); THCSTensor *sparse = THCSTensor_(newCoalesce)(state, sparse_); int64_t nnz = THCSTensor_(nnz)(state, sparse); 
indices = THCSTensor_(newIndices)(state, sparse); values = THCSTensor_(newValues)(state, sparse); THCIndexTensor *rowIndices = THCIndexTensor_(newSelect)(state, indices, 0, 0); THCIndexTensor *colIndices = THCIndexTensor_(newSelect)(state, indices, 0, 1); csr = THCSTensor_(toCSR)(state, rowIndices, m, nnz); THCudaIntTensor *colIndicesInt = THCudaIntTensor_newWithSize1d(state, colIndices->size[0]); THCudaIntTensor_copyCudaLong(state, colIndicesInt, colIndices); char transpose_dense; if (beta == 0) { THCTensor_(zero)(state, r_); } else if (beta == ScalarConvert<int, real>::to(1)) { if (t != r_) { THCTensor_(copy)(state, r_, t); } } else { THCTensor_(mul)(state, r_, t, beta); } /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] == r_->size[0]) { r__ = r_; THCTensor_(retain)(state, r__); } else { THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* dense */ if(dense->stride[0] == 1 && dense->stride[1] == dense->size[0]) { transpose_dense = 'n'; dense_ = dense; THCTensor_(retain)(state, dense_); } else if(dense->stride[1] == 1 && dense->stride[0] != dense->size[1]) { transpose_dense = 't'; dense_ = dense; THCTensor_(retain)(state, dense_); } else { transpose_dense = 't'; dense_ = THCTensor_(newContiguous)(state, dense); } #if defined(THCS_REAL_IS_FLOAT) THCudaSparse_Scsrmm2( #elif defined(THCS_REAL_IS_DOUBLE) THCudaSparse_Dcsrmm2( #endif state, 'n', transpose_dense, m, n, k, nnz, alpha, THCTensor_(data)(state, values), THCudaIntTensor_data(state, csr), THCudaIntTensor_data(state, colIndicesInt), THCTensor_(data)(state, dense_), (transpose_dense == 'n' ? dense_->stride[1] : dense_->stride[0]), beta, THCTensor_(data)(state, r__), r__->stride[1]); /* free intermediate variables */ THCTensor_(free)(state, dense_); THCTensor_(freeCopyTo)(state, r__, r_); THCudaIntTensor_free(state, colIndicesInt); THCudaIntTensor_free(state, csr); THCIndexTensor_(free)(state, indices); THCIndexTensor_(free)(state, rowIndices); THCIndexTensor_(free)(state, colIndices); THCTensor_(free)(state, values); THCSTensor_(free)(state, sparse); #else THError("unimplemented data type"); #endif } void THCSTensor_(sspaddmm)(THCState *state, THCSTensor *r_, real beta, THCSTensor *t, real alpha, THCSTensor *sparse, THCTensor *dense) { THError("WARNING: Sparse Cuda Tensor op sspaddmm is not implemented"); // TODO Write some kernels } void THCSTensor_(hspmm)(THCState *state, THCSTensor *r_, real alpha, THCSTensor *sparse_, THCTensor *dense) { #if TORCH_HIP_VERSION >= 7000 || defined(__HIP_PLATFORM_HCC__) THCThrustAllocator thrustAlloc(state); #define THRUST_EXEC(fn, ...) fn(thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), ##__VA_ARGS__) #else #define THRUST_EXEC(fn, ...) 
fn(##__VA_ARGS__) #endif THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 3, r_, sparse_, dense)); THArgCheck(sparse_->nDimensionI == 2, 3, "matrices expected, got %dD tensor", sparse_->nDimensionI); THArgCheck(sparse_->nDimensionV == 0, 3, "scalar values expected, got %dD values", sparse_->nDimensionV); THArgCheck(dense->nDimension == 2, 4, "matrices expected, got %dD tensor", dense->nDimension); int64_t m = THCSTensor_(size)(state, sparse_, 0); int64_t k = THCSTensor_(size)(state, sparse_, 1); int64_t n = THCTensor_(size)(state, dense, 1); THArgCheck(THCTensor_(size)(state, dense, 0) == k, 4, "Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0)); int64_t size[2] = {m, n}; THCSTensor_(rawResize)(state, r_, 1, 1, size); THCSTensor *sparse = THCSTensor_(newCoalesce)(state, sparse_); int64_t nnz = THCSTensor_(nnz)(state, sparse); THCIndexTensor *indices = THCIndexTensor_(newWithSize2d)(state, 1, nnz); // create values in column-major format to avoid copying in spaddmm THCTensor *values = THCTensor_(newWithSize2d)(state, n, nnz); THCTensor_(transpose)(state, values, NULL, 0, 1); // why does sparse need to be cloned? If this is really necessary maybe we // need to fuse this with newCoalesce THCSTensor *newSparse = THCSTensor_(newClone)(state, sparse); THCIndexTensor *spIndices = THCSTensor_(newIndices)(state, newSparse); THCIndexTensor *dstIndices = THCIndexTensor_(newSelect)(state, spIndices, 0, 0); // Save destination indices to output hybrid tensor THCIndexTensor_(copy)(state, indices, dstIndices); // Replace destination indices with 0, 1, 2, 3, ... and compute output values // tensor with sparse * dense multiplication thrust::device_ptr<indexT> indicesIter(THCIndexTensor_(data)(state, dstIndices)); THRUST_EXEC(thrust::sequence, indicesIter, indicesIter + nnz); newSparse->size[0] = nnz; THCSTensor_(spaddmm)(state, values, ScalarConvert<int, real>::to(0), values, alpha, newSparse, dense); THCSTensor_(_move)(state, r_, indices, values); THCSTensor_(free)(state, newSparse); THCIndexTensor_(free)(state, spIndices); THCIndexTensor_(free)(state, dstIndices); THCSTensor_(free)(state, sparse); #undef THRUST_EXEC } void THCSTensor_(spcadd)(THCState *state, THCTensor *r_, THCTensor *dense, real value, THCSTensor *sparse) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 3, sparse, r_, dense)); const ptrdiff_t nnz = THCSTensor_(nnz)(state, sparse); if (nnz == 0) { THCTensor_(resizeAs)(state, r_, dense); THCTensor_(copy)(state, r_, dense); return; } THCTensor *r = r_; if (r != dense) { THCTensor_(retain)(state, r); THCTensor_(resizeAs)(state, r, dense); THCTensor_(copy)(state, r, dense); } else { if (!THCTensor_(isContiguous)(state, r_)) { THError("CUDA sparse spcadd: known bug"); } r = THCTensor_(newContiguous)(state, r_); } THCIndexTensor *indices = THCSTensor_(newIndices)(state, sparse); THCTensor *values = THCSTensor_(newValues)(state, sparse); int64_t nDim = THCTensor_(nDimension)(state, dense); int64_t nDimI = THCSTensor_(nDimensionI)(state, sparse); if (THCSTensor_(isCoalesced)(state, sparse)) { // TODO benchmark to decide whether to remove this special case const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); if (sparse->nDimensionV == 0) { THArgCheck(getApplyGrid(state, nnz, grid, curDevice), 1, CUTORCH_DIM_WARNING); hipLaunchKernelGGL(( THCSTensor_sparseElementwiseKernelScalar<TensorCAddOp<real>, uint64_t, real>) , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), TensorCAddOp<real>(value), V_INFO(r_), 
I_INFO(indices), V_INFO(values), (uint64_t) nnz); } else { THArgCheck(getApplyGrid(state, nnz * block.x, grid, curDevice), 1, CUTORCH_DIM_WARNING); hipLaunchKernelGGL(( THCSTensor_sparseElementwiseKernel<TensorCAddOp<real>, uint64_t, real>) , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), TensorCAddOp<real>(value), V_INFO(r_), I_INFO(indices), V_INFO(values), (uint64_t) nnz); } } else { THCIndexTensor *indices1D = THCSTensor_(newFlattenedIndices)(state, sparse, 0); THCIndexTensor_(resize1d)(state, indices1D, nnz); if (value != ScalarConvert<int, real>::to(1)) { // FIXME: at some point we can wrap the scale into indexAdd THCTensor *scaled = THCTensor_(new)(state); THCTensor_(mul)(state, scaled, values, value); THCTensor_(free)(state, values); values = scaled; } int64_t view_rows = 1; int64_t view_columns = 1; THLongStorage *r_size = THCTensor_(newSizeOf)(state, r); for (int i = 0; i < nDimI; i++) view_rows *= THLongStorage_data(r_size)[i]; for (int i = nDimI; i < nDim; i++) view_columns *= THLongStorage_data(r_size)[i]; THLongStorage *r_view_size = THLongStorage_newWithSize2(view_rows, view_columns); THCTensor *r_view = THCTensor_(newView)(state, r, r_view_size); THCTensor_(resize2d)(state, values, nnz, view_columns); THCTensor_(indexAdd)(state, r_view, 0, indices1D, values); THCIndexTensor_(free)(state, indices1D); THLongStorage_free(r_size); THLongStorage_free(r_view_size); THCTensor_(free)(state, r_view); } THCudaCheck(hipGetLastError()); THCIndexTensor_(free)(state, indices); THCTensor_(free)(state, values); THCTensor_(free)(state, r); } void THCSTensor_(mul)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) { if (r_ == t) { THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCTensor_(mul)(state, r_values_, r_values_, value); THCTensor_(free)(state, r_values_); } else { THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_); THCIndexTensor_(copy)(state, r_indices_, t_indices_); THCTensor_(mul)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); } } void THCSTensor_(div)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) { if (r_ == t) { THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCTensor_(div)(state, r_values_, r_values_, value); THCTensor_(free)(state, r_values_); } else { THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_); THCIndexTensor_(copy)(state, r_indices_, t_indices_); THCTensor_(div)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); } } int THCSTensor_(isSameSizeIgnoringDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) { int d; if 
(self->nDimensionI + self->nDimensionV != src->nDimensionI + src->nDimensionV) { return 0; } for(d = 0; d < self->nDimensionI + self->nDimensionV; ++d) { if(self->size[d] != src->size[d]) { return 0; } } return 1; } int THCSTensor_(isSameDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) { return self->nDimensionI == src->nDimensionI && self->nDimensionV == src->nDimensionV; } void THCSTensor_(cadd)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t, src)); if (!THCSTensor_(isSameSizeIgnoringDensity)(state, t, src)) { THError("cadd operands have incompatible sizes"); } if (src->nnz == 0) { THCSTensor_(copy)(state, r_, t); return; } if (t->nnz == 0) { THCSTensor_(mul)(state, r_, src, value); return; } if(!THCSTensor_(isSameDensity)(state, t, src)) { THError("cadd operands have incompatible densities"); } // We deliberately choose to simply concat the indices and values tensors // rather than merging them. This removes the need to synchronously fetch nnz // at the end of the operation, at the cost of having a non-coalesced result. // This trade-off is preferable for the common use-case of gradient accumulation. THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src); THCTensor *s_values_ = THCSTensor_(newValues)(state, src); if (value != ScalarConvert<int, real>::to(1)) { THCTensor *s_values_orig = s_values_; s_values_ = THCTensor_(new)(state); THCTensor_(mul)(state, s_values_, s_values_orig, value); THCTensor_(free)(state, s_values_orig); } THCIndexTensor *r_indices_ = THCIndexTensor_(new)(state); THCTensor *r_values_ = THCTensor_(new)(state); THCIndexTensor_(cat)(state, r_indices_, t_indices_, s_indices_, 1); THCTensor_(cat)(state, r_values_, t_values_, s_values_, 0); THCSTensor_(resizeAs)(state, r_, src); THCSTensor_(_move)(state, r_, r_indices_, r_values_); // FIXME: add some heuristic about when to call coalesce() here, so that // tensors don't totally blow up in size by concatenation; e.g. // r->minUnique = max(a->minUnique + b->minUnique); // if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) { // THCSTensor_(contiguous)(r); // r->minUnique = r->nnz; // } THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCIndexTensor_(free)(state, s_indices_); THCTensor_(free)(state, s_values_); } void THCSTensor_(csub)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) { THCSTensor_(cadd)(state, r_, t, ScalarNegate<real>::to(value), src); } void THCSTensor_(cmul)(THCState *state, THCSTensor *r_, THCSTensor *t_, THCSTensor *src_) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t_, src_)); if(!THCSTensor_(isSameSizeAs)(state, t_, src_)) { THError("cmul operands have incompatible sizes or dimension types"); } THCSTensor *t = THCSTensor_(newCoalesce)(state, t_); THCSTensor *src = THCSTensor_(newCoalesce)(state, src_); if (t->nnz == 0 || src->nnz == 0) { THCSTensor_(zero)(state, r_); return; } // saving those because they can be overwritten when doing in-place operations ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz; ptrdiff_t max_nnz = t_nnz < s_nnz ? 
t_nnz : s_nnz; int64_t nDimI = src->nDimensionI; THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src); THCTensor *s_values_ = THCSTensor_(newValues)(state, src); THCIndexTensor *r_indices_ = THCIndexTensor_(newWithSize2d)(state, nDimI, max_nnz); THCTensor *r_values_ = THCSTensor_(newValuesWithSizeOf)(state, s_values_, max_nnz); THCTensor_(zero)(state, r_values_); THCSTensor_(resizeAs)(state, r_, src); THCSTensor_(_move)(state, r_, r_indices_, r_values_); int64_t valueSize = t_values_->stride[0]; const dim3 block = dim3(min((int64_t) getApplyBlock().x, valueSize)); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, valueSize, grid, curDevice), 1, CUTORCH_DIM_WARNING); hipLaunchKernelGGL(( THCSTensor_valueSparseIntersectionKernel<TensorMulOp<real>, uint64_t, real>) , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), TensorMulOp<real>(), I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_), (uint64_t)t_nnz, (uint64_t)s_nnz); THCudaCheck(hipGetLastError()); THCudaLongStorage *resultNnz = THCudaLongStorage_newWithSize(state, 1); hipLaunchKernelGGL(( THCSTensor_indexSparseIntersectionKernel<uint64_t, real>) , dim3(1), dim3(1), 0, THCState_getCurrentStreamOnDevice(state, curDevice), I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), (uint64_t)t_nnz, (uint64_t)s_nnz, (uint64_t*)THCudaLongStorage_data(state, resultNnz)); THCudaCheck(hipGetLastError()); r_->nnz = THCudaLongStorage_get(state, resultNnz, 0); THCudaLongStorage_free(state, resultNnz); r_->coalesced = 1; THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCIndexTensor_(free)(state, s_indices_); THCTensor_(free)(state, s_values_); THCSTensor_(free)(state, t); THCSTensor_(free)(state, src); } void THCSTensor_(pow)(THCState *state, THCSTensor *r_, THCSTensor *t_, real value) { if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(0))) { THError("cannot raise to zeroth power on sparse tensor"); } THCSTensor *t = THCSTensor_(newCoalesce)(state, t_); THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_); THCIndexTensor_(copy)(state, r_indices_, t_indices_); THCTensor_(pow)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCSTensor_(free)(state, t); } #if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE) || defined(THCS_REAL_IS_HALF) accreal THCSTensor_(normall)(THCState *state, THCSTensor *self, real value) { THCSTensor* self_coalesced = THCSTensor_(newCoalesce)(state, self); accreal result = THCTensor_(normall)(state, self_coalesced->values, value); THCSTensor_(free)(state, self_coalesced); return result; } #endif #undef ROW_PTR2 #undef COL_PTR2 #endif
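THCSTensor_(toCSR) at the top of this file turns coalesced COO row indices into CSR row pointers by calling THCudaSparse_Xcoo2csr. As a plain reference for what that conversion yields, here is a small host-side sketch of the same mapping (sorted row indices in, dim + 1 row pointers out); it is illustrative only and not the cuSPARSE implementation the file actually uses.

#include <cstdio>
#include <vector>

// Sorted COO row indices -> CSR row pointers: csr[r+1] - csr[r] is the number
// of stored entries in row r, and csr[dim] equals nnz.
static std::vector<int> coo2csr(const std::vector<int> &rows, int dim)
{
  std::vector<int> csr(dim + 1, 0);
  for (int r : rows) csr[r + 1]++;                     // count entries per row
  for (int r = 0; r < dim; r++) csr[r + 1] += csr[r];  // exclusive prefix sum
  return csr;
}

int main()
{
  // A 3-row sparse matrix with nnz = 5; row indices are already coalesced.
  std::vector<int> rows = {0, 0, 1, 2, 2};
  std::vector<int> csr = coo2csr(rows, 3);
  for (int v : csr) printf("%d ", v);                  // prints: 0 2 3 5
  printf("\n");
  return 0;
}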
6f25b1e3b2f6ef7387d6481b6a6897bd9184ffea.cu
#ifndef THCS_GENERIC_FILE #define THCS_GENERIC_FILE "generic/THCSTensorMath.cu" #else #include "THCThrustAllocator.cuh" #include "THCNumerics.cuh" #include <thrust/device_ptr.h> #include <thrust/sequence.h> #define ROW_PTR2(t, r) (THCTensor_(data)(THCState *state, t) + (r) * (t)->stride[0]) #define COL_PTR2(t, c) (THCTensor_(data)(THCState *state, t) + (c) * (t)->stride[1]) #define I_INFO(tensor) getTensorInfo<int64_t, THCIndexTensor, uint64_t>(state, tensor) #define V_INFO(tensor) getTensorInfo<real, THCTensor, uint64_t>(state, tensor) THCudaIntTensor *THCSTensor_(toCSR)(THCState *state, THCIndexTensor *rowIndices, int64_t dim, int64_t nnz) { THCudaIntTensor *csr = THCudaIntTensor_newWithSize1d(state, dim + 1); THCudaIntTensor *rowIndicesInt = THCudaIntTensor_newWithSize1d(state, rowIndices->size[0]); THCudaIntTensor_copyCudaLong(state, rowIndicesInt, rowIndices); THCudaSparse_Xcoo2csr( state, THCudaIntTensor_data(state, rowIndicesInt), nnz, dim, THCudaIntTensor_data(state, csr)); THCudaIntTensor_free(state, rowIndicesInt); return csr; } void THCSTensor_(zero)(THCState *state, THCSTensor *self) { if (self->indices->nDimension) { THCIndexTensor_(resizeNd)(state, self->indices, 0, NULL, NULL); } if (self->values->nDimension) { THCTensor_(resizeNd)(state, self->values, 0, NULL, NULL); } self->nnz = 0; } void THCSTensor_(zeros)(THCState *state, THCSTensor *r_, THLongStorage *size) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 1, r_)); THCSTensor_(resize)(state, r_, size); THCSTensor_(zero)(state, r_); } void THCSTensor_(zerosLike)(THCState *state, THCSTensor *r_, THCSTensor *input) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 2, r_, input)); THCSTensor_(resizeAs)(state, r_, input); THCSTensor_(zero)(state, r_); } void THCTensor_(spaddcmul)(THCState *state, THCTensor *r_, THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) { THError("WARNING: Sparse Cuda Tensor op spaddcmul is not implemented"); } void THCTensor_(spaddcdiv)(THCState *state, THCTensor *r_, THCTensor *t, real value, THCSTensor *src1, THCSTensor *src2) { THError("WARNING: Sparse Cuda Tensor op spaddcdiv is not implemented"); } void THCSTensor_(spaddmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCSTensor *sparse_, THCTensor *dense) { #if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE) THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 4, sparse_, r_, t, dense)); THCudaIntTensor *csr; THCIndexTensor *indices; THCTensor *values, *r__, *dense_; THArgCheck(sparse_->nDimensionI == 2, 2, "matrices expected, got %dD tensor", sparse_->nDimensionI); THArgCheck(sparse_->nDimensionV == 0, 2, "scalar values expected, got %dD values", sparse_->nDimensionV); THArgCheck(dense->nDimension == 2, 2, "matrices expected, got %dD tensor", dense->nDimension); int64_t m = THCSTensor_(size)(state, sparse_, 0); int64_t k = THCSTensor_(size)(state, sparse_, 1); int64_t n = THCTensor_(size)(state, dense, 1); THCTensor_(resize2d)(state, r_, m, n); THArgCheck(THCTensor_(size)(state, t, 0) == m, 1, "Expected dim 0 size %d, got %d", m, THCTensor_(size)(state, t, 0)); THArgCheck(THCTensor_(size)(state, t, 1) == n, 1, "Expected dim 1 size %d, got %d", n, THCTensor_(size)(state, t, 1)); THArgCheck(THCTensor_(size)(state, dense, 0) == k, 3, "Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0)); THCSTensor *sparse = THCSTensor_(newCoalesce)(state, sparse_); int64_t nnz = THCSTensor_(nnz)(state, sparse); indices = THCSTensor_(newIndices)(state, sparse); values = THCSTensor_(newValues)(state, 
sparse); THCIndexTensor *rowIndices = THCIndexTensor_(newSelect)(state, indices, 0, 0); THCIndexTensor *colIndices = THCIndexTensor_(newSelect)(state, indices, 0, 1); csr = THCSTensor_(toCSR)(state, rowIndices, m, nnz); THCudaIntTensor *colIndicesInt = THCudaIntTensor_newWithSize1d(state, colIndices->size[0]); THCudaIntTensor_copyCudaLong(state, colIndicesInt, colIndices); char transpose_dense; if (beta == 0) { THCTensor_(zero)(state, r_); } else if (beta == ScalarConvert<int, real>::to(1)) { if (t != r_) { THCTensor_(copy)(state, r_, t); } } else { THCTensor_(mul)(state, r_, t, beta); } /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] == r_->size[0]) { r__ = r_; THCTensor_(retain)(state, r__); } else { THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* dense */ if(dense->stride[0] == 1 && dense->stride[1] == dense->size[0]) { transpose_dense = 'n'; dense_ = dense; THCTensor_(retain)(state, dense_); } else if(dense->stride[1] == 1 && dense->stride[0] != dense->size[1]) { transpose_dense = 't'; dense_ = dense; THCTensor_(retain)(state, dense_); } else { transpose_dense = 't'; dense_ = THCTensor_(newContiguous)(state, dense); } #if defined(THCS_REAL_IS_FLOAT) THCudaSparse_Scsrmm2( #elif defined(THCS_REAL_IS_DOUBLE) THCudaSparse_Dcsrmm2( #endif state, 'n', transpose_dense, m, n, k, nnz, alpha, THCTensor_(data)(state, values), THCudaIntTensor_data(state, csr), THCudaIntTensor_data(state, colIndicesInt), THCTensor_(data)(state, dense_), (transpose_dense == 'n' ? dense_->stride[1] : dense_->stride[0]), beta, THCTensor_(data)(state, r__), r__->stride[1]); /* free intermediate variables */ THCTensor_(free)(state, dense_); THCTensor_(freeCopyTo)(state, r__, r_); THCudaIntTensor_free(state, colIndicesInt); THCudaIntTensor_free(state, csr); THCIndexTensor_(free)(state, indices); THCIndexTensor_(free)(state, rowIndices); THCIndexTensor_(free)(state, colIndices); THCTensor_(free)(state, values); THCSTensor_(free)(state, sparse); #else THError("unimplemented data type"); #endif } void THCSTensor_(sspaddmm)(THCState *state, THCSTensor *r_, real beta, THCSTensor *t, real alpha, THCSTensor *sparse, THCTensor *dense) { THError("WARNING: Sparse Cuda Tensor op sspaddmm is not implemented"); // TODO Write some kernels } void THCSTensor_(hspmm)(THCState *state, THCSTensor *r_, real alpha, THCSTensor *sparse_, THCTensor *dense) { #if CUDA_VERSION >= 7000 || defined(__HIP_PLATFORM_HCC__) THCThrustAllocator thrustAlloc(state); #define THRUST_EXEC(fn, ...) fn(thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), ##__VA_ARGS__) #else #define THRUST_EXEC(fn, ...) 
fn(##__VA_ARGS__) #endif THCAssertSameGPU(THCSTensor_(checkGPU)(state, 2, 3, r_, sparse_, dense)); THArgCheck(sparse_->nDimensionI == 2, 3, "matrices expected, got %dD tensor", sparse_->nDimensionI); THArgCheck(sparse_->nDimensionV == 0, 3, "scalar values expected, got %dD values", sparse_->nDimensionV); THArgCheck(dense->nDimension == 2, 4, "matrices expected, got %dD tensor", dense->nDimension); int64_t m = THCSTensor_(size)(state, sparse_, 0); int64_t k = THCSTensor_(size)(state, sparse_, 1); int64_t n = THCTensor_(size)(state, dense, 1); THArgCheck(THCTensor_(size)(state, dense, 0) == k, 4, "Expected dim 0 size %d, got %d", k, THCTensor_(size)(state, dense, 0)); int64_t size[2] = {m, n}; THCSTensor_(rawResize)(state, r_, 1, 1, size); THCSTensor *sparse = THCSTensor_(newCoalesce)(state, sparse_); int64_t nnz = THCSTensor_(nnz)(state, sparse); THCIndexTensor *indices = THCIndexTensor_(newWithSize2d)(state, 1, nnz); // create values in column-major format to avoid copying in spaddmm THCTensor *values = THCTensor_(newWithSize2d)(state, n, nnz); THCTensor_(transpose)(state, values, NULL, 0, 1); // why does sparse need to be cloned? If this is really necessary maybe we // need to fuse this with newCoalesce THCSTensor *newSparse = THCSTensor_(newClone)(state, sparse); THCIndexTensor *spIndices = THCSTensor_(newIndices)(state, newSparse); THCIndexTensor *dstIndices = THCIndexTensor_(newSelect)(state, spIndices, 0, 0); // Save destination indices to output hybrid tensor THCIndexTensor_(copy)(state, indices, dstIndices); // Replace destination indices with 0, 1, 2, 3, ... and compute output values // tensor with sparse * dense multiplication thrust::device_ptr<indexT> indicesIter(THCIndexTensor_(data)(state, dstIndices)); THRUST_EXEC(thrust::sequence, indicesIter, indicesIter + nnz); newSparse->size[0] = nnz; THCSTensor_(spaddmm)(state, values, ScalarConvert<int, real>::to(0), values, alpha, newSparse, dense); THCSTensor_(_move)(state, r_, indices, values); THCSTensor_(free)(state, newSparse); THCIndexTensor_(free)(state, spIndices); THCIndexTensor_(free)(state, dstIndices); THCSTensor_(free)(state, sparse); #undef THRUST_EXEC } void THCSTensor_(spcadd)(THCState *state, THCTensor *r_, THCTensor *dense, real value, THCSTensor *sparse) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 1, 3, sparse, r_, dense)); const ptrdiff_t nnz = THCSTensor_(nnz)(state, sparse); if (nnz == 0) { THCTensor_(resizeAs)(state, r_, dense); THCTensor_(copy)(state, r_, dense); return; } THCTensor *r = r_; if (r != dense) { THCTensor_(retain)(state, r); THCTensor_(resizeAs)(state, r, dense); THCTensor_(copy)(state, r, dense); } else { if (!THCTensor_(isContiguous)(state, r_)) { THError("CUDA sparse spcadd: known bug"); } r = THCTensor_(newContiguous)(state, r_); } THCIndexTensor *indices = THCSTensor_(newIndices)(state, sparse); THCTensor *values = THCSTensor_(newValues)(state, sparse); int64_t nDim = THCTensor_(nDimension)(state, dense); int64_t nDimI = THCSTensor_(nDimensionI)(state, sparse); if (THCSTensor_(isCoalesced)(state, sparse)) { // TODO benchmark to decide whether to remove this special case const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); if (sparse->nDimensionV == 0) { THArgCheck(getApplyGrid(state, nnz, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCSTensor_sparseElementwiseKernelScalar<TensorCAddOp<real>, uint64_t, real> <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( TensorCAddOp<real>(value), V_INFO(r_), I_INFO(indices), 
V_INFO(values), (uint64_t) nnz); } else { THArgCheck(getApplyGrid(state, nnz * block.x, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCSTensor_sparseElementwiseKernel<TensorCAddOp<real>, uint64_t, real> <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( TensorCAddOp<real>(value), V_INFO(r_), I_INFO(indices), V_INFO(values), (uint64_t) nnz); } } else { THCIndexTensor *indices1D = THCSTensor_(newFlattenedIndices)(state, sparse, 0); THCIndexTensor_(resize1d)(state, indices1D, nnz); if (value != ScalarConvert<int, real>::to(1)) { // FIXME: at some point we can wrap the scale into indexAdd THCTensor *scaled = THCTensor_(new)(state); THCTensor_(mul)(state, scaled, values, value); THCTensor_(free)(state, values); values = scaled; } int64_t view_rows = 1; int64_t view_columns = 1; THLongStorage *r_size = THCTensor_(newSizeOf)(state, r); for (int i = 0; i < nDimI; i++) view_rows *= THLongStorage_data(r_size)[i]; for (int i = nDimI; i < nDim; i++) view_columns *= THLongStorage_data(r_size)[i]; THLongStorage *r_view_size = THLongStorage_newWithSize2(view_rows, view_columns); THCTensor *r_view = THCTensor_(newView)(state, r, r_view_size); THCTensor_(resize2d)(state, values, nnz, view_columns); THCTensor_(indexAdd)(state, r_view, 0, indices1D, values); THCIndexTensor_(free)(state, indices1D); THLongStorage_free(r_size); THLongStorage_free(r_view_size); THCTensor_(free)(state, r_view); } THCudaCheck(cudaGetLastError()); THCIndexTensor_(free)(state, indices); THCTensor_(free)(state, values); THCTensor_(free)(state, r); } void THCSTensor_(mul)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) { if (r_ == t) { THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCTensor_(mul)(state, r_values_, r_values_, value); THCTensor_(free)(state, r_values_); } else { THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_); THCIndexTensor_(copy)(state, r_indices_, t_indices_); THCTensor_(mul)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); } } void THCSTensor_(div)(THCState *state, THCSTensor *r_, THCSTensor *t, real value) { if (r_ == t) { THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCTensor_(div)(state, r_values_, r_values_, value); THCTensor_(free)(state, r_values_); } else { THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_); THCIndexTensor_(copy)(state, r_indices_, t_indices_); THCTensor_(div)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); } } int THCSTensor_(isSameSizeIgnoringDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) { int d; if (self->nDimensionI + self->nDimensionV != src->nDimensionI + 
src->nDimensionV) { return 0; } for(d = 0; d < self->nDimensionI + self->nDimensionV; ++d) { if(self->size[d] != src->size[d]) { return 0; } } return 1; } int THCSTensor_(isSameDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) { return self->nDimensionI == src->nDimensionI && self->nDimensionV == src->nDimensionV; } void THCSTensor_(cadd)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t, src)); if (!THCSTensor_(isSameSizeIgnoringDensity)(state, t, src)) { THError("cadd operands have incompatible sizes"); } if (src->nnz == 0) { THCSTensor_(copy)(state, r_, t); return; } if (t->nnz == 0) { THCSTensor_(mul)(state, r_, src, value); return; } if(!THCSTensor_(isSameDensity)(state, t, src)) { THError("cadd operands have incompatible densities"); } // We deliberately choose to simply concat the indices and values tensors // rather than merging them. This removes the need to synchronously fetch nnz // at the end of the operation, at the cost of having a non-coalesced result. // This trade-off is preferable for the common use-case of gradient accumulation. THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src); THCTensor *s_values_ = THCSTensor_(newValues)(state, src); if (value != ScalarConvert<int, real>::to(1)) { THCTensor *s_values_orig = s_values_; s_values_ = THCTensor_(new)(state); THCTensor_(mul)(state, s_values_, s_values_orig, value); THCTensor_(free)(state, s_values_orig); } THCIndexTensor *r_indices_ = THCIndexTensor_(new)(state); THCTensor *r_values_ = THCTensor_(new)(state); THCIndexTensor_(cat)(state, r_indices_, t_indices_, s_indices_, 1); THCTensor_(cat)(state, r_values_, t_values_, s_values_, 0); THCSTensor_(resizeAs)(state, r_, src); THCSTensor_(_move)(state, r_, r_indices_, r_values_); // FIXME: add some heuristic about when to call coalesce() here, so that // tensors don't totally blow up in size by concatenation; e.g. // r->minUnique = max(a->minUnique + b->minUnique); // if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) { // THCSTensor_(contiguous)(r); // r->minUnique = r->nnz; // } THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCIndexTensor_(free)(state, s_indices_); THCTensor_(free)(state, s_values_); } void THCSTensor_(csub)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) { THCSTensor_(cadd)(state, r_, t, ScalarNegate<real>::to(value), src); } void THCSTensor_(cmul)(THCState *state, THCSTensor *r_, THCSTensor *t_, THCSTensor *src_) { THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t_, src_)); if(!THCSTensor_(isSameSizeAs)(state, t_, src_)) { THError("cmul operands have incompatible sizes or dimension types"); } THCSTensor *t = THCSTensor_(newCoalesce)(state, t_); THCSTensor *src = THCSTensor_(newCoalesce)(state, src_); if (t->nnz == 0 || src->nnz == 0) { THCSTensor_(zero)(state, r_); return; } // saving those because they can be overwritten when doing in-place operations ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz; ptrdiff_t max_nnz = t_nnz < s_nnz ? 
t_nnz : s_nnz; int64_t nDimI = src->nDimensionI; THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor *s_indices_ = THCSTensor_(newIndices)(state, src); THCTensor *s_values_ = THCSTensor_(newValues)(state, src); THCIndexTensor *r_indices_ = THCIndexTensor_(newWithSize2d)(state, nDimI, max_nnz); THCTensor *r_values_ = THCSTensor_(newValuesWithSizeOf)(state, s_values_, max_nnz); THCTensor_(zero)(state, r_values_); THCSTensor_(resizeAs)(state, r_, src); THCSTensor_(_move)(state, r_, r_indices_, r_values_); int64_t valueSize = t_values_->stride[0]; const dim3 block = dim3(min((int64_t) getApplyBlock().x, valueSize)); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, valueSize, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCSTensor_valueSparseIntersectionKernel<TensorMulOp<real>, uint64_t, real> <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( TensorMulOp<real>(), I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_), (uint64_t)t_nnz, (uint64_t)s_nnz); THCudaCheck(cudaGetLastError()); THCudaLongStorage *resultNnz = THCudaLongStorage_newWithSize(state, 1); THCSTensor_indexSparseIntersectionKernel<uint64_t, real> <<<1, 1, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), (uint64_t)t_nnz, (uint64_t)s_nnz, (uint64_t*)THCudaLongStorage_data(state, resultNnz)); THCudaCheck(cudaGetLastError()); r_->nnz = THCudaLongStorage_get(state, resultNnz, 0); THCudaLongStorage_free(state, resultNnz); r_->coalesced = 1; THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCIndexTensor_(free)(state, s_indices_); THCTensor_(free)(state, s_values_); THCSTensor_(free)(state, t); THCSTensor_(free)(state, src); } void THCSTensor_(pow)(THCState *state, THCSTensor *r_, THCSTensor *t_, real value) { if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(0))) { THError("cannot raise to zeroth power on sparse tensor"); } THCSTensor *t = THCSTensor_(newCoalesce)(state, t_); THCSTensor_(resizeAs)(state, r_, t); THCIndexTensor *r_indices_ = THCSTensor_(newIndices)(state, r_); THCTensor *r_values_ = THCSTensor_(newValues)(state, r_); THCIndexTensor *t_indices_ = THCSTensor_(newIndices)(state, t); THCTensor *t_values_ = THCSTensor_(newValues)(state, t); THCIndexTensor_(resizeAs)(state, r_indices_, t_indices_); THCIndexTensor_(copy)(state, r_indices_, t_indices_); THCTensor_(pow)(state, r_values_, t_values_, value); r_->nnz = t->nnz; r_->coalesced = t->coalesced; THCIndexTensor_(free)(state, r_indices_); THCTensor_(free)(state, r_values_); THCIndexTensor_(free)(state, t_indices_); THCTensor_(free)(state, t_values_); THCSTensor_(free)(state, t); } #if defined(THCS_REAL_IS_FLOAT) || defined(THCS_REAL_IS_DOUBLE) || defined(THCS_REAL_IS_HALF) accreal THCSTensor_(normall)(THCState *state, THCSTensor *self, real value) { THCSTensor* self_coalesced = THCSTensor_(newCoalesce)(state, self); accreal result = THCTensor_(normall)(state, self_coalesced->values, value); THCSTensor_(free)(state, self_coalesced); return result; } #endif #undef ROW_PTR2 #undef COL_PTR2 #endif
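The THRUST_EXEC macro used by hspmm in this pair exists to run Thrust algorithms on the current stream with a THC allocator, via thrust::hip::par in the HIP file and thrust::cuda::par in the CUDA file. The snippet below is a minimal sketch of just the policy-on-stream part, without the custom allocator; it is an illustration, not the THC code path.

#include <cstdio>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/system/cuda/execution_policy.h>

int main()
{
  cudaStream_t stream;
  cudaStreamCreate(&stream);

  thrust::device_vector<long long> idx(8);

  // Same idea as the destination-index rewrite in hspmm: fill 0, 1, 2, ... on a
  // chosen stream by passing an execution policy bound to that stream.
  thrust::sequence(thrust::cuda::par.on(stream), idx.begin(), idx.end());

  cudaStreamSynchronize(stream);
  printf("idx[3] = %lld\n", (long long)idx[3]);  // prints 3

  cudaStreamDestroy(stream);
  return 0;
}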
724d5198df134f6072cd3b1db8128179d3a95809.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <stdio.h>

// Grid-stride add: each thread starts at its global index and advances by the
// total number of launched threads, so a multi-block launch covers every
// element exactly once. (The original threadIdx-only indexing made both
// launched blocks read and write the same elements, a data race.)
__global__ void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

void FillWithData(int n, float* x, float* y)
{
  for (int i = 0; i < n; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
}

int main(void)
{
  int N = 1 << 20;
  float *x, *y;
  float *d_x, *d_y;
  int size = N * sizeof(float);

  x = (float*) malloc(size);
  y = (float*) malloc(size);
  FillWithData(N, x, y);

  hipMalloc(&d_x, size);
  hipMalloc(&d_y, size);
  hipMemcpy(d_x, x, size, hipMemcpyHostToDevice);
  hipMemcpy(d_y, y, size, hipMemcpyHostToDevice);

  hipLaunchKernelGGL(add, dim3(2), dim3(256), 0, 0, N, d_x, d_y);

  hipMemcpy(x, d_x, size, hipMemcpyDeviceToHost);
  hipMemcpy(y, d_y, size, hipMemcpyDeviceToHost);

  // Print a sample of the result; every y[i] should be 3.0.
  int sample_rate = N / 10;
  for (int i = 0; i < N; i = i + sample_rate) {
    printf("Value y[%d] = %f\n", i, y[i]);
  }

  // Free memory
  free(x);
  free(y);
  hipFree(d_x);
  hipFree(d_y);
  return 0;
}
724d5198df134f6072cd3b1db8128179d3a95809.cu
#include <iostream>
#include <math.h>
#include <stdio.h>

// Grid-stride add: each thread starts at its global index and advances by the
// total number of launched threads, so a multi-block launch covers every
// element exactly once. (The original threadIdx-only indexing made both
// launched blocks read and write the same elements, a data race.)
__global__ void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

void FillWithData(int n, float* x, float* y)
{
  for (int i = 0; i < n; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
}

int main(void)
{
  int N = 1 << 20;
  float *x, *y;
  float *d_x, *d_y;
  int size = N * sizeof(float);

  x = (float*) malloc(size);
  y = (float*) malloc(size);
  FillWithData(N, x, y);

  cudaMalloc(&d_x, size);
  cudaMalloc(&d_y, size);
  cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);

  add<<<2, 256>>>(N, d_x, d_y);

  cudaMemcpy(x, d_x, size, cudaMemcpyDeviceToHost);
  cudaMemcpy(y, d_y, size, cudaMemcpyDeviceToHost);

  // Print a sample of the result; every y[i] should be 3.0.
  int sample_rate = N / 10;
  for (int i = 0; i < N; i = i + sample_rate) {
    printf("Value y[%d] = %f\n", i, y[i]);
  }

  // Free memory
  free(x);
  free(y);
  cudaFree(d_x);
  cudaFree(d_y);
  return 0;
}
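Both versions of this sample hard-code a <<<2, 256>>> launch. Since the kernel uses a grid-stride loop, the block count can instead be derived from N; the fragment below is a hedged sketch of that, meant as a drop-in for the launch line in main() above (the error-checking style is an addition, not part of the original sample).

```cpp
// Sketch: size the grid from N instead of hard-coding it, then check the launch.
// Assumes N, d_x and d_y from main() above are in scope.
int threads = 256;
int blocks  = (N + threads - 1) / threads;       // ceil(N / threads)
add<<<blocks, threads>>>(N, d_x, d_y);

cudaError_t err = cudaGetLastError();            // catches launch-configuration errors
if (err != cudaSuccess)
  fprintf(stderr, "add launch failed: %s\n", cudaGetErrorString(err));

err = cudaDeviceSynchronize();                   // catches errors raised during execution
if (err != cudaSuccess)
  fprintf(stderr, "add kernel failed: %s\n", cudaGetErrorString(err));
```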
0336d18317d88ddc358653b6653988646c449e56.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "TRCudaV2.cuh" #include "EigenUtility.h" TRCudaV2::TRCudaV2() { } TRCudaV2::~TRCudaV2() { // SaveDelete(VolumeData); SaveDelete(PointType); SaveDelete(PointType_1D); } ////////////////////////////////////////////////////////////////////////// // GPU ////////////////////////////////////////////////////////////////////////// __device__ static float Z1Function(float x1) { // Function XD // https://i.imgur.com/QS3bczf.png return -126.4517 + 0.4005123 * x1 - 0.000011981 * pow(x1 - 2122.41, 2) - 0.000000011664 * pow(x1 - 2122.41, 3) + 0.000000000001432 * pow(x1 - 2122.41, 4) - 0.0000000000000008164 * pow(x1 - 2122.41, 5) + 5.939E-20 * pow(x1 - 2122.41, 6); } __global__ static void RawDataToOriginalData(char* FileRawData, int* OCTRawData, int OCTDataSize) { // 1 Byte 2 Bytes int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // if (id >= OCTDataSize) { printf(" Raw Data Error!\n"); return; } OCTRawData[id] = (int)((uchar)FileRawData[id * 2]) + (int)((uchar)FileRawData[id * 2 + 1]) * 256; } __global__ static void CombineTwoChannels_Single(int* OCTData_2Channls, int* OCTData, int SizeX, int SizeY, int SizeZ) { // Denoise Channel int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // if (id >= SizeX * SizeY * SizeZ) { printf("Combine Two Channel Error!\n"); return; } int BoxSize = SizeX * SizeZ; // int BoxIndex = id / BoxSize; int BoxLeft = id % BoxSize; OCTData[id] = (OCTData_2Channls[BoxIndex * 2 * BoxSize + BoxLeft] + OCTData_2Channls[(BoxIndex * 2 + 1) * BoxSize + BoxLeft]) / 2; } __global__ static void CombineTwoChannels_Multi(int* OCTData_2Channls, int* OCTData, int SizeX, int SizeY, int SizeZ) { // Denoise Channel int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // if (id >= SizeX * SizeY * SizeZ) { printf("Combine Two Channel Error!\n"); return; } int BoxSize = SizeX * SizeZ * 2; // Channel + int BoxIndex = id / BoxSize; int BoxLeft = id % BoxSize; OCTData[id] = (OCTData_2Channls[BoxIndex * 2 * BoxSize + BoxLeft] + OCTData_2Channls[(BoxIndex * 2 + 1) * BoxSize + BoxLeft]) / 2; } __global__ static void ReverseBackScanData(int* OCTData, int SizeX, int SizeY, int SizeZ) { // int id = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9 blockIdx.x * gridDim.z * blockDim.x + // X => X * (125 * 2) * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; int changeID = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9 (gridDim.y * 2 - blockIdx.x - 1) * gridDim.z * blockDim.x + // X => (250 - X - 1) * (125 * 2) * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; int value = OCTData[id]; OCTData[id] = OCTData[changeID]; OCTData[changeID] = value; } __global__ static void GetMatrixA(int* OCTData, float* MatrixA, int NumPolynomial, int OneDataSize) { // Function MatrixA int id = blockIdx.x * blockDim.x + threadIdx.x; // () if (id >= (NumPolynomial + 1) * 
(NumPolynomial + 1)) { printf(" Fitting !\n"); return; } // Index int rowIndex = id % (NumPolynomial + 1); int colsIndex = id / (NumPolynomial + 1); // float value = 0; for (int i = 0; i < OneDataSize; i++) { // float FirstValue = (float)i / OneDataSize; float SecondValue = (float)i / OneDataSize; value += pow(FirstValue, NumPolynomial - rowIndex) * pow(SecondValue, NumPolynomial - colsIndex); } MatrixA[id] = value; } __global__ static void GetMatrixB(int* OCTData, float* MatrixB, float YAverage, int NumPolynomial, int OneDataSize) { int id = blockIdx.x * blockDim.x + threadIdx.x; // Index int rowIndex = id % (NumPolynomial + 1); int colsIndex = id / (NumPolynomial + 1); // float value = 0; for (int i = 0; i < OneDataSize; i++) { // float FirstValue = (float)i / OneDataSize; float SecondValue = OCTData[i] - YAverage; value += pow(FirstValue, NumPolynomial - rowIndex) * SecondValue; } MatrixB[id] = value; } __global__ static void MinusByFittingFunction(int* OCTData, float* PolyValue, int SizeZ) { // Fitting Data int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // Z int idZ = id % SizeZ; // OCTData[id] -= PolyValue[idZ]; } __global__ static void ComputePXScale(float* PXScale, int OffsetBegin, int ShiftValue, int Steps, int Size) { // PXScale Array(@@) int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= Size) { printf("ComputePXScale !\n"); return; } // int idOffset = OffsetBegin + ShiftValue; PXScale[id] = (Z1Function(idOffset + id) - Z1Function(idOffset)) * Steps; } __global__ static void FrequencyAdjust(int* OCTData, float* KSpaceData, float* PXScale, int* IndexArray, int CutIndex, int SizeX, int SizeY, int SizeZ) { // Denoise Channel int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= SizeX * SizeY * SizeZ) { printf("Frequency "); return; } // Index int idZ = id % SizeZ; if (IndexArray[idZ] == -1 || idZ >= CutIndex || idZ == 0) { KSpaceData[id] = 0; return; } // int LastPXScaleIndex = (IndexArray[idZ] - 1 <= 0 ? 0 : IndexArray[idZ] - 1); double m = (double)(OCTData[id] - OCTData[id - 1]) / (PXScale[IndexArray[idZ]] - PXScale[LastPXScaleIndex]); double c = OCTData[id] - m * PXScale[IndexArray[idZ]]; KSpaceData[id] = m * idZ + c; } __global__ static void DataToComplexData(float* KSpaceData, hipfftComplex* FFTData, int OCTDataSize) { // KSpace Data FFT int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= OCTDataSize) { printf(" Complex Data !!\n"); return; } // Complex Data FFTData[id].x = KSpaceData[id]; FFTData[id].y = 0; } __global__ static void ComplexDataToData(hipfftComplex* FFTData, float* OCTFloatData, int SizeX, int SizeY, int SizeZ, int OCTDataSize) { // FFT int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (1 * 1024) blockIdx.z * blockDim.x + // Z => (0 * 1024 + Z2) threadIdx.x; if (id >= OCTDataSize / 2) { printf("Complex To Data !!\n"); return; } // 0 ( 1024) Youtube (!!) 
// 2 // () // ( Size / 2 - 1 => 1023) 1022 /*int idZ = id % (SizeZ / 2); idZ = SizeZ / 2 - idZ - 1; if (idZ == SizeZ / 2 - 1) idZ--;*/ int idZ = id % (SizeZ / 2); if (idZ == 0) idZ++; // int tempIndex = id / (SizeZ / 2); int idX = tempIndex % SizeX; int idY = tempIndex / SizeX; int NewIndex = idY * SizeX * SizeZ + idX * SizeZ + idZ; float temp = sqrt(FFTData[NewIndex].x * FFTData[NewIndex].x + FFTData[NewIndex].y * FFTData[NewIndex].y); // if (temp == 0) OCTFloatData[id] = 0; else OCTFloatData[id] = log10f(temp) * 10; } __global__ static void ShiftFinalData(float* AfterFFTData, float* ShiftData, int SizeX, int SizeY, int FinalSizeZ, int FinalDataSize) { // // // => | -> // ("->" "=>" ) int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= FinalDataSize) { printf("Shift Data !!\n"); return; } // int idZ = id % FinalSizeZ; int tempIndex = id / FinalSizeZ; int idX = tempIndex % SizeX; int idY = tempIndex / SizeX; // SizeY // (0 ~ 124 125 ~ 249) // // (125 ~ 249 0 ~ 124) idY = (idY + SizeY / 2) % SizeY; int NewIndex = idY * SizeX * FinalSizeZ + idX * FinalSizeZ + idZ; ShiftData[id] = AfterFFTData[NewIndex]; //ShiftData[id] = AfterFFTData[id]; } __global__ static void NormalizeData(float* ShiftData, float MaxValue, float MinValue, int FinalDataSize) { // Normalize int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // if (id >= FinalDataSize) { printf("Normaliza Data \n"); return; } if (ShiftData[id] < MinValue) ShiftData[id] = 0; else if (ShiftData[id] > MaxValue) ShiftData[id] = 1; else ShiftData[id] = (ShiftData[id] - MinValue) / (MaxValue - MinValue); } // & (Smooth Data) __device__ static float SmoothDataByIndex(float* VolumeData, int id, int FinalSizeZ, int SmoothSizeRange) { int idZ = id % FinalSizeZ; int SmoothRadius = (SmoothSizeRange - 1) / 2; // Smooth int MinValue = min(SmoothRadius, idZ - 0); int MaxValue = min(SmoothRadius, FinalSizeZ - idZ - 1); float TempTotal = 0; // for (int i = -MinValue; i <= MaxValue; i++) TempTotal += VolumeData[id + i]; TempTotal /= (MaxValue + MinValue + 1); return TempTotal; } __global__ static void TransformToImageAndBorderData(float* VolumeData_Normalized, float* SmoothData, uchar* ImageArray, int SizeX, int SizeY, int FinalSizeZ, int SmoothSizeRange) { // int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * 1 * 1024 blockIdx.x * gridDim.z * blockDim.x + // X => X * 1 * 1024 blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= SizeX * SizeY * FinalSizeZ) // return; // Border Detect SmoothData[id] = SmoothDataByIndex(VolumeData_Normalized, id, FinalSizeZ, SmoothSizeRange); // 1.3 float data = VolumeData_Normalized[id] * 255 * 1.3f; if (data >= 255) ImageArray[id] = 255; else if (data <= 0) ImageArray[id] = 0; else ImageArray[id] = (uchar)data; } // __global__ static void ZCalcBrightness(float* DataArray, float* BrightArray, int size, int rows, int cols, int startIndex) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // return; // Index int sizeIndex = id / rows; int rowIndex = id % rows; BrightArray[id] = 0; for (int i = startIndex; i < cols; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; BrightArray[id] += DataArray[currentID]; } } 
__global__ static void findMaxAndMinPeak(float* DataArray, float* BrightnessArray, uchar* PointType, int size, int rows, int cols, float MaxPeakThreshold, float SatPeakThreshold) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= rows * cols * size) // return; // width 1 ~ (width - 1) int colID = id % cols; if (1 >= colID || colID == (cols - 1)) return; // int tempIndex = id / cols; if (BrightnessArray[tempIndex] > SatPeakThreshold) return; // // ( ) float DiffLeft = DataArray[id] - DataArray[id - 1]; float DiffRight = DataArray[id] - DataArray[id + 1]; if (DiffLeft > 0 && DiffRight > 0 && DataArray[id] > MaxPeakThreshold) PointType[id] = 1; else if (DiffLeft < 0 && DiffRight < 0) PointType[id] = 2; } __global__ static void ParseMaxMinPeak(uchar* PointType, int size, int rows, int cols, int startIndex) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // return; // Index int sizeIndex = id / rows; int rowIndex = id % rows; // Skip Min int lastMinID = -1; bool FindMax = false; // () int Useful_Start = -1; int Useful_End = -1; int Useful_PeakCount = -1, tempPeakCount = 0; // min Peak for (int i = 0; i < startIndex; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; PointType[currentID] = 0; } for (int i = startIndex; i < cols; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; if (lastMinID == -1) // or Max { // Min if (PointType[currentID] == 2) lastMinID = i; else if (PointType[currentID] == 1) PointType[currentID] = 0; // } else { // min min if (PointType[currentID] == 1) { // Max FindMax = true; tempPeakCount++; } else if (FindMax && PointType[currentID] == 2) { // Max Min if (Useful_PeakCount < tempPeakCount) { Useful_PeakCount = tempPeakCount; Useful_Start = lastMinID; Useful_End = i; } FindMax = false; tempPeakCount = 0; lastMinID = -1; } else if (!FindMax && PointType[currentID] == 2) { // Max Min PointType[sizeIndex * rows * cols + rowIndex * cols + lastMinID] = 0; lastMinID = i; } } } // Min if (lastMinID != -1) PointType[sizeIndex * rows * cols + rowIndex * cols + lastMinID] = 0; } __device__ static void InsertBestNChoice(float* CandidateGap, int* PointType_BestN, int offsetIndex, int bestNoffsetIndex, int CurrentIndex, int ChooseBestN) { bool IsInsert = false; for (int i = 0; i < ChooseBestN && !IsInsert; i++) { // 0 if (PointType_BestN[bestNoffsetIndex + i] > 0) { // int preIndex = PointType_BestN[bestNoffsetIndex + i]; if (CandidateGap[offsetIndex + preIndex] >= CandidateGap[offsetIndex + CurrentIndex]) // continue; else if (CandidateGap[offsetIndex + preIndex] < CandidateGap[offsetIndex + CurrentIndex]) // { for (int j = ChooseBestN - 1; j > i; j--) PointType_BestN[bestNoffsetIndex + j] = PointType_BestN[bestNoffsetIndex + j - 1]; PointType_BestN[bestNoffsetIndex + i] = CurrentIndex; IsInsert = true; } } else { PointType_BestN[bestNoffsetIndex + i] = CurrentIndex; break; } } } __global__ static void PickBestChoiceToArray(float* DataArray, uchar* PointType, float* CandidateGap, int* PointType_BestN, int size, int rows, int cols, int ChooseBestN, int startIndex, float Threshold) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // return; // Index int sizeIndex = id / rows; int rowIndex = id % rows; bool IsFindMin = false; // float MinData; int offsetIndex = sizeIndex * rows * cols + rowIndex * cols; int bestNoffsetIndex = sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN; float lastData = -1; for (int i = startIndex; i < cols; i++) { // if (PointType[i + offsetIndex] == 2) { // if 
(IsFindMin) lastData = -1; IsFindMin = true; MinData = DataArray[i + offsetIndex]; } else if ( IsFindMin && // PointType[i + offsetIndex] == 1 && DataArray[i + offsetIndex] - MinData > Threshold // Threshold ) { lastData = DataArray[i + offsetIndex] - MinData; // PointType_BestN CandidateGap[offsetIndex + i] = lastData; InsertBestNChoice(CandidateGap, PointType_BestN, offsetIndex, bestNoffsetIndex, i, ChooseBestN); } } // 0 for (int i = 0; i < ChooseBestN; i++) if (PointType_BestN[bestNoffsetIndex + i] == 0) PointType_BestN[bestNoffsetIndex + i] = -1; } __global__ static void CalcNeighbor(int* PointType_BestN, float* NeighborCountArray, int size, int rows, int cols, int ChooseBestN, int Radius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // return; // Index int sizeIndex = id / rows; int rowIndex = id % rows; // index int chooseIndex = sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN; for (int i = 0; i < ChooseBestN; i++) { // int totalPixelCount = 0; float avgPixel = 0; int BestN = PointType_BestN[chooseIndex + i]; if (BestN == -1) { NeighborCountArray[chooseIndex + i] == 0; continue; } // for (int y = -Radius; y <= Radius; y++) for (int x = -Radius; x <= Radius; x++) for (int n = 0; n < ChooseBestN; n++) { int currentSizeIndex = sizeIndex + y; int currentRowIndex = rowIndex + x; if (0 <= currentSizeIndex && currentSizeIndex < size && 0 <= currentRowIndex && currentRowIndex < rows) { totalPixelCount++; int CurrentBestNIndex = currentSizeIndex * rows * ChooseBestN + currentRowIndex * ChooseBestN + n; int CurrentBestN = PointType_BestN[CurrentBestNIndex]; // Return if (CurrentBestN == -1) continue; if (abs(CurrentBestN - BestN) <= Radius) avgPixel++; } } // NeighborCountArray[chooseIndex + i] = avgPixel / totalPixelCount; } // int maxIndex = (thrust::max_element(thrust::device, NeighborCountArray + chooseIndex, NeighborCountArray + chooseIndex + ChooseBestN) - (NeighborCountArray + chooseIndex)); PointType_BestN[chooseIndex] = PointType_BestN[chooseIndex + maxIndex]; for (int i = 1; i < ChooseBestN; i++) PointType_BestN[i] = -1; } __global__ static void ConnectPointsStatus(int* PointType_BestN, int* ConnectStatus, int size, int rows, int ChooseBestN, int ConnectRadius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows * ChooseBestN) // return; // Index int sizeIndex = id / (rows * ChooseBestN); int tempID = id % (rows * ChooseBestN); int rowIndex = tempID / ChooseBestN; int chooseIndex = tempID % ChooseBestN; // if (PointType_BestN[sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN + chooseIndex] == -1) return; // int finalPos = min(rowIndex + ConnectRadius, rows); // for (int i = rowIndex + 1; i < finalPos; i++) { for (int j = 0; j < ChooseBestN; j++) { // ( i row ) // 1 if (PointType_BestN[sizeIndex * rows * ChooseBestN + i * ChooseBestN + j] != -1) { // // int diffX = PointType_BestN[sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN + chooseIndex] - PointType_BestN[sizeIndex * rows * ChooseBestN + i * ChooseBestN + j]; int diffY = i - rowIndex; int Radius = diffX * diffX + diffY * diffY; // 0 if (Radius < ConnectRadius * ConnectRadius) { // + Row + Top N + + Top N int index = sizeIndex * rows * ChooseBestN * ConnectRadius * ChooseBestN + // rowIndex * ChooseBestN * ConnectRadius * ChooseBestN + // Row chooseIndex * ConnectRadius * ChooseBestN + // Top N (i - rowIndex) * ChooseBestN + // j; ConnectStatus[index] = Radius; } } } } } // Multi TopView __global__ static void GetOtherSideView(float* Data, float* OtherSideData, 
int SizeX, int SizeY, int FinalSizeZ) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= SizeX * SizeY) { printf("!!\n"); return; } // id int idX = id / SizeY; int idY = id % SizeY; int DataOffsetIndex = idX * SizeY * FinalSizeZ + idY * FinalSizeZ; // SizeZ float totalZ = 0; for (int i = 0; i < FinalSizeZ; i++) totalZ += Data[DataOffsetIndex + i]; // // rows => (SizeY) // cols => SizeX int offsetIndex = idY * SizeX + idX; OtherSideData[offsetIndex] = totalZ; } __global__ static void TransformOtherSideDataToImage(float* OtherSideData, uchar* UintOtherSideData, float Mean, float FixMean, int SizeX, int SizeY) { int id = blockDim.x * blockIdx.x + threadIdx.x; if (id >= SizeX * SizeY) // return; // Mean float ScaleFactor = FixMean / Mean / 255; float data = OtherSideData[id] * 255 * ScaleFactor; if (data >= 255) UintOtherSideData[id] = 255; else if (data <= 0) UintOtherSideData[id] = 0; else UintOtherSideData[id] = (uchar)data; } ////////////////////////////////////////////////////////////////////////// // CPU ////////////////////////////////////////////////////////////////////////// // Function void TRCudaV2::SingleRawDataToPointCloud(char* FileRawData, int DataSize, int SizeX, int SizeZ, long ShiftValue, double K_Step, int CutValue) { // #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock(); #endif ////////////////////////////////////////////////////////////////////////// // // 1. GPU Data // 2. ( 2 Bytes QT GPU 2 Bytes) // Channels2 ( 2) // 3. 5 Fit // 4. Space K Space // 5. cuFFT // () // 7. Normalize // 8. // 9. // 10. GPU Data // // // 1. Function => X Y // 2. ShiftValue => TRIGGER DELAY(FIBER) // 3. K_Step => (14.mm 2.5k step2)(k stepz1~2.5) // 4. CutValue => OCTzlaser ( cuteValue XD) // 5. 2 Channel 2048 x 250 x 2 x 2 x 2 // () x () x (()) x Channel x 2 Byte ////////////////////////////////////////////////////////////////////////// #pragma region 1. GPU Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME clock_t time = clock(); #endif // GPU Data char* GPU_FileRawData; // => Raw Data int *GPU_OCTRawData_2Channel; // => OCT Raw Data (2Channels"" Channel ) int *GPU_OCTRawData; // => Denoise Data ( CH1 + CH2 ) ("" Channel) float *GPU_OCTFloatData; // => K Space FFT // !! () 2 DataSize /= 2; // 2 Channels bool UseTwoChannels = (DataSize / SizeX / SizeZ == 4); // 2 Byte & 2 Channles // hipMalloc(&GPU_FileRawData, sizeof(char) * DataSize); // Copy () hipMemcpy(GPU_FileRawData, FileRawData, sizeof(char) * DataSize / 2, hipMemcpyHostToDevice); hipMemcpy(GPU_FileRawData + DataSize / 2, FileRawData + DataSize, sizeof(char) * DataSize / 2, hipMemcpyHostToDevice); CheckCudaError(); // 2 Chanels int OCTDataSize = SizeX * SizeZ; if (UseTwoChannels) hipMalloc(&GPU_OCTRawData_2Channel, sizeof(int) * OCTDataSize * 2); hipMalloc(&GPU_OCTRawData, sizeof(int) * OCTDataSize); hipMalloc(&GPU_OCTFloatData, sizeof(float) * OCTDataSize); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "1. GPU: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 2. 
////////////////////////////////////////////////////////////////////////// // // ( 0 ~ 250) // // channel ////////////////////////////////////////////////////////////////////////// // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 2 Byte if (UseTwoChannels) { RawDataToOriginalData << < dim3(SizeX, 1, SizeZ / NumThreads * 2), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData_2Channel, DataSize / 2); CheckCudaError(); // Channel Denoise CombineTwoChannels_Single << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData_2Channel, GPU_OCTRawData, SizeX, 1, SizeZ); // hipFree(GPU_OCTRawData_2Channel); } else RawDataToOriginalData << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData, DataSize / 2); CheckCudaError(); // FileRaw Data hipFree(GPU_FileRawData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "2. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 3. Fitting // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // Matrix float* GPU_MatrixA; float* GPU_MatrixB; hipMalloc(&GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1)); hipMalloc(&GPU_MatrixB, sizeof(float) * (NumPolynomial + 1)); // int* FirstSizeZData = new int[SizeZ]; memset(FirstSizeZData, 0, sizeof(int) * SizeZ); hipMemcpy(FirstSizeZData, GPU_OCTRawData, sizeof(int) * SizeZ, hipMemcpyDeviceToHost); hipDeviceSynchronize(); float average = accumulate(FirstSizeZData, FirstSizeZData + SizeZ, 0.0f) / SizeZ; delete[] FirstSizeZData; // Matrix GetMatrixA << <1, (NumPolynomial + 1) * (NumPolynomial + 1) >> > (GPU_OCTRawData, GPU_MatrixA, NumPolynomial, SizeZ); GetMatrixB << <1, NumPolynomial + 1 >> > (GPU_OCTRawData, GPU_MatrixB, average, NumPolynomial, SizeZ); CheckCudaError(); float* MatrixA = new float[(NumPolynomial + 1) *(NumPolynomial + 1)]; float* MatrixB = new float[(NumPolynomial + 1)]; hipMemcpy(MatrixA, GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1), hipMemcpyDeviceToHost); hipMemcpy(MatrixB, GPU_MatrixB, sizeof(float) * (NumPolynomial + 1), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // Eigen Fitting Function EigenUtility eigen; eigen.SetAverageValue(average); eigen.SolveByEigen(MatrixA, MatrixB, NumPolynomial); // Function float* GPU_PolyValue; float* PolyValue = eigen.GetFunctionArray(SizeZ, average); hipMalloc(&GPU_PolyValue, sizeof(float) * SizeZ); hipMemcpy(GPU_PolyValue, PolyValue, sizeof(float) * SizeZ, hipMemcpyHostToDevice); MinusByFittingFunction << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_PolyValue, SizeZ); CheckCudaError(); // hipFree(GPU_MatrixA); hipFree(GPU_MatrixB); hipFree(GPU_PolyValue); delete[] MatrixA; delete[] MatrixB; delete[] PolyValue; // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "3. Fitting : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 4. 
Space K Space // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // float* PX_Scale = new float[SizeZ]; int* KSpaceIndexArray = new int[SizeZ]; float* GPU_PXScale; int* GPU_KSpaceIndexArray; hipMalloc(&GPU_PXScale, sizeof(float) * SizeZ); hipMalloc(&GPU_KSpaceIndexArray, sizeof(int) * SizeZ); // int OffsetBegin = 800; // PXScale Array ComputePXScale << <SizeZ / NumThreads, NumThreads >> > (GPU_PXScale, OffsetBegin, ShiftValue, K_Step, SizeZ); CheckCudaError(); // K Space Index ( GPU thread thread CPU ) hipMemcpy(PX_Scale, GPU_PXScale, sizeof(float) * SizeZ, hipMemcpyDeviceToHost); // K Space Array int index = 1; int KSpaceOffset = PX_Scale[SizeZ - 1]; for (int i = 0; i <= KSpaceOffset; i++) { while (i >= PX_Scale[index]) { index++; } KSpaceIndexArray[i] = index; } for (int i = KSpaceOffset + 1; i < SizeZ; i++) KSpaceIndexArray[i] = -1; // K Space KSpaceIndexArray Index Index hipMemcpy(GPU_KSpaceIndexArray, KSpaceIndexArray, sizeof(int) * SizeZ, hipMemcpyHostToDevice); FrequencyAdjust << <dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_OCTFloatData, GPU_PXScale, GPU_KSpaceIndexArray, KSpaceOffset - CutValue, SizeX, 1, SizeZ); CheckCudaError(); // hipFree(GPU_PXScale); hipFree(GPU_KSpaceIndexArray); hipFree(GPU_OCTRawData); delete[] KSpaceIndexArray; // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "4. Space K Space : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 5. cuFFT // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif hipfftHandle PlanHandle; hipfftComplex* GPU_ComplexData; // FFT Handle & C2C hipfftComplex int NX = SizeZ; int BatchSize = SizeX; hipfftPlan1d(&PlanHandle, NX, HIPFFT_C2C, BatchSize); hipMalloc(&GPU_ComplexData, sizeof(hipfftComplex) * NX * BatchSize); CheckCudaError(); // Complex Data //gpuDataToComplex << <512, 4 >> > (GPU_OCTFloatData, GPU_ComplexData, NX * BatchSize, 0); DataToComplexData << <dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTFloatData, GPU_ComplexData, OCTDataSize); CheckCudaError(); // cuFFT(CUDA Fast Fourier Transform) hipfftExecC2C(PlanHandle, GPU_ComplexData, GPU_ComplexData, HIPFFT_FORWARD); CheckCudaError(); // (FFT) & // https://www.youtube.com/watch?v=spUNpyF58BY //gpuComplexToData << <512, 4 >> > (GPU_ComplexData, GPU_OCTFloatData, NX * BatchSize / 2, SizeZ, 0); ComplexDataToData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ComplexData, GPU_OCTFloatData, SizeX, 1, SizeZ, OCTDataSize); CheckCudaError(); // hipfftDestroy(PlanHandle); hipFree(GPU_ComplexData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "5. cuFFT: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 7. 
Normalize Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // float *GPU_BrightnessArray; hipMalloc(&GPU_BrightnessArray, sizeof(float) * SizeX); ZCalcBrightness << <1, SizeX >> > (GPU_OCTFloatData, GPU_BrightnessArray, 1, SizeX, SizeZ / 2, StartIndex); CheckCudaError(); // float MaxValue = 0; float *GPU_MaxElement = thrust::max_element(thrust::device, GPU_OCTFloatData, GPU_OCTFloatData + OCTDataSize / 2); hipMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), hipMemcpyDeviceToHost); CheckCudaError(); // ( GPU Normalize) // // TL // // // BR float MinValue = 0; for (int i = MinValuePixel_TL; i <= MinValuePixel_BR; i++) { // [first, last) int beginIndex = i * SizeZ / 2 + i; int endIndex = i * SizeZ / 2 + MinValuePixel_BR + 1; MinValue += thrust::reduce(thrust::device, GPU_OCTFloatData + beginIndex, GPU_OCTFloatData + endIndex); } MinValue /= (MinValuePixel_BR - MinValuePixel_TL + 1) * (MinValuePixel_BR - MinValuePixel_TL + 1); MinValue *= MinValueScalar; // Normaliza Data (Max - Min) 0 // ( array Min & Max ()) assert(MaxValue != MinValue && "FFT!!"); NormalizeData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, MaxValue, MinValue, OCTDataSize / 2); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "7. Normalize Data: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 8. // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // uchar *GPU_UintDataArray; float* GPU_OCTSmoothData; hipMalloc(&GPU_UintDataArray, sizeof(uchar) * SizeX * 1 * SizeZ); hipMalloc(&GPU_OCTSmoothData, sizeof(float) * SizeX * 1 * SizeZ); CheckCudaError(); // TransformToImageAndBorderData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, GPU_OCTSmoothData, GPU_UintDataArray, SizeX, 1, SizeZ / 2, SmoothSizeRange); CheckCudaError(); // size = 1; rows = SizeX; cols = SizeZ / 2; // hipFree(GPU_OCTFloatData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "8. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 9. 
// // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif #pragma region Init SaveDelete(PointType); PointType = new uchar[size * rows * cols]; memset(PointType, 0, sizeof(uchar) * size * rows * cols); SaveDelete(PointType_1D); PointType_1D = new int[size * rows]; memset(PointType_1D, 0, sizeof(int) * size * rows); // uchar* GPU_PointType; hipMalloc(&GPU_PointType, sizeof(uchar) * size * rows * cols); hipMemset(GPU_PointType, 0, sizeof(uchar) * size * rows * cols); #pragma endregion #pragma region assert(rows <= NumThreads && "rows 1024 "); // & findMaxAndMinPeak << < size * rows * cols / NumThreads, NumThreads >> > (GPU_OCTSmoothData, GPU_BrightnessArray, GPU_PointType, size, rows, cols, MaxPeakThreshold, SatPeakThreshold); CheckCudaError(); // Parse ParseMaxMinPeak << < size, rows >> > (GPU_PointType, size, rows, cols, StartIndex); CheckCudaError(); // int *GPU_PointType_BestN, *PointType_BestN; hipMalloc(&GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN); //PickBestChoiceToArray << < size, rows >> > (GPU_OCTSmoothData, GPU_PointType, GPU_PointType_BestN, size, rows, cols, ChooseBestN, StartIndex, GoThroughThreshold); //CheckCudaError(); // // => * 250(rows) * (ChooseBestN) * (Raidus) * N (ChooseBestN) int *GPU_Connect_Status; int ConnectStateSize = size * rows * ChooseBestN * ConnectRadius * ChooseBestN; hipMalloc(&GPU_Connect_Status, sizeof(int) * ConnectStateSize); hipMemset(GPU_Connect_Status, 0, sizeof(int) * ConnectStateSize); ConnectPointsStatus << < size * ChooseBestN , rows >> > (GPU_PointType_BestN, GPU_Connect_Status, size, rows, ChooseBestN, ConnectRadius); CheckCudaError(); // CPU int *Connect_Status = new int[ConnectStateSize]; PointType_BestN = new int[size * rows * ChooseBestN]; hipMemcpy(PointType, GPU_PointType, sizeof(uchar) * size * rows * cols, hipMemcpyDeviceToHost); hipMemcpy(Connect_Status, GPU_Connect_Status, sizeof(int) * ConnectStateSize, hipMemcpyDeviceToHost); hipMemcpy(PointType_BestN, GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN, hipMemcpyDeviceToHost); CheckCudaError(); // GetSurface(PointType_BestN, Connect_Status); #pragma endregion // hipFree(GPU_PointType); hipFree(GPU_PointType_BestN); hipFree(GPU_Connect_Status); hipFree(GPU_OCTSmoothData); hipFree(GPU_BrightnessArray); delete[] Connect_Status; delete[] PointType_BestN; #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "9. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 10. GPU Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // SaveDelete(VolumeData); VolumeData = new uchar[SizeX * 1 * SizeZ]; hipMemcpy(VolumeData, GPU_UintDataArray, sizeof(uchar) * SizeX * 1 * SizeZ / 2, hipMemcpyDeviceToHost); // GPU hipFree(GPU_UintDataArray); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "10. GPU Data : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion // #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock() - totalTime; cout << ": " << ((float)totalTime) / CLOCKS_PER_SEC << " sec" << endl; #endif } void TRCudaV2::MultiRawDataToPointCloud(char* FileRawData, int DataSize, int SizeX, int SizeY, int SizeZ, long ShiftValue, double K_Step, int CutValue) { // #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock(); #endif ////////////////////////////////////////////////////////////////////////// // // 1. GPU Data // 2. ( 2 Bytes QT GPU 2 Bytes) // Channels2 ( 2) // 3. 5 Fit // 4. Space K Space // 5. cuFFT // 6. Data // 6.5 TopView (TopView ) // 7. Normalize // 8. 
// 9. // 10. GPU Data // // // 1. Function => X Y // 2. ShiftValue => TRIGGER DELAY(FIBER) // 3. K_Step => (14.mm 2.5k step2)(k stepz1~2.5) // 4. CutValue => OCTzlaser ( cuteValue XD) // 5. Function SizeY // 6. TopView ////////////////////////////////////////////////////////////////////////// #pragma region 1. GPU Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME clock_t time = clock(); #endif // GPU Data char* GPU_FileRawData; // => Raw Data int *GPU_OCTRawData_2Channel; // => OCT Raw Data (2Channels"" Channel ) int *GPU_OCTRawData; // => Denoise Data ( CH1 + CH2 ) ("" Channel) float *GPU_OCTFloatData; // => K Space FFT // 2 Channels bool UseTwoChannels = (DataSize / SizeX / SizeY / SizeZ == 4); // 2 Byte & 2 Channles // hipMalloc(&GPU_FileRawData, sizeof(char) * DataSize); hipMemcpy(GPU_FileRawData, FileRawData, sizeof(char) * DataSize, hipMemcpyHostToDevice); CheckCudaError(); // 2 Chanels int OCTDataSize = SizeX * SizeY * SizeZ; if (UseTwoChannels) hipMalloc(&GPU_OCTRawData_2Channel, sizeof(int) * OCTDataSize * 2); hipMalloc(&GPU_OCTRawData, sizeof(int) * OCTDataSize); hipMalloc(&GPU_OCTFloatData, sizeof(float) * OCTDataSize); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "1. GPU: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 2. ////////////////////////////////////////////////////////////////////////// // // ( 0 ~ 250) // // channel ////////////////////////////////////////////////////////////////////////// // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 2 Byte if (UseTwoChannels) { RawDataToOriginalData << < dim3(SizeX, SizeY, SizeZ / NumThreads * 2), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData_2Channel, DataSize / 2); CheckCudaError(); // Channel Denoise CombineTwoChannels_Multi << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData_2Channel, GPU_OCTRawData, SizeX, SizeY, SizeZ); // hipFree(GPU_OCTRawData_2Channel); } else RawDataToOriginalData << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData, DataSize / 2); CheckCudaError(); // Index ReverseBackScanData << < dim3(SizeX / 2, SizeY / 2, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, SizeX, SizeY, SizeZ); // FileRaw Data hipFree(GPU_FileRawData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "2. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 3. 
Fitting // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // Matrix float* GPU_MatrixA; float* GPU_MatrixB; hipMalloc(&GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1)); hipMalloc(&GPU_MatrixB, sizeof(float) * (NumPolynomial + 1)); // int* FirstSizeZData = new int[SizeZ]; hipMemcpy(FirstSizeZData, GPU_OCTRawData, sizeof(int) * SizeZ, hipMemcpyDeviceToHost); hipDeviceSynchronize(); float average = accumulate(FirstSizeZData, FirstSizeZData + SizeZ, 0.0) / SizeZ; delete[] FirstSizeZData; // Matrix GetMatrixA << <1, (NumPolynomial + 1) * (NumPolynomial + 1) >> > (GPU_OCTRawData, GPU_MatrixA, NumPolynomial, SizeZ); GetMatrixB << <1, NumPolynomial + 1 >> > (GPU_OCTRawData, GPU_MatrixB, average, NumPolynomial, SizeZ); CheckCudaError(); float* MatrixA = new float[(NumPolynomial + 1) *(NumPolynomial + 1)]; float* MatrixB = new float[(NumPolynomial + 1)]; hipMemcpy(MatrixA, GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1), hipMemcpyDeviceToHost); hipMemcpy(MatrixB, GPU_MatrixB, sizeof(float) * (NumPolynomial + 1), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // Eigen Fitting Function EigenUtility eigen; eigen.SetAverageValue(average); eigen.SolveByEigen(MatrixA, MatrixB, NumPolynomial); // Function float* GPU_PolyValue; float* PolyValue = eigen.GetFunctionArray(SizeZ, average); hipMalloc(&GPU_PolyValue, sizeof(float) * SizeZ); hipMemcpy(GPU_PolyValue, PolyValue, sizeof(float) * SizeZ, hipMemcpyHostToDevice); MinusByFittingFunction << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_PolyValue, SizeZ); CheckCudaError(); // hipFree(GPU_MatrixA); hipFree(GPU_MatrixB); hipFree(GPU_PolyValue); delete[] MatrixA; delete[] MatrixB; delete[] PolyValue; // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "3. Fitting : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 4. Space K Space // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // float* PX_Scale = new float[SizeZ]; int* KSpaceIndexArray = new int[SizeZ]; float* GPU_PXScale; int* GPU_KSpaceIndexArray; hipMalloc(&GPU_PXScale, sizeof(float) * SizeZ); hipMalloc(&GPU_KSpaceIndexArray, sizeof(int) * SizeZ); // int OffsetBegin = 800; // PXScale Array ComputePXScale << <SizeZ / NumThreads, NumThreads >> > (GPU_PXScale, OffsetBegin, ShiftValue, K_Step, SizeZ); CheckCudaError(); // K Space Index ( GPU thread thread CPU ) hipMemcpy(PX_Scale, GPU_PXScale, sizeof(float) * SizeZ, hipMemcpyDeviceToHost); // K Space Array int index = 1; int KSpaceOffset = PX_Scale[SizeZ - 1]; for (int i = 0; i <= KSpaceOffset; i++) { while (i >= PX_Scale[index]) { index++; } KSpaceIndexArray[i] = index; } for (int i = KSpaceOffset + 1; i < SizeZ; i++) KSpaceIndexArray[i] = -1; // K Space KSpaceIndexArray Index Index hipMemcpy(GPU_KSpaceIndexArray, KSpaceIndexArray, sizeof(int) * SizeZ, hipMemcpyHostToDevice); FrequencyAdjust << <dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_OCTFloatData, GPU_PXScale, GPU_KSpaceIndexArray, KSpaceOffset - CutValue, SizeX, SizeY, SizeZ); CheckCudaError(); // hipFree(GPU_PXScale); hipFree(GPU_KSpaceIndexArray); hipFree(GPU_OCTRawData); delete[] KSpaceIndexArray; // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "4. Space K Space : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 5. 
cuFFT // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif hipfftHandle PlanHandle; hipfftComplex* GPU_ComplexData; // FFT Handle & C2C hipfftComplex int NX = SizeZ; int BatchSize = SizeX * SizeY; hipfftPlan1d(&PlanHandle, NX, HIPFFT_C2C, BatchSize); hipMalloc(&GPU_ComplexData, sizeof(hipfftComplex) * NX * BatchSize); CheckCudaError(); // Complex Data DataToComplexData << <dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTFloatData, GPU_ComplexData, OCTDataSize); CheckCudaError(); // cuFFT(CUDA Fast Fourier Transform) hipfftExecC2C(PlanHandle, GPU_ComplexData, GPU_ComplexData, HIPFFT_FORWARD); CheckCudaError(); // (FFT) & // https://www.youtube.com/watch?v=spUNpyF58BY ComplexDataToData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ComplexData, GPU_OCTFloatData, SizeX, SizeY, SizeZ, OCTDataSize); CheckCudaError(); // hipfftDestroy(PlanHandle); hipFree(GPU_ComplexData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "5. cuFFT: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 6. Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif float* GPU_ShiftData; hipMalloc(&GPU_ShiftData, sizeof(float) * OCTDataSize / 2); // // ShiftFinalData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, GPU_ShiftData, SizeX, SizeY, SizeZ / 2, OCTDataSize / 2); CheckCudaError(); // hipFree(GPU_OCTFloatData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "6. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 6.5 TopView // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // TopView float* GPU_OtherSideData; hipMalloc(&GPU_OtherSideData, sizeof(float) * OCTDataSize / 2); GetOtherSideView << <SizeX, SizeY >> > (GPU_ShiftData, GPU_OtherSideData, SizeX, SizeY, SizeZ / 2); CheckCudaError(); hipDeviceSynchronize(); // float MaxValue = 0, MinValue = 0; float *GPU_MaxElement = thrust::max_element(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY); float *GPU_MinElement = thrust::min_element(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY); hipMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&MinValue, GPU_MinElement, sizeof(float), hipMemcpyDeviceToHost); NormalizeData << < SizeX, SizeY >> > (GPU_OtherSideData, MaxValue, MinValue, SizeX * SizeY); CheckCudaError(); // Top View float MeanValue = thrust::reduce(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY) / SizeX / SizeY; uchar* GPU_UintOtherSideData; hipMalloc(&GPU_UintOtherSideData, sizeof(uchar) * SizeX * SizeY); TransformOtherSideDataToImage << <SizeX, SizeY >> > (GPU_OtherSideData, GPU_UintOtherSideData, MeanValue, OtherSideMean, SizeX, SizeY); CheckCudaError(); // hipFree(GPU_OtherSideData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "6.5. TopView : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 7. 
Normalize Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // MaxValue = 0; GPU_MaxElement = thrust::max_element(thrust::device, GPU_ShiftData, GPU_ShiftData + OCTDataSize / 2); hipMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), hipMemcpyDeviceToHost); CheckCudaError(); hipDeviceSynchronize(); // ( GPU Normalize) // // TL // // // BR MinValue = 0; for (int i = MinValuePixel_TL; i <= MinValuePixel_BR; i++) { // [first, last) int beginIndex = SizeX * SizeZ / 2 + i * SizeZ / 2 + i; int endIndex = SizeX * SizeZ / 2 + i * SizeZ / 2 + MinValuePixel_BR + 1; MinValue += thrust::reduce(thrust::device, GPU_ShiftData + beginIndex, GPU_ShiftData + endIndex); } MinValue /= (MinValuePixel_BR - MinValuePixel_TL + 1) * (MinValuePixel_BR - MinValuePixel_TL + 1); MinValue *= MinValueScalar; // Normaliza Data (Max - Min) 0 // ( array Min & Max ()) assert(MaxValue != MinValue && "FFT!!"); NormalizeData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ShiftData, MaxValue, MinValue, OCTDataSize / 2); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "7. Normalize Data: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 8. // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // uchar *GPU_UintDataArray; float* GPU_OCTSmoothData; hipMalloc(&GPU_UintDataArray, sizeof(uchar) * SizeX * SizeY * SizeZ); hipMalloc(&GPU_OCTSmoothData, sizeof(float) * SizeX * SizeY * SizeZ); CheckCudaError(); // TransformToImageAndBorderData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ShiftData, GPU_OCTSmoothData, GPU_UintDataArray, SizeX, SizeY, SizeZ / 2, SmoothSizeRange); CheckCudaError(); // size = SizeY; rows = SizeX; cols = SizeZ / 2; // hipFree(GPU_ShiftData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "8. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 9. 
// #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif #pragma region Init SaveDelete(PointType); PointType = new uchar[size * rows * cols]; memset(PointType, 0, sizeof(uchar) * size * rows * cols); SaveDelete(PointType_1D); PointType_1D = new int[size * rows]; memset(PointType_1D, 0, sizeof(int) * size * rows); // uchar* GPU_PointType; hipMalloc(&GPU_PointType, sizeof(uchar) * size * rows * cols); hipMemset(GPU_PointType, 0, sizeof(uchar) * size * rows * cols); #pragma endregion #pragma region assert(rows <= NumThreads && "rows 1024 "); // float *GPU_BrightnessArray; hipMalloc(&GPU_BrightnessArray, sizeof(float) * size * rows); ZCalcBrightness << <size, rows >> > (GPU_OCTSmoothData, GPU_BrightnessArray, size, rows, cols, StartIndex); CheckCudaError(); // & findMaxAndMinPeak << < size * rows * cols / NumThreads, NumThreads >> > (GPU_OCTSmoothData, GPU_BrightnessArray, GPU_PointType, size, rows, cols, MaxPeakThreshold, SatPeakThreshold); CheckCudaError(); // Parse ParseMaxMinPeak << < size, rows >> > (GPU_PointType, size, rows, cols, StartIndex); CheckCudaError(); // int *GPU_PointType_BestN, *PointType_BestN; float* GPU_CandidateGap; hipMalloc(&GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN); hipMalloc(&GPU_CandidateGap, sizeof(float) * size * rows * cols); // Gap hipMemset(GPU_CandidateGap, 0, sizeof(float) * size * rows * cols); hipMemset(GPU_PointType_BestN, 0, sizeof(int) * size * rows * ChooseBestN); PickBestChoiceToArray << < size, rows >> > (GPU_OCTSmoothData, GPU_PointType, GPU_CandidateGap, GPU_PointType_BestN, size, rows, cols, ChooseBestN, StartIndex, GoThroughThreshold); CheckCudaError(); // Neighbor float* GPU_NeighborCountArray; hipMalloc(&GPU_NeighborCountArray, sizeof(float) * size * rows * ChooseBestN); CalcNeighbor << <size, rows >> > (GPU_PointType_BestN, GPU_NeighborCountArray, size, rows, cols, ChooseBestN, DenoiseWindowsRadius); CheckCudaError(); // // => * 250(rows) * (ChooseBestN) * (Raidus) * N (ChooseBestN) int *GPU_Connect_Status; int ConnectStateSize = size * rows * ChooseBestN * ConnectRadius * ChooseBestN; hipMalloc(&GPU_Connect_Status, sizeof(int) * ConnectStateSize); hipMemset(GPU_Connect_Status, 0, sizeof(int) * ConnectStateSize); ConnectPointsStatus << < size * ChooseBestN, rows >> > (GPU_PointType_BestN, GPU_Connect_Status, size, rows, ChooseBestN, ConnectRadius); CheckCudaError(); // CPU int *Connect_Status = new int[ConnectStateSize]; PointType_BestN = new int[size * rows * ChooseBestN]; hipMemcpy(PointType, GPU_PointType, sizeof(uchar) * size * rows * cols, hipMemcpyDeviceToHost); hipMemcpy(Connect_Status, GPU_Connect_Status, sizeof(int) * ConnectStateSize, hipMemcpyDeviceToHost); hipMemcpy(PointType_BestN, GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN, hipMemcpyDeviceToHost); CheckCudaError(); // GetSurface(PointType_BestN, Connect_Status); #pragma endregion // hipFree(GPU_PointType); hipFree(GPU_PointType_BestN); hipFree(GPU_Connect_Status); hipFree(GPU_OCTSmoothData); hipFree(GPU_BrightnessArray); hipFree(GPU_CandidateGap); hipFree(GPU_NeighborCountArray); delete[] Connect_Status; delete[] PointType_BestN; // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "9. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 10. 
GPU Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // SaveDelete(VolumeData); VolumeData = new uchar[SizeX * SizeY * SizeZ / 2]; hipMemcpy(VolumeData, GPU_UintDataArray, sizeof(uchar) * SizeX * SizeY * SizeZ / 2, hipMemcpyDeviceToHost); SaveDelete(VolumeData_OtherSide); VolumeData_OtherSide = new uchar[SizeX * SizeY]; hipMemcpy(VolumeData_OtherSide, GPU_UintOtherSideData, sizeof(uchar) * SizeX * SizeY, hipMemcpyDeviceToHost); // GPU hipFree(GPU_UintDataArray); hipFree(GPU_UintOtherSideData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "10. GPU Data : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion // #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock() - totalTime; cout << ": " << ((float)totalTime) / CLOCKS_PER_SEC << " sec" << endl; #endif } // vector<Mat> TRCudaV2::TransfromMatArray(bool SaveBorder = false) { // Mat vector<Mat> ImgArray; for (int i = 0; i < size; i++) { // Offset Mat img(rows, cols, CV_8U, VolumeData + i * rows * cols); cvtColor(img, img, CV_GRAY2BGR); // ImgArray.push_back(img); } if (SaveBorder) { // Debug peak /*for (int i = 0; i < size; i++) for (int j = 0; j < rows * cols; j++) { int offsetIndex = i * rows * cols; int rowIndex = j / cols; int colIndex = j % cols; Vec3b color(0, 0, 0); if (PointType[offsetIndex + j] == 1) color = Vec3b(0, 255, 255); else if (PointType[offsetIndex + j] == 2) color = Vec3b(255, 255, 255); ImgArray[i].at<Vec3b>(rowIndex, colIndex) = color; }*/ // for (int i = 0; i < size; i++) for (int j = 0; j < rows; j++) { int index = i * rows + j; if (PointType_1D[index] != -1) { Point contourPoint(PointType_1D[index], j); circle(ImgArray[i], contourPoint, 2, Scalar(0, 255, 255), CV_FILLED); } } } return ImgArray; } Mat TRCudaV2::TransformToOtherSideView() { assert(size > 1 && ""); Mat img(rows, size, CV_8U, VolumeData_OtherSide); cvtColor(img, img, CV_GRAY2BGR); return img; } void TRCudaV2::CopySingleBorder(int* LastArray) { assert(LastArray != NULL && PointType_1D != NULL && size == 1 && " Array !!"); // assert call () memcpy(LastArray, PointType_1D, sizeof (int) * size * rows); } void TRCudaV2::CopyBorder(int* BorderArray) { assert(BorderArray != NULL && PointType_1D != NULL && size != 1 && " Array !!"); // assert call () memcpy(BorderArray, PointType_1D, sizeof(int) * size * rows); } bool TRCudaV2::ShakeDetect_Single(int* LastArray, bool ShowDebugMessage) { // int voteNum = 0; // float MoveDis = 0; // // for (int i = 0; i < rows; i++) { if (PointType_1D[i] != -1 && LastArray[i] != -1) { MoveDis += abs(PointType_1D[i] - LastArray[i]); voteNum++; } } // if (voteNum > OCT_Valid_VoteNum) { MoveDis /= voteNum; // Debug Message if(ShowDebugMessage) cout << "(pixel): " << (float)MoveDis << endl; // if (MoveDis < OCT_Move_Threshold) return false; } return true; } bool TRCudaV2::ShakeDetect_Multi(bool UsePreiseThreshold, bool ShowDebugMessage) { // 60 ~ 200 int voteNum = 0; // float MoveDis = 0; // // Reverse 0 ~ 250 for (int i = 60; i < 200; i++) { bool IsMove = false; // int leftIndex = 124 * rows + i; // 124 int rightIndex = 125 * rows + i; // 125 // for (int j = size / 2 - 1; j >= 0; j--) if (PointType_1D[j * rows + i] != -1) { leftIndex = j * rows + i; break; } // for (int j = size / 2; j < size; j++) if (PointType_1D[j] != -1) { rightIndex = j * rows + i; break; } int leftY = PointType_1D[leftIndex]; int rightY = PointType_1D[rightIndex]; // if (PointType_1D[leftIndex] != -1 && PointType_1D[rightIndex] != -1) { int DisMid = abs(PointType_1D[rightIndex] - 
PointType_1D[leftIndex]); MoveDis += DisMid; voteNum++; } } // if (voteNum > OCT_Valid_VoteNum) { MoveDis /= voteNum; // Debug Message if (ShowDebugMessage) cout << "(pixel): " << (float)MoveDis << endl; // if (UsePreiseThreshold) { // if (MoveDis < OCT_Move_Precise_Threshold) return false; } else { // if (MoveDis < OCT_Move_Threshold) return false; } } else if (ShowDebugMessage) cout << "!!" << endl; return true; } ////////////////////////////////////////////////////////////////////////// // Helper Function ////////////////////////////////////////////////////////////////////////// void TRCudaV2::GetSurface(int *PointType_BestN, int *Connect_Status) { // N #pragma omp parallel for //num_thread(4) for (int i = 0; i < size; i++) { // 10 Sample int RowGap = rows / 10; vector<vector<ConnectInfo>> StatusVector; for (int j = 0; j < rows; j += RowGap) for (int chooseNIndex = 0; chooseNIndex < ChooseBestN; chooseNIndex++) { int begin = j; int end = j; // if (PointType_BestN[i * rows * ChooseBestN + j * ChooseBestN + chooseNIndex] == -1) continue; // vector<ConnectInfo> Connect; #pragma region // ConnectInfo info; info.rowIndex = j; info.chooseIndex = chooseNIndex; Connect.push_back(info); int FindIndex = j; int FindChooseIndex = chooseNIndex; bool IsFind = true; while (IsFind && FindIndex > 0) { int minMoveIndex = -1; int minChooseIndex = -1; int tempValue = ConnectRadius * ConnectRadius; for (int k = 1; k < ConnectRadius; k++) for (int nextChooseNIndex = 0; nextChooseNIndex < ChooseBestN; nextChooseNIndex++) { int index = i * rows * ChooseBestN * ConnectRadius * ChooseBestN + // Size (FindIndex - k) * ChooseBestN * ConnectRadius * ChooseBestN + // Rows nextChooseNIndex * ConnectRadius * ChooseBestN + // Top N ( ChooseIndex) k * ChooseBestN + // FindChooseIndex; if (FindIndex - k >= 0 && Connect_Status[index] != 0 && tempValue > Connect_Status[index]) { tempValue = Connect_Status[index]; minMoveIndex = k; minChooseIndex = nextChooseNIndex; } } // if (minMoveIndex != -1) { // FindIndex = FindIndex - minMoveIndex; FindChooseIndex = minChooseIndex; // info.rowIndex = FindIndex; info.chooseIndex = minChooseIndex; Connect.push_back(info); // IsFind = true; } else IsFind = false; } #pragma endregion #pragma region FindIndex = j; FindChooseIndex = chooseNIndex; while (IsFind && FindIndex < rows - 1) { int minMoveIndex = -1; int minChooseIndex = -1; int tempValue = ConnectRadius * ConnectRadius; for (int k = 1; k < ConnectRadius; k++) for (int nextChooseNIndex = 0; nextChooseNIndex < ChooseBestN; nextChooseNIndex++) { int index = i * rows * ChooseBestN * ConnectRadius * ChooseBestN + // Size FindIndex * ChooseBestN * ConnectRadius * ChooseBestN + // Rows FindChooseIndex * ConnectRadius * ChooseBestN + // Top N k * ChooseBestN + // nextChooseNIndex; if (FindIndex + k < rows && Connect_Status[index] != 0 && tempValue > Connect_Status[index]) { tempValue = Connect_Status[index]; minMoveIndex = k; minChooseIndex = nextChooseNIndex; } } // if (minMoveIndex != -1) { // FindIndex = FindIndex + minMoveIndex; FindChooseIndex = minChooseIndex; // info.rowIndex = FindIndex; info.chooseIndex = minChooseIndex; Connect.push_back(info); // IsFind = true; } else IsFind = false; } #pragma endregion // 1 if (Connect.size() > 1) { // sort(Connect.begin(), Connect.end(), SortByRows); StatusVector.push_back(Connect); } } // if (StatusVector.size() == 0) { memset(&PointType_1D[i * rows], -1, sizeof(int) * rows); continue; } // sort(StatusVector.begin(), StatusVector.end(), SortByVectorSize); // () vector<int> BestCandidate; 
int Begin = rows; int End = 0; for (int j = 0; j < StatusVector.size() && j < 3; j++) { int CurrentBegin = StatusVector[j][0].rowIndex; int CurrentEnd = StatusVector[j][StatusVector[j].size() - 1].rowIndex; if (Begin > CurrentBegin) { Begin = min(Begin, CurrentBegin); End = max(End, CurrentEnd); BestCandidate.push_back(j); } if (End < CurrentEnd) { Begin = min(Begin, CurrentBegin); End = max(End, CurrentEnd); BestCandidate.push_back(j); } } // for (int j = 1; j < BestCandidate.size(); j++) if (StatusVector[BestCandidate[j]].size() >= 3) for (int k = 0; k < StatusVector[BestCandidate[j]].size(); k++) StatusVector[0].push_back(StatusVector[j][k]); vector<ConnectInfo> LineVector = StatusVector[0]; int index = 0; // LineVector Index for (int j = 0; j < rows; j++) { int Type1D_Index = i * rows + j; if (LineVector[index].rowIndex != j) PointType_1D[Type1D_Index] = -1; else if (LineVector[index].rowIndex == j) { int BestN_Index = i * rows * ChooseBestN + // LineVector[index].rowIndex * ChooseBestN + // row LineVector[index].chooseIndex; // ChooseIndex // PointType PointType_1D[j + i * rows] = PointType_BestN[BestN_Index]; index++; if (index >= LineVector.size()) { for (int k = j + 1; k < rows; k++) PointType_1D[k + i * rows] = -1; break; } } } } // Smooth int* tempPointType_1D = new int[size * rows]; for (int i = 0; i < size; i++) for (int j = 0; j < rows; j ++) { int totalPoint = 0; int totalZ = 0; int index = i * rows + j; if (PointType_1D[index] == -1) { tempPointType_1D[index] = -1; continue; } for (int k = -DenoiseWindowsRadius; k <= DenoiseWindowsRadius; k++) for (int l = -DenoiseWindowsRadius; l <= DenoiseWindowsRadius; l++) { int currentI = i + k; int currentJ = j + l; if (0 <= currentI && currentI < size && 0 <= currentJ && currentJ < rows) { int currentIndex = currentI *rows + currentJ; if (PointType_1D[currentIndex] != -1) { totalPoint++; totalZ += PointType_1D[currentIndex]; } } } tempPointType_1D[index] = totalZ / totalPoint; } memcpy(PointType_1D, tempPointType_1D, sizeof(int) * size * rows); delete[] tempPointType_1D; } bool TRCudaV2::SortByRows(ConnectInfo left, ConnectInfo right) { return left.rowIndex < right.rowIndex; } bool TRCudaV2::SortByVectorSize(vector<ConnectInfo> left, vector<ConnectInfo> right) { return right.size() < left.size(); } void TRCudaV2::CheckCudaError() { hipError_t GPU_Error = hipGetLastError(); if (GPU_Error != hipSuccess) { cout << hipGetErrorString(GPU_Error) << endl; assert(false); exit(-1); } } void TRCudaV2::SaveDelete(void* pointer) { if (pointer != NULL) delete[] pointer; }
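Step 5 of the pipeline above performs the FFT by batching every A-scan into a single 1-D complex-to-complex plan (one transform of length SizeZ per X/Y position). A stripped-down sketch of just that batching setup follows, written against the CUDA cuFFT spelling; the function name, the in-place transform, and the omitted error checks are assumptions made for brevity.

```cpp
// Sketch: batched 1-D C2C FFT, one transform of length sizeZ per A-scan.
#include <cufft.h>
#include <cuda_runtime.h>

void batchedForwardFFT(cufftComplex* d_data, int sizeZ, int batch)
{
  cufftHandle plan;
  // One plan describing `batch` transforms, each of length sizeZ, stored
  // contiguously in d_data (A-scan after A-scan), matching the layout
  // DataToComplexData produces above.
  cufftPlan1d(&plan, sizeZ, CUFFT_C2C, batch);

  // In-place forward transform, as in the pipeline's step 5.
  cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD);

  cudaDeviceSynchronize();   // make results visible before the magnitude pass
  cufftDestroy(plan);
}
```

The HIP build in the file above is the same call sequence under the hipFFT names (hipfftPlan1d, hipfftExecC2C, HIPFFT_C2C, HIPFFT_FORWARD), with batch = SizeX for a single B-scan and SizeX * SizeY for a full volume.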
0336d18317d88ddc358653b6653988646c449e56.cu
#include "TRCudaV2.cuh" #include "EigenUtility.h" TRCudaV2::TRCudaV2() { } TRCudaV2::~TRCudaV2() { // 這邊要刪除其他創出來的變數 SaveDelete(VolumeData); SaveDelete(PointType); SaveDelete(PointType_1D); } ////////////////////////////////////////////////////////////////////////// // GPU ////////////////////////////////////////////////////////////////////////// __device__ static float Z1Function(float x1) { // 這個 Function 不確定在幹嘛XD // https://i.imgur.com/QS3bczf.png return -126.4517 + 0.4005123 * x1 - 0.000011981 * pow(x1 - 2122.41, 2) - 0.000000011664 * pow(x1 - 2122.41, 3) + 0.000000000001432 * pow(x1 - 2122.41, 4) - 0.0000000000000008164 * pow(x1 - 2122.41, 5) + 5.939E-20 * pow(x1 - 2122.41, 6); } __global__ static void RawDataToOriginalData(char* FileRawData, int* OCTRawData, int OCTDataSize) { // 這邊是原本讀取是 1個 Byte 要轉乘 2個 Bytes 為一筆資料 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // 這邊應該是不會發生,就當作例外判斷 if (id >= OCTDataSize) { printf("轉 Raw Data 有 Error!\n"); return; } OCTRawData[id] = (int)((uchar)FileRawData[id * 2]) + (int)((uchar)FileRawData[id * 2 + 1]) * 256; } __global__ static void CombineTwoChannels_Single(int* OCTData_2Channls, int* OCTData, int SizeX, int SizeY, int SizeZ) { // 這邊是 Denoise,把兩個 Channel 的資料相加 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // 這邊應該是不會發生,就當作例外判斷 if (id >= SizeX * SizeY * SizeZ) { printf("Combine Two Channel 有 Error!\n"); return; } int BoxSize = SizeX * SizeZ; // 這邊沒有反掃,所以直接接上大小 int BoxIndex = id / BoxSize; int BoxLeft = id % BoxSize; OCTData[id] = (OCTData_2Channls[BoxIndex * 2 * BoxSize + BoxLeft] + OCTData_2Channls[(BoxIndex * 2 + 1) * BoxSize + BoxLeft]) / 2; } __global__ static void CombineTwoChannels_Multi(int* OCTData_2Channls, int* OCTData, int SizeX, int SizeY, int SizeZ) { // 這邊是 Denoise,把兩個 Channel 的資料相加 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // 這邊應該是不會發生,就當作例外判斷 if (id >= SizeX * SizeY * SizeZ) { printf("Combine Two Channel 有 Error!\n"); return; } int BoxSize = SizeX * SizeZ * 2; // 一個 Channel 的資料是 正掃 + 反掃 int BoxIndex = id / BoxSize; int BoxLeft = id % BoxSize; OCTData[id] = (OCTData_2Channls[BoxIndex * 2 * BoxSize + BoxLeft] + OCTData_2Channls[(BoxIndex * 2 + 1) * BoxSize + BoxLeft]) / 2; } __global__ static void ReverseBackScanData(int* OCTData, int SizeX, int SizeY, int SizeZ) { // 這邊是要反轉 反掃的資料 int id = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9 blockIdx.x * gridDim.z * blockDim.x + // X => X * (125 * 2) * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; int changeID = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9 (gridDim.y * 2 - blockIdx.x - 1) * gridDim.z * blockDim.x + // X => (250 - X - 1) * (125 * 2) * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; int value = OCTData[id]; OCTData[id] = OCTData[changeID]; OCTData[changeID] = value; } __global__ static void GetMatrixA(int* OCTData, float* MatrixA, int NumPolynomial, int OneDataSize) { // 這個 Function 是去取得 
MatrixA 的值 int id = blockIdx.x * blockDim.x + threadIdx.x; // 例外判斷 (理論上應該也是不會超過) if (id >= (NumPolynomial + 1) * (NumPolynomial + 1)) { printf("多項式 Fitting 有問題!\n"); return; } // 算 Index int rowIndex = id % (NumPolynomial + 1); int colsIndex = id / (NumPolynomial + 1); // 做相加 float value = 0; for (int i = 0; i < OneDataSize; i++) { // 抓出兩項的值 float FirstValue = (float)i / OneDataSize; float SecondValue = (float)i / OneDataSize; value += pow(FirstValue, NumPolynomial - rowIndex) * pow(SecondValue, NumPolynomial - colsIndex); } MatrixA[id] = value; } __global__ static void GetMatrixB(int* OCTData, float* MatrixB, float YAverage, int NumPolynomial, int OneDataSize) { int id = blockIdx.x * blockDim.x + threadIdx.x; // 算 Index int rowIndex = id % (NumPolynomial + 1); int colsIndex = id / (NumPolynomial + 1); // 做相加 float value = 0; for (int i = 0; i < OneDataSize; i++) { // 抓出兩項的值 float FirstValue = (float)i / OneDataSize; float SecondValue = OCTData[i] - YAverage; value += pow(FirstValue, NumPolynomial - rowIndex) * SecondValue; } MatrixB[id] = value; } __global__ static void MinusByFittingFunction(int* OCTData, float* PolyValue, int SizeZ) { // 這邊要減掉 Fitting Data int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // 先拿出他是第幾個 Z int idZ = id % SizeZ; // 減掉預測的值 OCTData[id] -= PolyValue[idZ]; } __global__ static void ComputePXScale(float* PXScale, int OffsetBegin, int ShiftValue, int Steps, int Size) { // 這邊是算出 PXScale Array(詳細在幹嘛我不是很懂@@) int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= Size) { printf("ComputePXScale 有問題!\n"); return; } // 聽說是去直流 int idOffset = OffsetBegin + ShiftValue; PXScale[id] = (Z1Function(idOffset + id) - Z1Function(idOffset)) * Steps; } __global__ static void FrequencyAdjust(int* OCTData, float* KSpaceData, float* PXScale, int* IndexArray, int CutIndex, int SizeX, int SizeY, int SizeZ) { // 這邊是 Denoise,把兩個 Channel 的資料相加 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= SizeX * SizeY * SizeZ) { printf("Frequency 轉換的地方有問題"); return; } // 算回原本的 Index int idZ = id % SizeZ; if (IndexArray[idZ] == -1 || idZ >= CutIndex || idZ == 0) { KSpaceData[id] = 0; return; } // 要算斜率前,先拿出上一筆資料 int LastPXScaleIndex = (IndexArray[idZ] - 1 <= 0 ? 
0 : IndexArray[idZ] - 1); double m = (double)(OCTData[id] - OCTData[id - 1]) / (PXScale[IndexArray[idZ]] - PXScale[LastPXScaleIndex]); double c = OCTData[id] - m * PXScale[IndexArray[idZ]]; KSpaceData[id] = m * idZ + c; } __global__ static void DataToComplexData(float* KSpaceData, cufftComplex* FFTData, int OCTDataSize) { // 把 KSpace 的 Data 塞進 FFT int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= OCTDataSize) { printf("放進 Complex Data 有錯誤!!\n"); return; } // 放進 Complex Data 裡 FFTData[id].x = KSpaceData[id]; FFTData[id].y = 0; } __global__ static void ComplexDataToData(cufftComplex* FFTData, float* OCTFloatData, int SizeX, int SizeY, int SizeZ, int OCTDataSize) { // FFT 資料塞回原本的資料集 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (1 * 1024) blockIdx.z * blockDim.x + // Z => (0 * 1024 + Z2) threadIdx.x; if (id >= OCTDataSize / 2) { printf("Complex To Data 有錯誤!!\n"); return; } // 這邊要避免 0 頻率與 最大頻率(由於只取一半的右邊,所以只拿 1024),詳情請看 Youtube 連結 (你看學長有多好,都找連結給你了,還不看!!) // 這邊要除以 2 是因為它會對稱 // 然後拿的順序要反過來 (由於東元那邊的程式是這樣) // 如果是最大頻率 (也就是 Size / 2 - 1 => 1023),那就要去下一個 也就是 1022 /*int idZ = id % (SizeZ / 2); idZ = SizeZ / 2 - idZ - 1; if (idZ == SizeZ / 2 - 1) idZ--;*/ int idZ = id % (SizeZ / 2); if (idZ == 0) idZ++; // 這邊的算法要對應回去原本的資料 int tempIndex = id / (SizeZ / 2); int idX = tempIndex % SizeX; int idY = tempIndex / SizeX; int NewIndex = idY * SizeX * SizeZ + idX * SizeZ + idZ; float temp = sqrt(FFTData[NewIndex].x * FFTData[NewIndex].x + FFTData[NewIndex].y * FFTData[NewIndex].y); // 做一下例外判斷 if (temp == 0) OCTFloatData[id] = 0; else OCTFloatData[id] = log10f(temp) * 10; } __global__ static void ShiftFinalData(float* AfterFFTData, float* ShiftData, int SizeX, int SizeY, int FinalSizeZ, int FinalDataSize) { // 這邊要做位移 // 由於硬體是這樣子 ↓ // => | -> // ("->" 是指第一段,"=>" 是指第二段) int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= FinalDataSize) { printf("Shift Data 有錯誤!!\n"); return; } // 這邊的算法要對應回去原本的資料 int idZ = id % FinalSizeZ; int tempIndex = id / FinalSizeZ; int idX = tempIndex % SizeX; int idY = tempIndex / SizeX; // SizeY 折回來 // (0 ~ 124 125 ~ 249) // ↓ // (125 ~ 249 0 ~ 124) idY = (idY + SizeY / 2) % SizeY; int NewIndex = idY * SizeX * FinalSizeZ + idX * FinalSizeZ + idZ; ShiftData[id] = AfterFFTData[NewIndex]; //ShiftData[id] = AfterFFTData[id]; } __global__ static void NormalizeData(float* ShiftData, float MaxValue, float MinValue, int FinalDataSize) { // 這邊是根據資料的最大最小值,去做 Normalize 資料 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // 例外判斷 if (id >= FinalDataSize) { printf("Normaliza Data 超出範圍\n"); return; } if (ShiftData[id] < MinValue) ShiftData[id] = 0; else if (ShiftData[id] > MaxValue) ShiftData[id] = 1; else ShiftData[id] = (ShiftData[id] - MinValue) / (MaxValue - MinValue); } // 轉成圖片 & 產生邊界判斷 (Smooth 後的 Data)的資料 __device__ static float SmoothDataByIndex(float* VolumeData, int id, int FinalSizeZ, int SmoothSizeRange) { int idZ = id % FinalSizeZ; int SmoothRadius = (SmoothSizeRange - 1) / 2; // Smooth 這個區段的資料 int MinValue 
= min(SmoothRadius, idZ - 0); int MaxValue = min(SmoothRadius, FinalSizeZ - idZ - 1); float TempTotal = 0; // 把範圍內的部分相加 for (int i = -MinValue; i <= MaxValue; i++) TempTotal += VolumeData[id + i]; TempTotal /= (MaxValue + MinValue + 1); return TempTotal; } __global__ static void TransformToImageAndBorderData(float* VolumeData_Normalized, float* SmoothData, uchar* ImageArray, int SizeX, int SizeY, int FinalSizeZ, int SmoothSizeRange) { // 這邊是將原本的資料,轉換完圖片 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * 1 * 1024 blockIdx.x * gridDim.z * blockDim.x + // X => X * 1 * 1024 blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= SizeX * SizeY * FinalSizeZ) // 判斷是否超出大小 return; // 產生 Border Detect 的資料 SmoothData[id] = SmoothDataByIndex(VolumeData_Normalized, id, FinalSizeZ, SmoothSizeRange); // 這個 1.3 倍,是東元測出來的 float data = VolumeData_Normalized[id] * 255 * 1.3f; if (data >= 255) ImageArray[id] = 255; else if (data <= 0) ImageArray[id] = 0; else ImageArray[id] = (uchar)data; } // 邊界部分 __global__ static void ZCalcBrightness(float* DataArray, float* BrightArray, int size, int rows, int cols, int startIndex) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // 超出範圍 return; // 算 Index int sizeIndex = id / rows; int rowIndex = id % rows; BrightArray[id] = 0; for (int i = startIndex; i < cols; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; BrightArray[id] += DataArray[currentID]; } } __global__ static void findMaxAndMinPeak(float* DataArray, float* BrightnessArray, uchar* PointType, int size, int rows, int cols, float MaxPeakThreshold, float SatPeakThreshold) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= rows * cols * size) // 超出範圍 return; // width 判斷 1 ~ (width - 1) int colID = id % cols; if (1 >= colID || colID == (cols - 1)) return; // 是否飽和 int tempIndex = id / cols; if (BrightnessArray[tempIndex] > SatPeakThreshold) return; // 接著要去比周圍 // 峰值判斷 (要比兩邊高,且峰值要高於某一個值,且左 或右差值,只有一端能高於這個值) float DiffLeft = DataArray[id] - DataArray[id - 1]; float DiffRight = DataArray[id] - DataArray[id + 1]; if (DiffLeft > 0 && DiffRight > 0 && DataArray[id] > MaxPeakThreshold) PointType[id] = 1; else if (DiffLeft < 0 && DiffRight < 0) PointType[id] = 2; } __global__ static void ParseMaxMinPeak(uchar* PointType, int size, int rows, int cols, int startIndex) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // 超出範圍 return; // 算 Index int sizeIndex = id / rows; int rowIndex = id % rows; // 然後這邊要去 Skip 所有的 Min int lastMinID = -1; bool FindMax = false; // 為了要抓出 最大(有效)的 區間 int Useful_Start = -1; int Useful_End = -1; int Useful_PeakCount = -1, tempPeakCount = 0; // 刪除多餘 min Peak for (int i = 0; i < startIndex; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; PointType[currentID] = 0; } for (int i = startIndex; i < cols; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; if (lastMinID == -1) // 判斷是不適剛開始 or 找到 Max { // 要先去抓出第一個 Min if (PointType[currentID] == 2) lastMinID = i; else if (PointType[currentID] == 1) PointType[currentID] = 0; // 這邊代表沒有遇到峰值,應該是雜訊了 } else { // 已經抓到 min 了之後,要去濾掉其他的 min if (PointType[currentID] == 1) { // 抓到 Max FindMax = true; tempPeakCount++; } else if (FindMax && PointType[currentID] == 2) { // 抓到 Max 之後,又找到一個 Min if (Useful_PeakCount < tempPeakCount) { Useful_PeakCount = tempPeakCount; Useful_Start = lastMinID; Useful_End = i; } FindMax = false; tempPeakCount = 0; lastMinID = -1; } else if (!FindMax && PointType[currentID] == 2) { // 沒抓到 
Max 只抓到 Min PointType[sizeIndex * rows * cols + rowIndex * cols + lastMinID] = 0; lastMinID = i; } } } // 跑到最後結束,要再去判斷最後一個是否是多餘的 Min if (lastMinID != -1) PointType[sizeIndex * rows * cols + rowIndex * cols + lastMinID] = 0; } __device__ static void InsertBestNChoice(float* CandidateGap, int* PointType_BestN, int offsetIndex, int bestNoffsetIndex, int CurrentIndex, int ChooseBestN) { bool IsInsert = false; for (int i = 0; i < ChooseBestN && !IsInsert; i++) { // 大於 0 代表已經有值了 if (PointType_BestN[bestNoffsetIndex + i] > 0) { // 比較 int preIndex = PointType_BestN[bestNoffsetIndex + i]; if (CandidateGap[offsetIndex + preIndex] >= CandidateGap[offsetIndex + CurrentIndex]) // 原先的比他大,代表不加入,找下一個 continue; else if (CandidateGap[offsetIndex + preIndex] < CandidateGap[offsetIndex + CurrentIndex]) // 把剩下來的往後推,並加入此答案 { for (int j = ChooseBestN - 1; j > i; j--) PointType_BestN[bestNoffsetIndex + j] = PointType_BestN[bestNoffsetIndex + j - 1]; PointType_BestN[bestNoffsetIndex + i] = CurrentIndex; IsInsert = true; } } else { PointType_BestN[bestNoffsetIndex + i] = CurrentIndex; break; } } } __global__ static void PickBestChoiceToArray(float* DataArray, uchar* PointType, float* CandidateGap, int* PointType_BestN, int size, int rows, int cols, int ChooseBestN, int startIndex, float Threshold) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // 判斷是否超出大小 return; // 算 Index int sizeIndex = id / rows; int rowIndex = id % rows; bool IsFindMin = false; // 是否找到底端 float MinData; int offsetIndex = sizeIndex * rows * cols + rowIndex * cols; int bestNoffsetIndex = sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN; float lastData = -1; for (int i = startIndex; i < cols; i++) { // 先找最小的 if (PointType[i + offsetIndex] == 2) { // 如果前面已經有找到其他點的話 if (IsFindMin) lastData = -1; IsFindMin = true; MinData = DataArray[i + offsetIndex]; } else if ( IsFindMin && // 要先找到最低點 PointType[i + offsetIndex] == 1 && DataArray[i + offsetIndex] - MinData > Threshold // 接著找大於這個 Threshold ) { lastData = DataArray[i + offsetIndex] - MinData; // 把差距加進去,跟前面的比較,找出最好的加入 PointType_BestN CandidateGap[offsetIndex + i] = lastData; InsertBestNChoice(CandidateGap, PointType_BestN, offsetIndex, bestNoffsetIndex, i, ChooseBestN); } } // 把其他的設定為 0 for (int i = 0; i < ChooseBestN; i++) if (PointType_BestN[bestNoffsetIndex + i] == 0) PointType_BestN[bestNoffsetIndex + i] = -1; } __global__ static void CalcNeighbor(int* PointType_BestN, float* NeighborCountArray, int size, int rows, int cols, int ChooseBestN, int Radius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // 判斷是否超出大小 return; // 算 Index int sizeIndex = id / rows; int rowIndex = id % rows; // 先塞 index int chooseIndex = sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN; for (int i = 0; i < ChooseBestN; i++) { // 清空陣列 int totalPixelCount = 0; float avgPixel = 0; int BestN = PointType_BestN[chooseIndex + i]; if (BestN == -1) { NeighborCountArray[chooseIndex + i] == 0; continue; } // 算有幾個在鄰居 for (int y = -Radius; y <= Radius; y++) for (int x = -Radius; x <= Radius; x++) for (int n = 0; n < ChooseBestN; n++) { int currentSizeIndex = sizeIndex + y; int currentRowIndex = rowIndex + x; if (0 <= currentSizeIndex && currentSizeIndex < size && 0 <= currentRowIndex && currentRowIndex < rows) { totalPixelCount++; int CurrentBestNIndex = currentSizeIndex * rows * ChooseBestN + currentRowIndex * ChooseBestN + n; int CurrentBestN = PointType_BestN[CurrentBestNIndex]; // 如果沒有東西就 Return if (CurrentBestN == -1) continue; if (abs(CurrentBestN - BestN) <= Radius) 
avgPixel++; } } // 算完之後,先塞到裡面 NeighborCountArray[chooseIndex + i] = avgPixel / totalPixelCount; } // 只保留最大的 int maxIndex = (thrust::max_element(thrust::device, NeighborCountArray + chooseIndex, NeighborCountArray + chooseIndex + ChooseBestN) - (NeighborCountArray + chooseIndex)); PointType_BestN[chooseIndex] = PointType_BestN[chooseIndex + maxIndex]; for (int i = 1; i < ChooseBestN; i++) PointType_BestN[i] = -1; } __global__ static void ConnectPointsStatus(int* PointType_BestN, int* ConnectStatus, int size, int rows, int ChooseBestN, int ConnectRadius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows * ChooseBestN) // 判斷是否超出大小 return; // 算 Index int sizeIndex = id / (rows * ChooseBestN); int tempID = id % (rows * ChooseBestN); int rowIndex = tempID / ChooseBestN; int chooseIndex = tempID % ChooseBestN; // 代表這個點沒有有效的點 if (PointType_BestN[sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN + chooseIndex] == -1) return; // 如果是有效的點,就繼續往下追 int finalPos = min(rowIndex + ConnectRadius, rows); // 截止條件 for (int i = rowIndex + 1; i < finalPos; i++) { for (int j = 0; j < ChooseBestN; j++) { // 下一個點的位置 (第 i 個 row 的點) // 然後的第 1 個點 if (PointType_BestN[sizeIndex * rows * ChooseBestN + i * ChooseBestN + j] != -1) { // 前面項為現在這個點 // 後面項為往下的點 int diffX = PointType_BestN[sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN + chooseIndex] - PointType_BestN[sizeIndex * rows * ChooseBestN + i * ChooseBestN + j]; int diffY = i - rowIndex; int Radius = diffX * diffX + diffY * diffY; // 0 沒有用到喔 if (Radius < ConnectRadius * ConnectRadius) { // 張數的位移 + Row 的位移 + 現在在 Top N 的點 + 半徑的位移 + 往下 Top N 的結果 int index = sizeIndex * rows * ChooseBestN * ConnectRadius * ChooseBestN + // 張數 rowIndex * ChooseBestN * ConnectRadius * ChooseBestN + // Row chooseIndex * ConnectRadius * ChooseBestN + // 現在在 Top N (i - rowIndex) * ChooseBestN + // 半徑 j; ConnectStatus[index] = Radius; } } } } } // 這邊是例外,只有 Multi 才有TopView __global__ static void GetOtherSideView(float* Data, float* OtherSideData, int SizeX, int SizeY, int FinalSizeZ) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= SizeX * SizeY) { printf("範圍有錯!!\n"); return; } // id 換算 int idX = id / SizeY; int idY = id % SizeY; int DataOffsetIndex = idX * SizeY * FinalSizeZ + idY * FinalSizeZ; // 總和一個 SizeZ float totalZ = 0; for (int i = 0; i < FinalSizeZ; i++) totalZ += Data[DataOffsetIndex + i]; // 這邊的單位要調整一下 // rows => 是張樹 (SizeY) // cols => 是 SizeX int offsetIndex = idY * SizeX + idX; OtherSideData[offsetIndex] = totalZ; } __global__ static void TransformOtherSideDataToImage(float* OtherSideData, uchar* UintOtherSideData, float Mean, float FixMean, int SizeX, int SizeY) { int id = blockDim.x * blockIdx.x + threadIdx.x; if (id >= SizeX * SizeY) // 判斷是否超出大小 return; // 位移到設定的 Mean 直間 float ScaleFactor = FixMean / Mean / 255; float data = OtherSideData[id] * 255 * ScaleFactor; if (data >= 255) UintOtherSideData[id] = 255; else if (data <= 0) UintOtherSideData[id] = 0; else UintOtherSideData[id] = (uchar)data; } ////////////////////////////////////////////////////////////////////////// // CPU ////////////////////////////////////////////////////////////////////////// // 轉換 Function void TRCudaV2::SingleRawDataToPointCloud(char* FileRawData, int DataSize, int SizeX, int SizeZ, long ShiftValue, double K_Step, int CutValue) { // 算時間 #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock(); #endif ////////////////////////////////////////////////////////////////////////// // 步驟說明 // 1. 上傳 GPU Data // 2. 
一開始要把資料讀進來 (由於原本的資料都是 2個 Bytes 為一組,但 QT 目前是先用 GPU 轉換到 2個 Bytes),和 // 由於資料有 兩個 Channels,要相加除以2,可以去除雜訊 (由於原本的能量強度資料是使用三角波,所以會有去跟回兩個資料,就是把這兩筆資料相加除以 2) // 3. 用 5 次項去 Fit 一條曲線 // 4. λ Space 轉成 K Space // 5. cuFFT // (這個部分不用位移) // 7. 根據最大最小值來 Normalize 資料 // 8. 轉成圖 // 9. 邊界判斷 // 10. 抓下 GPU Data // // 細節說明: // 1. 轉換 Function => X 快軸、Y 慢軸 // 2. ShiftValue => TRIGGER DELAY位移(換FIBER,電線校正回來用的) // 3. K_Step => 深度(14.多mm對應 2.5的k step;可以考慮之後用2)(k step越大,z軸越深,但資料精細度越差;1~2.5) // 4. CutValue => OCT每個z軸,前面數據減去多少。原因是開頭的laser弱,干涉訊號不明顯,拿掉的資料會比較美。 (東元那邊的變數是 cuteValue XD) // 5. 這邊如果是 2 Channel 的話,大小為 2048 x 250 x 2 x 2 x 2 // (深度) x (快軸) x (慢軸(反掃)) x Channel x 2個 Byte 為一組 ////////////////////////////////////////////////////////////////////////// #pragma region 1. 上傳 GPU Data // 初始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME clock_t time = clock(); #endif // GPU Data char* GPU_FileRawData; // => 從檔案讀進來的 Raw Data int *GPU_OCTRawData_2Channel; // => 這個是 OCT 掃完全部的 Raw Data (2Channels,如果"只用到一個" Channel 那就不會用到這個陣列) int *GPU_OCTRawData; // => 這個是實際 Denoise 的 Data (也就是 CH1 + CH2 的資料) (如果"只有一個" Channel,就只會用到這個陣列) float *GPU_OCTFloatData; // => 這個會用在兩個地方,一個是 K Space 的資料,一個是 FFT 後的資料 // 注意!! 因為只拿一組,不需要 兩個慢軸的資訊 (也就是反掃的資訊),所以除以 2 DataSize /= 2; // 是否是 2 Channels bool UseTwoChannels = (DataSize / SizeX / SizeZ == 4); // 2 Byte & 2 Channles // 原始資料 cudaMalloc(&GPU_FileRawData, sizeof(char) * DataSize); // 這邊要分兩個 Copy (略過反掃資料) cudaMemcpy(GPU_FileRawData, FileRawData, sizeof(char) * DataSize / 2, cudaMemcpyHostToDevice); cudaMemcpy(GPU_FileRawData + DataSize / 2, FileRawData + DataSize, sizeof(char) * DataSize / 2, cudaMemcpyHostToDevice); CheckCudaError(); // 判對是否使用 2 Chanels int OCTDataSize = SizeX * SizeZ; if (UseTwoChannels) cudaMalloc(&GPU_OCTRawData_2Channel, sizeof(int) * OCTDataSize * 2); cudaMalloc(&GPU_OCTRawData, sizeof(int) * OCTDataSize); cudaMalloc(&GPU_OCTFloatData, sizeof(float) * OCTDataSize); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "1. 上傳至 GPU: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 2. 讀檔轉換 ////////////////////////////////////////////////////////////////////////// // 這邊的資料格式是這樣 // ↗↘↗↘ 是一組 (↗代表掃描 0 ~ 250的一次資料) // 其中一個 ↗↘ 是一個三角波的資料 // 但因為有兩個 channel 所以一組資料是 ↗↘↗↘ ////////////////////////////////////////////////////////////////////////// // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 解出 2 Byte 的資料 if (UseTwoChannels) { RawDataToOriginalData << < dim3(SizeX, 1, SizeZ / NumThreads * 2), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData_2Channel, DataSize / 2); CheckCudaError(); // 兩個 Channel 作 Denoise CombineTwoChannels_Single << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData_2Channel, GPU_OCTRawData, SizeX, 1, SizeZ); // 刪除 cudaFree(GPU_OCTRawData_2Channel); } else RawDataToOriginalData << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData, DataSize / 2); CheckCudaError(); // 刪除 FileRaw Data cudaFree(GPU_FileRawData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "2. 讀檔轉換: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 3. 
用五次項去 Fitting // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 初始化 Matrix float* GPU_MatrixA; float* GPU_MatrixB; cudaMalloc(&GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1)); cudaMalloc(&GPU_MatrixB, sizeof(float) * (NumPolynomial + 1)); // 先算平均 int* FirstSizeZData = new int[SizeZ]; memset(FirstSizeZData, 0, sizeof(int) * SizeZ); cudaMemcpy(FirstSizeZData, GPU_OCTRawData, sizeof(int) * SizeZ, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); float average = accumulate(FirstSizeZData, FirstSizeZData + SizeZ, 0.0f) / SizeZ; delete[] FirstSizeZData; // 取得 Matrix GetMatrixA << <1, (NumPolynomial + 1) * (NumPolynomial + 1) >> > (GPU_OCTRawData, GPU_MatrixA, NumPolynomial, SizeZ); GetMatrixB << <1, NumPolynomial + 1 >> > (GPU_OCTRawData, GPU_MatrixB, average, NumPolynomial, SizeZ); CheckCudaError(); float* MatrixA = new float[(NumPolynomial + 1) *(NumPolynomial + 1)]; float* MatrixB = new float[(NumPolynomial + 1)]; cudaMemcpy(MatrixA, GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1), cudaMemcpyDeviceToHost); cudaMemcpy(MatrixB, GPU_MatrixB, sizeof(float) * (NumPolynomial + 1), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // 解 Eigen 找 Fitting Function EigenUtility eigen; eigen.SetAverageValue(average); eigen.SolveByEigen(MatrixA, MatrixB, NumPolynomial); // 扣除那個 Function float* GPU_PolyValue; float* PolyValue = eigen.GetFunctionArray(SizeZ, average); cudaMalloc(&GPU_PolyValue, sizeof(float) * SizeZ); cudaMemcpy(GPU_PolyValue, PolyValue, sizeof(float) * SizeZ, cudaMemcpyHostToDevice); MinusByFittingFunction << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_PolyValue, SizeZ); CheckCudaError(); // 刪除多出來的 cudaFree(GPU_MatrixA); cudaFree(GPU_MatrixB); cudaFree(GPU_PolyValue); delete[] MatrixA; delete[] MatrixB; delete[] PolyValue; // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "3. 多項式去 Fitting : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 4. λ Space 轉成 K Space // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 初始化 float* PX_Scale = new float[SizeZ]; int* KSpaceIndexArray = new int[SizeZ]; float* GPU_PXScale; int* GPU_KSpaceIndexArray; cudaMalloc(&GPU_PXScale, sizeof(float) * SizeZ); cudaMalloc(&GPU_KSpaceIndexArray, sizeof(int) * SizeZ); // 設定一些系數 int OffsetBegin = 800; // 算出 PXScale 的 Array ComputePXScale << <SizeZ / NumThreads, NumThreads >> > (GPU_PXScale, OffsetBegin, ShiftValue, K_Step, SizeZ); CheckCudaError(); // 抓下來準備算 K Space Index (由於這邊如果使用 GPU 去做,會導致大部分的 thread 在等最大工作量的 thread,所以這裡 CPU 做會比較快) cudaMemcpy(PX_Scale, GPU_PXScale, sizeof(float) * SizeZ, cudaMemcpyDeviceToHost); // 算 K Space 的對應 Array int index = 1; int KSpaceOffset = PX_Scale[SizeZ - 1]; for (int i = 0; i <= KSpaceOffset; i++) { while (i >= PX_Scale[index]) { index++; } KSpaceIndexArray[i] = index; } for (int i = KSpaceOffset + 1; i < SizeZ; i++) KSpaceIndexArray[i] = -1; // 由於 K Space 不是線性關係,所以要從 KSpaceIndexArray,找 Index,再從左右兩個點中,內插出實際在這個 Index 的值 cudaMemcpy(GPU_KSpaceIndexArray, KSpaceIndexArray, sizeof(int) * SizeZ, cudaMemcpyHostToDevice); FrequencyAdjust << <dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_OCTFloatData, GPU_PXScale, GPU_KSpaceIndexArray, KSpaceOffset - CutValue, SizeX, 1, SizeZ); CheckCudaError(); // 釋放記憶體 cudaFree(GPU_PXScale); cudaFree(GPU_KSpaceIndexArray); cudaFree(GPU_OCTRawData); delete[] KSpaceIndexArray; // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "4. 
λ Space 轉成 K Space : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 5. cuFFT // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif cufftHandle PlanHandle; cufftComplex* GPU_ComplexData; // 這邊是創建 FFT 的 Handle & C2C 的 cufftComplex int NX = SizeZ; int BatchSize = SizeX; cufftPlan1d(&PlanHandle, NX, CUFFT_C2C, BatchSize); cudaMalloc(&GPU_ComplexData, sizeof(cufftComplex) * NX * BatchSize); CheckCudaError(); // 把資料塞進 Complex Data 裡 //gpuDataToComplex << <512, 4 >> > (GPU_OCTFloatData, GPU_ComplexData, NX * BatchSize, 0); DataToComplexData << <dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTFloatData, GPU_ComplexData, OCTDataSize); CheckCudaError(); // 執行 cuFFT(CUDA™ Fast Fourier Transform) cufftExecC2C(PlanHandle, GPU_ComplexData, GPU_ComplexData, CUFFT_FORWARD); CheckCudaError(); // 刪除鏡向(FFT轉完之後會兩邊對稱) & 搬移資料 // 想知道更多:https://www.youtube.com/watch?v=spUNpyF58BY //gpuComplexToData << <512, 4 >> > (GPU_ComplexData, GPU_OCTFloatData, NX * BatchSize / 2, SizeZ, 0); ComplexDataToData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ComplexData, GPU_OCTFloatData, SizeX, 1, SizeZ, OCTDataSize); CheckCudaError(); // 刪除 cufftDestroy(PlanHandle); cudaFree(GPU_ComplexData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "5. cuFFT: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 7. Normalize Data // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 要算出原始整條的強度值 float *GPU_BrightnessArray; cudaMalloc(&GPU_BrightnessArray, sizeof(float) * SizeX); ZCalcBrightness << <1, SizeX >> > (GPU_OCTFloatData, GPU_BrightnessArray, 1, SizeX, SizeZ / 2, StartIndex); CheckCudaError(); // 算最大值 float MaxValue = 0; float *GPU_MaxElement = thrust::max_element(thrust::device, GPU_OCTFloatData, GPU_OCTFloatData + OCTDataSize / 2); cudaMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), cudaMemcpyDeviceToHost); CheckCudaError(); // 最小值 (拿一塊不會使用的 GPU 部分,來做 Normalize) // 拿一個正方形的區塊 // TL---x // |   | // |   | // x---BR float MinValue = 0; for (int i = MinValuePixel_TL; i <= MinValuePixel_BR; i++) { // [first, last) int beginIndex = i * SizeZ / 2 + i; int endIndex = i * SizeZ / 2 + MinValuePixel_BR + 1; MinValue += thrust::reduce(thrust::device, GPU_OCTFloatData + beginIndex, GPU_OCTFloatData + endIndex); } MinValue /= (MinValuePixel_BR - MinValuePixel_TL + 1) * (MinValuePixel_BR - MinValuePixel_TL + 1); MinValue *= MinValueScalar; // 因為 Normaliza Data 要做一件事情是 除 (Max - Min) ,要預防他除以 0 // 所以這邊先判斷兩個是不是位置一樣 (因為如果整個 array 值都一樣,Min & Max 給的位置都會一樣(以驗證過)) assert(MaxValue != MinValue && "FFT後最大最小值一樣,資料有錯誤!!"); NormalizeData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, MaxValue, MinValue, OCTDataSize / 2); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "7. Normalize Data: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 8. 
轉成圖 // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 圖片的資料 uchar *GPU_UintDataArray; float* GPU_OCTSmoothData; cudaMalloc(&GPU_UintDataArray, sizeof(uchar) * SizeX * 1 * SizeZ); cudaMalloc(&GPU_OCTSmoothData, sizeof(float) * SizeX * 1 * SizeZ); CheckCudaError(); // 轉圖片 TransformToImageAndBorderData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, GPU_OCTSmoothData, GPU_UintDataArray, SizeX, 1, SizeZ / 2, SmoothSizeRange); CheckCudaError(); // 設定一下其他參數 size = 1; rows = SizeX; cols = SizeZ / 2; // 刪除記憶體 cudaFree(GPU_OCTFloatData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "8. 轉成圖: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 9. 邊界判斷 // 目前邊界判斷沒有寫 // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif #pragma region Init SaveDelete(PointType); PointType = new uchar[size * rows * cols]; memset(PointType, 0, sizeof(uchar) * size * rows * cols); SaveDelete(PointType_1D); PointType_1D = new int[size * rows]; memset(PointType_1D, 0, sizeof(int) * size * rows); // 點的型別 uchar* GPU_PointType; cudaMalloc(&GPU_PointType, sizeof(uchar) * size * rows * cols); cudaMemset(GPU_PointType, 0, sizeof(uchar) * size * rows * cols); #pragma endregion #pragma region 抓取邊界 assert(rows <= NumThreads && "rows 要小於 1024 的限制"); // 找最大最小值 & 刪除過飽合的部分 findMaxAndMinPeak << < size * rows * cols / NumThreads, NumThreads >> > (GPU_OCTSmoothData, GPU_BrightnessArray, GPU_PointType, size, rows, cols, MaxPeakThreshold, SatPeakThreshold); CheckCudaError(); // Parse 一些連續最小值 ParseMaxMinPeak << < size, rows >> > (GPU_PointType, size, rows, cols, StartIndex); CheckCudaError(); // 抓出一維陣列 int *GPU_PointType_BestN, *PointType_BestN; cudaMalloc(&GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN); //PickBestChoiceToArray << < size, rows >> > (GPU_OCTSmoothData, GPU_PointType, GPU_PointType_BestN, size, rows, cols, ChooseBestN, StartIndex, GoThroughThreshold); //CheckCudaError(); // 連結點 // 這個的大小 為 => 張數 * 250(rows) * 取幾個最大值(ChooseBestN個) * 每個最大值底下有 半徑個 (Raidus) * 的下 N 排的幾個最大值(ChooseBestN) int *GPU_Connect_Status; int ConnectStateSize = size * rows * ChooseBestN * ConnectRadius * ChooseBestN; cudaMalloc(&GPU_Connect_Status, sizeof(int) * ConnectStateSize); cudaMemset(GPU_Connect_Status, 0, sizeof(int) * ConnectStateSize); ConnectPointsStatus << < size * ChooseBestN , rows >> > (GPU_PointType_BestN, GPU_Connect_Status, size, rows, ChooseBestN, ConnectRadius); CheckCudaError(); // 把資料傳回 CPU int *Connect_Status = new int[ConnectStateSize]; PointType_BestN = new int[size * rows * ChooseBestN]; cudaMemcpy(PointType, GPU_PointType, sizeof(uchar) * size * rows * cols, cudaMemcpyDeviceToHost); cudaMemcpy(Connect_Status, GPU_Connect_Status, sizeof(int) * ConnectStateSize, cudaMemcpyDeviceToHost); cudaMemcpy(PointType_BestN, GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN, cudaMemcpyDeviceToHost); CheckCudaError(); // 抓取最大的線 GetSurface(PointType_BestN, Connect_Status); #pragma endregion // 刪除記憶體 cudaFree(GPU_PointType); cudaFree(GPU_PointType_BestN); cudaFree(GPU_Connect_Status); cudaFree(GPU_OCTSmoothData); cudaFree(GPU_BrightnessArray); delete[] Connect_Status; delete[] PointType_BestN; #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "9. 抓取邊界: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 10. 
抓下 GPU Data // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 刪除之前的資料 SaveDelete(VolumeData); VolumeData = new uchar[SizeX * 1 * SizeZ]; cudaMemcpy(VolumeData, GPU_UintDataArray, sizeof(uchar) * SizeX * 1 * SizeZ / 2, cudaMemcpyDeviceToHost); // 刪除 GPU cudaFree(GPU_UintDataArray); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "10. 抓下 GPU Data : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion // 結算 #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock() - totalTime; cout << "轉換單張點雲: " << ((float)totalTime) / CLOCKS_PER_SEC << " sec" << endl; #endif } void TRCudaV2::MultiRawDataToPointCloud(char* FileRawData, int DataSize, int SizeX, int SizeY, int SizeZ, long ShiftValue, double K_Step, int CutValue) { // 計算時間 #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock(); #endif ////////////////////////////////////////////////////////////////////////// // 步驟說明 // 1. 上傳 GPU Data // 2. 一開始要把資料讀進來 (由於原本的資料都是 2個 Bytes 為一組,但 QT 目前是先用 GPU 轉換到 2個 Bytes),和 // 由於資料有 兩個 Channels,要相加除以2,可以去除雜訊 (由於原本的能量強度資料是使用三角波,所以會有去跟回兩個資料,就是把這兩筆資料相加除以 2) // 3. 用 5 次項去 Fit 一條曲線 // 4. λ Space 轉成 K Space // 5. cuFFT // 6. 位移 Data // 6.5 要找出TopView (這邊有多一個要找出TopView ) // 7. 根據最大最小值來 Normalize 資料 // 8. 轉成圖 // 9. 邊界判斷 // 10. 抓下 GPU Data // // 細節說明: // 1. 轉換 Function => X 快軸、Y 慢軸 // 2. ShiftValue => TRIGGER DELAY位移(換FIBER,電線校正回來用的) // 3. K_Step => 深度(14.多mm對應 2.5的k step;可以考慮之後用2)(k step越大,z軸越深,但資料精細度越差;1~2.5) // 4. CutValue => OCT每個z軸,前面數據減去多少。原因是開頭的laser弱,干涉訊號不明顯,拿掉的資料會比較美。 (東元那邊的變數是 cuteValue XD) // 5. 只是這邊比上方的 Function 多了 SizeY 個 // 6. 有多一個 找出TopView ////////////////////////////////////////////////////////////////////////// #pragma region 1. 上傳 GPU Data // 初始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME clock_t time = clock(); #endif // GPU Data char* GPU_FileRawData; // => 從檔案讀進來的 Raw Data int *GPU_OCTRawData_2Channel; // => 這個是 OCT 掃完全部的 Raw Data (2Channels,如果"只用到一個" Channel 那就不會用到這個陣列) int *GPU_OCTRawData; // => 這個是實際 Denoise 的 Data (也就是 CH1 + CH2 的資料) (如果"只有一個" Channel,就只會用到這個陣列) float *GPU_OCTFloatData; // => 這個會用在兩個地方,一個是 K Space 的資料,一個是 FFT 後的資料 // 是否是 2 Channels bool UseTwoChannels = (DataSize / SizeX / SizeY / SizeZ == 4); // 2 Byte & 2 Channles // 原始資料 cudaMalloc(&GPU_FileRawData, sizeof(char) * DataSize); cudaMemcpy(GPU_FileRawData, FileRawData, sizeof(char) * DataSize, cudaMemcpyHostToDevice); CheckCudaError(); // 判對是否使用 2 Chanels int OCTDataSize = SizeX * SizeY * SizeZ; if (UseTwoChannels) cudaMalloc(&GPU_OCTRawData_2Channel, sizeof(int) * OCTDataSize * 2); cudaMalloc(&GPU_OCTRawData, sizeof(int) * OCTDataSize); cudaMalloc(&GPU_OCTFloatData, sizeof(float) * OCTDataSize); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "1. 上傳至 GPU: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 2. 
讀檔轉換 ////////////////////////////////////////////////////////////////////////// // 這邊的資料格式是這樣 // ↗↘↗↘ 是一組 (↗代表掃描 0 ~ 250的一次資料) // 其中一個 ↗↘ 是一個三角波的資料 // 但因為有兩個 channel 所以一組資料是 ↗↘↗↘ ////////////////////////////////////////////////////////////////////////// // 初始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 解出 2 Byte 的資料 if (UseTwoChannels) { RawDataToOriginalData << < dim3(SizeX, SizeY, SizeZ / NumThreads * 2), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData_2Channel, DataSize / 2); CheckCudaError(); // 兩個 Channel 作 Denoise CombineTwoChannels_Multi << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData_2Channel, GPU_OCTRawData, SizeX, SizeY, SizeZ); // 刪除 cudaFree(GPU_OCTRawData_2Channel); } else RawDataToOriginalData << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData, DataSize / 2); CheckCudaError(); // 反掃的資料,Index 要反轉 ReverseBackScanData << < dim3(SizeX / 2, SizeY / 2, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, SizeX, SizeY, SizeZ); // 刪除 FileRaw Data cudaFree(GPU_FileRawData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "2. 讀檔轉換: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 3. 用五次項去 Fitting // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 初始化 Matrix float* GPU_MatrixA; float* GPU_MatrixB; cudaMalloc(&GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1)); cudaMalloc(&GPU_MatrixB, sizeof(float) * (NumPolynomial + 1)); // 先算平均 int* FirstSizeZData = new int[SizeZ]; cudaMemcpy(FirstSizeZData, GPU_OCTRawData, sizeof(int) * SizeZ, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); float average = accumulate(FirstSizeZData, FirstSizeZData + SizeZ, 0.0) / SizeZ; delete[] FirstSizeZData; // 取得 Matrix GetMatrixA << <1, (NumPolynomial + 1) * (NumPolynomial + 1) >> > (GPU_OCTRawData, GPU_MatrixA, NumPolynomial, SizeZ); GetMatrixB << <1, NumPolynomial + 1 >> > (GPU_OCTRawData, GPU_MatrixB, average, NumPolynomial, SizeZ); CheckCudaError(); float* MatrixA = new float[(NumPolynomial + 1) *(NumPolynomial + 1)]; float* MatrixB = new float[(NumPolynomial + 1)]; cudaMemcpy(MatrixA, GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1), cudaMemcpyDeviceToHost); cudaMemcpy(MatrixB, GPU_MatrixB, sizeof(float) * (NumPolynomial + 1), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // 解 Eigen 找 Fitting Function EigenUtility eigen; eigen.SetAverageValue(average); eigen.SolveByEigen(MatrixA, MatrixB, NumPolynomial); // 扣除那個 Function float* GPU_PolyValue; float* PolyValue = eigen.GetFunctionArray(SizeZ, average); cudaMalloc(&GPU_PolyValue, sizeof(float) * SizeZ); cudaMemcpy(GPU_PolyValue, PolyValue, sizeof(float) * SizeZ, cudaMemcpyHostToDevice); MinusByFittingFunction << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_PolyValue, SizeZ); CheckCudaError(); // 刪除多出來的 cudaFree(GPU_MatrixA); cudaFree(GPU_MatrixB); cudaFree(GPU_PolyValue); delete[] MatrixA; delete[] MatrixB; delete[] PolyValue; // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "3. 多項式去 Fitting : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 4. 
λ Space 轉成 K Space // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 初始化 float* PX_Scale = new float[SizeZ]; int* KSpaceIndexArray = new int[SizeZ]; float* GPU_PXScale; int* GPU_KSpaceIndexArray; cudaMalloc(&GPU_PXScale, sizeof(float) * SizeZ); cudaMalloc(&GPU_KSpaceIndexArray, sizeof(int) * SizeZ); // 設定一些系數 int OffsetBegin = 800; // 算出 PXScale 的 Array ComputePXScale << <SizeZ / NumThreads, NumThreads >> > (GPU_PXScale, OffsetBegin, ShiftValue, K_Step, SizeZ); CheckCudaError(); // 抓下來準備算 K Space Index (由於這邊如果使用 GPU 去做,會導致大部分的 thread 在等最大工作量的 thread,所以這裡 CPU 做會比較快) cudaMemcpy(PX_Scale, GPU_PXScale, sizeof(float) * SizeZ, cudaMemcpyDeviceToHost); // 算 K Space 的對應 Array int index = 1; int KSpaceOffset = PX_Scale[SizeZ - 1]; for (int i = 0; i <= KSpaceOffset; i++) { while (i >= PX_Scale[index]) { index++; } KSpaceIndexArray[i] = index; } for (int i = KSpaceOffset + 1; i < SizeZ; i++) KSpaceIndexArray[i] = -1; // 由於 K Space 不是線性關係,所以要從 KSpaceIndexArray,找 Index,再從左右兩個點中,內插出實際在這個 Index 的值 cudaMemcpy(GPU_KSpaceIndexArray, KSpaceIndexArray, sizeof(int) * SizeZ, cudaMemcpyHostToDevice); FrequencyAdjust << <dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_OCTFloatData, GPU_PXScale, GPU_KSpaceIndexArray, KSpaceOffset - CutValue, SizeX, SizeY, SizeZ); CheckCudaError(); // 釋放記憶體 cudaFree(GPU_PXScale); cudaFree(GPU_KSpaceIndexArray); cudaFree(GPU_OCTRawData); delete[] KSpaceIndexArray; // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "4. λ Space 轉成 K Space : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 5. cuFFT // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif cufftHandle PlanHandle; cufftComplex* GPU_ComplexData; // 這邊是創建 FFT 的 Handle & C2C 的 cufftComplex int NX = SizeZ; int BatchSize = SizeX * SizeY; cufftPlan1d(&PlanHandle, NX, CUFFT_C2C, BatchSize); cudaMalloc(&GPU_ComplexData, sizeof(cufftComplex) * NX * BatchSize); CheckCudaError(); // 把資料塞進 Complex Data 裡 DataToComplexData << <dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTFloatData, GPU_ComplexData, OCTDataSize); CheckCudaError(); // 執行 cuFFT(CUDA™ Fast Fourier Transform) cufftExecC2C(PlanHandle, GPU_ComplexData, GPU_ComplexData, CUFFT_FORWARD); CheckCudaError(); // 刪除鏡向(FFT轉完之後會兩邊對稱) & 搬移資料 // 想知道更多:https://www.youtube.com/watch?v=spUNpyF58BY ComplexDataToData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ComplexData, GPU_OCTFloatData, SizeX, SizeY, SizeZ, OCTDataSize); CheckCudaError(); // 刪除 cufftDestroy(PlanHandle); cudaFree(GPU_ComplexData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "5. cuFFT: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 6. 位移 Data // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif float* GPU_ShiftData; cudaMalloc(&GPU_ShiftData, sizeof(float) * OCTDataSize / 2); // 因為一半相同,所以去掉了 // 這邊也是 ShiftFinalData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, GPU_ShiftData, SizeX, SizeY, SizeZ / 2, OCTDataSize / 2); CheckCudaError(); // 刪除記憶體 cudaFree(GPU_OCTFloatData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "6. 
搬移資料: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 6.5 TopView // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 這邊會抓出TopView float* GPU_OtherSideData; cudaMalloc(&GPU_OtherSideData, sizeof(float) * OCTDataSize / 2); GetOtherSideView << <SizeX, SizeY >> > (GPU_ShiftData, GPU_OtherSideData, SizeX, SizeY, SizeZ / 2); CheckCudaError(); cudaDeviceSynchronize(); // 找最大值 float MaxValue = 0, MinValue = 0; float *GPU_MaxElement = thrust::max_element(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY); float *GPU_MinElement = thrust::min_element(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY); cudaMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&MinValue, GPU_MinElement, sizeof(float), cudaMemcpyDeviceToHost); NormalizeData << < SizeX, SizeY >> > (GPU_OtherSideData, MaxValue, MinValue, SizeX * SizeY); CheckCudaError(); // 將 Top View 的圖,部會因為亮度而受影響 float MeanValue = thrust::reduce(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY) / SizeX / SizeY; uchar* GPU_UintOtherSideData; cudaMalloc(&GPU_UintOtherSideData, sizeof(uchar) * SizeX * SizeY); TransformOtherSideDataToImage << <SizeX, SizeY >> > (GPU_OtherSideData, GPU_UintOtherSideData, MeanValue, OtherSideMean, SizeX, SizeY); CheckCudaError(); // 刪除記憶體 cudaFree(GPU_OtherSideData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "6.5. TopView 產生: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 7. Normalize Data // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 算最大值 MaxValue = 0; GPU_MaxElement = thrust::max_element(thrust::device, GPU_ShiftData, GPU_ShiftData + OCTDataSize / 2); cudaMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), cudaMemcpyDeviceToHost); CheckCudaError(); cudaDeviceSynchronize(); // 最小值 (拿一塊不會使用的 GPU 部分,來做 Normalize) // 拿一個正方形的區塊 // TL---x // |   | // |   | // x---BR MinValue = 0; for (int i = MinValuePixel_TL; i <= MinValuePixel_BR; i++) { // [first, last) int beginIndex = SizeX * SizeZ / 2 + i * SizeZ / 2 + i; int endIndex = SizeX * SizeZ / 2 + i * SizeZ / 2 + MinValuePixel_BR + 1; MinValue += thrust::reduce(thrust::device, GPU_ShiftData + beginIndex, GPU_ShiftData + endIndex); } MinValue /= (MinValuePixel_BR - MinValuePixel_TL + 1) * (MinValuePixel_BR - MinValuePixel_TL + 1); MinValue *= MinValueScalar; // 因為 Normaliza Data 要做一件事情是 除 (Max - Min) ,要預防他除以 0 // 所以這邊先判斷兩個是不是位置一樣 (因為如果整個 array 值都一樣,Min & Max 給的位置都會一樣(以驗證過)) assert(MaxValue != MinValue && "FFT後最大最小值一樣,資料有錯誤!!"); NormalizeData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ShiftData, MaxValue, MinValue, OCTDataSize / 2); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "7. Normalize Data: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 8. 
轉成圖 // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 圖片的資料 uchar *GPU_UintDataArray; float* GPU_OCTSmoothData; cudaMalloc(&GPU_UintDataArray, sizeof(uchar) * SizeX * SizeY * SizeZ); cudaMalloc(&GPU_OCTSmoothData, sizeof(float) * SizeX * SizeY * SizeZ); CheckCudaError(); // 轉圖片 TransformToImageAndBorderData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ShiftData, GPU_OCTSmoothData, GPU_UintDataArray, SizeX, SizeY, SizeZ / 2, SmoothSizeRange); CheckCudaError(); // 設定一下其他參數 size = SizeY; rows = SizeX; cols = SizeZ / 2; // 刪除記憶體 cudaFree(GPU_ShiftData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "8. 轉成圖: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 9. 抓取邊界 // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif #pragma region Init SaveDelete(PointType); PointType = new uchar[size * rows * cols]; memset(PointType, 0, sizeof(uchar) * size * rows * cols); SaveDelete(PointType_1D); PointType_1D = new int[size * rows]; memset(PointType_1D, 0, sizeof(int) * size * rows); // 點的型別 uchar* GPU_PointType; cudaMalloc(&GPU_PointType, sizeof(uchar) * size * rows * cols); cudaMemset(GPU_PointType, 0, sizeof(uchar) * size * rows * cols); #pragma endregion #pragma region 抓取邊界 assert(rows <= NumThreads && "rows 要小於 1024 的限制"); // 要算出原始整條的強度值 float *GPU_BrightnessArray; cudaMalloc(&GPU_BrightnessArray, sizeof(float) * size * rows); ZCalcBrightness << <size, rows >> > (GPU_OCTSmoothData, GPU_BrightnessArray, size, rows, cols, StartIndex); CheckCudaError(); // 找最大最小值 & 刪除過飽合的部分 findMaxAndMinPeak << < size * rows * cols / NumThreads, NumThreads >> > (GPU_OCTSmoothData, GPU_BrightnessArray, GPU_PointType, size, rows, cols, MaxPeakThreshold, SatPeakThreshold); CheckCudaError(); // Parse 一些連續最小值 ParseMaxMinPeak << < size, rows >> > (GPU_PointType, size, rows, cols, StartIndex); CheckCudaError(); // 抓出一維陣列 int *GPU_PointType_BestN, *PointType_BestN; float* GPU_CandidateGap; cudaMalloc(&GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN); cudaMalloc(&GPU_CandidateGap, sizeof(float) * size * rows * cols); // 暫時用來存 Gap 的記憶體 cudaMemset(GPU_CandidateGap, 0, sizeof(float) * size * rows * cols); cudaMemset(GPU_PointType_BestN, 0, sizeof(int) * size * rows * ChooseBestN); PickBestChoiceToArray << < size, rows >> > (GPU_OCTSmoothData, GPU_PointType, GPU_CandidateGap, GPU_PointType_BestN, size, rows, cols, ChooseBestN, StartIndex, GoThroughThreshold); CheckCudaError(); // 算出 Neighbor 數目的陣列 float* GPU_NeighborCountArray; cudaMalloc(&GPU_NeighborCountArray, sizeof(float) * size * rows * ChooseBestN); CalcNeighbor << <size, rows >> > (GPU_PointType_BestN, GPU_NeighborCountArray, size, rows, cols, ChooseBestN, DenoiseWindowsRadius); CheckCudaError(); // 連結點 // 這個的大小 為 => 張數 * 250(rows) * 取幾個最大值(ChooseBestN個) * 每個最大值底下有 半徑個 (Raidus) * 的下 N 排的幾個最大值(ChooseBestN) int *GPU_Connect_Status; int ConnectStateSize = size * rows * ChooseBestN * ConnectRadius * ChooseBestN; cudaMalloc(&GPU_Connect_Status, sizeof(int) * ConnectStateSize); cudaMemset(GPU_Connect_Status, 0, sizeof(int) * ConnectStateSize); ConnectPointsStatus << < size * ChooseBestN, rows >> > (GPU_PointType_BestN, GPU_Connect_Status, size, rows, ChooseBestN, ConnectRadius); CheckCudaError(); // 把資料傳回 CPU int *Connect_Status = new int[ConnectStateSize]; PointType_BestN = new int[size * rows * ChooseBestN]; cudaMemcpy(PointType, GPU_PointType, sizeof(uchar) * size * rows * cols, cudaMemcpyDeviceToHost); cudaMemcpy(Connect_Status, GPU_Connect_Status, 
sizeof(int) * ConnectStateSize, cudaMemcpyDeviceToHost); cudaMemcpy(PointType_BestN, GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN, cudaMemcpyDeviceToHost); CheckCudaError(); // 抓取最大的線 GetSurface(PointType_BestN, Connect_Status); #pragma endregion // 刪除記憶體 cudaFree(GPU_PointType); cudaFree(GPU_PointType_BestN); cudaFree(GPU_Connect_Status); cudaFree(GPU_OCTSmoothData); cudaFree(GPU_BrightnessArray); cudaFree(GPU_CandidateGap); cudaFree(GPU_NeighborCountArray); delete[] Connect_Status; delete[] PointType_BestN; // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "9. 抓取邊界: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 10. 抓下 GPU Data // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 刪除之前的資料 SaveDelete(VolumeData); VolumeData = new uchar[SizeX * SizeY * SizeZ / 2]; cudaMemcpy(VolumeData, GPU_UintDataArray, sizeof(uchar) * SizeX * SizeY * SizeZ / 2, cudaMemcpyDeviceToHost); SaveDelete(VolumeData_OtherSide); VolumeData_OtherSide = new uchar[SizeX * SizeY]; cudaMemcpy(VolumeData_OtherSide, GPU_UintOtherSideData, sizeof(uchar) * SizeX * SizeY, cudaMemcpyDeviceToHost); // 刪除 GPU cudaFree(GPU_UintDataArray); cudaFree(GPU_UintOtherSideData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "10. 抓下 GPU Data : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion // 結算 #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock() - totalTime; cout << "轉換多張點雲: " << ((float)totalTime) / CLOCKS_PER_SEC << " sec" << endl; #endif } // 拿出圖片 vector<Mat> TRCudaV2::TransfromMatArray(bool SaveBorder = false) { // 轉換到 Mat vector<Mat> ImgArray; for (int i = 0; i < size; i++) { // 根據 Offset 拿圖片 Mat img(rows, cols, CV_8U, VolumeData + i * rows * cols); cvtColor(img, img, CV_GRAY2BGR); // 丟進堆疊 ImgArray.push_back(img); } if (SaveBorder) { // Debug 所有的 peak /*for (int i = 0; i < size; i++) for (int j = 0; j < rows * cols; j++) { int offsetIndex = i * rows * cols; int rowIndex = j / cols; int colIndex = j % cols; Vec3b color(0, 0, 0); if (PointType[offsetIndex + j] == 1) color = Vec3b(0, 255, 255); else if (PointType[offsetIndex + j] == 2) color = Vec3b(255, 255, 255); ImgArray[i].at<Vec3b>(rowIndex, colIndex) = color; }*/ // 只抓出最後的邊界 for (int i = 0; i < size; i++) for (int j = 0; j < rows; j++) { int index = i * rows + j; if (PointType_1D[index] != -1) { Point contourPoint(PointType_1D[index], j); circle(ImgArray[i], contourPoint, 2, Scalar(0, 255, 255), CV_FILLED); } } } return ImgArray; } Mat TRCudaV2::TransformToOtherSideView() { assert(size > 1 && "這段一定要大於一張圖"); Mat img(rows, size, CV_8U, VolumeData_OtherSide); cvtColor(img, img, CV_GRAY2BGR); return img; } void TRCudaV2::CopySingleBorder(int* LastArray) { assert(LastArray != NULL && PointType_1D != NULL && size == 1 && "要先初始化 Array 和要做轉點雲的部分!!"); // assert 抓出 call 錯的可能性 (這邊只能單張) memcpy(LastArray, PointType_1D, sizeof (int) * size * rows); } void TRCudaV2::CopyBorder(int* BorderArray) { assert(BorderArray != NULL && PointType_1D != NULL && size != 1 && "要先初始化 Array 和要做轉點雲的部分!!"); // assert 抓出 call 錯的可能性 (這邊要多張) memcpy(BorderArray, PointType_1D, sizeof(int) * size * rows); } bool TRCudaV2::ShakeDetect_Single(int* LastArray, bool ShowDebugMessage) { // 設定變數 int voteNum = 0; // 有效票數 float MoveDis = 0; // 移動的總共距離 // 跑每一個點 for (int i = 0; i < rows; i++) { if (PointType_1D[i] != -1 && LastArray[i] != -1) { MoveDis += abs(PointType_1D[i] - LastArray[i]); voteNum++; } } // 判斷是否有有效資料 if (voteNum > OCT_Valid_VoteNum) { MoveDis /= voteNum; // 
Debug Message if(ShowDebugMessage) cout << "晃動距離(pixel): " << (float)MoveDis << endl; // 這邊是代表沒有晃動 if (MoveDis < OCT_Move_Threshold) return false; } return true; } bool TRCudaV2::ShakeDetect_Multi(bool UsePreiseThreshold, bool ShowDebugMessage) { // 找 60 ~ 200 裡面有效的有沒有斷層 int voteNum = 0; // 有效票數 float MoveDis = 0; // 移動的總共距離 // Reverse 後的 0 ~ 250 for (int i = 60; i < 200; i++) { bool IsMove = false; // 這邊先預設給這個值,後面會換掉 int leftIndex = 124 * rows + i; // 第 124 張 int rightIndex = 125 * rows + i; // 第 125 張 // 從中間往前找 for (int j = size / 2 - 1; j >= 0; j--) if (PointType_1D[j * rows + i] != -1) { leftIndex = j * rows + i; break; } // 從中間像後找 for (int j = size / 2; j < size; j++) if (PointType_1D[j] != -1) { rightIndex = j * rows + i; break; } int leftY = PointType_1D[leftIndex]; int rightY = PointType_1D[rightIndex]; // 確認有效票數 if (PointType_1D[leftIndex] != -1 && PointType_1D[rightIndex] != -1) { int DisMid = abs(PointType_1D[rightIndex] - PointType_1D[leftIndex]); MoveDis += DisMid; voteNum++; } } // 判斷是否有有效資料 if (voteNum > OCT_Valid_VoteNum) { MoveDis /= voteNum; // Debug Message if (ShowDebugMessage) cout << "晃動距離(pixel): " << (float)MoveDis << endl; // 這邊是代表沒有晃動 if (UsePreiseThreshold) { // 用較輕確的結果 if (MoveDis < OCT_Move_Precise_Threshold) return false; } else { // 用較不精確的結果 if (MoveDis < OCT_Move_Threshold) return false; } } else if (ShowDebugMessage) cout << "資料量太少!!" << endl; return true; } ////////////////////////////////////////////////////////////////////////// // Helper Function ////////////////////////////////////////////////////////////////////////// void TRCudaV2::GetSurface(int *PointType_BestN, int *Connect_Status) { // 選 N 個 #pragma omp parallel for //num_thread(4) for (int i = 0; i < size; i++) { // 每個 10 段下去 Sample int RowGap = rows / 10; vector<vector<ConnectInfo>> StatusVector; for (int j = 0; j < rows; j += RowGap) for (int chooseNIndex = 0; chooseNIndex < ChooseBestN; chooseNIndex++) { int begin = j; int end = j; // 代表這個點沒有東西,所以略過 if (PointType_BestN[i * rows * ChooseBestN + j * ChooseBestN + chooseNIndex] == -1) continue; // 連接狀況 vector<ConnectInfo> Connect; #pragma region 往上找 // 先加上自己 ConnectInfo info; info.rowIndex = j; info.chooseIndex = chooseNIndex; Connect.push_back(info); int FindIndex = j; int FindChooseIndex = chooseNIndex; bool IsFind = true; while (IsFind && FindIndex > 0) { int minMoveIndex = -1; int minChooseIndex = -1; int tempValue = ConnectRadius * ConnectRadius; for (int k = 1; k < ConnectRadius; k++) for (int nextChooseNIndex = 0; nextChooseNIndex < ChooseBestN; nextChooseNIndex++) { int index = i * rows * ChooseBestN * ConnectRadius * ChooseBestN + // Size (FindIndex - k) * ChooseBestN * ConnectRadius * ChooseBestN + // Rows nextChooseNIndex * ConnectRadius * ChooseBestN + // 現在在的 Top N 的點 (這邊要注意,這邊應該要放的是 要找的那個點的 ChooseIndex) k * ChooseBestN + // 半徑 FindChooseIndex; if (FindIndex - k >= 0 && Connect_Status[index] != 0 && tempValue > Connect_Status[index]) { tempValue = Connect_Status[index]; minMoveIndex = k; minChooseIndex = nextChooseNIndex; } } // 判斷是否有找到,找到就繼續找 if (minMoveIndex != -1) { // 更便位置 FindIndex = FindIndex - minMoveIndex; FindChooseIndex = minChooseIndex; // 丟進陣列 info.rowIndex = FindIndex; info.chooseIndex = minChooseIndex; Connect.push_back(info); // 有找到 IsFind = true; } else IsFind = false; } #pragma endregion #pragma region 往下找 FindIndex = j; FindChooseIndex = chooseNIndex; while (IsFind && FindIndex < rows - 1) { int minMoveIndex = -1; int minChooseIndex = -1; int tempValue = ConnectRadius * ConnectRadius; for (int k = 1; k < 
ConnectRadius; k++) for (int nextChooseNIndex = 0; nextChooseNIndex < ChooseBestN; nextChooseNIndex++) { int index = i * rows * ChooseBestN * ConnectRadius * ChooseBestN + // Size FindIndex * ChooseBestN * ConnectRadius * ChooseBestN + // Rows FindChooseIndex * ConnectRadius * ChooseBestN + // 現在在的 Top N 的點 k * ChooseBestN + // 半徑 nextChooseNIndex; if (FindIndex + k < rows && Connect_Status[index] != 0 && tempValue > Connect_Status[index]) { tempValue = Connect_Status[index]; minMoveIndex = k; minChooseIndex = nextChooseNIndex; } } // 判斷是否有找到,找到就繼續找 if (minMoveIndex != -1) { // 更便位置 FindIndex = FindIndex + minMoveIndex; FindChooseIndex = minChooseIndex; // 丟進陣列 info.rowIndex = FindIndex; info.chooseIndex = minChooseIndex; Connect.push_back(info); // 有找到 IsFind = true; } else IsFind = false; } #pragma endregion // 判斷是否有連出東西,如果連出來的東西大於 1 if (Connect.size() > 1) { // 由小排到大 sort(Connect.begin(), Connect.end(), SortByRows); StatusVector.push_back(Connect); } } // 前面的幾個張數,可能會找不到點,所以跳過處理 if (StatusVector.size() == 0) { memset(&PointType_1D[i * rows], -1, sizeof(int) * rows); continue; } // 排序之後取最大 sort(StatusVector.begin(), StatusVector.end(), SortByVectorSize); // 超出不重疊的最好連接方法 (最多取前三個) vector<int> BestCandidate; int Begin = rows; int End = 0; for (int j = 0; j < StatusVector.size() && j < 3; j++) { int CurrentBegin = StatusVector[j][0].rowIndex; int CurrentEnd = StatusVector[j][StatusVector[j].size() - 1].rowIndex; if (Begin > CurrentBegin) { Begin = min(Begin, CurrentBegin); End = max(End, CurrentEnd); BestCandidate.push_back(j); } if (End < CurrentEnd) { Begin = min(Begin, CurrentBegin); End = max(End, CurrentEnd); BestCandidate.push_back(j); } } // 加到裡面 for (int j = 1; j < BestCandidate.size(); j++) if (StatusVector[BestCandidate[j]].size() >= 3) for (int k = 0; k < StatusVector[BestCandidate[j]].size(); k++) StatusVector[0].push_back(StatusVector[j][k]); vector<ConnectInfo> LineVector = StatusVector[0]; int index = 0; // LineVector Index for (int j = 0; j < rows; j++) { int Type1D_Index = i * rows + j; if (LineVector[index].rowIndex != j) PointType_1D[Type1D_Index] = -1; else if (LineVector[index].rowIndex == j) { int BestN_Index = i * rows * ChooseBestN + // 張 LineVector[index].rowIndex * ChooseBestN + // row LineVector[index].chooseIndex; // ChooseIndex // 放進 PointType PointType_1D[j + i * rows] = PointType_BestN[BestN_Index]; index++; if (index >= LineVector.size()) { for (int k = j + 1; k < rows; k++) PointType_1D[k + i * rows] = -1; break; } } } } // Smooth int* tempPointType_1D = new int[size * rows]; for (int i = 0; i < size; i++) for (int j = 0; j < rows; j ++) { int totalPoint = 0; int totalZ = 0; int index = i * rows + j; if (PointType_1D[index] == -1) { tempPointType_1D[index] = -1; continue; } for (int k = -DenoiseWindowsRadius; k <= DenoiseWindowsRadius; k++) for (int l = -DenoiseWindowsRadius; l <= DenoiseWindowsRadius; l++) { int currentI = i + k; int currentJ = j + l; if (0 <= currentI && currentI < size && 0 <= currentJ && currentJ < rows) { int currentIndex = currentI *rows + currentJ; if (PointType_1D[currentIndex] != -1) { totalPoint++; totalZ += PointType_1D[currentIndex]; } } } tempPointType_1D[index] = totalZ / totalPoint; } memcpy(PointType_1D, tempPointType_1D, sizeof(int) * size * rows); delete[] tempPointType_1D; } bool TRCudaV2::SortByRows(ConnectInfo left, ConnectInfo right) { return left.rowIndex < right.rowIndex; } bool TRCudaV2::SortByVectorSize(vector<ConnectInfo> left, vector<ConnectInfo> right) { return right.size() < left.size(); } void 
TRCudaV2::CheckCudaError() { cudaError GPU_Error = cudaGetLastError(); if (GPU_Error != cudaSuccess) { cout << cudaGetErrorString(GPU_Error) << endl; assert(false); exit(-1); } } void TRCudaV2::SaveDelete(void* pointer) { if (pointer != NULL) delete[] pointer; }
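// A minimal host-side sketch of the neighborhood-averaging smooth performed at the end of
// GetSurface above, assuming the same row-major i * rows + j layout and -1 as the
// "no surface point" marker. The function name SmoothSurface and the use of std::vector
// are illustrative only and are not part of the original TRCudaV2 class.
#include <vector>

static void SmoothSurface(std::vector<int>& PointType_1D, int size, int rows, int radius)
{
    std::vector<int> temp(PointType_1D);
    for (int i = 0; i < size; i++)
        for (int j = 0; j < rows; j++) {
            int index = i * rows + j;
            if (PointType_1D[index] == -1) { temp[index] = -1; continue; }   // no point here: keep the marker
            int totalPoint = 0, totalZ = 0;
            for (int k = -radius; k <= radius; k++)                          // average the valid neighbors
                for (int l = -radius; l <= radius; l++) {
                    int ci = i + k, cj = j + l;
                    if (0 <= ci && ci < size && 0 <= cj && cj < rows && PointType_1D[ci * rows + cj] != -1) {
                        totalPoint++;
                        totalZ += PointType_1D[ci * rows + cj];
                    }
                }
            temp[index] = totalZ / totalPoint;   // totalPoint >= 1 because the center point itself is valid
        }
    PointType_1D.swap(temp);
}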
dataAlgorithm2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define CONFIGURATION_COUNT 250000 struct Tick { long timestamp; double open; double high; double low; double close; double sma13; double ema50; double ema100; double ema200; double rsi; double stochK; double stochD; double prcUpper; double prcLower; }; struct Strategy { double profitLoss; void (*backtest)(Strategy*, Tick*); }; __device__ void backtest(Strategy *self, Tick *tick) { int i; int j = 0; // Pretend to do something. // TODO: Actually do something useful. for (i=0; i<50; i++) { j++; } } __global__ void initializeStrategies(Strategy *strategies) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < CONFIGURATION_COUNT) { strategies[i].profitLoss = 10000 + i; strategies[i].backtest = backtest; } } __global__ void backtestStrategies(Strategy *strategies, Tick *ticks) { // TODO: Harness multiple dimensions? int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < CONFIGURATION_COUNT) { strategies[i].backtest(&strategies[i], &ticks[i]); } } int main() { int threadsPerBlock = 1000; int blockCount = CONFIGURATION_COUNT / threadsPerBlock; Strategy strategies[CONFIGURATION_COUNT]; Strategy *devStrategies; int i = 0; int j = 0; int k = 0; int tickCount = 1000000; Tick *ticks = (Tick*) malloc(CONFIGURATION_COUNT * sizeof(Tick)); Tick *devTicks; int kFoldCount = 10; void (*backtester)(Strategy*, Tick*); backtester = &backtestStrategies; hipSetDevice(0); // Allocate memory on the GPU for the strategies. hipMalloc((void**)&devStrategies, CONFIGURATION_COUNT * sizeof(Strategy)); // Allocate memory on the GPU for the ticks. hipMalloc((void**)&devTicks, CONFIGURATION_COUNT * sizeof(Tick)); // Initialize strategies on the GPU. hipLaunchKernelGGL(( initializeStrategies), dim3(blockCount), dim3(threadsPerBlock), 0, 0, devStrategies); // Run through each k-fold step. for (i=0; i<kFoldCount; i++) { // Run through every tick. for (j=0; j<tickCount; j++) { printf("%i\n", j); // Set up data for every configuration. for (k=0; k<CONFIGURATION_COUNT; k++) { ticks[k].timestamp = 1460611103; ticks[k].open = 89.5; ticks[k].high = 89.5; ticks[k].low = 89.5; ticks[k].close = 89.5; ticks[k].sma13 = 89.5; ticks[k].ema50 = 89.5; ticks[k].ema100 = 89.5; ticks[k].ema200 = 89.5; ticks[k].rsi = 89.5; ticks[k].stochK = 89.5; ticks[k].stochD = 89.5; ticks[k].prcUpper = 89.5; ticks[k].prcLower = 89.5; } // Copy ticks to the GPU. hipMemcpy(devTicks, ticks, CONFIGURATION_COUNT * sizeof(Tick), hipMemcpyHostToDevice); // Run backtests for all strategy configurations. hipLaunchKernelGGL((*backtester), dim3(blockCount), dim3(threadsPerBlock), 0, 0, devStrategies, devTicks); // Wait for currently-running kernels to finish. hipDeviceSynchronize(); } } // Free memory for the tick data from the GPU. hipFree(devTicks); // Copy strategies from the GPU. hipMemcpy(strategies, devStrategies, CONFIGURATION_COUNT * sizeof(Strategy), hipMemcpyDeviceToHost); // Display results. for (i=0; i<CONFIGURATION_COUNT; i++) { printf("%f\n", strategies[i].profitLoss); } // Free memory for the strategies on the GPU. hipFree(devStrategies); return 0; }
dataAlgorithm2.cu
#include <stdio.h> #include <stdlib.h> #define CONFIGURATION_COUNT 250000 struct Tick { long timestamp; double open; double high; double low; double close; double sma13; double ema50; double ema100; double ema200; double rsi; double stochK; double stochD; double prcUpper; double prcLower; }; struct Strategy { double profitLoss; void (*backtest)(Strategy*, Tick*); }; __device__ void backtest(Strategy *self, Tick *tick) { int i; int j = 0; // Pretend to do something. // TODO: Actually do something useful. for (i=0; i<50; i++) { j++; } } __global__ void initializeStrategies(Strategy *strategies) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < CONFIGURATION_COUNT) { strategies[i].profitLoss = 10000 + i; strategies[i].backtest = backtest; } } __global__ void backtestStrategies(Strategy *strategies, Tick *ticks) { // TODO: Harness multiple dimensions? int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < CONFIGURATION_COUNT) { strategies[i].backtest(&strategies[i], &ticks[i]); } } int main() { int threadsPerBlock = 1000; int blockCount = CONFIGURATION_COUNT / threadsPerBlock; Strategy strategies[CONFIGURATION_COUNT]; Strategy *devStrategies; int i = 0; int j = 0; int k = 0; int tickCount = 1000000; Tick *ticks = (Tick*) malloc(CONFIGURATION_COUNT * sizeof(Tick)); Tick *devTicks; int kFoldCount = 10; void (*backtester)(Strategy*, Tick*); backtester = &backtestStrategies; cudaSetDevice(0); // Allocate memory on the GPU for the strategies. cudaMalloc((void**)&devStrategies, CONFIGURATION_COUNT * sizeof(Strategy)); // Allocate memory on the GPU for the ticks. cudaMalloc((void**)&devTicks, CONFIGURATION_COUNT * sizeof(Tick)); // Initialize strategies on the GPU. initializeStrategies<<<blockCount, threadsPerBlock>>>(devStrategies); // Run through each k-fold step. for (i=0; i<kFoldCount; i++) { // Run through every tick. for (j=0; j<tickCount; j++) { printf("%i\n", j); // Set up data for every configuration. for (k=0; k<CONFIGURATION_COUNT; k++) { ticks[k].timestamp = 1460611103; ticks[k].open = 89.5; ticks[k].high = 89.5; ticks[k].low = 89.5; ticks[k].close = 89.5; ticks[k].sma13 = 89.5; ticks[k].ema50 = 89.5; ticks[k].ema100 = 89.5; ticks[k].ema200 = 89.5; ticks[k].rsi = 89.5; ticks[k].stochK = 89.5; ticks[k].stochD = 89.5; ticks[k].prcUpper = 89.5; ticks[k].prcLower = 89.5; } // Copy ticks to the GPU. cudaMemcpy(devTicks, ticks, CONFIGURATION_COUNT * sizeof(Tick), cudaMemcpyHostToDevice); // Run backtests for all strategy configurations. (*backtester)<<<blockCount, threadsPerBlock>>>(devStrategies, devTicks); // Wait for currently-running kernels to finish. cudaDeviceSynchronize(); } } // Free memory for the tick data from the GPU. cudaFree(devTicks); // Copy strategies from the GPU. cudaMemcpy(strategies, devStrategies, CONFIGURATION_COUNT * sizeof(Strategy), cudaMemcpyDeviceToHost); // Display results. for (i=0; i<CONFIGURATION_COUNT; i++) { printf("%f\n", strategies[i].profitLoss); } // Free memory for the strategies on the GPU. cudaFree(devStrategies); return 0; }
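// Neither dataAlgorithm2 version above checks the result of its runtime API calls or kernel
// launches. A minimal sketch of a check that could follow each call/launch in the CUDA
// version; CHECK_CUDA is an illustrative helper name, not part of the original file.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                      \
    do {                                                                      \
        cudaError_t err = (call);                                             \
        if (err != cudaSuccess) {                                             \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
                    cudaGetErrorString(err), __FILE__, __LINE__);             \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// Example usage after a launch (arguments as in the original main):
//   (*backtester)<<<blockCount, threadsPerBlock>>>(devStrategies, devTicks);
//   CHECK_CUDA(cudaGetLastError());        // catches launch-configuration errors
//   CHECK_CUDA(cudaDeviceSynchronize());   // catches errors raised while the kernel ran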
5635dac12cf3a7110d877437d3f414a930fde01b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include <thrust/host_vector.h> #include <stdio.h> typedef unsigned int uint32; const size_t BITS_PER_BYTE = 8; const size_t BYTES_AMOUNT = sizeof(unsigned int); const size_t BITS_AMOUNT = BITS_PER_BYTE * BYTES_AMOUNT; const size_t THREADS_AMOUNT = 320; uint32* h_goldMem; uint32* h_inVals; uint32* h_tempMem; uint32* d_tempMem; __global__ void prescan(uint32* g_odata, uint32* g_idata, int bit){ extern __shared__ uint32 temp[];// allocated on invocation const uint32 thid = threadIdx.x; const uint32 doubleId = thid << 1; const uint32 n = blockDim.x * gridDim.x; uint32 offset = 1; temp[doubleId] = (g_idata[doubleId] >> bit) & 1; // load input into shared memory temp[doubleId + 1] = (g_idata[doubleId + 1] >> bit) & 1; for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree __syncthreads(); if (thid < d) { int ai = offset * (doubleId + 1) - 1; int bi = offset * (doubleId + 2) - 1; temp[bi] += temp[ai]; } offset <<= 1; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * (doubleId + 1) - 1; int bi = offset * (doubleId + 2) - 1; uint32 t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[doubleId] = temp[doubleId]; // write results to device memory g_odata[doubleId + 1] = temp[doubleId + 1]; } __global__ void partition_by_bit(uint32* const inVals, uint32* const inPos, uint32* scanResult, uint32 bit) { uint32 threadId = threadIdx.x + blockDim.x * blockIdx.x; uint32 size = blockDim.x * gridDim.x; uint32 x_i = inVals[threadId]; uint32 pos_i = inPos[threadId]; uint32 p_i = (x_i >> bit) & 1u; __syncthreads(); uint32 T_before = scanResult[threadId]; __syncthreads(); uint32 F_total = size - scanResult[size - 1]; __syncthreads(); uint32 newPos = 0; if (p_i > 0) newPos = T_before - 1 + F_total; else newPos = threadId - T_before; if (newPos >= size) printf("newPos is invalid: newPos=%u\n", newPos); inVals[newPos] = x_i; inPos[newPos] = pos_i; } void your_sort(uint32* const d_inputVals, uint32* const d_inputPos, uint32* const d_outputVals, uint32* const d_outputPos, const size_t numElems) { uint32 *d_tempMem; size_t elemsAmount = numElems; printf("Elements amount is %i\n", elemsAmount); h_inVals = (uint32*)malloc(elemsAmount * BYTES_AMOUNT); checkCudaErrors(hipMemcpy(h_inVals, d_inputVals, elemsAmount * BYTES_AMOUNT, hipMemcpyDeviceToHost)); checkCudaErrors(hipMalloc(&d_tempMem, elemsAmount * BYTES_AMOUNT)); h_tempMem = (uint32*)malloc(elemsAmount * BYTES_AMOUNT); h_goldMem = (uint32*)malloc(elemsAmount * BYTES_AMOUNT); memset(h_goldMem, 0, elemsAmount * BYTES_AMOUNT); const size_t blocksAmount = elemsAmount / THREADS_AMOUNT; for(uint32 bit = 0; bit < BITS_AMOUNT; bit++ ) { checkCudaErrors(hipMemset(d_tempMem, 0, elemsAmount * BYTES_AMOUNT)); printf("processing bit %i\n", bit); hipLaunchKernelGGL(( prescan), dim3(blocksAmount), dim3(THREADS_AMOUNT), blocksAmount * BITS_AMOUNT * BYTES_AMOUNT * 2, 0, d_tempMem, d_inputVals, bit); //plus_scan<<<blocksAmount, THREADS_AMOUNT, THREADS_AMOUNT * BYTES_AMOUNT>>>(d_tempMem, d_inputVals, bit); hipDeviceSynchronize(); hipMemcpy(h_tempMem, d_tempMem, elemsAmount * BYTES_AMOUNT, hipMemcpyDeviceToHost); hipDeviceSynchronize(); for (uint32 i = 1; i < elemsAmount; ++i) h_goldMem[i] = h_goldMem[i - 1] + ((h_inVals[i - 1] >> bit) & 1); /* printf("\n"); for(uint32 i = 0; i < elemsAmount; 
++i) printf("in[%i]=%i\t", i, ((h_inVals[i] >> bit) & 1)); printf("\n"); for(uint32 i = 0; i < elemsAmount; ++i) printf("out[%i]=%i\t", i, h_tempMem[i]); printf("\n"); for(uint32 i = 0; i < elemsAmount; ++i) printf("gold[%i]=%i\t", i, h_goldMem[i]); printf("\n"); for(uint32 i = 0; i < elemsAmount; ++i) if (h_goldMem[i] != h_tempMem[i]) printf("invalid value in %i\n", i); printf("\n"); printf("\n"); */ hipLaunchKernelGGL(( partition_by_bit), dim3(blocksAmount), dim3(THREADS_AMOUNT), 0, 0, d_inputVals, d_inputPos, d_tempMem, bit); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } hipMemcpy(d_outputVals, d_inputVals, elemsAmount * BYTES_AMOUNT, hipMemcpyDeviceToDevice); hipMemcpy(d_outputPos, d_inputPos, elemsAmount * BYTES_AMOUNT, hipMemcpyDeviceToDevice); free(h_inVals); free(h_goldMem); free(h_tempMem); hipFree(d_tempMem); }
5635dac12cf3a7110d877437d3f414a930fde01b.cu
#include "utils.h" #include <thrust/host_vector.h> #include <stdio.h> typedef unsigned int uint32; const size_t BITS_PER_BYTE = 8; const size_t BYTES_AMOUNT = sizeof(unsigned int); const size_t BITS_AMOUNT = BITS_PER_BYTE * BYTES_AMOUNT; const size_t THREADS_AMOUNT = 320; uint32* h_goldMem; uint32* h_inVals; uint32* h_tempMem; uint32* d_tempMem; __global__ void prescan(uint32* g_odata, uint32* g_idata, int bit){ extern __shared__ uint32 temp[];// allocated on invocation const uint32 thid = threadIdx.x; const uint32 doubleId = thid << 1; const uint32 n = blockDim.x * gridDim.x; uint32 offset = 1; temp[doubleId] = (g_idata[doubleId] >> bit) & 1; // load input into shared memory temp[doubleId + 1] = (g_idata[doubleId + 1] >> bit) & 1; for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree __syncthreads(); if (thid < d) { int ai = offset * (doubleId + 1) - 1; int bi = offset * (doubleId + 2) - 1; temp[bi] += temp[ai]; } offset <<= 1; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * (doubleId + 1) - 1; int bi = offset * (doubleId + 2) - 1; uint32 t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[doubleId] = temp[doubleId]; // write results to device memory g_odata[doubleId + 1] = temp[doubleId + 1]; } __global__ void partition_by_bit(uint32* const inVals, uint32* const inPos, uint32* scanResult, uint32 bit) { uint32 threadId = threadIdx.x + blockDim.x * blockIdx.x; uint32 size = blockDim.x * gridDim.x; uint32 x_i = inVals[threadId]; uint32 pos_i = inPos[threadId]; uint32 p_i = (x_i >> bit) & 1u; __syncthreads(); uint32 T_before = scanResult[threadId]; __syncthreads(); uint32 F_total = size - scanResult[size - 1]; __syncthreads(); uint32 newPos = 0; if (p_i > 0) newPos = T_before - 1 + F_total; else newPos = threadId - T_before; if (newPos >= size) printf("newPos is invalid: newPos=%u\n", newPos); inVals[newPos] = x_i; inPos[newPos] = pos_i; } void your_sort(uint32* const d_inputVals, uint32* const d_inputPos, uint32* const d_outputVals, uint32* const d_outputPos, const size_t numElems) { uint32 *d_tempMem; size_t elemsAmount = numElems; printf("Elements amount is %i\n", elemsAmount); h_inVals = (uint32*)malloc(elemsAmount * BYTES_AMOUNT); checkCudaErrors(cudaMemcpy(h_inVals, d_inputVals, elemsAmount * BYTES_AMOUNT, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMalloc(&d_tempMem, elemsAmount * BYTES_AMOUNT)); h_tempMem = (uint32*)malloc(elemsAmount * BYTES_AMOUNT); h_goldMem = (uint32*)malloc(elemsAmount * BYTES_AMOUNT); memset(h_goldMem, 0, elemsAmount * BYTES_AMOUNT); const size_t blocksAmount = elemsAmount / THREADS_AMOUNT; for(uint32 bit = 0; bit < BITS_AMOUNT; bit++ ) { checkCudaErrors(cudaMemset(d_tempMem, 0, elemsAmount * BYTES_AMOUNT)); printf("processing bit %i\n", bit); prescan<<<blocksAmount, THREADS_AMOUNT, blocksAmount * BITS_AMOUNT * BYTES_AMOUNT * 2>>>(d_tempMem, d_inputVals, bit); //plus_scan<<<blocksAmount, THREADS_AMOUNT, THREADS_AMOUNT * BYTES_AMOUNT>>>(d_tempMem, d_inputVals, bit); cudaDeviceSynchronize(); cudaMemcpy(h_tempMem, d_tempMem, elemsAmount * BYTES_AMOUNT, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for (uint32 i = 1; i < elemsAmount; ++i) h_goldMem[i] = h_goldMem[i - 1] + ((h_inVals[i - 1] >> bit) & 1); /* printf("\n"); for(uint32 i = 0; i < elemsAmount; ++i) printf("in[%i]=%i\t", i, ((h_inVals[i] >> bit) & 1)); printf("\n"); for(uint32 i = 0; i < elemsAmount; ++i) 
printf("out[%i]=%i\t", i, h_tempMem[i]); printf("\n"); for(uint32 i = 0; i < elemsAmount; ++i) printf("gold[%i]=%i\t", i, h_goldMem[i]); printf("\n"); for(uint32 i = 0; i < elemsAmount; ++i) if (h_goldMem[i] != h_tempMem[i]) printf("invalid value in %i\n", i); printf("\n"); printf("\n"); */ partition_by_bit<<<blocksAmount, THREADS_AMOUNT>>>(d_inputVals, d_inputPos, d_tempMem, bit); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } cudaMemcpy(d_outputVals, d_inputVals, elemsAmount * BYTES_AMOUNT, cudaMemcpyDeviceToDevice); cudaMemcpy(d_outputPos, d_inputPos, elemsAmount * BYTES_AMOUNT, cudaMemcpyDeviceToDevice); free(h_inVals); free(h_goldMem); free(h_tempMem); cudaFree(d_tempMem); }