Dataset columns (string length statistics per column):

  Column          Type    Min length  Max length
  hip_filename    string  5           84
  hip_content     string  79          9.69M
  cuda_filename   string  4           83
  cuda_content    string  19          9.69M
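Each row pairs a hipified translation unit (hip_filename / hip_content) with the CUDA source it was generated from (cuda_filename / cuda_content). As a minimal, purely illustrative sketch (the struct and its field comments are not part of the dataset), one row can be modeled in C++ as:

#include <string>

// Hypothetical in-memory view of one dataset row; the field names mirror the
// schema above, everything else is illustrative.
struct HipifyPair {
    std::string hip_filename;   // e.g. "<hash>.hip", 5 to 84 characters
    std::string hip_content;    // hipified source text, 79 B up to 9.69 MB
    std::string cuda_filename;  // e.g. "<hash>.cu", 4 to 83 characters
    std::string cuda_content;   // original CUDA source text, 19 B up to 9.69 MB
};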
57e17955cffa0ab653a4d1c6954729d564ad092e.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "bp_weight_fc.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},
                      {1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *d_weight = NULL;
            hipMalloc(&d_weight, XSIZE*YSIZE);
            float *d_preact = NULL;
            hipMalloc(&d_preact, XSIZE*YSIZE);
            float *p_output = NULL;
            hipMalloc(&p_output, XSIZE*YSIZE);
            const int size = 1;
            const int in_channel = 1;
            const int out_channel = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( bp_weight_fc), dim3(gridBlock), dim3(threadBlock), 0, 0,
                d_weight, d_preact, p_output, size, in_channel, out_channel);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( bp_weight_fc), dim3(gridBlock), dim3(threadBlock), 0, 0,
                    d_weight, d_preact, p_output, size, in_channel, out_channel);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( bp_weight_fc), dim3(gridBlock), dim3(threadBlock), 0, 0,
                    d_weight, d_preact, p_output, size, in_channel, out_channel);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ','
                 << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
57e17955cffa0ab653a4d1c6954729d564ad092e.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "bp_weight_fc.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},
                      {1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *d_weight = NULL;
            cudaMalloc(&d_weight, XSIZE*YSIZE);
            float *d_preact = NULL;
            cudaMalloc(&d_preact, XSIZE*YSIZE);
            float *p_output = NULL;
            cudaMalloc(&p_output, XSIZE*YSIZE);
            const int size = 1;
            const int in_channel = 1;
            const int out_channel = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            bp_weight_fc<<<gridBlock,threadBlock>>>(d_weight, d_preact, p_output, size, in_channel, out_channel);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                bp_weight_fc<<<gridBlock,threadBlock>>>(d_weight, d_preact, p_output, size, in_channel, out_channel);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                bp_weight_fc<<<gridBlock,threadBlock>>>(d_weight, d_preact, p_output, size, in_channel, out_channel);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ','
                 << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
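The pair above shows the core rewrites hipify applies to this benchmark harness: cuda.h and curand_kernel.h become hip/hip_runtime.h and hiprand/hiprand_kernel.h, runtime calls are renamed (cudaSetDevice, cudaMalloc, cudaDeviceSynchronize to their hip* counterparts), and the triple-chevron kernel launch is rewritten into the hipLaunchKernelGGL macro. A minimal, self-contained HIP sketch of that launch pattern (my_kernel and its buffer are placeholders, not dataset content):

#include <hip/hip_runtime.h>

__global__ void my_kernel(float *out) {
    out[blockIdx.x * blockDim.x + threadIdx.x] = 1.0f;
}

int main() {
    hipSetDevice(0);                               // was cudaSetDevice(0)

    float *d_out = nullptr;
    hipMalloc(&d_out, 256 * sizeof(float));        // was cudaMalloc(...)

    dim3 grid(1), block(256);
    // CUDA form: my_kernel<<<grid, block>>>(d_out);
    hipLaunchKernelGGL(my_kernel, grid, block, 0 /*shared mem*/, 0 /*stream*/, d_out);

    hipDeviceSynchronize();                        // was cudaDeviceSynchronize()
    hipFree(d_out);
    return 0;
}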
05fce86665dcb772566cd0abb47b6e44be568db1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <system/Environment.h> #include <loops/transform_any.h> #include <types/types.h> #include <system/op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformAnySimple( const void *x, const Nd4jLong *xShapeInfo, int xRank, void *params, void *z, const Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { functions::transform::TransformAny<X,Z>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer,tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformAny<X,Y>::executeTransformShaped( dim3 launchDims, hipStream_t *stream, const int opNum, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_ANY_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformAny<X,Z>::transformCuda( const void *vx, const Nd4jLong *xShapeInfo, void *vparams, void *vz, const Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto params = reinterpret_cast<X*>(vparams); auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer); __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') { for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (Nd4jLong i = tid; i < length; i+= 
totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); auto zOffset = shape::getIndexOffset(i, zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } }; template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformAny<X,Z>::intermediateShaped( dim3 launchDims, hipStream_t *stream, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { hipLaunchKernelGGL(( transformAnySimple<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); sd::DebugHelper::checkErrorCode(stream, "transformAny(...) failed"); } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformAny, , LIBND4J_TYPES, LIBND4J_TYPES); } }
05fce86665dcb772566cd0abb47b6e44be568db1.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <system/Environment.h> #include <loops/transform_any.h> #include <types/types.h> #include <system/op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformAnySimple( const void *x, const Nd4jLong *xShapeInfo, int xRank, void *params, void *z, const Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { functions::transform::TransformAny<X,Z>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer,tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformAny<X,Y>::executeTransformShaped( dim3 launchDims, cudaStream_t *stream, const int opNum, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_ANY_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformAny<X,Z>::transformCuda( const void *vx, const Nd4jLong *xShapeInfo, void *vparams, void *vz, const Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto params = reinterpret_cast<X*>(vparams); auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer); __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') { for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); auto zOffset = 
shape::getIndexOffset(i, zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } }; template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformAny<X,Z>::intermediateShaped( dim3 launchDims, cudaStream_t *stream, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { transformAnySimple<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); sd::DebugHelper::checkErrorCode(stream, "transformAny(...) failed"); } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformAny, , LIBND4J_TYPES, LIBND4J_TYPES); } }
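The transform_any pair above illustrates the same rewrite for a templated kernel launched with an explicit shared-memory size and stream: transformAnySimple<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>> becomes a hipLaunchKernelGGL call carrying the same four launch parameters. A minimal HIP sketch of that launch shape, assuming a placeholder kernel scale_by that is not part of the dataset:

#include <hip/hip_runtime.h>

template <typename T>
__global__ void scale_by(T *data, T factor) {
    data[blockIdx.x * blockDim.x + threadIdx.x] *= factor;
}

int main() {
    hipStream_t stream;                            // was cudaStream_t
    hipStreamCreate(&stream);

    float *d_data = nullptr;
    hipMalloc(&d_data, 1024 * sizeof(float));
    hipMemset(d_data, 0, 1024 * sizeof(float));

    dim3 grid(4), block(256);
    size_t shared_bytes = 0;

    // CUDA form: scale_by<float><<<grid, block, shared_bytes, stream>>>(d_data, 2.0f);
    hipLaunchKernelGGL(scale_by<float>, grid, block, shared_bytes, stream, d_data, 2.0f);

    hipStreamSynchronize(stream);
    hipFree(d_data);
    hipStreamDestroy(stream);
    return 0;
}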
4e3431fa2ef2db600f1d5574e9d051e2443efe22.hip
// !!! This is a file automatically generated by hipify!!! // // sunnet project // Copyright (C) 2018 by Contributors <https://github.com/Tyill/sunnet> // // This code is licensed under the MIT License. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files(the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and / or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions : // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // #include <hip/hip_runtime.h> #include <cudnn.h> #include "../stdafx.h" #include "snOperatorCUDA/src/Operator/deconvolution.h" using namespace std; using namespace SN_Base; struct gpuParams{ cudnnHandle_t cudnn = 0; cudnnConvolutionDescriptor_t conv_desc = 0; cudnnTensorDescriptor_t in_desc = 0; cudnnTensorDescriptor_t out_desc = 0; cudnnTensorDescriptor_t grin_desc = 0; cudnnTensorDescriptor_t grout_desc = 0; cudnnFilterDescriptor_t w_desc = 0; cudnnFilterDescriptor_t dw_desc = 0; cudnnTensorDescriptor_t bias_desc = 0; cudnnConvolutionFwdAlgo_t algoFwd; cudnnConvolutionBwdDataAlgo_t algoBwdData; cudnnConvolutionBwdFilterAlgo_t algoBwdW; size_t wsFwdSz = 0; size_t wsBwdDataSz = 0; size_t wsBwdWSz = 0; size_t inszMem = 0; void* d_wsFwd = 0; void* d_wsBwdData = 0; void* d_wsBwdW = 0; }; void Deconvolution::iniParamCUDA(bool isLern, const snSize& insz, const snSize& outsz, const deconvParams& prms, void** pGpuPrm){ bool isFirst = false; gpuParams* gpuPrm = (gpuParams*)*pGpuPrm; if (!gpuPrm){ hipDeviceProp_t cu_deviceProps; hipGetDeviceProperties(&cu_deviceProps, 0); if (cu_deviceProps.major < 3){ ERROR_MESS("%s requires SM >= 3.0"); return; } gpuPrm = new gpuParams(); memset(gpuPrm, 0, sizeof(gpuParams)); *pGpuPrm = gpuPrm; cudnnHandle_t cudnn = nullptr; cuCHECK(cudnnCreate(&cudnn)); gpuPrm->cudnn = cudnn; isFirst = true; } // input cudnnTensorDescriptor_t in_desc = nullptr; cuCHECK(cudnnCreateTensorDescriptor(&in_desc)); cuCHECK(cudnnSetTensor4dDescriptor(in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->in_desc)); gpuPrm->in_desc = in_desc; // w cudnnFilterDescriptor_t w_desc = nullptr; cuCHECK(cudnnCreateFilterDescriptor(&w_desc)); cuCHECK(cudnnSetFilter4dDescriptor(w_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, int(insz.d), int(outsz.d), int(prms.fHeight), int(prms.fWidth))); if (!isFirst) cuCHECK(cudnnDestroyFilterDescriptor((cudnnFilterDescriptor_t)gpuPrm->w_desc)); gpuPrm->w_desc = w_desc; // conv cudnnConvolutionDescriptor_t conv_desc = nullptr; cuCHECK(cudnnCreateConvolutionDescriptor(&conv_desc)); cuCHECK(cudnnSetConvolution2dDescriptor(conv_desc, 0, 0, int(prms.stride), int(prms.stride), 1, 1, 
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); if (!isFirst) cuCHECK(cudnnDestroyConvolutionDescriptor((cudnnConvolutionDescriptor_t)gpuPrm->conv_desc)); gpuPrm->conv_desc = conv_desc; // output cudnnTensorDescriptor_t out_desc; cuCHECK(cudnnCreateTensorDescriptor(&out_desc)); cuCHECK(cudnnSetTensor4dDescriptor(out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(outsz.n), int(outsz.d), int(outsz.h), int(outsz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->out_desc)); gpuPrm->out_desc = out_desc; // algorithm cudnnConvolutionBwdDataAlgo_t algoBwdData; cuCHECK(cudnnGetConvolutionBackwardDataAlgorithm(gpuPrm->cudnn, w_desc, in_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algoBwdData)); gpuPrm->algoBwdData = algoBwdData; // workspace size_t wsBwdDataSz = 0; cuCHECK(cudnnGetConvolutionBackwardDataWorkspaceSize(gpuPrm->cudnn, w_desc, in_desc, conv_desc, out_desc, algoBwdData, &wsBwdDataSz)); gpuPrm->wsBwdDataSz = wsBwdDataSz; size_t wsFwdSz = 0, wsBwdWSz = 0; if (isLern){ // grin cudnnTensorDescriptor_t grin_desc; cuCHECK(cudnnCreateTensorDescriptor(&grin_desc)); cuCHECK(cudnnSetTensor4dDescriptor(grin_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(outsz.n), int(outsz.d), int(outsz.h), int(outsz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->grin_desc)); gpuPrm->grin_desc = grin_desc; // grout cudnnTensorDescriptor_t grout_desc; cuCHECK(cudnnCreateTensorDescriptor(&grout_desc)); cuCHECK(cudnnSetTensor4dDescriptor(grout_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->grout_desc)); gpuPrm->grout_desc = grout_desc; // dw cudnnFilterDescriptor_t dw_desc = nullptr; cuCHECK(cudnnCreateFilterDescriptor(&dw_desc)); cuCHECK(cudnnSetFilter4dDescriptor(dw_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, int(insz.d), int(outsz.d), int(prms.fHeight), int(prms.fWidth))); if (!isFirst) cuCHECK(cudnnDestroyFilterDescriptor((cudnnFilterDescriptor_t)gpuPrm->dw_desc)); gpuPrm->dw_desc = dw_desc; // bias cudnnTensorDescriptor_t bias_desc; cuCHECK(cudnnCreateTensorDescriptor(&bias_desc)); cuCHECK(cudnnSetTensor4dDescriptor(bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, int(insz.d), 1, 1)); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->bias_desc)); gpuPrm->bias_desc = bias_desc; // algorithm cudnnConvolutionFwdAlgo_t algoFwd; cuCHECK(cudnnGetConvolutionForwardAlgorithm(gpuPrm->cudnn, grin_desc, w_desc, conv_desc, grout_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algoFwd)); gpuPrm->algoFwd = algoFwd; cudnnConvolutionBwdFilterAlgo_t algoBwdW; cuCHECK(cudnnGetConvolutionBackwardFilterAlgorithm(gpuPrm->cudnn, grin_desc, in_desc, conv_desc, dw_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoBwdW)); gpuPrm->algoBwdW = algoBwdW; // workspace cuCHECK(cudnnGetConvolutionForwardWorkspaceSize(gpuPrm->cudnn, grin_desc, w_desc, conv_desc, grout_desc, algoFwd, &wsFwdSz)); gpuPrm->wsFwdSz = wsFwdSz; cuCHECK(cudnnGetConvolutionBackwardFilterWorkspaceSize(gpuPrm->cudnn, grin_desc, in_desc, conv_desc, dw_desc, algoBwdW, &wsBwdWSz)); gpuPrm->wsBwdWSz = wsBwdWSz; } if (isFirst){ cuCHECK(hipMalloc(&gpuPrm->d_wsBwdData, wsBwdDataSz)); if (isLern){ cuCHECK(hipMalloc(&gpuPrm->d_wsFwd, wsFwdSz)); cuCHECK(hipMalloc(&gpuPrm->d_wsBwdW, wsBwdWSz)); } } else if (gpuPrm->inszMem < insz.size()){ cuCHECK(hipFree(gpuPrm->d_wsBwdData)); gpuPrm->d_wsBwdData = 0; 
cuCHECK(hipMalloc(&gpuPrm->d_wsBwdData, wsBwdDataSz)); if (isLern){ cuCHECK(hipFree(gpuPrm->d_wsFwd)); gpuPrm->d_wsFwd = 0; cuCHECK(hipFree(gpuPrm->d_wsBwdW)); gpuPrm->d_wsBwdW = 0; cuCHECK(hipMalloc(&gpuPrm->d_wsFwd, wsFwdSz)); cuCHECK(hipMalloc(&gpuPrm->d_wsBwdW, wsBwdWSz)); } gpuPrm->inszMem = insz.size(); } } void Deconvolution::freeParamCUDA(void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; if (!gpuPrm) return; cuCHECK(cudnnDestroy(gpuPrm->cudnn)); cuCHECK(cudnnDestroyConvolutionDescriptor(gpuPrm->conv_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->in_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->out_desc)); cuCHECK(cudnnDestroyFilterDescriptor(gpuPrm->w_desc)); cuCHECK(hipFree(gpuPrm->d_wsBwdData)); if (gpuPrm->grin_desc){ // isLern cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grin_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grout_desc)); cuCHECK(cudnnDestroyFilterDescriptor(gpuPrm->dw_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->bias_desc)); cuCHECK(hipFree(gpuPrm->d_wsFwd)); cuCHECK(hipFree(gpuPrm->d_wsBwdW)); } } void Deconvolution::forwardCUDA(const deconvParams& prms, const snFloat* weight, const snSize& insz, const snFloat* input, const snSize& outsz, snFloat* output, void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; // run snFloat alpha = 1.f, beta = 0.f; cuCHECK(cudnnConvolutionBackwardData(gpuPrm->cudnn, &alpha, gpuPrm->w_desc, weight, gpuPrm->in_desc, input, gpuPrm->conv_desc, gpuPrm->algoBwdData, gpuPrm->d_wsBwdData, gpuPrm->wsBwdDataSz, &beta, gpuPrm->out_desc, output)); } __global__ void cuBwdBias(snSize insz, const snFloat* bias, snFloat* grout){ size_t isz = insz.w * insz.h; snFloat* pGrOut = grout + isz * blockIdx.x + isz * insz.d * blockIdx.y; snFloat b = bias[blockIdx.x]; unsigned int i = threadIdx.x; while (i < isz){ pGrOut[i] += b; i += blockDim.x; } } void Deconvolution::backwardCUDA_GW(const deconvParams& prms, const snFloat* weight, const snSize& insz, const snFloat* input, const snSize& outsz, const snFloat* gradIn, snFloat* gradOut, snFloat* dWeightOut, void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; size_t wStepByN = prms.fWidth * prms.fHeight * insz.d * outsz.d; // run snFloat alpha = 1.f, beta = 0.f; cuCHECK(cudnnConvolutionForward(gpuPrm->cudnn, &alpha, gpuPrm->grin_desc, gradIn, gpuPrm->w_desc, weight, gpuPrm->conv_desc, gpuPrm->algoFwd, gpuPrm->d_wsFwd, gpuPrm->wsFwdSz, &beta, gpuPrm->grout_desc, gradOut)); cuCHECK(cudnnConvolutionBackwardFilter(gpuPrm->cudnn, &alpha, gpuPrm->grin_desc, gradIn, gpuPrm->in_desc, input, gpuPrm->conv_desc, gpuPrm->algoBwdW, gpuPrm->d_wsBwdW, gpuPrm->wsBwdWSz, &beta, gpuPrm->dw_desc, dWeightOut)); cuCHECK(cudnnConvolutionBackwardBias(gpuPrm->cudnn, &alpha, gpuPrm->in_desc, input, &beta, gpuPrm->bias_desc, dWeightOut + wStepByN)); // +bias dim3 dimBlock(128); dim3 dimGrid(int(insz.d), int(insz.n)); cuBwdBias << < dimGrid, dimBlock >> > (insz, weight + wStepByN, gradOut); } void Deconvolution::backwardCUDA_G(const deconvParams& prms, const snFloat* weight, const snSize& insz, const snSize& outsz, const snFloat* gradIn, snFloat* gradOut, void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; size_t wStepByN = prms.fWidth * prms.fHeight * insz.d * outsz.d; // run snFloat alpha = 1.f, beta = 0.f; cuCHECK(cudnnConvolutionForward(gpuPrm->cudnn, &alpha, gpuPrm->grin_desc, gradIn, gpuPrm->w_desc, weight, gpuPrm->conv_desc, gpuPrm->algoFwd, gpuPrm->d_wsFwd, gpuPrm->wsFwdSz, &beta, gpuPrm->grout_desc, gradOut)); // +bias cuBwdBias << < int(insz.n), 128 >> > (insz, weight + 
wStepByN, gradOut); }
4e3431fa2ef2db600f1d5574e9d051e2443efe22.cu
// // sunnet project // Copyright (C) 2018 by Contributors <https://github.com/Tyill/sunnet> // // This code is licensed under the MIT License. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files(the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and / or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions : // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // #include <cuda_runtime.h> #include <cudnn.h> #include "../stdafx.h" #include "snOperatorCUDA/src/Operator/deconvolution.h" using namespace std; using namespace SN_Base; struct gpuParams{ cudnnHandle_t cudnn = 0; cudnnConvolutionDescriptor_t conv_desc = 0; cudnnTensorDescriptor_t in_desc = 0; cudnnTensorDescriptor_t out_desc = 0; cudnnTensorDescriptor_t grin_desc = 0; cudnnTensorDescriptor_t grout_desc = 0; cudnnFilterDescriptor_t w_desc = 0; cudnnFilterDescriptor_t dw_desc = 0; cudnnTensorDescriptor_t bias_desc = 0; cudnnConvolutionFwdAlgo_t algoFwd; cudnnConvolutionBwdDataAlgo_t algoBwdData; cudnnConvolutionBwdFilterAlgo_t algoBwdW; size_t wsFwdSz = 0; size_t wsBwdDataSz = 0; size_t wsBwdWSz = 0; size_t inszMem = 0; void* d_wsFwd = 0; void* d_wsBwdData = 0; void* d_wsBwdW = 0; }; void Deconvolution::iniParamCUDA(bool isLern, const snSize& insz, const snSize& outsz, const deconvParams& prms, void** pGpuPrm){ bool isFirst = false; gpuParams* gpuPrm = (gpuParams*)*pGpuPrm; if (!gpuPrm){ cudaDeviceProp cu_deviceProps; cudaGetDeviceProperties(&cu_deviceProps, 0); if (cu_deviceProps.major < 3){ ERROR_MESS("%s requires SM >= 3.0"); return; } gpuPrm = new gpuParams(); memset(gpuPrm, 0, sizeof(gpuParams)); *pGpuPrm = gpuPrm; cudnnHandle_t cudnn = nullptr; cuCHECK(cudnnCreate(&cudnn)); gpuPrm->cudnn = cudnn; isFirst = true; } // input cudnnTensorDescriptor_t in_desc = nullptr; cuCHECK(cudnnCreateTensorDescriptor(&in_desc)); cuCHECK(cudnnSetTensor4dDescriptor(in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->in_desc)); gpuPrm->in_desc = in_desc; // w cudnnFilterDescriptor_t w_desc = nullptr; cuCHECK(cudnnCreateFilterDescriptor(&w_desc)); cuCHECK(cudnnSetFilter4dDescriptor(w_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, int(insz.d), int(outsz.d), int(prms.fHeight), int(prms.fWidth))); if (!isFirst) cuCHECK(cudnnDestroyFilterDescriptor((cudnnFilterDescriptor_t)gpuPrm->w_desc)); gpuPrm->w_desc = w_desc; // conv cudnnConvolutionDescriptor_t conv_desc = nullptr; cuCHECK(cudnnCreateConvolutionDescriptor(&conv_desc)); cuCHECK(cudnnSetConvolution2dDescriptor(conv_desc, 0, 0, int(prms.stride), int(prms.stride), 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); if (!isFirst) 
cuCHECK(cudnnDestroyConvolutionDescriptor((cudnnConvolutionDescriptor_t)gpuPrm->conv_desc)); gpuPrm->conv_desc = conv_desc; // output cudnnTensorDescriptor_t out_desc; cuCHECK(cudnnCreateTensorDescriptor(&out_desc)); cuCHECK(cudnnSetTensor4dDescriptor(out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(outsz.n), int(outsz.d), int(outsz.h), int(outsz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->out_desc)); gpuPrm->out_desc = out_desc; // algorithm cudnnConvolutionBwdDataAlgo_t algoBwdData; cuCHECK(cudnnGetConvolutionBackwardDataAlgorithm(gpuPrm->cudnn, w_desc, in_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algoBwdData)); gpuPrm->algoBwdData = algoBwdData; // workspace size_t wsBwdDataSz = 0; cuCHECK(cudnnGetConvolutionBackwardDataWorkspaceSize(gpuPrm->cudnn, w_desc, in_desc, conv_desc, out_desc, algoBwdData, &wsBwdDataSz)); gpuPrm->wsBwdDataSz = wsBwdDataSz; size_t wsFwdSz = 0, wsBwdWSz = 0; if (isLern){ // grin cudnnTensorDescriptor_t grin_desc; cuCHECK(cudnnCreateTensorDescriptor(&grin_desc)); cuCHECK(cudnnSetTensor4dDescriptor(grin_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(outsz.n), int(outsz.d), int(outsz.h), int(outsz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->grin_desc)); gpuPrm->grin_desc = grin_desc; // grout cudnnTensorDescriptor_t grout_desc; cuCHECK(cudnnCreateTensorDescriptor(&grout_desc)); cuCHECK(cudnnSetTensor4dDescriptor(grout_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->grout_desc)); gpuPrm->grout_desc = grout_desc; // dw cudnnFilterDescriptor_t dw_desc = nullptr; cuCHECK(cudnnCreateFilterDescriptor(&dw_desc)); cuCHECK(cudnnSetFilter4dDescriptor(dw_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, int(insz.d), int(outsz.d), int(prms.fHeight), int(prms.fWidth))); if (!isFirst) cuCHECK(cudnnDestroyFilterDescriptor((cudnnFilterDescriptor_t)gpuPrm->dw_desc)); gpuPrm->dw_desc = dw_desc; // bias cudnnTensorDescriptor_t bias_desc; cuCHECK(cudnnCreateTensorDescriptor(&bias_desc)); cuCHECK(cudnnSetTensor4dDescriptor(bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, int(insz.d), 1, 1)); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->bias_desc)); gpuPrm->bias_desc = bias_desc; // algorithm cudnnConvolutionFwdAlgo_t algoFwd; cuCHECK(cudnnGetConvolutionForwardAlgorithm(gpuPrm->cudnn, grin_desc, w_desc, conv_desc, grout_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algoFwd)); gpuPrm->algoFwd = algoFwd; cudnnConvolutionBwdFilterAlgo_t algoBwdW; cuCHECK(cudnnGetConvolutionBackwardFilterAlgorithm(gpuPrm->cudnn, grin_desc, in_desc, conv_desc, dw_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoBwdW)); gpuPrm->algoBwdW = algoBwdW; // workspace cuCHECK(cudnnGetConvolutionForwardWorkspaceSize(gpuPrm->cudnn, grin_desc, w_desc, conv_desc, grout_desc, algoFwd, &wsFwdSz)); gpuPrm->wsFwdSz = wsFwdSz; cuCHECK(cudnnGetConvolutionBackwardFilterWorkspaceSize(gpuPrm->cudnn, grin_desc, in_desc, conv_desc, dw_desc, algoBwdW, &wsBwdWSz)); gpuPrm->wsBwdWSz = wsBwdWSz; } if (isFirst){ cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdData, wsBwdDataSz)); if (isLern){ cuCHECK(cudaMalloc(&gpuPrm->d_wsFwd, wsFwdSz)); cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdW, wsBwdWSz)); } } else if (gpuPrm->inszMem < insz.size()){ cuCHECK(cudaFree(gpuPrm->d_wsBwdData)); gpuPrm->d_wsBwdData = 0; cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdData, 
wsBwdDataSz)); if (isLern){ cuCHECK(cudaFree(gpuPrm->d_wsFwd)); gpuPrm->d_wsFwd = 0; cuCHECK(cudaFree(gpuPrm->d_wsBwdW)); gpuPrm->d_wsBwdW = 0; cuCHECK(cudaMalloc(&gpuPrm->d_wsFwd, wsFwdSz)); cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdW, wsBwdWSz)); } gpuPrm->inszMem = insz.size(); } } void Deconvolution::freeParamCUDA(void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; if (!gpuPrm) return; cuCHECK(cudnnDestroy(gpuPrm->cudnn)); cuCHECK(cudnnDestroyConvolutionDescriptor(gpuPrm->conv_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->in_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->out_desc)); cuCHECK(cudnnDestroyFilterDescriptor(gpuPrm->w_desc)); cuCHECK(cudaFree(gpuPrm->d_wsBwdData)); if (gpuPrm->grin_desc){ // isLern cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grin_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grout_desc)); cuCHECK(cudnnDestroyFilterDescriptor(gpuPrm->dw_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->bias_desc)); cuCHECK(cudaFree(gpuPrm->d_wsFwd)); cuCHECK(cudaFree(gpuPrm->d_wsBwdW)); } } void Deconvolution::forwardCUDA(const deconvParams& prms, const snFloat* weight, const snSize& insz, const snFloat* input, const snSize& outsz, snFloat* output, void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; // run snFloat alpha = 1.f, beta = 0.f; cuCHECK(cudnnConvolutionBackwardData(gpuPrm->cudnn, &alpha, gpuPrm->w_desc, weight, gpuPrm->in_desc, input, gpuPrm->conv_desc, gpuPrm->algoBwdData, gpuPrm->d_wsBwdData, gpuPrm->wsBwdDataSz, &beta, gpuPrm->out_desc, output)); } __global__ void cuBwdBias(snSize insz, const snFloat* bias, snFloat* grout){ size_t isz = insz.w * insz.h; snFloat* pGrOut = grout + isz * blockIdx.x + isz * insz.d * blockIdx.y; snFloat b = bias[blockIdx.x]; unsigned int i = threadIdx.x; while (i < isz){ pGrOut[i] += b; i += blockDim.x; } } void Deconvolution::backwardCUDA_GW(const deconvParams& prms, const snFloat* weight, const snSize& insz, const snFloat* input, const snSize& outsz, const snFloat* gradIn, snFloat* gradOut, snFloat* dWeightOut, void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; size_t wStepByN = prms.fWidth * prms.fHeight * insz.d * outsz.d; // run snFloat alpha = 1.f, beta = 0.f; cuCHECK(cudnnConvolutionForward(gpuPrm->cudnn, &alpha, gpuPrm->grin_desc, gradIn, gpuPrm->w_desc, weight, gpuPrm->conv_desc, gpuPrm->algoFwd, gpuPrm->d_wsFwd, gpuPrm->wsFwdSz, &beta, gpuPrm->grout_desc, gradOut)); cuCHECK(cudnnConvolutionBackwardFilter(gpuPrm->cudnn, &alpha, gpuPrm->grin_desc, gradIn, gpuPrm->in_desc, input, gpuPrm->conv_desc, gpuPrm->algoBwdW, gpuPrm->d_wsBwdW, gpuPrm->wsBwdWSz, &beta, gpuPrm->dw_desc, dWeightOut)); cuCHECK(cudnnConvolutionBackwardBias(gpuPrm->cudnn, &alpha, gpuPrm->in_desc, input, &beta, gpuPrm->bias_desc, dWeightOut + wStepByN)); // +bias dim3 dimBlock(128); dim3 dimGrid(int(insz.d), int(insz.n)); cuBwdBias << < dimGrid, dimBlock >> > (insz, weight + wStepByN, gradOut); } void Deconvolution::backwardCUDA_G(const deconvParams& prms, const snFloat* weight, const snSize& insz, const snSize& outsz, const snFloat* gradIn, snFloat* gradOut, void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; size_t wStepByN = prms.fWidth * prms.fHeight * insz.d * outsz.d; // run snFloat alpha = 1.f, beta = 0.f; cuCHECK(cudnnConvolutionForward(gpuPrm->cudnn, &alpha, gpuPrm->grin_desc, gradIn, gpuPrm->w_desc, weight, gpuPrm->conv_desc, gpuPrm->algoFwd, gpuPrm->d_wsFwd, gpuPrm->wsFwdSz, &beta, gpuPrm->grout_desc, gradOut)); // +bias cuBwdBias << < int(insz.n), 128 >> > (insz, weight + wStepByN, gradOut); }
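The deconvolution pair above shows how hipify treats library calls: the CUDA runtime portion is translated (cudaGetDeviceProperties, cudaMalloc, cudaFree become hipGetDeviceProperties, hipMalloc, hipFree), while the cuDNN API (cudnnCreate, descriptor and convolution calls) is left untouched and the cudnn.h include is kept. A minimal sketch of that split, assuming a HIP platform where cuDNN is available (handle and workspace names are placeholders):

#include <hip/hip_runtime.h>
#include <cudnn.h>

int main() {
    cudnnHandle_t handle = nullptr;
    cudnnCreate(&handle);                 // library call: left unchanged by hipify

    void *workspace = nullptr;
    hipMalloc(&workspace, 1 << 20);       // runtime call: was cudaMalloc
    hipFree(workspace);                   // was cudaFree

    cudnnDestroy(handle);
    return 0;
}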
3fd53b313f1bd2ebee7cb9a3683b4a20ed1741f7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/null_mask.hpp> #include <cudf/strings/detail/utilities.hpp> #include <cudf/strings/extract.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <strings/regex/regex.cuh> #include <strings/utilities.hpp> namespace cudf { namespace strings { namespace detail { using string_index_pair = thrust::pair<const char*, size_type>; namespace { /** * @brief This functor handles extracting strings by applying the compiled regex pattern * and creating string_index_pairs for all the substrings. * * @tparam stack_size Correlates to the regex instructions state to maintain for each string. * Each instruction requires a fixed amount of overhead data. */ template <size_t stack_size> struct extract_fn { reprog_device prog; column_device_view d_strings; size_type column_index; __device__ string_index_pair operator()(size_type idx) { u_char data1[stack_size], data2[stack_size]; prog.set_stack_mem(data1, data2); if (d_strings.is_null(idx)) return string_index_pair{nullptr, 0}; string_view d_str = d_strings.element<string_view>(idx); string_index_pair result{nullptr, 0}; int32_t begin = 0; int32_t end = -1; // handles empty strings automatically if ((prog.find(idx, d_str, begin, end) > 0) && (prog.extract(idx, d_str, begin, end, column_index) > 0)) { auto offset = d_str.byte_offset(begin); // build index-pair result = string_index_pair{d_str.data() + offset, d_str.byte_offset(end) - offset}; } return result; } }; } // namespace // std::unique_ptr<table> extract( strings_column_view const& strings, std::string const& pattern, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0) { auto strings_count = strings.size(); auto strings_column = column_device_view::create(strings.parent(), stream); auto d_strings = *strings_column; // compile regex into device object auto prog = reprog_device::create(pattern, get_character_flags_table(), strings_count, stream); auto d_prog = *prog; // extract should include groups int groups = d_prog.group_counts(); CUDF_EXPECTS(groups > 0, "Group indicators not found in regex pattern"); // build a result column for each group std::vector<std::unique_ptr<column>> results; auto execpol = rmm::exec_policy(stream); auto regex_insts = d_prog.insts_counts(); for (int32_t column_index = 0; column_index < groups; ++column_index) { rmm::device_vector<string_index_pair> indices(strings_count); string_index_pair* d_indices = indices.data().get(); if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS)) thrust::transform(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), d_indices, 
extract_fn<RX_STACK_SMALL>{d_prog, d_strings, column_index}); else if (regex_insts <= RX_MEDIUM_INSTS) thrust::transform(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), d_indices, extract_fn<RX_STACK_MEDIUM>{d_prog, d_strings, column_index}); else thrust::transform(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), d_indices, extract_fn<RX_STACK_LARGE>{d_prog, d_strings, column_index}); // results.emplace_back(make_strings_column(indices, stream, mr)); } return std::make_unique<table>(std::move(results)); } } // namespace detail // external API std::unique_ptr<table> extract(strings_column_view const& strings, std::string const& pattern, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::extract(strings, pattern, mr); } } // namespace strings } // namespace cudf
3fd53b313f1bd2ebee7cb9a3683b4a20ed1741f7.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/null_mask.hpp> #include <cudf/strings/detail/utilities.hpp> #include <cudf/strings/extract.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <strings/regex/regex.cuh> #include <strings/utilities.hpp> namespace cudf { namespace strings { namespace detail { using string_index_pair = thrust::pair<const char*, size_type>; namespace { /** * @brief This functor handles extracting strings by applying the compiled regex pattern * and creating string_index_pairs for all the substrings. * * @tparam stack_size Correlates to the regex instructions state to maintain for each string. * Each instruction requires a fixed amount of overhead data. */ template <size_t stack_size> struct extract_fn { reprog_device prog; column_device_view d_strings; size_type column_index; __device__ string_index_pair operator()(size_type idx) { u_char data1[stack_size], data2[stack_size]; prog.set_stack_mem(data1, data2); if (d_strings.is_null(idx)) return string_index_pair{nullptr, 0}; string_view d_str = d_strings.element<string_view>(idx); string_index_pair result{nullptr, 0}; int32_t begin = 0; int32_t end = -1; // handles empty strings automatically if ((prog.find(idx, d_str, begin, end) > 0) && (prog.extract(idx, d_str, begin, end, column_index) > 0)) { auto offset = d_str.byte_offset(begin); // build index-pair result = string_index_pair{d_str.data() + offset, d_str.byte_offset(end) - offset}; } return result; } }; } // namespace // std::unique_ptr<table> extract( strings_column_view const& strings, std::string const& pattern, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0) { auto strings_count = strings.size(); auto strings_column = column_device_view::create(strings.parent(), stream); auto d_strings = *strings_column; // compile regex into device object auto prog = reprog_device::create(pattern, get_character_flags_table(), strings_count, stream); auto d_prog = *prog; // extract should include groups int groups = d_prog.group_counts(); CUDF_EXPECTS(groups > 0, "Group indicators not found in regex pattern"); // build a result column for each group std::vector<std::unique_ptr<column>> results; auto execpol = rmm::exec_policy(stream); auto regex_insts = d_prog.insts_counts(); for (int32_t column_index = 0; column_index < groups; ++column_index) { rmm::device_vector<string_index_pair> indices(strings_count); string_index_pair* d_indices = indices.data().get(); if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS)) thrust::transform(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), d_indices, extract_fn<RX_STACK_SMALL>{d_prog, d_strings, column_index}); else if (regex_insts 
<= RX_MEDIUM_INSTS) thrust::transform(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), d_indices, extract_fn<RX_STACK_MEDIUM>{d_prog, d_strings, column_index}); else thrust::transform(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), d_indices, extract_fn<RX_STACK_LARGE>{d_prog, d_strings, column_index}); // results.emplace_back(make_strings_column(indices, stream, mr)); } return std::make_unique<table>(std::move(results)); } } // namespace detail // external API std::unique_ptr<table> extract(strings_column_view const& strings, std::string const& pattern, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::extract(strings, pattern, mr); } } // namespace strings } // namespace cudf
90836a7bc4efb3fef11351ee82b7aef5c2d28634.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* ********************************* BLUEBOTTLE ********************************** ******************************************************************************* * * Copyright 2012 - 2018 Adam Sierakowski and Daniel Willen, * The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ #include <hip/hip_runtime.h> #include <thrust/sort.h> #include "cuda_physalis.h" #include "cuda_particle.h" #include <helper_cuda.h> #include <thrust/scan.h> #include <thrust/device_ptr.h> __constant__ real _A1; __constant__ real _A2; __constant__ real _A3; __constant__ real _B; __constant__ int _nn[NCOEFFS]; __constant__ int _mm[NCOEFFS]; __constant__ real _node_t[NNODES]; __constant__ real _node_p[NNODES]; real *_int_Yp_re; real *_int_Yp_im; real *_int_rDYu_re; real *_int_rDYu_im; real *_int_xXDYu_re; real *_int_xXDYu_im; real *_sum_send_e; real *_sum_send_w; real *_sum_send_n; real *_sum_send_s; real *_sum_send_t; real *_sum_send_b; real *_sum_recv_e; real *_sum_recv_w; real *_sum_recv_n; real *_sum_recv_s; real *_sum_recv_t; real *_sum_recv_b; extern "C" void cuda_init_physalis(void) { if (NPARTS > 0) { /* set up coefficient table */ int nn[NCOEFFS] = {0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4}; int mm[NCOEFFS] = {0, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4}; /* set up quadrature nodes for 7th-order Lebedev quadrature */ // NOTE: Higher order quadratures exist as comments in bluebottle, in // cuda_quadrature.cu:cuda_Lamb() real PI14 = 0.25 * PI; real PI12 = 0.5 * PI; real PI34 = 0.75 * PI; real PI54 = 1.25 * PI; real PI32 = 1.5 * PI; real PI74 = 1.75 * PI; real alph1 = 0.955316618124509; real alph2 = 2.186276035465284; /* weights */ real A1 = 0.598398600683775; real A2 = 0.478718880547015; real A3 = 0.403919055461543; real B = 0.; /* nodes */ // Find a more elegant way of fixing the divide by sin(0) real a1_t[6] = {PI12, PI12, PI12, PI12, 0.+DIV_ST, PI-DIV_ST}; real a1_p[6] = {0., PI12, PI, PI32, 0., 0.}; real a2_t[12] = {PI12, PI12, PI12, PI12, PI14, PI14, PI14, PI14, PI34, PI34, PI34, PI34}; real a2_p[12] = {PI14, PI34, PI54, PI74, 0., PI12, PI, PI32, 0., PI12, PI, PI32}; real a3_t[8] = {alph1, alph1, alph1, alph1, alph2, alph2, alph2, alph2}; real a3_p[8] = {PI14, PI34, PI54, PI74, PI14, PI34, PI54, PI74}; /* put all quadrature nodes together for interpolation */ real node_t[NNODES]; real node_p[NNODES]; for (int i = 0; i < 6; i++) { node_t[i] = a1_t[i]; node_p[i] = a1_p[i]; } for (int i = 0; i < 12; i++) { node_t[6+i] = a2_t[i]; node_p[6+i] = a2_p[i]; } for (int i = 0; i < 8; i++) { node_t[18+i] = a3_t[i]; node_p[18+i] = a3_p[i]; } /* Bind to cuda device constant memory */ checkCudaErrors(hipMemcpyToSymbol(_nn, &nn, NCOEFFS * sizeof(int))); 
checkCudaErrors(hipMemcpyToSymbol(_mm, &mm, NCOEFFS * sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(_A1, &A1, sizeof(real))); checkCudaErrors(hipMemcpyToSymbol(_A2, &A2, sizeof(real))); checkCudaErrors(hipMemcpyToSymbol(_A3, &A3, sizeof(real))); checkCudaErrors(hipMemcpyToSymbol(_B, &B, sizeof(real))); checkCudaErrors(hipMemcpyToSymbol(_node_t, &node_t, NNODES * sizeof(real))); checkCudaErrors(hipMemcpyToSymbol(_node_p, &node_p, NNODES * sizeof(real))); } } extern "C" void cuda_lamb(void) { /* CUDA exec config */ dim3 num_parts(nparts); // nparts blocks with nnodes threads each dim3 dim_nodes(NNODES); dim3 num_partcoeff(nparts, ncoeffs_max); dim3 dim_coeff(ncoeffs_max); //printf("N%d >> Determining Lamb's coefficients (nparts = %d)\n", rank, nparts); if (nparts > 0) { /* Temp storage for field variables at quadrature nodes */ real *_pp; // pressure real *_ur; // radial velocity real *_ut; // theta velocity real *_up; // phi velocity checkCudaErrors(hipMalloc(&_pp, NNODES * nparts * sizeof(real))); checkCudaErrors(hipMalloc(&_ur, NNODES * nparts * sizeof(real))); checkCudaErrors(hipMalloc(&_ut, NNODES * nparts * sizeof(real))); checkCudaErrors(hipMalloc(&_up, NNODES * nparts * sizeof(real))); /* Interpolate field varaibles to quadrature nodes */ hipLaunchKernelGGL(( check_nodes), dim3(num_parts), dim3(dim_nodes), 0, 0, nparts, _parts, _bc, _DOM); hipLaunchKernelGGL(( interpolate_nodes), dim3(num_parts), dim3(dim_nodes), 0, 0, _p, _u, _v, _w, rho_f, nu, gradP, _parts, _pp, _ur, _ut, _up, _bc, s_beta, s_ref, g); /* Create scalar product storage using max particle coefficient size */ int sp_size = nparts * NNODES * ncoeffs_max; checkCudaErrors(hipMalloc(&_int_Yp_re, sp_size * sizeof(real))); checkCudaErrors(hipMalloc(&_int_Yp_im, sp_size * sizeof(real))); checkCudaErrors(hipMalloc(&_int_rDYu_re, sp_size * sizeof(real))); checkCudaErrors(hipMalloc(&_int_rDYu_im, sp_size * sizeof(real))); checkCudaErrors(hipMalloc(&_int_xXDYu_re, sp_size * sizeof(real))); checkCudaErrors(hipMalloc(&_int_xXDYu_im, sp_size * sizeof(real))); /* Perform partial sums of lebedev quadrature */ hipLaunchKernelGGL(( lebedev_quadrature), dim3(num_partcoeff), dim3(dim_nodes), 0, 0, _parts, ncoeffs_max, _pp, _ur, _ut, _up, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); checkCudaErrors(hipFree(_pp)); checkCudaErrors(hipFree(_ur)); checkCudaErrors(hipFree(_ut)); checkCudaErrors(hipFree(_up)); } /* Accumulate partial sums (all procs need to be involved) */ cuda_partial_sum_i(); // 2a) Calculate partial sums over x face cuda_partial_sum_j(); // 2b) Calculate partial sums over y face cuda_partial_sum_k(); // 2c) Calculate partial sums over z face if (nparts > 0) { /* Compute lambs coefficients from partial sums */ hipLaunchKernelGGL(( compute_lambs_coeffs), dim3(num_parts), dim3(dim_coeff), 0, 0, _parts, lamb_relax, mu, nu, ncoeffs_max, nparts, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); /* Calculate hydrodynamic forces */ // Thread over nparts int t_nparts = nparts * (nparts < MAX_THREADS_1D) + MAX_THREADS_1D * (nparts >= MAX_THREADS_1D); int b_nparts = (int) ceil((real) nparts / (real) t_nparts); dim3 dim_nparts(t_nparts); dim3 num_nparts(b_nparts); hipLaunchKernelGGL(( calc_forces), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, gradP.x, gradP.y, gradP.z, rho_f, mu, nu, s_beta, s_ref, g); /* Free */ checkCudaErrors(hipFree(_int_Yp_re)); checkCudaErrors(hipFree(_int_Yp_im)); checkCudaErrors(hipFree(_int_rDYu_re)); 
checkCudaErrors(hipFree(_int_rDYu_im)); checkCudaErrors(hipFree(_int_xXDYu_re)); checkCudaErrors(hipFree(_int_xXDYu_im)); } } extern "C" void cuda_partial_sum_i(void) { //printf("N%d >> Communicating partial sums in i (nparts %d)\n", rank, nparts); /* Outline of communication of partial sums for Lebedev integration * 1) Finish local Lebedev integration in lebedev_quad<<<>>>. For a given * scalar product, the partial sum for the jth coefficient of the nth * particle is stored in: _int_someint[0 + NNODES*j + nparts*NNODES*n] * 2) All particles at the outermost two bin planes need their sums * accumulated (e.g., (j,k) planes at _bins.Gcc.{_isb->_is,_ie->_ieb}) * 3) Bin the particles using i indexing (find _bin_{start,end,count}) * 4) Reduce _bin_count at _isb:_is, _ie:_ieb to find nparts_send_{e,w} * 5) Communicate nparts_send_{e,w} with adjacent subdomains to find * nparts_recv_{w,e} * 6) Excl. prefix scan _bin_count over the _isb:_is, _ie:_ieb planes to find * destination index for particle data packed into sending aray * 7) Allocate send array, int_send_{e,w} * 6 * sizeof(real). 6 comes from * the number of integrals * 8) Allocate recv array, int_recv_{e,w} * 6 * sizeof(real). * 9) Communicate int_send_{e,w} to int_recv_{e,w} * 10) Excl. prefix scan _bin_count over _isb:_is, _ie:_ieb planes to find unpacking * incides - this already exists from earlier * 11) Unpack and accumulate * 12) Repeat for j, k */ /* Initialize execution config */ // Thread over east/west faces int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM); int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM); int by = (int) ceil((real) bins.Gcc.jnb / (real) ty); int bz = (int) ceil((real) bins.Gcc.knb / (real) tz); dim3 bin_num_inb(by, bz); dim3 bin_dim_inb(ty, tz); // Thread over nparts int t_nparts = nparts * (nparts < MAX_THREADS_1D) + MAX_THREADS_1D * (nparts >= MAX_THREADS_1D); int b_nparts = (int) ceil((real) nparts / (real) t_nparts); dim3 dim_nparts(t_nparts); dim3 num_nparts(b_nparts); /* Declare things we might need */ int s1b, s2b; // custom strides int offset; /* Allocate */ checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int))); checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int))); thrust::device_ptr<int> t_part_ind(_part_ind); thrust::device_ptr<int> t_part_bin(_part_bin); int *_offset_e; int *_offset_w; checkCudaErrors(hipMalloc(&_offset_e, 2 * bins.Gcc.s2b_i * sizeof(int))); checkCudaErrors(hipMalloc(&_offset_w, 2 * bins.Gcc.s2b_i * sizeof(int))); thrust::device_ptr<int> t_offset_e(_offset_e); thrust::device_ptr<int> t_offset_w(_offset_w); checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int))); thrust::device_ptr<int> t_bin_count(_bin_count); if (nparts > 0) { /* Find each particle's bin */ hipLaunchKernelGGL(( bin_fill_i), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts, _DOM); /* Sort _part_ind by _part_bin (sort key by value) */ if (nparts > 1) { thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind); } /* Find start and ending index of each bin */ int smem_size = (nparts + 1) * sizeof(int); hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end, _part_bin, nparts); /* Find number of particles in each bin */ hipLaunchKernelGGL(( 
count_bin_parts_i), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _bin_start, _bin_end, _bin_count); /* Find number of particles to send and packing offsets */ s1b = bins.Gcc.jnb; s2b = s1b * bins.Gcc.knb; // East: _ie and _ieb planes if (dom[rank].e != MPI_PROC_NULL) { // _bin_count is indexed with i varying slowest -- can do a reduction // directly from _bin_count, given the offset of the start of the _ie plane offset = GFX_LOC(bins.Gcc._ie, 0, 0, s1b, s2b); nparts_send[EAST] = thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_i, 0., thrust::plus<int>()); /* Determine packing offsets with an excl prefix scan */ if (nparts_send[EAST] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_i, t_offset_e); } else { hipMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); } } else { // no parts to send nparts_send[EAST] = 0; hipMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); } // West: _isb and _is planes if (dom[rank].w != MPI_PROC_NULL) { offset = GFX_LOC(bins.Gcc._isb, 0, 0, s1b, s2b); nparts_send[WEST] = thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_i, 0., thrust::plus<int>()); if (nparts_send[WEST] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_i, t_offset_w); } else { hipMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); } } else { nparts_send[WEST] = 0; hipMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); } } else { // nparts <= 0 checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int))); checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int))); nparts_send[EAST] = 0; nparts_send[WEST] = 0; hipMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); hipMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); } // Sending and receiving is the same since the outer two bin planes are shared nparts_recv[EAST] = nparts_send[EAST]; nparts_recv[WEST] = nparts_send[WEST]; /* Send number of parts to east/west */ // origin target // nparts_send[WEST] -> nparts_recv[EAST] // nparts_recv[WEST] <- nparts_send[EAST] //nparts_recv[WEST] = 0; // init //nparts_recv[EAST] = 0; //mpi_send_nparts_i(); /* Allocate memory for send and recv partial sums */ int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs // Indexing is, for example: // _sum_send_e[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id] // where // part_id = [0, nparts) and sp = [0, 6) // 0: Yp_re 1: Yp_im // 2: rDYu_re 3: rDYu_im // 4: xXDYu_re 5: xXDYu_im // See accompanying note at the same location in cuda_transfer_parts_i int send_alloc_e = nparts_send[EAST]*(nparts_send[EAST] > 0) + (nparts_send[EAST] == 0); int send_alloc_w = nparts_send[WEST]*(nparts_send[WEST] > 0) + (nparts_send[WEST] == 0); int recv_alloc_e = nparts_recv[EAST]*(nparts_recv[EAST] > 0) + (nparts_recv[EAST] == 0); int recv_alloc_w = nparts_recv[WEST]*(nparts_recv[WEST] > 0) + (nparts_recv[WEST] == 0); checkCudaErrors(hipMalloc(&_sum_send_e, send_alloc_e*npsums*sizeof(real))); checkCudaErrors(hipMalloc(&_sum_send_w, send_alloc_w*npsums*sizeof(real))); checkCudaErrors(hipMalloc(&_sum_recv_e, recv_alloc_e*npsums*sizeof(real))); checkCudaErrors(hipMalloc(&_sum_recv_w, recv_alloc_w*npsums*sizeof(real))); /* Pack partial sums */ if (nparts_send[EAST] > 0) { hipLaunchKernelGGL(( pack_sums_e), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _sum_send_e, _offset_e, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else 
{ //hipMemset(_sum_send_e, 0., send_alloc_e * npsums * sizeof(real)); } if (nparts_send[WEST] > 0) { hipLaunchKernelGGL(( pack_sums_w), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _sum_send_w, _offset_w, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else { //hipMemset(_sum_send_w, 0., send_alloc_w * npsums * sizeof(real)); } hipDeviceSynchronize(); // ensure packing is complete /* Communicate partial sums with MPI */ mpi_send_psums_i(); // Offsets are the same since they're over both ghost bins and edge bins /* Unpack and complete partial sums */ if (nparts_recv[EAST] > 0) { hipLaunchKernelGGL(( unpack_sums_e), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _sum_recv_e, _offset_e, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } if (nparts_recv[WEST] > 0) { hipLaunchKernelGGL(( unpack_sums_w), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _sum_recv_w, _offset_w, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } hipDeviceSynchronize(); // ensure packing is complete /* Free */ hipFree(_sum_send_e); hipFree(_sum_send_w); hipFree(_sum_recv_e); hipFree(_sum_recv_w); hipFree(_part_ind); hipFree(_part_bin); hipFree(_offset_e); hipFree(_offset_w); } extern "C" void cuda_partial_sum_j(void) { //printf("N%d >> Communicating partial sums in j\n", rank); /* Initialize execution config */ // Thread over north/south faces int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM); int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM); int bz = (int) ceil((real) bins.Gcc.knb / (real) tz); int bx = (int) ceil((real) bins.Gcc.inb / (real) tx); dim3 bin_num_jnb(bz, bx); dim3 bin_dim_jnb(tz, tx); // Thread over nparts int t_nparts = nparts * (nparts < MAX_THREADS_1D) + MAX_THREADS_1D * (nparts >= MAX_THREADS_1D); int b_nparts = (int) ceil((real) nparts / (real) t_nparts); dim3 dim_nparts(t_nparts); dim3 num_nparts(b_nparts); /* Declare things we might need */ int s1b, s2b; // custom strides int offset; /* Allocate */ checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int))); checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int))); thrust::device_ptr<int> t_part_ind(_part_ind); thrust::device_ptr<int> t_part_bin(_part_bin); int *_offset_n; int *_offset_s; checkCudaErrors(hipMalloc(&_offset_n, 2 * bins.Gcc.s2b_j * sizeof(int))); checkCudaErrors(hipMalloc(&_offset_s, 2 * bins.Gcc.s2b_j * sizeof(int))); thrust::device_ptr<int> t_offset_n(_offset_n); thrust::device_ptr<int> t_offset_s(_offset_s); checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int))); thrust::device_ptr<int> t_bin_count(_bin_count); if (nparts > 0) { /* Find each particle's bin */ hipLaunchKernelGGL(( bin_fill_j), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts, _DOM); /* Sort _part_ind by _part_bin (sort key by value) */ if (nparts > 1) { thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind); } /* Find start and ending index of each bin */ int smem_size = (nparts + 1) * sizeof(int); hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end, 
_part_bin, nparts); /* Find number of particles in each bin */ hipLaunchKernelGGL(( count_bin_parts_j), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _bin_start, _bin_end, _bin_count); /* Find number of particles to send and packing offsets */ s1b = bins.Gcc.knb; s2b = s1b * bins.Gcc.inb; // North: _je and _jeb planes if (dom[rank].n != MPI_PROC_NULL) { // _bin_count is indexed with i varying slowest -- can do a reduction // directly from _bin_count, given the offset of the start of the _je plane offset = GFY_LOC(0, bins.Gcc._je, 0, s1b, s2b); nparts_send[NORTH] = thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_j, 0., thrust::plus<int>()); /* Determine packing offsets with an excl prefix scan */ if (nparts_send[NORTH] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_j, t_offset_n); } else { hipMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); } } else { // no parts to send nparts_send[NORTH] = 0; hipMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); } // South: _jsb and _js planes if (dom[rank].s != MPI_PROC_NULL) { offset = GFY_LOC(0, bins.Gcc._jsb, 0, s1b, s2b); nparts_send[SOUTH] = thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_j, 0., thrust::plus<int>()); if (nparts_send[SOUTH] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_j, t_offset_s); } else { hipMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); } } else { nparts_send[SOUTH] = 0; hipMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); } } else { // nparts == 0 checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int))); checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int))); nparts_send[NORTH] = 0; nparts_send[SOUTH] = 0; hipMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); hipMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); } // Sending and receiving is the same since the outer two bin planes are shared nparts_recv[NORTH] = nparts_send[NORTH]; nparts_recv[SOUTH] = nparts_send[SOUTH]; /* Send number of parts to north/south */ // origin target // nparts_send[SOUTH] -> nparts_recv[NORTH] // nparts_recv[SOUTH] <- nparts_send[NORTH] //nparts_recv[SOUTH] = 0; // init //nparts_recv[NORTH] = 0; //mpi_send_nparts_j(); /* Allocate memory for send and recv partial sums */ int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs // Indexing is, for example: // _sum_send_n[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id] // where // part_id = [0, nparts) and sp = [0, 6) // 0: Yp_re 1: Yp_im // 2: rDYu_re 3: rDYu_im // 4: xXDYu_re 5: xXDYu_im // See accompanying note at the same location in cuda_transfer_parts_i int send_alloc_n = nparts_send[NORTH]*(nparts_send[NORTH] > 0) + (nparts_send[NORTH] == 0); int send_alloc_s = nparts_send[SOUTH]*(nparts_send[SOUTH] > 0) + (nparts_send[SOUTH] == 0); int recv_alloc_n = nparts_recv[NORTH]*(nparts_recv[NORTH] > 0) + (nparts_recv[NORTH] == 0); int recv_alloc_s = nparts_recv[SOUTH]*(nparts_recv[SOUTH] > 0) + (nparts_recv[SOUTH] == 0); checkCudaErrors(hipMalloc(&_sum_send_n, send_alloc_n*npsums*sizeof(real))); checkCudaErrors(hipMalloc(&_sum_send_s, send_alloc_s*npsums*sizeof(real))); checkCudaErrors(hipMalloc(&_sum_recv_n, recv_alloc_n*npsums*sizeof(real))); checkCudaErrors(hipMalloc(&_sum_recv_s, recv_alloc_s*npsums*sizeof(real))); /* Pack partial sums */ if (nparts_send[NORTH] > 0) { hipLaunchKernelGGL(( pack_sums_n), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _sum_send_n, _offset_n, _bin_start, 
_bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else { //hipMemset(_sum_send_n, 0., send_alloc_n * npsums * sizeof(real)); } if (nparts_send[SOUTH] > 0) { hipLaunchKernelGGL(( pack_sums_s), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _sum_send_s, _offset_s, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else { //hipMemset(_sum_send_s, 0., send_alloc_s * npsums * sizeof(real)); } hipDeviceSynchronize(); // ensure packing is complete /* Communicate partial sums with MPI */ mpi_send_psums_j(); // Offsets are the same since they're over both ghost bins and edge bins /* Unpack and complete partial sums */ if (nparts_recv[NORTH] > 0) { hipLaunchKernelGGL(( unpack_sums_n), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _sum_recv_n, _offset_n, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } if (nparts_recv[SOUTH] > 0) { hipLaunchKernelGGL(( unpack_sums_s), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _sum_recv_s, _offset_s, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } hipDeviceSynchronize(); // ensure packing is complete /* Free */ hipFree(_sum_send_n); hipFree(_sum_send_s); hipFree(_sum_recv_n); hipFree(_sum_recv_s); hipFree(_part_ind); hipFree(_part_bin); hipFree(_offset_n); hipFree(_offset_s); } extern "C" void cuda_partial_sum_k(void) { //printf("N%d >> Communicating partial sums in k\n", rank); /* Initialize execution config */ // Thread over top/bottom faces int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM); int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM); int bx = (int) ceil((real) bins.Gcc.inb / (real) tx); int by = (int) ceil((real) bins.Gcc.jnb / (real) ty); dim3 bin_num_knb(bx, by); dim3 bin_dim_knb(tx, ty); // Thread over nparts int t_nparts = nparts * (nparts < MAX_THREADS_1D) + MAX_THREADS_1D * (nparts >= MAX_THREADS_1D); int b_nparts = (int) ceil((real) nparts / (real) t_nparts); dim3 dim_nparts(t_nparts); dim3 num_nparts(b_nparts); /* Declare things we might need */ int s1b, s2b; // custom strides int offset; /* Allocate */ checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int))); checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int))); thrust::device_ptr<int> t_part_ind(_part_ind); thrust::device_ptr<int> t_part_bin(_part_bin); int *_offset_t; int *_offset_b; checkCudaErrors(hipMalloc(&_offset_t, 2 * bins.Gcc.s2b_k * sizeof(int))); checkCudaErrors(hipMalloc(&_offset_b, 2 * bins.Gcc.s2b_k * sizeof(int))); thrust::device_ptr<int> t_offset_t(_offset_t); thrust::device_ptr<int> t_offset_b(_offset_b); checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int))); thrust::device_ptr<int> t_bin_count(_bin_count); if (nparts > 0) { /* Find each particle's bin */ hipLaunchKernelGGL(( bin_fill_k), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts, _DOM); /* Sort _part_ind by _part_bin (sort key by value) */ if (nparts > 1) { thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind); } /* Find start and ending index of each bin */ int smem_size = (nparts + 
1) * sizeof(int); hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end, _part_bin, nparts); /* Find number of particles in each bin */ hipLaunchKernelGGL(( count_bin_parts_k), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _bin_start, _bin_end, _bin_count); /* Find number of particles to send and packing offsets */ s1b = bins.Gcc.inb; s2b = s1b * bins.Gcc.jnb; // North: _ke and _keb planes if (dom[rank].t != MPI_PROC_NULL) { // _bin_count is indexed with k varying slowest -- can do a reduction // directly from _bin_count, given the offset of the start of the _ke plane offset = GFZ_LOC(0, 0, bins.Gcc._ke, s1b, s2b); nparts_send[TOP] = thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_k, 0., thrust::plus<int>()); /* Determine packing offsets with an excl prefix scan */ if (nparts_send[TOP] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_k, t_offset_t); } else { hipMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); } } else { // no parts to send nparts_send[TOP] = 0; hipMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); } // South: _ksb and _ks planes if (dom[rank].b != MPI_PROC_NULL) { offset = GFZ_LOC(0, 0, bins.Gcc._ksb, s1b, s2b); nparts_send[BOTTOM] = thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_k, 0., thrust::plus<int>()); if (nparts_send[BOTTOM] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_k, t_offset_b); } else { hipMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); } } else { nparts_send[BOTTOM] = 0; hipMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); } } else { // nparts = 0 checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int))); checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int))); nparts_send[TOP] = 0; nparts_send[BOTTOM] = 0; hipMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); hipMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); } // Sending and receiving is the same since the outer two bin planes are shared nparts_recv[TOP] = nparts_send[TOP]; nparts_recv[BOTTOM] = nparts_send[BOTTOM]; /* Send number of parts to top/bottom */ // origin target // nparts_send[BOTTOM] -> nparts_recv[TOP] // nparts_recv[BOTTOM] <- nparts_send[TOP] //nparts_recv[BOTTOM] = 0; // init //nparts_recv[TOP] = 0; //mpi_send_nparts_k(); /* Allocate memory for send and recv partial sums */ int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs // Indexing is, for example: // _sum_send_t[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id] // where // part_id = [0, nparts) and sp = [0, 6) // 0: Yp_re 1: Yp_im // 2: rDYu_re 3: rDYu_im // 4: xXDYu_re 5: xXDYu_im int send_alloc_t = nparts_send[TOP]*(nparts_send[TOP] > 0) + (nparts_send[TOP] == 0); int send_alloc_b = nparts_send[BOTTOM]*(nparts_send[BOTTOM] > 0) + (nparts_send[BOTTOM] == 0); int recv_alloc_t = nparts_recv[TOP]*(nparts_recv[TOP] > 0) + (nparts_recv[TOP] == 0); int recv_alloc_b = nparts_recv[BOTTOM]*(nparts_recv[BOTTOM] > 0) + (nparts_recv[BOTTOM] == 0); checkCudaErrors(hipMalloc(&_sum_send_t, send_alloc_t*npsums*sizeof(real))); checkCudaErrors(hipMalloc(&_sum_send_b, send_alloc_b*npsums*sizeof(real))); checkCudaErrors(hipMalloc(&_sum_recv_t, recv_alloc_t*npsums*sizeof(real))); checkCudaErrors(hipMalloc(&_sum_recv_b, recv_alloc_b*npsums*sizeof(real))); /* Pack partial sums */ if (nparts_send[TOP] > 0) { hipLaunchKernelGGL(( pack_sums_t), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, 
_sum_send_t, _offset_t, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else { //hipMemset(_sum_send_t, 0., send_alloc_t * npsums * sizeof(real)); } if (nparts_send[BOTTOM] > 0) { hipLaunchKernelGGL(( pack_sums_b), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _sum_send_b, _offset_b, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else { //hipMemset(_sum_send_b, 0., send_alloc_b * npsums * sizeof(real)); } hipDeviceSynchronize(); // ensure packing is complete /* Communicate partial sums with MPI */ mpi_send_psums_k(); // Offsets are the same since they're over both ghost bins and edge bins /* Unpack and complete partial sums */ if (nparts_recv[TOP] > 0) { hipLaunchKernelGGL(( unpack_sums_t), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _sum_recv_t, _offset_t, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } if (nparts_recv[BOTTOM] > 0) { hipLaunchKernelGGL(( unpack_sums_b), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _sum_recv_b, _offset_b, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } hipDeviceSynchronize(); // ensure packing is complete /* Free */ hipFree(_sum_send_t); hipFree(_sum_send_b); hipFree(_sum_recv_t); hipFree(_sum_recv_b); hipFree(_part_ind); hipFree(_part_bin); hipFree(_offset_t); hipFree(_offset_b); } extern "C" void cuda_lamb_err(real *error, int *number) { //printf("N%d >> Determining Lamb's error\n", rank); real err = DBL_MIN; int num = -1; struct {double err; int num;} data; if (nparts > 0) { // create a place to store sorted coefficients and errors real *_part_errors; int *_part_nums; hipMalloc((void**) &_part_errors, nparts*sizeof(real)); hipMalloc((void**) &_part_nums, nparts*sizeof(int)); // sort the coefficients and calculate errors along the way dim3 numBlocks(nparts); dim3 dimBlocks(ncoeffs_max); hipLaunchKernelGGL(( compute_error), dim3(numBlocks), dim3(dimBlocks), 0, 0, lamb_cut, ncoeffs_max, nparts, _parts, _part_errors, _part_nums); // find maximum error of all particles thrust::device_ptr<real> t_part_errors(_part_errors); // error = thrust::reduce(t_part_errors, // t_part_errors + nparts, // 0., thrust::maximum<real>()); thrust::device_vector<real>::iterator iter = thrust::max_element(t_part_errors, t_part_errors + nparts); int pos = thrust::device_pointer_cast(&iter[0]) - t_part_errors; hipMemcpy(&err, _part_errors + pos, sizeof(real), hipMemcpyDeviceToHost); hipMemcpy(&num, _part_nums + pos, sizeof(int), hipMemcpyDeviceToHost); data.err = err; data.num = num; // clean up hipFree(_part_errors); hipFree(_part_nums); // store copy of coefficients for future calculation hipLaunchKernelGGL(( store_coeffs), dim3(numBlocks), dim3(dimBlocks), 0, 0, _parts, nparts, ncoeffs_max); } // MPI reduce to find max error and its part number MPI_Allreduce(MPI_IN_PLACE, &data, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD); *error = data.err; *number = data.num; }
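The cuda_partial_sum_{i,j,k} routines above all build their send buffers the same way: a thrust::reduce over the bin counts of the two outermost bin planes gives the number of particles to exchange, and a thrust::exclusive_scan over those same counts gives each bin's starting offset in the packed array. The short standalone sketch below illustrates only that reduce-plus-scan step; the bin counts and the plane size of 8 cells are made up for the example and stand in for _bin_count and 2 * bins.Gcc.s2b_i.

// Standalone sketch of the reduce + exclusive_scan packing-offset idiom used
// in cuda_partial_sum_{i,j,k}. The bin counts below are hypothetical.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>

int main(void)
{
  // Hypothetical per-bin particle counts for the two outermost bin planes
  // (stands in for the 2 * bins.Gcc.s2b_i cells read from _bin_count)
  int h_count[8] = {0, 2, 1, 0, 3, 0, 1, 2};
  thrust::device_vector<int> bin_count(h_count, h_count + 8);
  thrust::device_vector<int> offset(8, 0);

  // Number of particles to pack, cf. nparts_send[EAST]
  int nsend = thrust::reduce(bin_count.begin(), bin_count.end(), 0,
                             thrust::plus<int>());

  // Start index of each bin in the packed send buffer, cf. _offset_e
  if (nsend > 0)
    thrust::exclusive_scan(bin_count.begin(), bin_count.end(), offset.begin());

  printf("nsend = %d, offsets:", nsend);
  for (int i = 0; i < 8; i++) printf(" %d", (int) offset[i]);
  printf("\n");
  return 0;
}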
90836a7bc4efb3fef11351ee82b7aef5c2d28634.cu
/******************************************************************************* ********************************* BLUEBOTTLE ********************************** ******************************************************************************* * * Copyright 2012 - 2018 Adam Sierakowski and Daniel Willen, * The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ #include <cuda.h> #include <thrust/sort.h> #include "cuda_physalis.h" #include "cuda_particle.h" #include <helper_cuda.h> #include <thrust/scan.h> #include <thrust/device_ptr.h> __constant__ real _A1; __constant__ real _A2; __constant__ real _A3; __constant__ real _B; __constant__ int _nn[NCOEFFS]; __constant__ int _mm[NCOEFFS]; __constant__ real _node_t[NNODES]; __constant__ real _node_p[NNODES]; real *_int_Yp_re; real *_int_Yp_im; real *_int_rDYu_re; real *_int_rDYu_im; real *_int_xXDYu_re; real *_int_xXDYu_im; real *_sum_send_e; real *_sum_send_w; real *_sum_send_n; real *_sum_send_s; real *_sum_send_t; real *_sum_send_b; real *_sum_recv_e; real *_sum_recv_w; real *_sum_recv_n; real *_sum_recv_s; real *_sum_recv_t; real *_sum_recv_b; extern "C" void cuda_init_physalis(void) { if (NPARTS > 0) { /* set up coefficient table */ int nn[NCOEFFS] = {0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4}; int mm[NCOEFFS] = {0, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4}; /* set up quadrature nodes for 7th-order Lebedev quadrature */ // NOTE: Higher order quadratures exist as comments in bluebottle, in // cuda_quadrature.cu:cuda_Lamb() real PI14 = 0.25 * PI; real PI12 = 0.5 * PI; real PI34 = 0.75 * PI; real PI54 = 1.25 * PI; real PI32 = 1.5 * PI; real PI74 = 1.75 * PI; real alph1 = 0.955316618124509; real alph2 = 2.186276035465284; /* weights */ real A1 = 0.598398600683775; real A2 = 0.478718880547015; real A3 = 0.403919055461543; real B = 0.; /* nodes */ // Find a more elegant way of fixing the divide by sin(0) real a1_t[6] = {PI12, PI12, PI12, PI12, 0.+DIV_ST, PI-DIV_ST}; real a1_p[6] = {0., PI12, PI, PI32, 0., 0.}; real a2_t[12] = {PI12, PI12, PI12, PI12, PI14, PI14, PI14, PI14, PI34, PI34, PI34, PI34}; real a2_p[12] = {PI14, PI34, PI54, PI74, 0., PI12, PI, PI32, 0., PI12, PI, PI32}; real a3_t[8] = {alph1, alph1, alph1, alph1, alph2, alph2, alph2, alph2}; real a3_p[8] = {PI14, PI34, PI54, PI74, PI14, PI34, PI54, PI74}; /* put all quadrature nodes together for interpolation */ real node_t[NNODES]; real node_p[NNODES]; for (int i = 0; i < 6; i++) { node_t[i] = a1_t[i]; node_p[i] = a1_p[i]; } for (int i = 0; i < 12; i++) { node_t[6+i] = a2_t[i]; node_p[6+i] = a2_p[i]; } for (int i = 0; i < 8; i++) { node_t[18+i] = a3_t[i]; node_p[18+i] = a3_p[i]; } /* Bind to cuda device constant memory */ checkCudaErrors(cudaMemcpyToSymbol(_nn, &nn, NCOEFFS * sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(_mm, &mm, NCOEFFS * sizeof(int))); 
checkCudaErrors(cudaMemcpyToSymbol(_A1, &A1, sizeof(real))); checkCudaErrors(cudaMemcpyToSymbol(_A2, &A2, sizeof(real))); checkCudaErrors(cudaMemcpyToSymbol(_A3, &A3, sizeof(real))); checkCudaErrors(cudaMemcpyToSymbol(_B, &B, sizeof(real))); checkCudaErrors(cudaMemcpyToSymbol(_node_t, &node_t, NNODES * sizeof(real))); checkCudaErrors(cudaMemcpyToSymbol(_node_p, &node_p, NNODES * sizeof(real))); } } extern "C" void cuda_lamb(void) { /* CUDA exec config */ dim3 num_parts(nparts); // nparts blocks with nnodes threads each dim3 dim_nodes(NNODES); dim3 num_partcoeff(nparts, ncoeffs_max); dim3 dim_coeff(ncoeffs_max); //printf("N%d >> Determining Lamb's coefficients (nparts = %d)\n", rank, nparts); if (nparts > 0) { /* Temp storage for field variables at quadrature nodes */ real *_pp; // pressure real *_ur; // radial velocity real *_ut; // theta velocity real *_up; // phi velocity checkCudaErrors(cudaMalloc(&_pp, NNODES * nparts * sizeof(real))); checkCudaErrors(cudaMalloc(&_ur, NNODES * nparts * sizeof(real))); checkCudaErrors(cudaMalloc(&_ut, NNODES * nparts * sizeof(real))); checkCudaErrors(cudaMalloc(&_up, NNODES * nparts * sizeof(real))); /* Interpolate field varaibles to quadrature nodes */ check_nodes<<<num_parts, dim_nodes>>>(nparts, _parts, _bc, _DOM); interpolate_nodes<<<num_parts, dim_nodes>>>(_p, _u, _v, _w, rho_f, nu, gradP, _parts, _pp, _ur, _ut, _up, _bc, s_beta, s_ref, g); /* Create scalar product storage using max particle coefficient size */ int sp_size = nparts * NNODES * ncoeffs_max; checkCudaErrors(cudaMalloc(&_int_Yp_re, sp_size * sizeof(real))); checkCudaErrors(cudaMalloc(&_int_Yp_im, sp_size * sizeof(real))); checkCudaErrors(cudaMalloc(&_int_rDYu_re, sp_size * sizeof(real))); checkCudaErrors(cudaMalloc(&_int_rDYu_im, sp_size * sizeof(real))); checkCudaErrors(cudaMalloc(&_int_xXDYu_re, sp_size * sizeof(real))); checkCudaErrors(cudaMalloc(&_int_xXDYu_im, sp_size * sizeof(real))); /* Perform partial sums of lebedev quadrature */ lebedev_quadrature<<<num_partcoeff, dim_nodes>>>(_parts, ncoeffs_max, _pp, _ur, _ut, _up, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); checkCudaErrors(cudaFree(_pp)); checkCudaErrors(cudaFree(_ur)); checkCudaErrors(cudaFree(_ut)); checkCudaErrors(cudaFree(_up)); } /* Accumulate partial sums (all procs need to be involved) */ cuda_partial_sum_i(); // 2a) Calculate partial sums over x face cuda_partial_sum_j(); // 2b) Calculate partial sums over y face cuda_partial_sum_k(); // 2c) Calculate partial sums over z face if (nparts > 0) { /* Compute lambs coefficients from partial sums */ compute_lambs_coeffs<<<num_parts, dim_coeff>>>(_parts, lamb_relax, mu, nu, ncoeffs_max, nparts, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); /* Calculate hydrodynamic forces */ // Thread over nparts int t_nparts = nparts * (nparts < MAX_THREADS_1D) + MAX_THREADS_1D * (nparts >= MAX_THREADS_1D); int b_nparts = (int) ceil((real) nparts / (real) t_nparts); dim3 dim_nparts(t_nparts); dim3 num_nparts(b_nparts); calc_forces<<<num_nparts, dim_nparts>>>(_parts, nparts, gradP.x, gradP.y, gradP.z, rho_f, mu, nu, s_beta, s_ref, g); /* Free */ checkCudaErrors(cudaFree(_int_Yp_re)); checkCudaErrors(cudaFree(_int_Yp_im)); checkCudaErrors(cudaFree(_int_rDYu_re)); checkCudaErrors(cudaFree(_int_rDYu_im)); checkCudaErrors(cudaFree(_int_xXDYu_re)); checkCudaErrors(cudaFree(_int_xXDYu_im)); } } extern "C" void cuda_partial_sum_i(void) { //printf("N%d >> Communicating partial sums in i (nparts %d)\n", rank, nparts); /* 
Outline of communication of partial sums for Lebedev integration * 1) Finish local Lebedev integration in lebedev_quad<<<>>>. For a given * scalar product, the partial sum for the jth coefficient of the nth * particle is stored in: _int_someint[0 + NNODES*j + nparts*NNODES*n] * 2) All particles at the outermost two bin planes need their sums * accumulated (e.g., (j,k) planes at _bins.Gcc.{_isb->_is,_ie->_ieb}) * 3) Bin the particles using i indexing (find _bin_{start,end,count}) * 4) Reduce _bin_count at _isb:_is, _ie:_ieb to find nparts_send_{e,w} * 5) Communicate nparts_send_{e,w} with adjacent subdomains to find * nparts_recv_{w,e} * 6) Excl. prefix scan _bin_count over the _isb:_is, _ie:_ieb planes to find * destination index for particle data packed into sending aray * 7) Allocate send array, int_send_{e,w} * 6 * sizeof(real). 6 comes from * the number of integrals * 8) Allocate recv array, int_recv_{e,w} * 6 * sizeof(real). * 9) Communicate int_send_{e,w} to int_recv_{e,w} * 10) Excl. prefix scan _bin_count over _isb:_is, _ie:_ieb planes to find unpacking * incides - this already exists from earlier * 11) Unpack and accumulate * 12) Repeat for j, k */ /* Initialize execution config */ // Thread over east/west faces int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM); int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM); int by = (int) ceil((real) bins.Gcc.jnb / (real) ty); int bz = (int) ceil((real) bins.Gcc.knb / (real) tz); dim3 bin_num_inb(by, bz); dim3 bin_dim_inb(ty, tz); // Thread over nparts int t_nparts = nparts * (nparts < MAX_THREADS_1D) + MAX_THREADS_1D * (nparts >= MAX_THREADS_1D); int b_nparts = (int) ceil((real) nparts / (real) t_nparts); dim3 dim_nparts(t_nparts); dim3 num_nparts(b_nparts); /* Declare things we might need */ int s1b, s2b; // custom strides int offset; /* Allocate */ checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int))); checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int))); thrust::device_ptr<int> t_part_ind(_part_ind); thrust::device_ptr<int> t_part_bin(_part_bin); int *_offset_e; int *_offset_w; checkCudaErrors(cudaMalloc(&_offset_e, 2 * bins.Gcc.s2b_i * sizeof(int))); checkCudaErrors(cudaMalloc(&_offset_w, 2 * bins.Gcc.s2b_i * sizeof(int))); thrust::device_ptr<int> t_offset_e(_offset_e); thrust::device_ptr<int> t_offset_w(_offset_w); checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int))); thrust::device_ptr<int> t_bin_count(_bin_count); if (nparts > 0) { /* Find each particle's bin */ bin_fill_i<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts, _DOM); /* Sort _part_ind by _part_bin (sort key by value) */ if (nparts > 1) { thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind); } /* Find start and ending index of each bin */ int smem_size = (nparts + 1) * sizeof(int); find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end, _part_bin, nparts); /* Find number of particles in each bin */ count_bin_parts_i<<<bin_num_inb, bin_dim_inb>>>(_bin_start, _bin_end, _bin_count); /* Find number of particles to send and packing offsets */ s1b = bins.Gcc.jnb; s2b = s1b * bins.Gcc.knb; // East: _ie and _ieb planes if (dom[rank].e != MPI_PROC_NULL) { // _bin_count is indexed with i varying slowest -- can do a reduction // directly from 
_bin_count, given the offset of the start of the _ie plane offset = GFX_LOC(bins.Gcc._ie, 0, 0, s1b, s2b); nparts_send[EAST] = thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_i, 0., thrust::plus<int>()); /* Determine packing offsets with an excl prefix scan */ if (nparts_send[EAST] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_i, t_offset_e); } else { cudaMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); } } else { // no parts to send nparts_send[EAST] = 0; cudaMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); } // West: _isb and _is planes if (dom[rank].w != MPI_PROC_NULL) { offset = GFX_LOC(bins.Gcc._isb, 0, 0, s1b, s2b); nparts_send[WEST] = thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_i, 0., thrust::plus<int>()); if (nparts_send[WEST] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_i, t_offset_w); } else { cudaMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); } } else { nparts_send[WEST] = 0; cudaMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); } } else { // nparts <= 0 checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int))); checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int))); nparts_send[EAST] = 0; nparts_send[WEST] = 0; cudaMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); cudaMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int)); } // Sending and receiving is the same since the outer two bin planes are shared nparts_recv[EAST] = nparts_send[EAST]; nparts_recv[WEST] = nparts_send[WEST]; /* Send number of parts to east/west */ // origin target // nparts_send[WEST] -> nparts_recv[EAST] // nparts_recv[WEST] <- nparts_send[EAST] //nparts_recv[WEST] = 0; // init //nparts_recv[EAST] = 0; //mpi_send_nparts_i(); /* Allocate memory for send and recv partial sums */ int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs // Indexing is, for example: // _sum_send_e[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id] // where // part_id = [0, nparts) and sp = [0, 6) // 0: Yp_re 1: Yp_im // 2: rDYu_re 3: rDYu_im // 4: xXDYu_re 5: xXDYu_im // See accompanying note at the same location in cuda_transfer_parts_i int send_alloc_e = nparts_send[EAST]*(nparts_send[EAST] > 0) + (nparts_send[EAST] == 0); int send_alloc_w = nparts_send[WEST]*(nparts_send[WEST] > 0) + (nparts_send[WEST] == 0); int recv_alloc_e = nparts_recv[EAST]*(nparts_recv[EAST] > 0) + (nparts_recv[EAST] == 0); int recv_alloc_w = nparts_recv[WEST]*(nparts_recv[WEST] > 0) + (nparts_recv[WEST] == 0); checkCudaErrors(cudaMalloc(&_sum_send_e, send_alloc_e*npsums*sizeof(real))); checkCudaErrors(cudaMalloc(&_sum_send_w, send_alloc_w*npsums*sizeof(real))); checkCudaErrors(cudaMalloc(&_sum_recv_e, recv_alloc_e*npsums*sizeof(real))); checkCudaErrors(cudaMalloc(&_sum_recv_w, recv_alloc_w*npsums*sizeof(real))); /* Pack partial sums */ if (nparts_send[EAST] > 0) { pack_sums_e<<<bin_num_inb, bin_dim_inb>>>(_sum_send_e, _offset_e, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else { //cudaMemset(_sum_send_e, 0., send_alloc_e * npsums * sizeof(real)); } if (nparts_send[WEST] > 0) { pack_sums_w<<<bin_num_inb, bin_dim_inb>>>(_sum_send_w, _offset_w, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else { //cudaMemset(_sum_send_w, 0., send_alloc_w * npsums * sizeof(real)); } 
cudaDeviceSynchronize(); // ensure packing is complete /* Communicate partial sums with MPI */ mpi_send_psums_i(); // Offsets are the same since they're over both ghost bins and edge bins /* Unpack and complete partial sums */ if (nparts_recv[EAST] > 0) { unpack_sums_e<<<bin_num_inb, bin_dim_inb>>>(_sum_recv_e, _offset_e, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } if (nparts_recv[WEST] > 0) { unpack_sums_w<<<bin_num_inb, bin_dim_inb>>>(_sum_recv_w, _offset_w, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } cudaDeviceSynchronize(); // ensure packing is complete /* Free */ cudaFree(_sum_send_e); cudaFree(_sum_send_w); cudaFree(_sum_recv_e); cudaFree(_sum_recv_w); cudaFree(_part_ind); cudaFree(_part_bin); cudaFree(_offset_e); cudaFree(_offset_w); } extern "C" void cuda_partial_sum_j(void) { //printf("N%d >> Communicating partial sums in j\n", rank); /* Initialize execution config */ // Thread over north/south faces int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM); int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM); int bz = (int) ceil((real) bins.Gcc.knb / (real) tz); int bx = (int) ceil((real) bins.Gcc.inb / (real) tx); dim3 bin_num_jnb(bz, bx); dim3 bin_dim_jnb(tz, tx); // Thread over nparts int t_nparts = nparts * (nparts < MAX_THREADS_1D) + MAX_THREADS_1D * (nparts >= MAX_THREADS_1D); int b_nparts = (int) ceil((real) nparts / (real) t_nparts); dim3 dim_nparts(t_nparts); dim3 num_nparts(b_nparts); /* Declare things we might need */ int s1b, s2b; // custom strides int offset; /* Allocate */ checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int))); checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int))); thrust::device_ptr<int> t_part_ind(_part_ind); thrust::device_ptr<int> t_part_bin(_part_bin); int *_offset_n; int *_offset_s; checkCudaErrors(cudaMalloc(&_offset_n, 2 * bins.Gcc.s2b_j * sizeof(int))); checkCudaErrors(cudaMalloc(&_offset_s, 2 * bins.Gcc.s2b_j * sizeof(int))); thrust::device_ptr<int> t_offset_n(_offset_n); thrust::device_ptr<int> t_offset_s(_offset_s); checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int))); thrust::device_ptr<int> t_bin_count(_bin_count); if (nparts > 0) { /* Find each particle's bin */ bin_fill_j<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts, _DOM); /* Sort _part_ind by _part_bin (sort key by value) */ if (nparts > 1) { thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind); } /* Find start and ending index of each bin */ int smem_size = (nparts + 1) * sizeof(int); find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end, _part_bin, nparts); /* Find number of particles in each bin */ count_bin_parts_j<<<bin_num_jnb, bin_dim_jnb>>>(_bin_start, _bin_end, _bin_count); /* Find number of particles to send and packing offsets */ s1b = bins.Gcc.knb; s2b = s1b * bins.Gcc.inb; // North: _je and _jeb planes if (dom[rank].n != MPI_PROC_NULL) { // _bin_count is indexed with i varying slowest -- can do a reduction // directly from _bin_count, given the offset of the start of the _je plane offset = GFY_LOC(0, bins.Gcc._je, 0, s1b, s2b); nparts_send[NORTH] = 
thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_j, 0., thrust::plus<int>()); /* Determine packing offsets with an excl prefix scan */ if (nparts_send[NORTH] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_j, t_offset_n); } else { cudaMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); } } else { // no parts to send nparts_send[NORTH] = 0; cudaMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); } // South: _jsb and _js planes if (dom[rank].s != MPI_PROC_NULL) { offset = GFY_LOC(0, bins.Gcc._jsb, 0, s1b, s2b); nparts_send[SOUTH] = thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_j, 0., thrust::plus<int>()); if (nparts_send[SOUTH] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_j, t_offset_s); } else { cudaMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); } } else { nparts_send[SOUTH] = 0; cudaMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); } } else { // nparts == 0 checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int))); checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int))); nparts_send[NORTH] = 0; nparts_send[SOUTH] = 0; cudaMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); cudaMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int)); } // Sending and receiving is the same since the outer two bin planes are shared nparts_recv[NORTH] = nparts_send[NORTH]; nparts_recv[SOUTH] = nparts_send[SOUTH]; /* Send number of parts to north/south */ // origin target // nparts_send[SOUTH] -> nparts_recv[NORTH] // nparts_recv[SOUTH] <- nparts_send[NORTH] //nparts_recv[SOUTH] = 0; // init //nparts_recv[NORTH] = 0; //mpi_send_nparts_j(); /* Allocate memory for send and recv partial sums */ int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs // Indexing is, for example: // _sum_send_n[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id] // where // part_id = [0, nparts) and sp = [0, 6) // 0: Yp_re 1: Yp_im // 2: rDYu_re 3: rDYu_im // 4: xXDYu_re 5: xXDYu_im // See accompanying note at the same location in cuda_transfer_parts_i int send_alloc_n = nparts_send[NORTH]*(nparts_send[NORTH] > 0) + (nparts_send[NORTH] == 0); int send_alloc_s = nparts_send[SOUTH]*(nparts_send[SOUTH] > 0) + (nparts_send[SOUTH] == 0); int recv_alloc_n = nparts_recv[NORTH]*(nparts_recv[NORTH] > 0) + (nparts_recv[NORTH] == 0); int recv_alloc_s = nparts_recv[SOUTH]*(nparts_recv[SOUTH] > 0) + (nparts_recv[SOUTH] == 0); checkCudaErrors(cudaMalloc(&_sum_send_n, send_alloc_n*npsums*sizeof(real))); checkCudaErrors(cudaMalloc(&_sum_send_s, send_alloc_s*npsums*sizeof(real))); checkCudaErrors(cudaMalloc(&_sum_recv_n, recv_alloc_n*npsums*sizeof(real))); checkCudaErrors(cudaMalloc(&_sum_recv_s, recv_alloc_s*npsums*sizeof(real))); /* Pack partial sums */ if (nparts_send[NORTH] > 0) { pack_sums_n<<<bin_num_jnb, bin_dim_jnb>>>(_sum_send_n, _offset_n, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else { //cudaMemset(_sum_send_n, 0., send_alloc_n * npsums * sizeof(real)); } if (nparts_send[SOUTH] > 0) { pack_sums_s<<<bin_num_jnb, bin_dim_jnb>>>(_sum_send_s, _offset_s, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else { //cudaMemset(_sum_send_s, 0., send_alloc_s * npsums * sizeof(real)); } cudaDeviceSynchronize(); // ensure packing is complete /* Communicate partial sums with MPI */ 
mpi_send_psums_j(); // Offsets are the same since they're over both ghost bins and edge bins /* Unpack and complete partial sums */ if (nparts_recv[NORTH] > 0) { unpack_sums_n<<<bin_num_jnb, bin_dim_jnb>>>(_sum_recv_n, _offset_n, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } if (nparts_recv[SOUTH] > 0) { unpack_sums_s<<<bin_num_jnb, bin_dim_jnb>>>(_sum_recv_s, _offset_s, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } cudaDeviceSynchronize(); // ensure packing is complete /* Free */ cudaFree(_sum_send_n); cudaFree(_sum_send_s); cudaFree(_sum_recv_n); cudaFree(_sum_recv_s); cudaFree(_part_ind); cudaFree(_part_bin); cudaFree(_offset_n); cudaFree(_offset_s); } extern "C" void cuda_partial_sum_k(void) { //printf("N%d >> Communicating partial sums in k\n", rank); /* Initialize execution config */ // Thread over top/bottom faces int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM); int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM) + MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM); int bx = (int) ceil((real) bins.Gcc.inb / (real) tx); int by = (int) ceil((real) bins.Gcc.jnb / (real) ty); dim3 bin_num_knb(bx, by); dim3 bin_dim_knb(tx, ty); // Thread over nparts int t_nparts = nparts * (nparts < MAX_THREADS_1D) + MAX_THREADS_1D * (nparts >= MAX_THREADS_1D); int b_nparts = (int) ceil((real) nparts / (real) t_nparts); dim3 dim_nparts(t_nparts); dim3 num_nparts(b_nparts); /* Declare things we might need */ int s1b, s2b; // custom strides int offset; /* Allocate */ checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int))); checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int))); thrust::device_ptr<int> t_part_ind(_part_ind); thrust::device_ptr<int> t_part_bin(_part_bin); int *_offset_t; int *_offset_b; checkCudaErrors(cudaMalloc(&_offset_t, 2 * bins.Gcc.s2b_k * sizeof(int))); checkCudaErrors(cudaMalloc(&_offset_b, 2 * bins.Gcc.s2b_k * sizeof(int))); thrust::device_ptr<int> t_offset_t(_offset_t); thrust::device_ptr<int> t_offset_b(_offset_b); checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int))); checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int))); thrust::device_ptr<int> t_bin_count(_bin_count); if (nparts > 0) { /* Find each particle's bin */ bin_fill_k<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts, _DOM); /* Sort _part_ind by _part_bin (sort key by value) */ if (nparts > 1) { thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind); } /* Find start and ending index of each bin */ int smem_size = (nparts + 1) * sizeof(int); find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end, _part_bin, nparts); /* Find number of particles in each bin */ count_bin_parts_k<<<bin_num_knb, bin_dim_knb>>>(_bin_start, _bin_end, _bin_count); /* Find number of particles to send and packing offsets */ s1b = bins.Gcc.inb; s2b = s1b * bins.Gcc.jnb; // North: _ke and _keb planes if (dom[rank].t != MPI_PROC_NULL) { // _bin_count is indexed with k varying slowest -- can do a reduction // directly from _bin_count, given the offset of the start of the _ke plane offset = GFZ_LOC(0, 0, bins.Gcc._ke, s1b, s2b); nparts_send[TOP] = thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_k, 0., thrust::plus<int>()); 
/* Determine packing offsets with an excl prefix scan */ if (nparts_send[TOP] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_k, t_offset_t); } else { cudaMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); } } else { // no parts to send nparts_send[TOP] = 0; cudaMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); } // South: _ksb and _ks planes if (dom[rank].b != MPI_PROC_NULL) { offset = GFZ_LOC(0, 0, bins.Gcc._ksb, s1b, s2b); nparts_send[BOTTOM] = thrust::reduce(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_k, 0., thrust::plus<int>()); if (nparts_send[BOTTOM] > 0) { thrust::exclusive_scan(t_bin_count + offset, t_bin_count + offset + 2 * bins.Gcc.s2b_k, t_offset_b); } else { cudaMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); } } else { nparts_send[BOTTOM] = 0; cudaMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); } } else { // nparts = 0 checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int))); checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int))); nparts_send[TOP] = 0; nparts_send[BOTTOM] = 0; cudaMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); cudaMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int)); } // Sending and receiving is the same since the outer two bin planes are shared nparts_recv[TOP] = nparts_send[TOP]; nparts_recv[BOTTOM] = nparts_send[BOTTOM]; /* Send number of parts to top/bottom */ // origin target // nparts_send[BOTTOM] -> nparts_recv[TOP] // nparts_recv[BOTTOM] <- nparts_send[TOP] //nparts_recv[BOTTOM] = 0; // init //nparts_recv[TOP] = 0; //mpi_send_nparts_k(); /* Allocate memory for send and recv partial sums */ int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs // Indexing is, for example: // _sum_send_t[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id] // where // part_id = [0, nparts) and sp = [0, 6) // 0: Yp_re 1: Yp_im // 2: rDYu_re 3: rDYu_im // 4: xXDYu_re 5: xXDYu_im int send_alloc_t = nparts_send[TOP]*(nparts_send[TOP] > 0) + (nparts_send[TOP] == 0); int send_alloc_b = nparts_send[BOTTOM]*(nparts_send[BOTTOM] > 0) + (nparts_send[BOTTOM] == 0); int recv_alloc_t = nparts_recv[TOP]*(nparts_recv[TOP] > 0) + (nparts_recv[TOP] == 0); int recv_alloc_b = nparts_recv[BOTTOM]*(nparts_recv[BOTTOM] > 0) + (nparts_recv[BOTTOM] == 0); checkCudaErrors(cudaMalloc(&_sum_send_t, send_alloc_t*npsums*sizeof(real))); checkCudaErrors(cudaMalloc(&_sum_send_b, send_alloc_b*npsums*sizeof(real))); checkCudaErrors(cudaMalloc(&_sum_recv_t, recv_alloc_t*npsums*sizeof(real))); checkCudaErrors(cudaMalloc(&_sum_recv_b, recv_alloc_b*npsums*sizeof(real))); /* Pack partial sums */ if (nparts_send[TOP] > 0) { pack_sums_t<<<bin_num_knb, bin_dim_knb>>>(_sum_send_t, _offset_t, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else { //cudaMemset(_sum_send_t, 0., send_alloc_t * npsums * sizeof(real)); } if (nparts_send[BOTTOM] > 0) { pack_sums_b<<<bin_num_knb, bin_dim_knb>>>(_sum_send_b, _offset_b, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } else { //cudaMemset(_sum_send_b, 0., send_alloc_b * npsums * sizeof(real)); } cudaDeviceSynchronize(); // ensure packing is complete /* Communicate partial sums with MPI */ mpi_send_psums_k(); // Offsets are the same since they're over both ghost bins and edge bins /* Unpack and complete partial sums */ if (nparts_recv[TOP] > 0) { unpack_sums_t<<<bin_num_knb, 
bin_dim_knb>>>(_sum_recv_t, _offset_t, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } if (nparts_recv[BOTTOM] > 0) { unpack_sums_b<<<bin_num_knb, bin_dim_knb>>>(_sum_recv_b, _offset_b, _bin_start, _bin_count, _part_ind, ncoeffs_max, _int_Yp_re, _int_Yp_im, _int_rDYu_re, _int_rDYu_im, _int_xXDYu_re, _int_xXDYu_im); } cudaDeviceSynchronize(); // ensure packing is complete /* Free */ cudaFree(_sum_send_t); cudaFree(_sum_send_b); cudaFree(_sum_recv_t); cudaFree(_sum_recv_b); cudaFree(_part_ind); cudaFree(_part_bin); cudaFree(_offset_t); cudaFree(_offset_b); } extern "C" void cuda_lamb_err(real *error, int *number) { //printf("N%d >> Determining Lamb's error\n", rank); real err = DBL_MIN; int num = -1; struct {double err; int num;} data; if (nparts > 0) { // create a place to store sorted coefficients and errors real *_part_errors; int *_part_nums; cudaMalloc((void**) &_part_errors, nparts*sizeof(real)); cudaMalloc((void**) &_part_nums, nparts*sizeof(int)); // sort the coefficients and calculate errors along the way dim3 numBlocks(nparts); dim3 dimBlocks(ncoeffs_max); compute_error<<<numBlocks, dimBlocks>>>(lamb_cut, ncoeffs_max, nparts, _parts, _part_errors, _part_nums); // find maximum error of all particles thrust::device_ptr<real> t_part_errors(_part_errors); // error = thrust::reduce(t_part_errors, // t_part_errors + nparts, // 0., thrust::maximum<real>()); thrust::device_vector<real>::iterator iter = thrust::max_element(t_part_errors, t_part_errors + nparts); int pos = thrust::device_pointer_cast(&iter[0]) - t_part_errors; cudaMemcpy(&err, _part_errors + pos, sizeof(real), cudaMemcpyDeviceToHost); cudaMemcpy(&num, _part_nums + pos, sizeof(int), cudaMemcpyDeviceToHost); data.err = err; data.num = num; // clean up cudaFree(_part_errors); cudaFree(_part_nums); // store copy of coefficients for future calculation store_coeffs<<<numBlocks, dimBlocks>>>(_parts, nparts, ncoeffs_max); } // MPI reduce to find max error and its part number MPI_Allreduce(MPI_IN_PLACE, &data, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD); *error = data.err; *number = data.num; }
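cuda_lamb_err above finds the particle with the largest coefficient error by running thrust::max_element over the per-particle error array and converting the returned device iterator back into an index before copying the value and the particle number to the host. The following is a minimal sketch of that iterator-to-index pattern with made-up error values; it illustrates the Thrust calls involved, not a drop-in replacement for the routine.

// Sketch of the thrust::max_element + index-recovery pattern from cuda_lamb_err.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>

int main(void)
{
  float h_err[6] = {0.1f, 0.7f, 0.3f, 0.9f, 0.2f, 0.4f};  // hypothetical errors
  thrust::device_vector<float> errs(h_err, h_err + 6);

  // Device iterator pointing at the maximum element
  thrust::device_vector<float>::iterator it =
      thrust::max_element(errs.begin(), errs.end());

  // Turn the iterator back into a position, as cuda_lamb_err does with
  // thrust::device_pointer_cast, then pull the value back to the host
  int pos = (int) (it - errs.begin());
  float maxerr = *it;

  printf("max error %f at position %d\n", maxerr, pos);
  return 0;
}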
d6a1dc139ea094a3c10ff3c30c9249fb8be30ef0.hip
// !!! This is a file automatically generated by hipify!!! #include <immintrin.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <errno.h> #include <hip/hip_runtime.h> extern "C" { #include "timer.h" } #include "matrix_lib.h" float scalar_value = 0.0f; struct matrix matrixA, matrixB, matrixC; int store_matrix(struct matrix *matrix, char *filename) { unsigned long int i = 0; unsigned long int N = 0; FILE *fd = NULL; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) return 0; /* Try to open file of floats */ if ((fd = fopen(filename, "wb")) == NULL) { printf("Unable to open file %s\n", filename); return 0; } float *nxt_a = matrix->h_rows; for (i = 0; i < N; i += 8, nxt_a += 8) { if (fwrite(nxt_a, sizeof(float), 8, fd) != 8) { printf("Error writing to file %s: short write (less than 8 floats)\n", filename); return 0; } } if (fd != NULL) fclose(fd); return 1; } int alloc_matrix(struct matrix* m, int height, int width) { hipError_t hipError_t; m->height = height; m->width = width; m->h_rows = (float *) malloc(height * width * sizeof(float)); if (m->h_rows == NULL) { printf("Malloc error\n"); return 0; } hipError_t = hipMalloc(&(m->d_rows), DATASET_SIZE * sizeof(float)); return hipError_t != hipSuccess ? 0 : 1; } int load_matrix(struct matrix *matrix, char *filename) { int count = 0; FILE *file; file = fopen(filename, "r"); if (file == NULL) { perror("Error opening file\n"); return -1; } while (!feof(file) && (count < matrix->height * matrix->width)) { fscanf(file, "%f", &(matrix->h_rows[count])); count++; } fclose(file); return 1; } int initialize_matrix(struct matrix *matrix, float value, float inc) { unsigned long int i; unsigned long int N; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) return 0; for (i = 0; i < N; i ++, inc = inc + inc) { matrix->h_rows[i] = value + inc; } return 1; } int print_matrix(struct matrix *matrix) { unsigned long int i; unsigned long int N; unsigned long int nxt_newLine; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) { printf("Matrix null\n\n"); return 0; } /* Initialize new line controol */ nxt_newLine = matrix->width - 1; /* Print matrix elements */ for (i = 0; i < N; i++) { printf("%5.1f ", matrix->h_rows[i]); if (i == nxt_newLine) { printf("\n"); nxt_newLine += matrix->width; } if (i == 255) { printf("Ooops...256 printing limit found...skipping printing...\n"); break; } } return 1; } int check_errors(struct matrix *matrix, float scalar_value) { unsigned long int i; unsigned long int N; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) return 0; /* Check for errors (all values should be equal to scalar_value) */ float maxError = 0.0f; float diffError = 0.0f; for (i = 0; i < N; i++) maxError = (maxError > (diffError = fabs(matrix->h_rows[i] - scalar_value))) ? 
maxError : diffError; printf("Max error: %f\n", maxError); return 1; } int main(int argc, char *argv[]) { unsigned long int DimA_M, DimA_N, DimB_M, DimB_N, n_threads, n_blocks; char *matrixA_filename, *matrixB_filename, *result1_filename, *result2_filename; char *eptr = NULL; struct timeval overall_t1, overall_t2, start, stop; // Mark overall start time gettimeofday(&overall_t1, NULL); // Check arguments if (argc != 12) { printf("Usage: %s <scalar_value> <DimA_M> <DimA_N> <DimB_M> <DimB_N> <n_threads> <n_blocks> <matrixA_filename> <matrixB_filename> <result1_filename> <result2_filename>\n", argv[0]); return 0; } // Convert arguments scalar_value = strtof(argv[1], NULL); DimA_M = strtol(argv[2], &eptr, 10); DimA_N = strtol(argv[3], &eptr, 10); DimB_M = strtol(argv[4], &eptr, 10); DimB_N = strtol(argv[5], &eptr, 10); n_blocks = strtol(argv[6], &eptr, 10); n_threads = strtol(argv[7], &eptr, 10); matrixA_filename = argv[8]; matrixB_filename = argv[9]; result1_filename = argv[10]; result2_filename = argv[11]; if ((scalar_value == 0.0f) || (DimA_M == 0) || (DimA_N == 0) || (DimB_M == 0) || (DimB_N == 0)) { printf("%s: erro na conversao do argumento: errno = %d\n", argv[0], errno); /* If a conversion error occurred, display a message and exit */ if (errno == EINVAL) { printf("Conversion error occurred: %d\n", errno); return 1; } /* If the value provided was out of range, display a warning message */ if (errno == ERANGE) { printf("The value provided was out of rangei: %d\n", errno); return 1; } } /* Initialize the three matrixes */ alloc_matrix(&matrixA, DimA_M, DimA_N); //if (!initialize_matrix(&matrixA, 5.0f, 0.0f)) { if (!load_matrix(&matrixA, matrixA_filename)) { printf("%s: matrixA initialization problem.\n", argv[0]); return 1; } /* Print matrix */ printf("---------- Matrix A ----------\n"); print_matrix(&matrixA); alloc_matrix(&matrixB, DimB_M, DimB_N); //if (!initialize_matrix(&matrixB, 1.0f, 0.0f)) { if (!load_matrix(&matrixB, matrixB_filename)) { printf("%s: matrixB initialization problem.\n", argv[0]); return 1; } /* Print matrix */ printf("---------- Matrix B ----------\n"); print_matrix(&matrixB); alloc_matrix(&matrixC, DimA_M, DimB_N); if (!initialize_matrix(&matrixC, 0.0f, 0.0f)) { printf("%s: matrixC initialization problem.\n", argv[0]); return 1; } /* Print matrix */ printf("---------- Matrix C ----------\n"); print_matrix(&matrixC); /*Set grid size*/ set_grid_size(n_threads, n_blocks); /* Scalar product of matrix A */ printf("Executing scalar_matrix_mult(%5.1f, matrixA)...\n", scalar_value); gettimeofday(&start, NULL); if (!scalar_matrix_mult(scalar_value, &matrixA)) { printf("%s: scalar_matrix_mult problem.\n", argv[0]); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); /* Print matrix */ printf("---------- Matrix A ----------\n"); print_matrix(&matrixA); /* Write first result */ printf("Writing first result: %s...\n", result1_filename); if (!store_matrix(&matrixA, result1_filename)) { printf("%s: failed to write first result to file.\n", argv[0]); return 1; } /* Check for errors */ //check_errors(&matrixA, 10.0f); /* Calculate the product between matrix A and matrix B */ printf("Executing matrix_matrix_mult(matrixA, matrixB, matrixC)...\n"); gettimeofday(&start, NULL); if (!matrix_matrix_mult(&matrixA, &matrixB, &matrixC)) { printf("%s: matrix_matrix_mult problem.\n", argv[0]); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); /* Print matrix */ printf("---------- Matrix C ----------\n"); 
print_matrix(&matrixC); /* Write second result */ printf("Writing second result: %s...\n", result2_filename); if (!store_matrix(&matrixC, result2_filename)) { printf("%s: failed to write second result to file.\n", argv[0]); return 1; } /* Check for errors */ printf("Checking matrixC for errors...\n"); gettimeofday(&start, NULL); check_errors(&matrixC, 10240.0f); gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Mark overall stop time gettimeofday(&overall_t2, NULL); // Show elapsed time printf("Overall time: %f ms\n", timedifference_msec(overall_t1, overall_t2)); return 0; }
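matrix_lib.h and its implementation are not part of this file pair, so set_grid_size, scalar_matrix_mult and matrix_matrix_mult are visible only through their call sites in main. As a rough illustration, a scalar_matrix_mult consistent with the struct fields used above (h_rows on the host, d_rows on the device) might look like the sketch below; the kernel, the thread count and the return convention are assumptions for the example, not the library's actual implementation.

// Hypothetical sketch (not the actual matrix_lib implementation) of a
// scalar_matrix_mult consistent with the struct used in main: copy h_rows to
// the device, scale every element, copy the result back.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

struct matrix { int height; int width; float *h_rows; float *d_rows; };

__global__ void scale_kernel(float *rows, float scalar, unsigned long n)
{
  unsigned long i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) rows[i] *= scalar;   // guard the padded threads of the last block
}

int scalar_matrix_mult(float scalar_value, struct matrix *m)
{
  unsigned long n = (unsigned long) m->height * m->width;
  if (n == 0 || m->h_rows == NULL || m->d_rows == NULL) return 0;

  cudaMemcpy(m->d_rows, m->h_rows, n * sizeof(float), cudaMemcpyHostToDevice);

  int threads = 256;                                  // cf. set_grid_size()
  int blocks = (int) ((n + threads - 1) / threads);
  scale_kernel<<<blocks, threads>>>(m->d_rows, scalar_value, n);

  cudaMemcpy(m->h_rows, m->d_rows, n * sizeof(float), cudaMemcpyDeviceToHost);
  return cudaGetLastError() == cudaSuccess;
}

int main(void)
{
  struct matrix m; m.height = 2; m.width = 4;
  m.h_rows = (float *) malloc(8 * sizeof(float));
  cudaMalloc(&m.d_rows, 8 * sizeof(float));
  for (int i = 0; i < 8; i++) m.h_rows[i] = 2.0f;
  scalar_matrix_mult(5.0f, &m);
  printf("%5.1f %5.1f\n", m.h_rows[0], m.h_rows[7]);  // expect 10.0 10.0
  free(m.h_rows); cudaFree(m.d_rows);
  return 0;
}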
d6a1dc139ea094a3c10ff3c30c9249fb8be30ef0.cu
#include <immintrin.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <errno.h> #include <cuda_runtime.h> extern "C" { #include "timer.h" } #include "matrix_lib.h" float scalar_value = 0.0f; struct matrix matrixA, matrixB, matrixC; int store_matrix(struct matrix *matrix, char *filename) { unsigned long int i = 0; unsigned long int N = 0; FILE *fd = NULL; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) return 0; /* Try to open file of floats */ if ((fd = fopen(filename, "wb")) == NULL) { printf("Unable to open file %s\n", filename); return 0; } float *nxt_a = matrix->h_rows; for (i = 0; i < N; i += 8, nxt_a += 8) { if (fwrite(nxt_a, sizeof(float), 8, fd) != 8) { printf("Error writing to file %s: short write (less than 8 floats)\n", filename); return 0; } } if (fd != NULL) fclose(fd); return 1; } int alloc_matrix(struct matrix* m, int height, int width) { cudaError_t cudaError; m->height = height; m->width = width; m->h_rows = (float *) malloc(height * width * sizeof(float)); if (m->h_rows == NULL) { printf("Malloc error\n"); return 0; } cudaError = cudaMalloc(&(m->d_rows), DATASET_SIZE * sizeof(float)); return cudaError != cudaSuccess ? 0 : 1; } int load_matrix(struct matrix *matrix, char *filename) { int count = 0; FILE *file; file = fopen(filename, "r"); if (file == NULL) { perror("Error opening file\n"); return -1; } while (!feof(file) && (count < matrix->height * matrix->width)) { fscanf(file, "%f", &(matrix->h_rows[count])); count++; } fclose(file); return 1; } int initialize_matrix(struct matrix *matrix, float value, float inc) { unsigned long int i; unsigned long int N; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) return 0; for (i = 0; i < N; i ++, inc = inc + inc) { matrix->h_rows[i] = value + inc; } return 1; } int print_matrix(struct matrix *matrix) { unsigned long int i; unsigned long int N; unsigned long int nxt_newLine; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) { printf("Matrix null\n\n"); return 0; } /* Initialize new line controol */ nxt_newLine = matrix->width - 1; /* Print matrix elements */ for (i = 0; i < N; i++) { printf("%5.1f ", matrix->h_rows[i]); if (i == nxt_newLine) { printf("\n"); nxt_newLine += matrix->width; } if (i == 255) { printf("Ooops...256 printing limit found...skipping printing...\n"); break; } } return 1; } int check_errors(struct matrix *matrix, float scalar_value) { unsigned long int i; unsigned long int N; /* Check the numbers of the elements of the matrix */ N = matrix->height * matrix->width; /* Check the integrity of the matrix */ if (N == 0 || matrix->h_rows == NULL) return 0; /* Check for errors (all values should be equal to scalar_value) */ float maxError = 0.0f; float diffError = 0.0f; for (i = 0; i < N; i++) maxError = (maxError > (diffError = fabs(matrix->h_rows[i] - scalar_value))) ? 
maxError : diffError; printf("Max error: %f\n", maxError); return 1; } int main(int argc, char *argv[]) { unsigned long int DimA_M, DimA_N, DimB_M, DimB_N, n_threads, n_blocks; char *matrixA_filename, *matrixB_filename, *result1_filename, *result2_filename; char *eptr = NULL; struct timeval overall_t1, overall_t2, start, stop; // Mark overall start time gettimeofday(&overall_t1, NULL); // Check arguments if (argc != 12) { printf("Usage: %s <scalar_value> <DimA_M> <DimA_N> <DimB_M> <DimB_N> <n_threads> <n_blocks> <matrixA_filename> <matrixB_filename> <result1_filename> <result2_filename>\n", argv[0]); return 0; } // Convert arguments scalar_value = strtof(argv[1], NULL); DimA_M = strtol(argv[2], &eptr, 10); DimA_N = strtol(argv[3], &eptr, 10); DimB_M = strtol(argv[4], &eptr, 10); DimB_N = strtol(argv[5], &eptr, 10); n_blocks = strtol(argv[6], &eptr, 10); n_threads = strtol(argv[7], &eptr, 10); matrixA_filename = argv[8]; matrixB_filename = argv[9]; result1_filename = argv[10]; result2_filename = argv[11]; if ((scalar_value == 0.0f) || (DimA_M == 0) || (DimA_N == 0) || (DimB_M == 0) || (DimB_N == 0)) { printf("%s: error converting argument: errno = %d\n", argv[0], errno); /* If a conversion error occurred, display a message and exit */ if (errno == EINVAL) { printf("Conversion error occurred: %d\n", errno); return 1; } /* If the value provided was out of range, display a warning message */ if (errno == ERANGE) { printf("The value provided was out of range: %d\n", errno); return 1; } } /* Initialize the three matrices */ alloc_matrix(&matrixA, DimA_M, DimA_N); //if (!initialize_matrix(&matrixA, 5.0f, 0.0f)) { if (!load_matrix(&matrixA, matrixA_filename)) { printf("%s: matrixA initialization problem.\n", argv[0]); return 1; } /* Print matrix */ printf("---------- Matrix A ----------\n"); print_matrix(&matrixA); alloc_matrix(&matrixB, DimB_M, DimB_N); //if (!initialize_matrix(&matrixB, 1.0f, 0.0f)) { if (!load_matrix(&matrixB, matrixB_filename)) { printf("%s: matrixB initialization problem.\n", argv[0]); return 1; } /* Print matrix */ printf("---------- Matrix B ----------\n"); print_matrix(&matrixB); alloc_matrix(&matrixC, DimA_M, DimB_N); if (!initialize_matrix(&matrixC, 0.0f, 0.0f)) { printf("%s: matrixC initialization problem.\n", argv[0]); return 1; } /* Print matrix */ printf("---------- Matrix C ----------\n"); print_matrix(&matrixC); /* Set grid size */ set_grid_size(n_threads, n_blocks); /* Scalar product of matrix A */ printf("Executing scalar_matrix_mult(%5.1f, matrixA)...\n", scalar_value); gettimeofday(&start, NULL); if (!scalar_matrix_mult(scalar_value, &matrixA)) { printf("%s: scalar_matrix_mult problem.\n", argv[0]); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); /* Print matrix */ printf("---------- Matrix A ----------\n"); print_matrix(&matrixA); /* Write first result */ printf("Writing first result: %s...\n", result1_filename); if (!store_matrix(&matrixA, result1_filename)) { printf("%s: failed to write first result to file.\n", argv[0]); return 1; } /* Check for errors */ //check_errors(&matrixA, 10.0f); /* Calculate the product between matrix A and matrix B */ printf("Executing matrix_matrix_mult(matrixA, matrixB, matrixC)...\n"); gettimeofday(&start, NULL); if (!matrix_matrix_mult(&matrixA, &matrixB, &matrixC)) { printf("%s: matrix_matrix_mult problem.\n", argv[0]); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); /* Print matrix */ printf("---------- Matrix C ----------\n");
print_matrix(&matrixC); /* Write second result */ printf("Writing second result: %s...\n", result2_filename); if (!store_matrix(&matrixC, result2_filename)) { printf("%s: failed to write second result to file.\n", argv[0]); return 1; } /* Check for errors */ printf("Checking matrixC for errors...\n"); gettimeofday(&start, NULL); check_errors(&matrixC, 10240.0f); gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Mark overall stop time gettimeofday(&overall_t2, NULL); // Show elapsed time printf("Overall time: %f ms\n", timedifference_msec(overall_t1, overall_t2)); return 0; }
44053f8ba038c3317a8a634d94170311c5682168.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <cfloat> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define spmv_NBLOCKS 12*8*22 //22 #define spmv_BLOCK_SIZE 256 #define WARP_SIZE 32 texture<float,1,hipReadModeElementType> tex_vec; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; hipHostMalloc(newA_ptr, paddedSize * sizeof(float)); hipHostMalloc(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col];//tex1Dfetch(tex_vec,col); } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(volatile float* val, volatile int* cols, const int * rowDelimiters, const float * vec, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); // __shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1]; __shared__ volatile float partialSums[spmv_BLOCK_SIZE]; /* if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1) rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x]; __syncthreads(); */ if (myRow < dim) { int warpStart = rowDelimiters[myRow]; int warpEnd = rowDelimiters[myRow+1]; float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { int col = cols[j]; mySum += val[j] * tex1Dfetch(tex_vec,col);//vec[col]; /*if(blockIdx.x==1&&blockIdx.y==0&&(j-warpStart)/WARP_SIZE<=10){ // printf("1 0 0 %d %d %d\n",(j-warpStart)/WARP_SIZE,threadIdx.x,j); printf("2 0 0 %d %d %d\n",(j-warpStart)/WARP_SIZE,threadIdx.x,col); // printf("3 0 0 %d %d %d\n",(j-warpStart)/WARP_SIZE,threadIdx.x,j); }*/ } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { hipSetDevice(2); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * spmv_numRows / 50; // 1% of entries will be non-zero float maxval = 200.0; hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float)); hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int)); hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice); 
hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice); hipBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float)); hipEvent_t kernel_start, kernel_stop; hipEventCreate(&kernel_start); hipEventCreate(&kernel_stop); float kernel_time = 0.0f; hipEventRecord(kernel_start, 0); // Setup thread configuration int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE)); hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(spmv_BLOCK_SIZE), 0, 0, d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); hipDeviceSynchronize(); hipEventRecord(kernel_stop, 0); hipEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
44053f8ba038c3317a8a634d94170311c5682168.cu
#include <cassert> #include <cfloat> #include <cuda_runtime_api.h> #include <cuda.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define spmv_NBLOCKS 12*8*22 //22 #define spmv_BLOCK_SIZE 256 #define WARP_SIZE 32 texture<float,1,cudaReadModeElementType> tex_vec; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; cudaMallocHost(newA_ptr, paddedSize * sizeof(float)); cudaMallocHost(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col];//tex1Dfetch(tex_vec,col); } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(volatile float* val, volatile int* cols, const int * rowDelimiters, const float * vec, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); // __shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1]; __shared__ volatile float partialSums[spmv_BLOCK_SIZE]; /* if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1) rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x]; __syncthreads(); */ if (myRow < dim) { int warpStart = rowDelimiters[myRow]; int warpEnd = rowDelimiters[myRow+1]; float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { int col = cols[j]; mySum += val[j] * tex1Dfetch(tex_vec,col);//vec[col]; /*if(blockIdx.x==1&&blockIdx.y==0&&(j-warpStart)/WARP_SIZE<=10){ // printf("1 0 0 %d %d %d\n",(j-warpStart)/WARP_SIZE,threadIdx.x,j); printf("2 0 0 %d %d %d\n",(j-warpStart)/WARP_SIZE,threadIdx.x,col); // printf("3 0 0 %d %d %d\n",(j-warpStart)/WARP_SIZE,threadIdx.x,j); }*/ } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { cudaSetDevice(2); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * spmv_numRows / 50; // 1% of entries will be non-zero float maxval = 200.0; cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float)); cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int)); cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), 
cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice); cudaBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float)); cudaEvent_t kernel_start, kernel_stop; cudaEventCreate(&kernel_start); cudaEventCreate(&kernel_stop); float kernel_time = 0.0f; cudaEventRecord(kernel_start, 0); // Setup thread configuration int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE)); spmv_kernel <<<spmv_grid, spmv_BLOCK_SIZE>>> (d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); cudaDeviceSynchronize(); cudaEventRecord(kernel_stop, 0); cudaEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
2b5acba05aa319db4c23da55c8651afcab956ab8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright 2020 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include <thrust/sort.h> #include <thrust/sequence.h> #include "modules/perception/inference/migraphx/plugins/kernels.h" #include "modules/perception/inference/migraphx/plugins/rpn_proposal_ssd_plugin.h" namespace apollo { namespace perception { namespace inference { // TODO(chenjiahao): add heat_map_b as anchor_offset // output anchors dims: [H, W, num_anchor_per_point, 4] __global__ void generate_anchors_kernel(const int height, const int width, const float anchor_stride, const int num_anchor_per_point, const float *anchor_heights, const float *anchor_widths, float *anchors) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int num_anchor = height * width * num_anchor_per_point; if (index >= num_anchor) { return; } float anchor_offset = 0; int pos_index = index / num_anchor_per_point; int anchor_id = index % num_anchor_per_point; int w_i = pos_index % width; int h_i = pos_index / width; // center coordinates float x_ctr = w_i * anchor_stride + anchor_offset; float y_ctr = h_i * anchor_stride + anchor_offset; float x_min = x_ctr - 0.5 * (anchor_widths[anchor_id] - 1); float y_min = y_ctr - 0.5 * (anchor_heights[anchor_id] - 1); float x_max = x_ctr + 0.5 * (anchor_widths[anchor_id] - 1); float y_max = y_ctr + 0.5 * (anchor_heights[anchor_id] - 1); anchors[index * 4] = x_min; anchors[index * 4 + 1] = y_min; anchors[index * 4 + 2] = x_max; anchors[index * 4 + 3] = y_max; } // in_boxes dims: [N, num_box_per_point * 4, H, W], // out_boxes dims: [N, H * W * num_box_per_point 4] template <typename Dtype> __global__ void reshape_boxes_kernel(const int nthreads, const Dtype *in_boxes, const int height, const int width, const int num_box_per_point, Dtype *out_boxes) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { int num_point = height * width; int batch_id = index / num_point / num_box_per_point / 4; int feature_id = index % 4; int box_id = (index / 4) % num_box_per_point; int point_id = (index / num_box_per_point / 4) % num_point; int in_index = ((batch_id * num_box_per_point + box_id) * 4 + feature_id) * num_point + point_id; out_boxes[index] = in_boxes[in_index]; } } // in_scores dims: [N, 2 * num_box_per_point, H, W], // out_scores dims: [N, H * W * num_box_per_point, 2] template <typename Dtype> __global__ void reshape_scores_kernel(const int nthreads, const Dtype *in_scores, const int height, const int width, const int num_box_per_point, Dtype *out_scores) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { int num_point = height * width; int batch_id = index / num_point / num_box_per_point / 2; int class_id = index % 2; int box_id = (index / 2) % num_box_per_point; int point_id = (index / 
num_box_per_point / 2) % num_point; int in_index = ((batch_id * 2 + class_id) * num_box_per_point + box_id) * num_point + point_id; out_scores[index] = in_scores[in_index]; } } int RPNProposalSSDPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, hipStream_t stream) { // dimsNCHW: [N, 2 * num_anchor_per_point, H, W] const float *rpn_cls_prob_reshape = reinterpret_cast<const float *>(inputs[0]); // dimsNCHW: [N, num_anchor_per_point * 4, H, W] const float *rpn_bbox_pred = reinterpret_cast<const float *>(inputs[1]); // dims: [N, 6, 1, 1] const float *im_info = reinterpret_cast<const float *>(inputs[2]); float *out_rois = reinterpret_cast<float *>(outputs[0]); float *host_im_info = new float[batchSize * 6](); BASE_GPU_CHECK(hipMemcpyAsync(host_im_info, im_info, batchSize * 6 * sizeof(float), hipMemcpyDeviceToHost, stream)); const int origin_height = (int)(host_im_info[0]); const int origin_width = (int)(host_im_info[1]); int num_anchor = height_ * width_ * num_anchor_per_point_; int rpn_bbox_pred_size = batchSize * num_anchor * 4; int scores_size = batchSize * num_anchor * 2; int anchors_size = num_anchor * 4; int out_rois_size = batchSize * top_n_ * 5; // Using thrust::fill might cause crash float *init_out_rois = new float[out_rois_size](); std::fill_n(init_out_rois, out_rois_size, -1.0f); BASE_GPU_CHECK(hipMemcpyAsync(out_rois, init_out_rois, out_rois_size * sizeof(float), hipMemcpyHostToDevice, stream)); int block_size, nthreads; // reshape to [N, num_anchor, 4] float *temp_rpn_bbox_pred; BASE_GPU_CHECK(hipMalloc(reinterpret_cast<void **>(&temp_rpn_bbox_pred), rpn_bbox_pred_size * sizeof(float))); nthreads = rpn_bbox_pred_size; block_size = (nthreads - 1) / thread_size_ + 1; hipLaunchKernelGGL(( reshape_boxes_kernel), dim3(block_size), dim3(thread_size_), 0, stream, nthreads, rpn_bbox_pred, height_, width_, num_anchor_per_point_, temp_rpn_bbox_pred); // Normalization float *dev_bbox_mean, *dev_bbox_std; BASE_GPU_CHECK( hipMalloc(reinterpret_cast<void **>(&dev_bbox_mean), 4 * sizeof(float))); BASE_GPU_CHECK( hipMalloc(reinterpret_cast<void **>(&dev_bbox_std), 4 * sizeof(float))); BASE_GPU_CHECK(hipMemcpyAsync(dev_bbox_mean, bbox_mean_, 4 * sizeof(float), hipMemcpyHostToDevice, stream)); BASE_GPU_CHECK(hipMemcpyAsync(dev_bbox_std, bbox_std_, 4 * sizeof(float), hipMemcpyHostToDevice, stream)); repeatedly_mul_cuda(block_size, thread_size_, 0, stream, nthreads, temp_rpn_bbox_pred, temp_rpn_bbox_pred, dev_bbox_std, 4); repeatedly_add_cuda(block_size, thread_size_, 0, stream, nthreads, temp_rpn_bbox_pred, temp_rpn_bbox_pred, dev_bbox_mean, 4); // generate anchors float *anchors, *dev_anchor_heights, *dev_anchor_widths; BASE_GPU_CHECK(hipMalloc(reinterpret_cast<void **>(&anchors), anchors_size * sizeof(float))); BASE_GPU_CHECK( hipMemsetAsync(anchors, 0, anchors_size * sizeof(float), stream)); BASE_GPU_CHECK(hipMalloc(reinterpret_cast<void **>(&dev_anchor_heights), num_anchor_per_point_ * sizeof(float))); BASE_GPU_CHECK(hipMalloc(reinterpret_cast<void **>(&dev_anchor_widths), num_anchor_per_point_ * sizeof(float))); BASE_GPU_CHECK(hipMemsetAsync( dev_anchor_heights, 0, num_anchor_per_point_ * sizeof(float), stream)); BASE_GPU_CHECK(hipMemsetAsync( dev_anchor_widths, 0, num_anchor_per_point_ * sizeof(float), stream)); BASE_GPU_CHECK(hipMemcpyAsync(dev_anchor_heights, anchor_heights_, num_anchor_per_point_ * sizeof(float), hipMemcpyHostToDevice, stream)); BASE_GPU_CHECK(hipMemcpyAsync(dev_anchor_widths, anchor_widths_, num_anchor_per_point_ * sizeof(float), 
hipMemcpyHostToDevice, stream)); block_size = (anchors_size - 1) / thread_size_ + 1; hipLaunchKernelGGL(( generate_anchors_kernel), dim3(block_size), dim3(thread_size_), 0, stream, height_, width_, heat_map_a_, num_anchor_per_point_, dev_anchor_heights, dev_anchor_widths, anchors); // decode bbox float *proposals; BASE_GPU_CHECK(hipMalloc(reinterpret_cast<void **>(&proposals), rpn_bbox_pred_size * sizeof(float))); BASE_GPU_CHECK(hipMemsetAsync(proposals, 0, rpn_bbox_pred_size * sizeof(float), stream)); nthreads = batchSize * num_anchor; block_size = (nthreads - 1) / thread_size_ + 1; bbox_transform_inv_cuda(block_size, thread_size_, 0, stream, nthreads, anchors, temp_rpn_bbox_pred, num_anchor, 1, proposals); // clip boxes, i.e. refine proposals which are out of map if (refine_out_of_map_bbox_) { nthreads = rpn_bbox_pred_size; block_size = (nthreads - 1) / thread_size_ + 1; clip_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, proposals, (float)origin_height, (float)origin_width); } // reshape scores to [N, num_anchor, 2] float *temp_scores; BASE_GPU_CHECK(hipMalloc(reinterpret_cast<void **>(&temp_scores), scores_size * sizeof(float))); nthreads = scores_size; block_size = (nthreads - 1) / thread_size_ + 1; hipLaunchKernelGGL(( reshape_scores_kernel), dim3(block_size), dim3(thread_size_), 0, stream, nthreads, rpn_cls_prob_reshape, height_, width_, num_anchor_per_point_, temp_scores); // filter boxes according to min_size_mode and threshold_objectness float *filtered_proposals, *filtered_scores; int *filtered_count; BASE_GPU_CHECK(hipMalloc(reinterpret_cast<void **>(&filtered_proposals), rpn_bbox_pred_size * sizeof(float))); BASE_GPU_CHECK(hipMalloc(reinterpret_cast<void **>(&filtered_scores), batchSize * num_anchor * sizeof(float))); BASE_GPU_CHECK(hipMalloc(reinterpret_cast<void **>(&filtered_count), batchSize * sizeof(int))); BASE_GPU_CHECK(hipMemsetAsync(filtered_proposals, 0, rpn_bbox_pred_size * sizeof(float), stream)); BASE_GPU_CHECK(hipMemsetAsync( filtered_scores, 0, batchSize * num_anchor * sizeof(float), stream)); BASE_GPU_CHECK( hipMemsetAsync(filtered_count, 0, batchSize * sizeof(int), stream)); nthreads = batchSize * num_anchor; block_size = (nthreads - 1) / thread_size_ + 1; // TODO(chenjiahao): filter area filter_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, proposals, temp_scores, nullptr, num_anchor, 1, 2, 0, 0, 1, min_size_mode_, min_size_h_, min_size_w_, threshold_objectness_, filtered_proposals, filtered_scores, nullptr, filtered_count); int *host_filtered_count = new int[batchSize](); BASE_GPU_CHECK(hipMemcpyAsync(host_filtered_count, filtered_count, batchSize * sizeof(int), hipMemcpyDeviceToHost, stream)); // descending sort proposals by score int *sorted_indexes; BASE_GPU_CHECK(hipMalloc(reinterpret_cast<void **>(&sorted_indexes), batchSize * num_anchor * sizeof(int))); for (int i = 0; i < batchSize; ++i) { thrust::sequence(thrust::device, sorted_indexes + i * num_anchor, sorted_indexes + i * num_anchor + host_filtered_count[i]); thrust::sort_by_key( thrust::device, filtered_scores + size_t(i * num_anchor), filtered_scores + size_t(i * num_anchor + host_filtered_count[i]), sorted_indexes + i * num_anchor, thrust::greater<float>()); } // keep max N candidates float *pre_nms_proposals; BASE_GPU_CHECK(hipMalloc(reinterpret_cast<void **>(&pre_nms_proposals), batchSize * max_candidate_n_ * 4 * sizeof(float))); BASE_GPU_CHECK(hipMemsetAsync( pre_nms_proposals, 0, batchSize * max_candidate_n_ * 4 * sizeof(float), stream)); nthreads = batchSize * 
max_candidate_n_; block_size = (nthreads - 1) / thread_size_ + 1; keep_topN_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, filtered_proposals, nullptr, nullptr, sorted_indexes, filtered_count, false, num_anchor, 0, max_candidate_n_, pre_nms_proposals, nullptr, nullptr); // Nms, keep top N proposals and output final proposals // output dims: [num_roi, 5] (axis-1: batch_id, x_min, y_min, x_max, y_max) int acc_box_num = 0; for (int i = 0; i < batchSize; ++i) { int cur_filter_count = ::min(host_filtered_count[i], max_candidate_n_); NmsForward( false, cur_filter_count, 4, overlap_ratio_, max_candidate_n_, top_n_, i, 0, pre_nms_proposals + size_t(i * max_candidate_n_ * 4), nullptr, nullptr, out_rois + size_t(acc_box_num * 5), &acc_box_num, stream); } out_rois_num_ = acc_box_num; // Free cuda memory BASE_GPU_CHECK(hipFree(temp_rpn_bbox_pred)); BASE_GPU_CHECK(hipFree(dev_bbox_mean)); BASE_GPU_CHECK(hipFree(dev_bbox_std)); BASE_GPU_CHECK(hipFree(anchors)); BASE_GPU_CHECK(hipFree(dev_anchor_heights)); BASE_GPU_CHECK(hipFree(dev_anchor_widths)); BASE_GPU_CHECK(hipFree(proposals)); BASE_GPU_CHECK(hipFree(temp_scores)); BASE_GPU_CHECK(hipFree(filtered_proposals)); BASE_GPU_CHECK(hipFree(filtered_scores)); BASE_GPU_CHECK(hipFree(filtered_count)); BASE_GPU_CHECK(hipFree(sorted_indexes)); BASE_GPU_CHECK(hipFree(pre_nms_proposals)); // Free host memory delete[] host_im_info; delete[] host_filtered_count; delete[] init_out_rois; return 0; } } // namespace inference } // namespace perception } // namespace apollo
2b5acba05aa319db4c23da55c8651afcab956ab8.cu
/****************************************************************************** * Copyright 2020 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include <thrust/sort.h> #include <thrust/sequence.h> #include "modules/perception/inference/migraphx/plugins/kernels.h" #include "modules/perception/inference/migraphx/plugins/rpn_proposal_ssd_plugin.h" namespace apollo { namespace perception { namespace inference { // TODO(chenjiahao): add heat_map_b as anchor_offset // output anchors dims: [H, W, num_anchor_per_point, 4] __global__ void generate_anchors_kernel(const int height, const int width, const float anchor_stride, const int num_anchor_per_point, const float *anchor_heights, const float *anchor_widths, float *anchors) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int num_anchor = height * width * num_anchor_per_point; if (index >= num_anchor) { return; } float anchor_offset = 0; int pos_index = index / num_anchor_per_point; int anchor_id = index % num_anchor_per_point; int w_i = pos_index % width; int h_i = pos_index / width; // center coordinates float x_ctr = w_i * anchor_stride + anchor_offset; float y_ctr = h_i * anchor_stride + anchor_offset; float x_min = x_ctr - 0.5 * (anchor_widths[anchor_id] - 1); float y_min = y_ctr - 0.5 * (anchor_heights[anchor_id] - 1); float x_max = x_ctr + 0.5 * (anchor_widths[anchor_id] - 1); float y_max = y_ctr + 0.5 * (anchor_heights[anchor_id] - 1); anchors[index * 4] = x_min; anchors[index * 4 + 1] = y_min; anchors[index * 4 + 2] = x_max; anchors[index * 4 + 3] = y_max; } // in_boxes dims: [N, num_box_per_point * 4, H, W], // out_boxes dims: [N, H * W * num_box_per_point, 4] template <typename Dtype> __global__ void reshape_boxes_kernel(const int nthreads, const Dtype *in_boxes, const int height, const int width, const int num_box_per_point, Dtype *out_boxes) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { int num_point = height * width; int batch_id = index / num_point / num_box_per_point / 4; int feature_id = index % 4; int box_id = (index / 4) % num_box_per_point; int point_id = (index / num_box_per_point / 4) % num_point; int in_index = ((batch_id * num_box_per_point + box_id) * 4 + feature_id) * num_point + point_id; out_boxes[index] = in_boxes[in_index]; } } // in_scores dims: [N, 2 * num_box_per_point, H, W], // out_scores dims: [N, H * W * num_box_per_point, 2] template <typename Dtype> __global__ void reshape_scores_kernel(const int nthreads, const Dtype *in_scores, const int height, const int width, const int num_box_per_point, Dtype *out_scores) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { int num_point = height * width; int batch_id = index / num_point / num_box_per_point / 2; int class_id = index % 2; int box_id = (index / 2) % num_box_per_point; int point_id = (index / num_box_per_point / 2) % num_point; int in_index = ((batch_id * 2 + class_id) * 
num_box_per_point + box_id) * num_point + point_id; out_scores[index] = in_scores[in_index]; } } int RPNProposalSSDPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) { // dimsNCHW: [N, 2 * num_anchor_per_point, H, W] const float *rpn_cls_prob_reshape = reinterpret_cast<const float *>(inputs[0]); // dimsNCHW: [N, num_anchor_per_point * 4, H, W] const float *rpn_bbox_pred = reinterpret_cast<const float *>(inputs[1]); // dims: [N, 6, 1, 1] const float *im_info = reinterpret_cast<const float *>(inputs[2]); float *out_rois = reinterpret_cast<float *>(outputs[0]); float *host_im_info = new float[batchSize * 6](); BASE_GPU_CHECK(cudaMemcpyAsync(host_im_info, im_info, batchSize * 6 * sizeof(float), cudaMemcpyDeviceToHost, stream)); const int origin_height = (int)(host_im_info[0]); const int origin_width = (int)(host_im_info[1]); int num_anchor = height_ * width_ * num_anchor_per_point_; int rpn_bbox_pred_size = batchSize * num_anchor * 4; int scores_size = batchSize * num_anchor * 2; int anchors_size = num_anchor * 4; int out_rois_size = batchSize * top_n_ * 5; // Using thrust::fill might cause crash float *init_out_rois = new float[out_rois_size](); std::fill_n(init_out_rois, out_rois_size, -1.0f); BASE_GPU_CHECK(cudaMemcpyAsync(out_rois, init_out_rois, out_rois_size * sizeof(float), cudaMemcpyHostToDevice, stream)); int block_size, nthreads; // reshape to [N, num_anchor, 4] float *temp_rpn_bbox_pred; BASE_GPU_CHECK(cudaMalloc(reinterpret_cast<void **>(&temp_rpn_bbox_pred), rpn_bbox_pred_size * sizeof(float))); nthreads = rpn_bbox_pred_size; block_size = (nthreads - 1) / thread_size_ + 1; reshape_boxes_kernel<<<block_size, thread_size_, 0, stream>>>( nthreads, rpn_bbox_pred, height_, width_, num_anchor_per_point_, temp_rpn_bbox_pred); // Normalization float *dev_bbox_mean, *dev_bbox_std; BASE_GPU_CHECK( cudaMalloc(reinterpret_cast<void **>(&dev_bbox_mean), 4 * sizeof(float))); BASE_GPU_CHECK( cudaMalloc(reinterpret_cast<void **>(&dev_bbox_std), 4 * sizeof(float))); BASE_GPU_CHECK(cudaMemcpyAsync(dev_bbox_mean, bbox_mean_, 4 * sizeof(float), cudaMemcpyHostToDevice, stream)); BASE_GPU_CHECK(cudaMemcpyAsync(dev_bbox_std, bbox_std_, 4 * sizeof(float), cudaMemcpyHostToDevice, stream)); repeatedly_mul_cuda(block_size, thread_size_, 0, stream, nthreads, temp_rpn_bbox_pred, temp_rpn_bbox_pred, dev_bbox_std, 4); repeatedly_add_cuda(block_size, thread_size_, 0, stream, nthreads, temp_rpn_bbox_pred, temp_rpn_bbox_pred, dev_bbox_mean, 4); // generate anchors float *anchors, *dev_anchor_heights, *dev_anchor_widths; BASE_GPU_CHECK(cudaMalloc(reinterpret_cast<void **>(&anchors), anchors_size * sizeof(float))); BASE_GPU_CHECK( cudaMemsetAsync(anchors, 0, anchors_size * sizeof(float), stream)); BASE_GPU_CHECK(cudaMalloc(reinterpret_cast<void **>(&dev_anchor_heights), num_anchor_per_point_ * sizeof(float))); BASE_GPU_CHECK(cudaMalloc(reinterpret_cast<void **>(&dev_anchor_widths), num_anchor_per_point_ * sizeof(float))); BASE_GPU_CHECK(cudaMemsetAsync( dev_anchor_heights, 0, num_anchor_per_point_ * sizeof(float), stream)); BASE_GPU_CHECK(cudaMemsetAsync( dev_anchor_widths, 0, num_anchor_per_point_ * sizeof(float), stream)); BASE_GPU_CHECK(cudaMemcpyAsync(dev_anchor_heights, anchor_heights_, num_anchor_per_point_ * sizeof(float), cudaMemcpyHostToDevice, stream)); BASE_GPU_CHECK(cudaMemcpyAsync(dev_anchor_widths, anchor_widths_, num_anchor_per_point_ * sizeof(float), cudaMemcpyHostToDevice, stream)); block_size = (anchors_size - 1) / thread_size_ + 1; 
generate_anchors_kernel<<<block_size, thread_size_, 0, stream>>>( height_, width_, heat_map_a_, num_anchor_per_point_, dev_anchor_heights, dev_anchor_widths, anchors); // decode bbox float *proposals; BASE_GPU_CHECK(cudaMalloc(reinterpret_cast<void **>(&proposals), rpn_bbox_pred_size * sizeof(float))); BASE_GPU_CHECK(cudaMemsetAsync(proposals, 0, rpn_bbox_pred_size * sizeof(float), stream)); nthreads = batchSize * num_anchor; block_size = (nthreads - 1) / thread_size_ + 1; bbox_transform_inv_cuda(block_size, thread_size_, 0, stream, nthreads, anchors, temp_rpn_bbox_pred, num_anchor, 1, proposals); // clip boxes, i.e. refine proposals which are out of map if (refine_out_of_map_bbox_) { nthreads = rpn_bbox_pred_size; block_size = (nthreads - 1) / thread_size_ + 1; clip_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, proposals, (float)origin_height, (float)origin_width); } // reshape scores to [N, num_anchor, 2] float *temp_scores; BASE_GPU_CHECK(cudaMalloc(reinterpret_cast<void **>(&temp_scores), scores_size * sizeof(float))); nthreads = scores_size; block_size = (nthreads - 1) / thread_size_ + 1; reshape_scores_kernel<<<block_size, thread_size_, 0, stream>>>( nthreads, rpn_cls_prob_reshape, height_, width_, num_anchor_per_point_, temp_scores); // filter boxes according to min_size_mode and threshold_objectness float *filtered_proposals, *filtered_scores; int *filtered_count; BASE_GPU_CHECK(cudaMalloc(reinterpret_cast<void **>(&filtered_proposals), rpn_bbox_pred_size * sizeof(float))); BASE_GPU_CHECK(cudaMalloc(reinterpret_cast<void **>(&filtered_scores), batchSize * num_anchor * sizeof(float))); BASE_GPU_CHECK(cudaMalloc(reinterpret_cast<void **>(&filtered_count), batchSize * sizeof(int))); BASE_GPU_CHECK(cudaMemsetAsync(filtered_proposals, 0, rpn_bbox_pred_size * sizeof(float), stream)); BASE_GPU_CHECK(cudaMemsetAsync( filtered_scores, 0, batchSize * num_anchor * sizeof(float), stream)); BASE_GPU_CHECK( cudaMemsetAsync(filtered_count, 0, batchSize * sizeof(int), stream)); nthreads = batchSize * num_anchor; block_size = (nthreads - 1) / thread_size_ + 1; // TODO(chenjiahao): filter area filter_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, proposals, temp_scores, nullptr, num_anchor, 1, 2, 0, 0, 1, min_size_mode_, min_size_h_, min_size_w_, threshold_objectness_, filtered_proposals, filtered_scores, nullptr, filtered_count); int *host_filtered_count = new int[batchSize](); BASE_GPU_CHECK(cudaMemcpyAsync(host_filtered_count, filtered_count, batchSize * sizeof(int), cudaMemcpyDeviceToHost, stream)); // descending sort proposals by score int *sorted_indexes; BASE_GPU_CHECK(cudaMalloc(reinterpret_cast<void **>(&sorted_indexes), batchSize * num_anchor * sizeof(int))); for (int i = 0; i < batchSize; ++i) { thrust::sequence(thrust::device, sorted_indexes + i * num_anchor, sorted_indexes + i * num_anchor + host_filtered_count[i]); thrust::sort_by_key( thrust::device, filtered_scores + size_t(i * num_anchor), filtered_scores + size_t(i * num_anchor + host_filtered_count[i]), sorted_indexes + i * num_anchor, thrust::greater<float>()); } // keep max N candidates float *pre_nms_proposals; BASE_GPU_CHECK(cudaMalloc(reinterpret_cast<void **>(&pre_nms_proposals), batchSize * max_candidate_n_ * 4 * sizeof(float))); BASE_GPU_CHECK(cudaMemsetAsync( pre_nms_proposals, 0, batchSize * max_candidate_n_ * 4 * sizeof(float), stream)); nthreads = batchSize * max_candidate_n_; block_size = (nthreads - 1) / thread_size_ + 1; keep_topN_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, 
filtered_proposals, nullptr, nullptr, sorted_indexes, filtered_count, false, num_anchor, 0, max_candidate_n_, pre_nms_proposals, nullptr, nullptr); // Nms, keep top N proposals and output final proposals // output dims: [num_roi, 5] (axis-1: batch_id, x_min, y_min, x_max, y_max) int acc_box_num = 0; for (int i = 0; i < batchSize; ++i) { int cur_filter_count = std::min(host_filtered_count[i], max_candidate_n_); NmsForward( false, cur_filter_count, 4, overlap_ratio_, max_candidate_n_, top_n_, i, 0, pre_nms_proposals + size_t(i * max_candidate_n_ * 4), nullptr, nullptr, out_rois + size_t(acc_box_num * 5), &acc_box_num, stream); } out_rois_num_ = acc_box_num; // Free cuda memory BASE_GPU_CHECK(cudaFree(temp_rpn_bbox_pred)); BASE_GPU_CHECK(cudaFree(dev_bbox_mean)); BASE_GPU_CHECK(cudaFree(dev_bbox_std)); BASE_GPU_CHECK(cudaFree(anchors)); BASE_GPU_CHECK(cudaFree(dev_anchor_heights)); BASE_GPU_CHECK(cudaFree(dev_anchor_widths)); BASE_GPU_CHECK(cudaFree(proposals)); BASE_GPU_CHECK(cudaFree(temp_scores)); BASE_GPU_CHECK(cudaFree(filtered_proposals)); BASE_GPU_CHECK(cudaFree(filtered_scores)); BASE_GPU_CHECK(cudaFree(filtered_count)); BASE_GPU_CHECK(cudaFree(sorted_indexes)); BASE_GPU_CHECK(cudaFree(pre_nms_proposals)); // Free host memory delete[] host_im_info; delete[] host_filtered_count; delete[] init_out_rois; return 0; } } // namespace inference } // namespace perception } // namespace apollo
c106ffefc6966609344b61b790bfbd3ece6289df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ImageProcessing.cuh" // global variables bool draw_on_right = false; bool draw_on_left = false; __global__ void convertToGrayscale(unsigned char *gray, unsigned char *r, unsigned char *g, unsigned char *b, int dimension) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < dimension) gray[index] = 1 / 3.0 * (r[index] + g[index] + b[index]); } __global__ void getHistrogram(unsigned int *histogram, unsigned char *image, int dimension) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < dimension) { int color = image[index]; atomicAdd(&histogram[color], 1); } } __global__ void getNormalizedHistogram(double *norm_histogram, unsigned int* histogram, int dimension) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < LIMIT + 1) { norm_histogram[index] = (double)histogram[index] / dimension; } } __global__ void histogramEqualization(unsigned char *eq_image, unsigned char* image, double *cumulative_sum, int dimension) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < dimension) { if (floor(LIMIT * cumulative_sum[image[index]]) < LIMIT || floor(LIMIT * cumulative_sum[image[index]]) > 0) eq_image[index] = floor(LIMIT * cumulative_sum[image[index]]); else eq_image[index] = image[index]; } } // Exclusive scan on CUDA. __global__ void exclusiveScanGPU(double *d_array, double *d_result, int N, double *d_aux) { extern __shared__ double temp[]; int realIndex = 2 * threadIdx.x + blockDim.x * 2 * blockIdx.x; int threadIndex = threadIdx.x; int index = 2 * threadIndex; int offset = 1; // Copy from the array to shared memory. temp[index] = d_array[realIndex]; temp[index + 1] = d_array[realIndex + 1]; // Reduce by storing the intermediate values. The last element will be // the sum of n-1 elements. for (int d = blockDim.x; d > 0; d = d / 2) { __syncthreads(); // Regulates the amount of threads operating. if (threadIndex < d) { // Swap the numbers int current = offset * (index + 1) - 1; int next = offset * (index + 2) - 1; temp[next] += temp[current]; } // Increase the offset by multiple of 2. offset *= 2; } // Only one thread performs this. if (threadIndex == 0) { // Store the sum to the auxiliary array. if (d_aux) { d_aux[blockIdx.x] = temp[N - 1]; } // Reset the last element with identity. Only the first thread will do // the job. temp[N - 1] = 0; } // Down sweep to build scan. for (int d = 1; d < blockDim.x * 2; d *= 2) { // Reduce the offset by division of 2. 
offset = offset / 2; __syncthreads(); if (threadIndex < d) { int current = offset * (index + 1) - 1; int next = offset * (index + 2) - 1; // Swap double tempCurrent = temp[current]; temp[current] = temp[next]; temp[next] += tempCurrent; } } __syncthreads(); d_result[realIndex] = temp[index]; // write results to device memory d_result[realIndex + 1] = temp[index + 1]; } __global__ void sobelFilter(unsigned char * image, unsigned char * filtered_image, int height, int width) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; double dx, dy; if (x > 0 && y > 0 && x < width - 1 && y < height - 1) { dx = (-1 * image[(y - 1) * width + (x - 1)]) + (-2 * image[y * width + (x - 1)]) + (-1 * image[(y + 1) * width + (x - 1)]) + (image[(y - 1) * width + (x + 1)]) + (2 * image[y * width + (x + 1)]) + (image[(y + 1) * width + (x + 1)]); dy = (image[(y - 1) * width + (x - 1)]) + (2 * image[(y - 1) * width + x]) + (image[(y - 1) * width + (x + 1)]) + (-1 * image[(y + 1) * width + (x - 1)]) + (-2 * image[(y + 1) * width + x]) + (-1 * image[(y + 1) * width + (x + 1)]); filtered_image[y * width + x] = sqrt(dx * dx + dy * dy); } } __global__ void gaussianBlur(unsigned char *image, unsigned char *output_image, int width, int height, const int* const kernel, const int dim_kernel, int sum_of_elements){ int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; float partial_sum = 0.0; if (x > 0 && y > 0 && x < width - 1 && y < height - 1) { for (int row = 0; row < dim_kernel; row++) { for (int col = 0; col < dim_kernel; col++) { int index_image_x = x + col - dim_kernel / 2; int index_image_y = y + row - dim_kernel / 2; index_image_x = min(max(index_image_x, 0), width - 1); index_image_y = min(max(index_image_y, 0), height - 1); partial_sum += kernel[row * dim_kernel + col] * image[index_image_y * width + index_image_x]; } } output_image[y * width + x] = int((float)partial_sum / sum_of_elements); } } __global__ void binaryThreshold(unsigned char * image, unsigned char * output_image, int width, int height, int threshold) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x > 0 && y > 0 && x < width - 1 && y < height - 1) { if (image[y * width + x] < threshold) output_image[y * width + x] = 0; else output_image[y * width + x] = 255; } } __global__ void bitwiseAnd(unsigned char *image, unsigned char *mask, unsigned char *output_image, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x > 0 && y > 0 && x < width - 1 && y < height - 1) { output_image[y * width + x] = image[y * width + x] * mask[y * width + x]; } } __device__ int min_int(int a, int b) { return a <= b ? a : b; } __device__ int max_int(int a, int b) { return a >= b ? a : b; } hipError_t bgrToGrayscale(unsigned char *gray, Mat image_rgb, unsigned int size) { // Host input vectors. unsigned char *red = new unsigned char[size]; unsigned char *green = new unsigned char[size]; unsigned char *blue = new unsigned char[size]; // Init vectors with rgb values. 
for (int y = 0; y < image_rgb.rows; ++y) { for (int x = 0; x < image_rgb.cols; ++x) { blue[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 0]; green[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 1]; red[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 2]; } } hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); // Device input vectors. unsigned char *d_red; unsigned char *d_green; unsigned char *d_blue; unsigned char *d_gray; // Allocate GPU buffers. cudaStatus = hipMalloc(&d_red, size * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_green, size * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_blue, size * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_gray, size * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers cudaStatus = hipMemcpy(d_red, red, size * sizeof(unsigned char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMemcpy(d_green, green, size * sizeof(unsigned char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMemcpy(d_blue, blue, size * sizeof(unsigned char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } int no_threads = 1024; int no_blocks = (int)ceil((float)size / no_threads); convertToGrayscale << <no_blocks, no_threads >> > (d_gray, d_red, d_green, d_blue, size); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "convert_to_grayscale launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(gray, d_gray, size * sizeof(unsigned char), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(d_red); hipFree(d_green); hipFree(d_blue); hipFree(d_gray); delete[] red; delete[] green; delete[] blue; return cudaStatus; } hipError_t getHistogramN(double *cumulativeSumHistogram, double *norm_histogram, unsigned int *histogram, unsigned char *grayScaleImage, int size) { hipError_t cudaStatus; unsigned int *d_histogram; unsigned char *d_gray_scale_image; double *d_norm_histogram; double *d_cumulative_sum; double *d_aux_for_cumulative_sum; // Threads size int threads = 256; int N = 256; // Size of the array. int blocks = N / threads + ((N%threads == 0) ? 0 : 1); // Perform on CUDA. const dim3 blockSize(threads / 2, 1, 1); const dim3 gridSize(blocks, 1, 1); // Choose which GPU to run on, change this on a multi-GPU system. 
cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = hipMalloc(&d_aux_for_cumulative_sum, (LIMIT + 1) * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_cumulative_sum, (LIMIT + 1) * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_norm_histogram, (LIMIT + 1) * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_histogram, (LIMIT + 1) * sizeof(unsigned int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_gray_scale_image, size * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMemcpy(d_gray_scale_image, grayScaleImage, size * sizeof(unsigned char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemset(d_histogram, 0, LIMIT + 1); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset failed!"); goto Error; } cudaStatus = hipMemset(d_norm_histogram, 0, LIMIT + 1); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset failed!"); goto Error; } cudaStatus = hipMemset(d_cumulative_sum, 0, LIMIT + 1); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset failed!"); goto Error; } cudaStatus = hipMemset(d_aux_for_cumulative_sum, 0, LIMIT + 1); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset failed!"); goto Error; } int no_threads = 1024; int no_blocks = (int)ceil((float)size / no_threads); getHistrogram << <no_blocks, no_threads >> > (d_histogram, d_gray_scale_image, size); cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching getHistrogram!\n", cudaStatus); goto Error; } getNormalizedHistogram << <1, 256 >> > (d_norm_histogram, d_histogram, size); cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching getNormalizedHistogram!\n", cudaStatus); goto Error; } exclusiveScanGPU << < gridSize, blockSize, blocks * threads * sizeof(double) >> > (d_norm_histogram, d_cumulative_sum, N, d_aux_for_cumulative_sum); cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching exclusiveScanGPU!\n", cudaStatus); goto Error; } // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "exclusiveScanGPU launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } cudaStatus = hipMemcpy(histogram, d_histogram, (LIMIT + 1) * sizeof(unsigned int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(norm_histogram, d_norm_histogram, (LIMIT + 1) * sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(cumulativeSumHistogram, d_cumulative_sum, N * sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(d_gray_scale_image); hipFree(d_histogram); hipFree(d_norm_histogram); hipFree(d_cumulative_sum); hipFree(d_aux_for_cumulative_sum); return cudaStatus; } hipError_t doHistogramEqualization(unsigned char *eq_image, unsigned char *image, double *cumulative_sum, int dimension) { hipError_t cudaStatus; unsigned char *d_eq_image; unsigned char *d_image; double *d_cumulative_sum; int no_thread = 1024; int no_block = (int)ceil((float)dimension / no_thread); // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = hipMalloc(&d_eq_image, dimension * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_image, dimension * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_cumulative_sum, (LIMIT + 1) * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMemcpy(d_image, image, dimension * sizeof(unsigned char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(d_cumulative_sum, cumulative_sum, (LIMIT + 1) * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } histogramEqualization << <no_block, no_thread >> > (d_eq_image, d_image, d_cumulative_sum, dimension); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "histogramEqualization launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching histogramEqualization!\n", cudaStatus); goto Error; } cudaStatus = hipMemcpy(eq_image, d_eq_image, dimension * sizeof(unsigned char), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(d_cumulative_sum); hipFree(d_eq_image); hipFree(d_image); return cudaStatus; } hipError_t applySobelFilter(unsigned char *image, unsigned char *filtered_image, int width, int height) { hipError_t cudaStatus; unsigned char *d_image; unsigned char *d_filtered_image; cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = hipMalloc(&d_image, width * height * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_filtered_image, width * height * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMemcpy(d_image, image, width * height * sizeof(unsigned char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } double number_of_threads = 32; dim3 threadsPerBlock(number_of_threads, number_of_threads, 1); dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1); sobelFilter << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, height, width); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "sobelFilter launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching sobelFilter!\n", cudaStatus); goto Error; } cudaStatus = hipMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(d_filtered_image); hipFree(d_image); return cudaStatus; } hipError_t applyGaussianFilter(unsigned char *image, unsigned char *filtered_image, int width, int height, const int dim_kernel) { int kernel[25] = { 1, 4, 6, 4, 1, 4, 16, 24, 16, 4, 6, 24, 36, 24, 6, 4, 16, 24, 16, 4, 1, 4, 6, 4, 1 }; hipError_t cudaStatus; unsigned char *d_image; unsigned char *d_filtered_image; int *d_kernel; cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = hipMalloc(&d_image, width * height * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_filtered_image, width * height * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_kernel, dim_kernel * dim_kernel * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMemcpy(d_kernel, kernel, dim_kernel * dim_kernel * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(d_image, image, width * height * sizeof(unsigned char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } double number_of_threads = 32; dim3 threadsPerBlock(number_of_threads, number_of_threads, 1); dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1); gaussianBlur << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, width, height, d_kernel, dim_kernel, 256); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "gaussianBlur launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching gaussianBlur!\n", cudaStatus); goto Error; } cudaStatus = hipMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(d_filtered_image); hipFree(d_image); hipFree(d_kernel); return cudaStatus; } hipError_t applyBinaryThreshold(unsigned char * image, unsigned char * filtered_image, int width, int height, const int threshold) { hipError_t cudaStatus; unsigned char *d_image; unsigned char *d_filtered_image; cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = hipMalloc(&d_image, width * height * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_filtered_image, width * height * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMemcpy(d_image, image, width * height * sizeof(unsigned char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } double number_of_threads = 32; dim3 threadsPerBlock(number_of_threads, number_of_threads, 1); dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1); binaryThreshold << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, width, height, threshold); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "binaryThreshold launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching binaryThreshold!\n", cudaStatus); goto Error; } cudaStatus = hipMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(d_filtered_image); hipFree(d_image); return cudaStatus; } hipError_t extractROI(unsigned char *image, unsigned char *roi, int width, int height) { hipError_t cudaStatus; unsigned char *d_image; unsigned char *d_roi; unsigned char *d_mask; Mat mask = Mat::zeros(Size(width, height), CV_8U); Point points[4] = { Point(1013, 700), Point(275, 700), Point(490, 365), Point(678, 365), }; //Create mask. fillConvexPoly(mask, points, 4, Scalar(255, 0, 0)); cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = hipMalloc(&d_image, width * height * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_mask, width * height * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc(&d_roi, width * height * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMemset(d_roi, 0, width * height); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset failed!"); goto Error; } cudaStatus = hipMemcpy(d_image, image, width * height * sizeof(unsigned char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(d_mask, mask.data, width * height * sizeof(unsigned char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } //Bitwise - And. 
double number_of_threads = 32; dim3 threadsPerBlock(number_of_threads, number_of_threads, 1); dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1); bitwiseAnd << <numBlocks, threadsPerBlock>> > (d_image, d_mask, d_roi, width, height); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "bitwiseAnd launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching bitwiseAnd!\n", cudaStatus); goto Error; } cudaStatus = hipMemcpy(roi, d_roi, width * height * sizeof(unsigned char), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(d_image); hipFree(d_mask); hipFree(d_roi); return cudaStatus; } vector<Vec4i> lineClassification(vector<Vec4i> lines, double ts, bool on_right) { vector<Vec4i> right_line, left_line, filtered_lines; vector<Point> right_points, left_points; Vec4d single_left_line, single_right_line; vector<double> slope_container; for (auto line : lines) { Point start_point = Point(line[0], line[1]); Point end_point = Point(line[2], line[3]); double slope = (double(end_point.y - start_point.y) / double(end_point.x - start_point.x)); if (abs(slope) > ts) { if (slope > 0) { right_line.push_back(line); right_points.push_back(start_point); right_points.push_back(end_point); draw_on_right = true; } else { left_line.push_back(line); left_points.push_back(start_point); left_points.push_back(end_point); draw_on_left = true; } } } if (right_points.size() > 0) { fitLine(right_points, single_right_line, DIST_L12, 0, 0.01, 0.01); } if (left_points.size() > 0) { fitLine(left_points, single_left_line, DIST_L12, 0, 0.01, 0.01); } if (on_right) return right_line; else return left_line; } void plotLines(vector<Vec4i> lines, Mat image_rgb) { vector<Vec4i> right_line = lineClassification(lines, 0.5, true); vector<Vec4i> left_line = lineClassification(lines, 0.5, false); // maybe i wiil draw later some line with this for (size_t i = 0; i < right_line.size(); i++) { Vec4i l = right_line[i]; line(image_rgb, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 255, 0), 3, LINE_AA); } for (size_t i = 0; i < left_line.size(); i++) { Vec4i l = left_line[i]; line(image_rgb, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 255, 0), 3, LINE_AA); } imshow("frame", image_rgb); //imshow("region_of_interest", region_of_interest); waitKey(0); }
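// ---------------------------------------------------------------------------
// Hypothetical host-side driver (not part of the original file): a minimal
// sketch of how the wrappers above could be chained into one frame-processing
// pass. It assumes LIMIT == 255 (from ImageProcessing.cuh), that the header
// already pulls in OpenCV and <vector> as the code above implies, that the
// hipified wrappers keep the signatures of their CUDA counterparts, and that
// the frame size matches the hard-coded ROI polygon in extractROI. The
// function name and the Hough/threshold parameters are illustrative only.
static int runPipelineOnFrame(const char *path)
{
    Mat frame = imread(path, IMREAD_COLOR);
    if (frame.empty()) return -1;

    const int width  = frame.cols;
    const int height = frame.rows;
    const unsigned int size = width * height;

    vector<unsigned char> gray(size), eq(size), blurred(size),
                          edges(size), binary(size), roi(size);
    vector<unsigned int>  histogram(LIMIT + 1);
    vector<double>        normHistogram(LIMIT + 1), cdf(LIMIT + 1);

    // Grayscale conversion, histogram statistics and equalization.
    if (bgrToGrayscale(gray.data(), frame, size) != hipSuccess) return -1;
    if (getHistogramN(cdf.data(), normHistogram.data(), histogram.data(),
                      gray.data(), size) != hipSuccess) return -1;
    if (doHistogramEqualization(eq.data(), gray.data(), cdf.data(), size) != hipSuccess) return -1;

    // Smoothing, edge detection, thresholding and region-of-interest masking.
    if (applyGaussianFilter(eq.data(), blurred.data(), width, height, 5) != hipSuccess) return -1;
    if (applySobelFilter(blurred.data(), edges.data(), width, height) != hipSuccess) return -1;
    if (applyBinaryThreshold(edges.data(), binary.data(), width, height, 100) != hipSuccess) return -1;
    if (extractROI(binary.data(), roi.data(), width, height) != hipSuccess) return -1;

    // Line extraction stays on the CPU; the Hough parameters are placeholders.
    Mat roiMat(height, width, CV_8U, roi.data());
    vector<Vec4i> lines;
    HoughLinesP(roiMat, lines, 1, CV_PI / 180, 50, 50, 10);
    plotLines(lines, frame);
    return 0;
}
// ---------------------------------------------------------------------------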
c106ffefc6966609344b61b790bfbd3ece6289df.cu
#include "ImageProcessing.cuh" // global variables bool draw_on_right = false; bool draw_on_left = false; __global__ void convertToGrayscale(unsigned char *gray, unsigned char *r, unsigned char *g, unsigned char *b, int dimension) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < dimension) gray[index] = 1 / 3.0 * (r[index] + g[index] + b[index]); } __global__ void getHistrogram(unsigned int *histogram, unsigned char *image, int dimension) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < dimension) { int color = image[index]; atomicAdd(&histogram[color], 1); } } __global__ void getNormalizedHistogram(double *norm_histogram, unsigned int* histogram, int dimension) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < LIMIT + 1) { norm_histogram[index] = (double)histogram[index] / dimension; } } __global__ void histogramEqualization(unsigned char *eq_image, unsigned char* image, double *cumulative_sum, int dimension) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < dimension) { if (floor(LIMIT * cumulative_sum[image[index]]) < LIMIT || floor(LIMIT * cumulative_sum[image[index]]) > 0) eq_image[index] = floor(LIMIT * cumulative_sum[image[index]]); else eq_image[index] = image[index]; } } // Exclusive scan on CUDA. __global__ void exclusiveScanGPU(double *d_array, double *d_result, int N, double *d_aux) { extern __shared__ double temp[]; int realIndex = 2 * threadIdx.x + blockDim.x * 2 * blockIdx.x; int threadIndex = threadIdx.x; int index = 2 * threadIndex; int offset = 1; // Copy from the array to shared memory. temp[index] = d_array[realIndex]; temp[index + 1] = d_array[realIndex + 1]; // Reduce by storing the intermediate values. The last element will be // the sum of n-1 elements. for (int d = blockDim.x; d > 0; d = d / 2) { __syncthreads(); // Regulates the amount of threads operating. if (threadIndex < d) { // Swap the numbers int current = offset * (index + 1) - 1; int next = offset * (index + 2) - 1; temp[next] += temp[current]; } // Increase the offset by multiple of 2. offset *= 2; } // Only one thread performs this. if (threadIndex == 0) { // Store the sum to the auxiliary array. if (d_aux) { d_aux[blockIdx.x] = temp[N - 1]; } // Reset the last element with identity. Only the first thread will do // the job. temp[N - 1] = 0; } // Down sweep to build scan. for (int d = 1; d < blockDim.x * 2; d *= 2) { // Reduce the offset by division of 2. 
offset = offset / 2; __syncthreads(); if (threadIndex < d) { int current = offset * (index + 1) - 1; int next = offset * (index + 2) - 1; // Swap double tempCurrent = temp[current]; temp[current] = temp[next]; temp[next] += tempCurrent; } } __syncthreads(); d_result[realIndex] = temp[index]; // write results to device memory d_result[realIndex + 1] = temp[index + 1]; } __global__ void sobelFilter(unsigned char * image, unsigned char * filtered_image, int height, int width) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; double dx, dy; if (x > 0 && y > 0 && x < width - 1 && y < height - 1) { dx = (-1 * image[(y - 1) * width + (x - 1)]) + (-2 * image[y * width + (x - 1)]) + (-1 * image[(y + 1) * width + (x - 1)]) + (image[(y - 1) * width + (x + 1)]) + (2 * image[y * width + (x + 1)]) + (image[(y + 1) * width + (x + 1)]); dy = (image[(y - 1) * width + (x - 1)]) + (2 * image[(y - 1) * width + x]) + (image[(y - 1) * width + (x + 1)]) + (-1 * image[(y + 1) * width + (x - 1)]) + (-2 * image[(y + 1) * width + x]) + (-1 * image[(y + 1) * width + (x + 1)]); filtered_image[y * width + x] = sqrt(dx * dx + dy * dy); } } __global__ void gaussianBlur(unsigned char *image, unsigned char *output_image, int width, int height, const int* const kernel, const int dim_kernel, int sum_of_elements){ int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; float partial_sum = 0.0; if (x > 0 && y > 0 && x < width - 1 && y < height - 1) { for (int row = 0; row < dim_kernel; row++) { for (int col = 0; col < dim_kernel; col++) { int index_image_x = x + col - dim_kernel / 2; int index_image_y = y + row - dim_kernel / 2; index_image_x = min(max(index_image_x, 0), width - 1); index_image_y = min(max(index_image_y, 0), height - 1); partial_sum += kernel[row * dim_kernel + col] * image[index_image_y * width + index_image_x]; } } output_image[y * width + x] = int((float)partial_sum / sum_of_elements); } } __global__ void binaryThreshold(unsigned char * image, unsigned char * output_image, int width, int height, int threshold) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x > 0 && y > 0 && x < width - 1 && y < height - 1) { if (image[y * width + x] < threshold) output_image[y * width + x] = 0; else output_image[y * width + x] = 255; } } __global__ void bitwiseAnd(unsigned char *image, unsigned char *mask, unsigned char *output_image, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x > 0 && y > 0 && x < width - 1 && y < height - 1) { output_image[y * width + x] = image[y * width + x] * mask[y * width + x]; } } __device__ int min_int(int a, int b) { return a <= b ? a : b; } __device__ int max_int(int a, int b) { return a >= b ? a : b; } cudaError_t bgrToGrayscale(unsigned char *gray, Mat image_rgb, unsigned int size) { // Host input vectors. unsigned char *red = new unsigned char[size]; unsigned char *green = new unsigned char[size]; unsigned char *blue = new unsigned char[size]; // Init vectors with rgb values. 
for (int y = 0; y < image_rgb.rows; ++y) { for (int x = 0; x < image_rgb.cols; ++x) { blue[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 0]; green[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 1]; red[y * image_rgb.cols + x] = image_rgb.data[image_rgb.channels() * (y * image_rgb.cols + x) + 2]; } } cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); // Device input vectors. unsigned char *d_red; unsigned char *d_green; unsigned char *d_blue; unsigned char *d_gray; // Allocate GPU buffers. cudaStatus = cudaMalloc(&d_red, size * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_green, size * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_blue, size * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_gray, size * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers cudaStatus = cudaMemcpy(d_red, red, size * sizeof(unsigned char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMemcpy(d_green, green, size * sizeof(unsigned char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMemcpy(d_blue, blue, size * sizeof(unsigned char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } int no_threads = 1024; int no_blocks = (int)ceil((float)size / no_threads); convertToGrayscale << <no_blocks, no_threads >> > (d_gray, d_red, d_green, d_blue, size); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "convert_to_grayscale launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(gray, d_gray, size * sizeof(unsigned char), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(d_red); cudaFree(d_green); cudaFree(d_blue); cudaFree(d_gray); delete[] red; delete[] green; delete[] blue; return cudaStatus; } cudaError_t getHistogramN(double *cumulativeSumHistogram, double *norm_histogram, unsigned int *histogram, unsigned char *grayScaleImage, int size) { cudaError_t cudaStatus; unsigned int *d_histogram; unsigned char *d_gray_scale_image; double *d_norm_histogram; double *d_cumulative_sum; double *d_aux_for_cumulative_sum; // Threads size int threads = 256; int N = 256; // Size of the array. int blocks = N / threads + ((N%threads == 0) ? 0 : 1); // Perform on CUDA. const dim3 blockSize(threads / 2, 1, 1); const dim3 gridSize(blocks, 1, 1); // Choose which GPU to run on, change this on a multi-GPU system. 
cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = cudaMalloc(&d_aux_for_cumulative_sum, (LIMIT + 1) * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_cumulative_sum, (LIMIT + 1) * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_norm_histogram, (LIMIT + 1) * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_histogram, (LIMIT + 1) * sizeof(unsigned int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_gray_scale_image, size * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMemcpy(d_gray_scale_image, grayScaleImage, size * sizeof(unsigned char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemset(d_histogram, 0, LIMIT + 1); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); goto Error; } cudaStatus = cudaMemset(d_norm_histogram, 0, LIMIT + 1); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); goto Error; } cudaStatus = cudaMemset(d_cumulative_sum, 0, LIMIT + 1); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); goto Error; } cudaStatus = cudaMemset(d_aux_for_cumulative_sum, 0, LIMIT + 1); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); goto Error; } int no_threads = 1024; int no_blocks = (int)ceil((float)size / no_threads); getHistrogram << <no_blocks, no_threads >> > (d_histogram, d_gray_scale_image, size); cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching getHistrogram!\n", cudaStatus); goto Error; } getNormalizedHistogram << <1, 256 >> > (d_norm_histogram, d_histogram, size); cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching getNormalizedHistogram!\n", cudaStatus); goto Error; } exclusiveScanGPU << < gridSize, blockSize, blocks * threads * sizeof(double) >> > (d_norm_histogram, d_cumulative_sum, N, d_aux_for_cumulative_sum); cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching exclusiveScanGPU!\n", cudaStatus); goto Error; } // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "exclusiveScanGPU launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } cudaStatus = cudaMemcpy(histogram, d_histogram, (LIMIT + 1) * sizeof(unsigned int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(norm_histogram, d_norm_histogram, (LIMIT + 1) * sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(cumulativeSumHistogram, d_cumulative_sum, N * sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(d_gray_scale_image); cudaFree(d_histogram); cudaFree(d_norm_histogram); cudaFree(d_cumulative_sum); cudaFree(d_aux_for_cumulative_sum); return cudaStatus; } cudaError_t doHistogramEqualization(unsigned char *eq_image, unsigned char *image, double *cumulative_sum, int dimension) { cudaError_t cudaStatus; unsigned char *d_eq_image; unsigned char *d_image; double *d_cumulative_sum; int no_thread = 1024; int no_block = (int)ceil((float)dimension / no_thread); // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = cudaMalloc(&d_eq_image, dimension * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_image, dimension * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_cumulative_sum, (LIMIT + 1) * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMemcpy(d_image, image, dimension * sizeof(unsigned char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(d_cumulative_sum, cumulative_sum, (LIMIT + 1) * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } histogramEqualization << <no_block, no_thread >> > (d_eq_image, d_image, d_cumulative_sum, dimension); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "histogramEqualization launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching histogramEqualization!\n", cudaStatus); goto Error; } cudaStatus = cudaMemcpy(eq_image, d_eq_image, dimension * sizeof(unsigned char), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(d_cumulative_sum); cudaFree(d_eq_image); cudaFree(d_image); return cudaStatus; } cudaError_t applySobelFilter(unsigned char *image, unsigned char *filtered_image, int width, int height) { cudaError_t cudaStatus; unsigned char *d_image; unsigned char *d_filtered_image; cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = cudaMalloc(&d_image, width * height * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_filtered_image, width * height * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMemcpy(d_image, image, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } double number_of_threads = 32; dim3 threadsPerBlock(number_of_threads, number_of_threads, 1); dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1); sobelFilter << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, height, width); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "sobelFilter launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching sobelFilter!\n", cudaStatus); goto Error; } cudaStatus = cudaMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(d_filtered_image); cudaFree(d_image); return cudaStatus; } cudaError_t applyGaussianFilter(unsigned char *image, unsigned char *filtered_image, int width, int height, const int dim_kernel) { int kernel[25] = { 1, 4, 6, 4, 1, 4, 16, 24, 16, 4, 6, 24, 36, 24, 6, 4, 16, 24, 16, 4, 1, 4, 6, 4, 1 }; cudaError_t cudaStatus; unsigned char *d_image; unsigned char *d_filtered_image; int *d_kernel; cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = cudaMalloc(&d_image, width * height * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_filtered_image, width * height * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_kernel, dim_kernel * dim_kernel * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMemcpy(d_kernel, kernel, dim_kernel * dim_kernel * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(d_image, image, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } double number_of_threads = 32; dim3 threadsPerBlock(number_of_threads, number_of_threads, 1); dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1); gaussianBlur << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, width, height, d_kernel, dim_kernel, 256); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "gaussianBlur launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching gaussianBlur!\n", cudaStatus); goto Error; } cudaStatus = cudaMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(d_filtered_image); cudaFree(d_image); cudaFree(d_kernel); return cudaStatus; } cudaError_t applyBinaryThreshold(unsigned char * image, unsigned char * filtered_image, int width, int height, const int threshold) { cudaError_t cudaStatus; unsigned char *d_image; unsigned char *d_filtered_image; cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = cudaMalloc(&d_image, width * height * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_filtered_image, width * height * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMemcpy(d_image, image, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } double number_of_threads = 32; dim3 threadsPerBlock(number_of_threads, number_of_threads, 1); dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1); binaryThreshold << <numBlocks, threadsPerBlock >> > (d_image, d_filtered_image, width, height, threshold); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "binaryThreshold launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching binaryThreshold!\n", cudaStatus); goto Error; } cudaStatus = cudaMemcpy(filtered_image, d_filtered_image, height * width * sizeof(unsigned char), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(d_filtered_image); cudaFree(d_image); return cudaStatus; } cudaError_t extractROI(unsigned char *image, unsigned char *roi, int width, int height) { cudaError_t cudaStatus; unsigned char *d_image; unsigned char *d_roi; unsigned char *d_mask; Mat mask = Mat::zeros(Size(width, height), CV_8U); Point points[4] = { Point(1013, 700), Point(275, 700), Point(490, 365), Point(678, 365), }; //Create mask. fillConvexPoly(mask, points, 4, Scalar(255, 0, 0)); cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = cudaMalloc(&d_image, width * height * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_mask, width * height * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc(&d_roi, width * height * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMemset(d_roi, 0, width * height); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); goto Error; } cudaStatus = cudaMemcpy(d_image, image, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(d_mask, mask.data, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } //Bitwise - And. 
double number_of_threads = 32; dim3 threadsPerBlock(number_of_threads, number_of_threads, 1); dim3 numBlocks(ceil(width / number_of_threads), ceil(height / number_of_threads), 1); bitwiseAnd << <numBlocks, threadsPerBlock>> > (d_image, d_mask, d_roi, width, height); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "bitwiseAnd launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching bitwiseAnd!\n", cudaStatus); goto Error; } cudaStatus = cudaMemcpy(roi, d_roi, width * height * sizeof(unsigned char), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(d_image); cudaFree(d_mask); cudaFree(d_roi); return cudaStatus; } vector<Vec4i> lineClassification(vector<Vec4i> lines, double ts, bool on_right) { vector<Vec4i> right_line, left_line, filtered_lines; vector<Point> right_points, left_points; Vec4d single_left_line, single_right_line; vector<double> slope_container; for (auto line : lines) { Point start_point = Point(line[0], line[1]); Point end_point = Point(line[2], line[3]); double slope = (double(end_point.y - start_point.y) / double(end_point.x - start_point.x)); if (abs(slope) > ts) { if (slope > 0) { right_line.push_back(line); right_points.push_back(start_point); right_points.push_back(end_point); draw_on_right = true; } else { left_line.push_back(line); left_points.push_back(start_point); left_points.push_back(end_point); draw_on_left = true; } } } if (right_points.size() > 0) { fitLine(right_points, single_right_line, DIST_L12, 0, 0.01, 0.01); } if (left_points.size() > 0) { fitLine(left_points, single_left_line, DIST_L12, 0, 0.01, 0.01); } if (on_right) return right_line; else return left_line; } void plotLines(vector<Vec4i> lines, Mat image_rgb) { vector<Vec4i> right_line = lineClassification(lines, 0.5, true); vector<Vec4i> left_line = lineClassification(lines, 0.5, false); // maybe i wiil draw later some line with this for (size_t i = 0; i < right_line.size(); i++) { Vec4i l = right_line[i]; line(image_rgb, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 255, 0), 3, LINE_AA); } for (size_t i = 0; i < left_line.size(); i++) { Vec4i l = left_line[i]; line(image_rgb, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 255, 0), 3, LINE_AA); } imshow("frame", image_rgb); //imshow("region_of_interest", region_of_interest); waitKey(0); }
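// ---------------------------------------------------------------------------
// Optional refactoring sketch (not part of the original file): every wrapper
// above repeats the same "check cudaStatus, print, goto Error" block after
// each CUDA call. A small helper macro such as the hypothetical CUDA_CHECK
// below expresses the same control flow more compactly; it assumes the
// enclosing function declares cudaError_t cudaStatus and an Error: label,
// exactly as the functions in this file already do.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaStatus = (call);                                                  \
        if (cudaStatus != cudaSuccess) {                                      \
            fprintf(stderr, "%s failed: %s\n", #call,                         \
                    cudaGetErrorString(cudaStatus));                          \
            goto Error;                                                       \
        }                                                                     \
    } while (0)

// With the macro, the setup section of applyBinaryThreshold, for example,
// could shrink to:
//   CUDA_CHECK(cudaSetDevice(0));
//   CUDA_CHECK(cudaMalloc(&d_image, width * height * sizeof(unsigned char)));
//   CUDA_CHECK(cudaMemcpy(d_image, image, width * height * sizeof(unsigned char),
//                         cudaMemcpyHostToDevice));
// ---------------------------------------------------------------------------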
eb2e2e3ac61ed30b086d91e5663bd23bf237653b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "scanKernels.cu" #include "support_kernels.cu" //Helper functions //Reorders data extern "C" __global__ void dataReorderR4(const int n_particles, real4 *source, real4 *destination, uint *permutation) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= n_particles) return; int newIndex = permutation[idx]; destination[idx] = source[newIndex]; } extern "C" __global__ void dataReorderF2(const int n_particles, double2 *source, double2 *destination, uint *permutation) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= n_particles) return; int newIndex = permutation[idx]; destination[idx] = source[newIndex]; } extern "C" __global__ void dataReorderI1(const int n_particles, int *source, int *destination, uint *permutation) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= n_particles) return; int newIndex = permutation[idx]; destination[idx] = source[newIndex]; } //Convert a 64bit key uint2 key into a 96key with a permutation value build in extern "C" __global__ void convertKey64to96(uint4 *keys, uint4 *newKeys, const int N) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= N) return; uint4 temp = keys[idx]; newKeys[idx] = (uint4){temp.x, temp.y, temp.z, idx}; } extern "C" __global__ void extractKeyAndPerm(uint4 *newKeys, uint4 *keys, uint *permutation, const int N) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= N) return; uint4 temp = newKeys[idx]; keys[idx] = (uint4){temp.x, temp.y, temp.z, temp.w}; permutation[idx] = temp.w; } //Extract 1 of the 4 items of an uint4 key and move it into a 32bit array extern "C" __global__ void extractInt(uint4 *keys, uint *simpleKeys, const int N, int keyIdx) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= N) return; uint4 temp = keys[idx]; int simpleTemp; if(keyIdx == 0) simpleTemp = temp.x; else if(keyIdx == 1) simpleTemp = temp.y; else if(keyIdx == 2) simpleTemp = temp.z; simpleKeys[idx] = simpleTemp; } //Create range of 0 to N extern "C" __global__ void fillSequence(uint *sequence, const int N) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= N) return; sequence[idx] = idx; } //Reorder the data in the arrays according to a given permutation extern "C" __global__ void reOrderKeysValues(uint4 *keysSrc, uint4 *keysDest, uint *permutation, const int N) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= N) return; int newIndex = permutation[idx]; keysDest[idx] = 
keysSrc[newIndex]; } extern "C" __global__ void sort_count(volatile uint2 *valid, int *counts, const int N, setupParams sParam, int bitIdx/*, int2 *blaat*/) { const int tid = threadIdx.x; const int bid = blockDim.y * blockIdx.x + threadIdx.y; int totalNumThreads = gridDim.x*blockDim.y*blockDim.x; //120*4*32 // gridDim.x * blockDim.y; //2D !!!! volatile __shared__ int shmemSC[128]; volatile __shared__ int shmemSCTEST[128]; //Determine the parameters and loop over the particles int jobSize = (N / 2) / totalNumThreads; int offSet = jobSize * bid; int count = 0; jobSize = sParam.jobs; if(bid < sParam.blocksWithExtraJobs) jobSize++; if(bid <= sParam.blocksWithExtraJobs) offSet = (sParam.jobs+1)*64*bid; else { offSet = sParam.blocksWithExtraJobs*(sParam.jobs+1)*64; offSet += (bid-sParam.blocksWithExtraJobs)*(sParam.jobs)*64; } offSet /= 2; //Divide by two since we do double loads (uint2) for(int i=0; i < jobSize; i++) { count += !(valid[offSet + tid].x & (1u<<bitIdx)); count += !(valid[offSet + tid].y & (1u<<bitIdx)); offSet += blockDim.x; } //Reduce to get the count of this block shmemSC[32*threadIdx.y + tid] = count; reduce_block2(tid, &shmemSC[32*threadIdx.y], count); //Save the values / count of the current block if(threadIdx.x == 0) counts[bid] = shmemSC[32*threadIdx.y]; //Block 0 handles any extra elements that couldn't be divided equally if(bid == 0) { //Here i use single element reads for ease of boundary conditions and steps count = 0; offSet = sParam.extraOffset; uint* valid2 = (uint*) valid; for(int i=0 ; i < sParam.extraElements; i += blockDim.x) { if((offSet + i + tid) < (N)) //Make sure we dont read more than there are items { count += !(valid2[offSet + i + tid] & (1u<<bitIdx)); } } //Reduce shmemSCTEST[tid] = count; __syncthreads(); if(tid < 16){ shmemSCTEST[tid] = count = count + shmemSCTEST[tid+16]; shmemSCTEST[tid] = count = count + shmemSCTEST[tid+8]; shmemSCTEST[tid] = count = count + shmemSCTEST[tid+4]; shmemSCTEST[tid] = count = count + shmemSCTEST[tid+2]; shmemSCTEST[tid] = count = count + shmemSCTEST[tid+1]; } //Save the count if(tid == 0) { counts[gridDim.x*blockDim.y] = shmemSCTEST[0]; } __syncthreads(); }//end if bid==0 }//end compact_count // __device__ __forceinline__ int testTest(volatile unsigned int tmp[], uint val, const int idx, long test) // { // tmp[idx-16] = 0; tmp[idx] = val; // // // Since we set half the array to 0 we don't need ifs! // tmp[idx] = val = tmp[idx - 1] + val; // tmp[idx] = val = tmp[idx - 2] + val; // tmp[idx] = val = tmp[idx - 4] + val; // tmp[idx] = val = tmp[idx - 8] + val; // tmp[idx] = val = tmp[idx - 16] + val; // // return (idx > 0) ? 
tmp[idx-1] : 0; // } /* For sorting it turns out that the stage kernels works faster than the non-staged Might depend on how much has to be sorted/moved, have to do timings in the actual code */ extern "C" __global__ void sort_move_stage_key_value(uint2 *valid, int *output, uint2 *srcValues, uint *valuesOut, int *counts, const int N, setupParams sParam, int bitIdx) { //Walk the values of this block const int tid = threadIdx.x; const int bid = blockDim.y * blockIdx.x + threadIdx.y; volatile __shared__ unsigned int shmemSMSKV[192]; volatile __shared__ int stage[64*4]; volatile __shared__ int stage_values[64*4]; //Determine the parameters and loop over the particles int jobSize, offSet; jobSize = sParam.jobs; if(bid < sParam.blocksWithExtraJobs) jobSize++; if(bid <= sParam.blocksWithExtraJobs) offSet = (sParam.jobs+1)*64*bid; else { offSet = sParam.blocksWithExtraJobs*(sParam.jobs+1)*64; offSet += (bid-sParam.blocksWithExtraJobs)*(sParam.jobs)*64; } int outputOffset = counts[bid]; //Get the start of the output offset of the invalid items //this is calculated as follows: //totalValidItems + startReadOffset - startOutputOffset //startReadOffset - startOutputOffset <- is the total number of invalid items from any blocks //before the current block int rightOutputOffset = counts[gridDim.x*blockDim.y+1]; rightOutputOffset = rightOutputOffset + offSet - outputOffset; offSet /= 2; //Divide by two since we do double loads (uint2) TODO what happens if offSet is uneven...? int curCount; int idx, ridx; outputOffset += threadIdx.x; rightOutputOffset += threadIdx.x; //Do per step the prefix scan to determine the output locations for(int i=0; i < jobSize; i++) { uint2 validBase = valid[offSet + tid]; uint2 valuesBase = srcValues[offSet + tid]; int value = !(validBase.x & (1u<<bitIdx)); value += !(validBase.y & (1u<<bitIdx)); idx = hillisSteele5(&shmemSMSKV[48*threadIdx.y+16], curCount, value, threadIdx.x); ridx = curCount + threadIdx.x*2 - idx; //lane*2 - idx , *2 since we read 2 items a time if(!(validBase.x & (1u<<bitIdx))) { stage[idx + threadIdx.y*64] = validBase.x; stage_values[idx++ + threadIdx.y*64] = valuesBase.x; } else { stage[ridx + threadIdx.y*64] = validBase.x; stage_values[ridx++ + threadIdx.y*64] = valuesBase.x; } if(!(validBase.y & (1u<<bitIdx))) { stage[idx + threadIdx.y*64] = validBase.y; stage_values[idx + threadIdx.y*64] = valuesBase.y; } else { stage[ridx + threadIdx.y*64] = validBase.y; stage_values[ridx + threadIdx.y*64] = valuesBase.y; } //Reuse value as index value = outputOffset; //Flush output, first 32 if(threadIdx.x >= curCount) value = rightOutputOffset-curCount; output[value] = stage [threadIdx.x + threadIdx.y*64]; valuesOut[value] = stage_values[threadIdx.x + threadIdx.y*64]; //2nd 32 value = outputOffset + blockDim.x; if(threadIdx.x + blockDim.x >= curCount) value = rightOutputOffset + blockDim.x - curCount; output[value] = stage [threadIdx.x + blockDim.x + threadIdx.y*64]; valuesOut[value] = stage_values[threadIdx.x + blockDim.x + threadIdx.y*64]; outputOffset += curCount; //Increase the output offset rightOutputOffset += 64 - curCount; //64 (32*2) since we do 2 items a time offSet += blockDim.x; //Step to the next N threads } //Block 0 handles any extra elements that couldn't be divided equally if(bid == 0) { //Here i use single element reads for ease of boundary conditions and steps offSet = sParam.extraOffset; outputOffset = counts[gridDim.x*blockDim.y]; rightOutputOffset = counts[gridDim.x*blockDim.y+1]; rightOutputOffset = rightOutputOffset + offSet - outputOffset; 
uint* valid2 = (uint*) valid; uint* srcValues2 = (uint*) srcValues; for(int i=0; i < sParam.extraElements; i += blockDim.x) { uint value = 0; uint srcValueItem = 0; if((offSet + i + tid) < (N)){ //Make sure we dont read more than there are items value = valid2[offSet + i + tid]; srcValueItem = srcValues2[offSet + i + tid]; } idx = hillisSteele5(&shmemSMSKV[48*threadIdx.y+16], curCount, !(value & (1u<<bitIdx)), threadIdx.x); ridx = threadIdx.x - idx; if((offSet + i + tid) < N) if(!(value & (1u<<bitIdx))) { output[idx + outputOffset] = value; valuesOut[idx + outputOffset] = srcValueItem; } else { output[ridx + rightOutputOffset] = value; valuesOut[ridx + rightOutputOffset] = srcValueItem; } outputOffset += curCount; //Increase the output offset rightOutputOffset += 32-curCount; //32 since we do only 1 at a time } }//end if bid==0 }//end sort_move_stage_key_value
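// ---------------------------------------------------------------------------
// Standalone illustration (not part of the original file): both kernels above
// lean on a warp-level Hillis-Steele prefix sum (hillisSteele5, defined in
// scanKernels.cu and not shown here). The demo kernel below shows the same
// idea in isolation for one 32-thread warp, using the "zero a 16-element pad
// below the data" trick that the commented-out testTest() also uses, so the
// scan needs no bounds checks. Like the original kernels it assumes
// warp-synchronous execution; on newer architectures __syncwarp() or warp
// shuffle intrinsics would be required between steps. Names are illustrative.
#include "hip/hip_runtime.h"

__global__ void warpExclusiveScanDemo(const unsigned int *in, unsigned int *out)
{
    volatile __shared__ unsigned int tmp[48];   // 16 padding slots + 32 data slots
    const int lane = threadIdx.x;               // 0..31, launch with one warp
    const int idx  = lane + 16;

    unsigned int val = in[lane];
    tmp[idx - 16] = 0;                          // zero the padding half
    tmp[idx]      = val;

    // Hillis-Steele steps: after stride d each slot holds the sum of itself
    // and the d preceding inputs; the zero padding absorbs the low lanes.
    tmp[idx] = val = tmp[idx - 1]  + val;
    tmp[idx] = val = tmp[idx - 2]  + val;
    tmp[idx] = val = tmp[idx - 4]  + val;
    tmp[idx] = val = tmp[idx - 8]  + val;
    tmp[idx] = val = tmp[idx - 16] + val;

    // Shift by one to turn the inclusive result into an exclusive scan.
    out[lane] = (lane > 0) ? tmp[idx - 1] : 0;
}
// Possible launch:
//   hipLaunchKernelGGL(warpExclusiveScanDemo, dim3(1), dim3(32), 0, 0, d_in, d_out);
// ---------------------------------------------------------------------------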
eb2e2e3ac61ed30b086d91e5663bd23bf237653b.cu
#include "scanKernels.cu" #include "support_kernels.cu" //Helper functions //Reorders data extern "C" __global__ void dataReorderR4(const int n_particles, real4 *source, real4 *destination, uint *permutation) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= n_particles) return; int newIndex = permutation[idx]; destination[idx] = source[newIndex]; } extern "C" __global__ void dataReorderF2(const int n_particles, double2 *source, double2 *destination, uint *permutation) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= n_particles) return; int newIndex = permutation[idx]; destination[idx] = source[newIndex]; } extern "C" __global__ void dataReorderI1(const int n_particles, int *source, int *destination, uint *permutation) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= n_particles) return; int newIndex = permutation[idx]; destination[idx] = source[newIndex]; } //Convert a 64bit key uint2 key into a 96key with a permutation value build in extern "C" __global__ void convertKey64to96(uint4 *keys, uint4 *newKeys, const int N) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= N) return; uint4 temp = keys[idx]; newKeys[idx] = (uint4){temp.x, temp.y, temp.z, idx}; } extern "C" __global__ void extractKeyAndPerm(uint4 *newKeys, uint4 *keys, uint *permutation, const int N) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= N) return; uint4 temp = newKeys[idx]; keys[idx] = (uint4){temp.x, temp.y, temp.z, temp.w}; permutation[idx] = temp.w; } //Extract 1 of the 4 items of an uint4 key and move it into a 32bit array extern "C" __global__ void extractInt(uint4 *keys, uint *simpleKeys, const int N, int keyIdx) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= N) return; uint4 temp = keys[idx]; int simpleTemp; if(keyIdx == 0) simpleTemp = temp.x; else if(keyIdx == 1) simpleTemp = temp.y; else if(keyIdx == 2) simpleTemp = temp.z; simpleKeys[idx] = simpleTemp; } //Create range of 0 to N extern "C" __global__ void fillSequence(uint *sequence, const int N) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= N) return; sequence[idx] = idx; } //Reorder the data in the arrays according to a given permutation extern "C" __global__ void reOrderKeysValues(uint4 *keysSrc, uint4 *keysDest, uint *permutation, const int N) { const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int dim = blockDim.x * blockDim.y; int idx = bid * dim + tid; if (idx >= N) return; int newIndex = permutation[idx]; keysDest[idx] = keysSrc[newIndex]; } extern "C" __global__ void sort_count(volatile uint2 *valid, int 
*counts, const int N, setupParams sParam, int bitIdx/*, int2 *blaat*/) { const int tid = threadIdx.x; const int bid = blockDim.y * blockIdx.x + threadIdx.y; int totalNumThreads = gridDim.x*blockDim.y*blockDim.x; //120*4*32 // gridDim.x * blockDim.y; //2D !!!! volatile __shared__ int shmemSC[128]; volatile __shared__ int shmemSCTEST[128]; //Determine the parameters and loop over the particles int jobSize = (N / 2) / totalNumThreads; int offSet = jobSize * bid; int count = 0; jobSize = sParam.jobs; if(bid < sParam.blocksWithExtraJobs) jobSize++; if(bid <= sParam.blocksWithExtraJobs) offSet = (sParam.jobs+1)*64*bid; else { offSet = sParam.blocksWithExtraJobs*(sParam.jobs+1)*64; offSet += (bid-sParam.blocksWithExtraJobs)*(sParam.jobs)*64; } offSet /= 2; //Divide by two since we do double loads (uint2) for(int i=0; i < jobSize; i++) { count += !(valid[offSet + tid].x & (1u<<bitIdx)); count += !(valid[offSet + tid].y & (1u<<bitIdx)); offSet += blockDim.x; } //Reduce to get the count of this block shmemSC[32*threadIdx.y + tid] = count; reduce_block2(tid, &shmemSC[32*threadIdx.y], count); //Save the values / count of the current block if(threadIdx.x == 0) counts[bid] = shmemSC[32*threadIdx.y]; //Block 0 handles any extra elements that couldn't be divided equally if(bid == 0) { //Here i use single element reads for ease of boundary conditions and steps count = 0; offSet = sParam.extraOffset; uint* valid2 = (uint*) valid; for(int i=0 ; i < sParam.extraElements; i += blockDim.x) { if((offSet + i + tid) < (N)) //Make sure we dont read more than there are items { count += !(valid2[offSet + i + tid] & (1u<<bitIdx)); } } //Reduce shmemSCTEST[tid] = count; __syncthreads(); if(tid < 16){ shmemSCTEST[tid] = count = count + shmemSCTEST[tid+16]; shmemSCTEST[tid] = count = count + shmemSCTEST[tid+8]; shmemSCTEST[tid] = count = count + shmemSCTEST[tid+4]; shmemSCTEST[tid] = count = count + shmemSCTEST[tid+2]; shmemSCTEST[tid] = count = count + shmemSCTEST[tid+1]; } //Save the count if(tid == 0) { counts[gridDim.x*blockDim.y] = shmemSCTEST[0]; } __syncthreads(); }//end if bid==0 }//end compact_count // __device__ __forceinline__ int testTest(volatile unsigned int tmp[], uint val, const int idx, long test) // { // tmp[idx-16] = 0; tmp[idx] = val; // // // Since we set half the array to 0 we don't need ifs! // tmp[idx] = val = tmp[idx - 1] + val; // tmp[idx] = val = tmp[idx - 2] + val; // tmp[idx] = val = tmp[idx - 4] + val; // tmp[idx] = val = tmp[idx - 8] + val; // tmp[idx] = val = tmp[idx - 16] + val; // // return (idx > 0) ? 
tmp[idx-1] : 0; // } /* For sorting it turns out that the stage kernels works faster than the non-staged Might depend on how much has to be sorted/moved, have to do timings in the actual code */ extern "C" __global__ void sort_move_stage_key_value(uint2 *valid, int *output, uint2 *srcValues, uint *valuesOut, int *counts, const int N, setupParams sParam, int bitIdx) { //Walk the values of this block const int tid = threadIdx.x; const int bid = blockDim.y * blockIdx.x + threadIdx.y; volatile __shared__ unsigned int shmemSMSKV[192]; volatile __shared__ int stage[64*4]; volatile __shared__ int stage_values[64*4]; //Determine the parameters and loop over the particles int jobSize, offSet; jobSize = sParam.jobs; if(bid < sParam.blocksWithExtraJobs) jobSize++; if(bid <= sParam.blocksWithExtraJobs) offSet = (sParam.jobs+1)*64*bid; else { offSet = sParam.blocksWithExtraJobs*(sParam.jobs+1)*64; offSet += (bid-sParam.blocksWithExtraJobs)*(sParam.jobs)*64; } int outputOffset = counts[bid]; //Get the start of the output offset of the invalid items //this is calculated as follows: //totalValidItems + startReadOffset - startOutputOffset //startReadOffset - startOutputOffset <- is the total number of invalid items from any blocks //before the current block int rightOutputOffset = counts[gridDim.x*blockDim.y+1]; rightOutputOffset = rightOutputOffset + offSet - outputOffset; offSet /= 2; //Divide by two since we do double loads (uint2) TODO what happens if offSet is uneven...? int curCount; int idx, ridx; outputOffset += threadIdx.x; rightOutputOffset += threadIdx.x; //Do per step the prefix scan to determine the output locations for(int i=0; i < jobSize; i++) { uint2 validBase = valid[offSet + tid]; uint2 valuesBase = srcValues[offSet + tid]; int value = !(validBase.x & (1u<<bitIdx)); value += !(validBase.y & (1u<<bitIdx)); idx = hillisSteele5(&shmemSMSKV[48*threadIdx.y+16], curCount, value, threadIdx.x); ridx = curCount + threadIdx.x*2 - idx; //lane*2 - idx , *2 since we read 2 items a time if(!(validBase.x & (1u<<bitIdx))) { stage[idx + threadIdx.y*64] = validBase.x; stage_values[idx++ + threadIdx.y*64] = valuesBase.x; } else { stage[ridx + threadIdx.y*64] = validBase.x; stage_values[ridx++ + threadIdx.y*64] = valuesBase.x; } if(!(validBase.y & (1u<<bitIdx))) { stage[idx + threadIdx.y*64] = validBase.y; stage_values[idx + threadIdx.y*64] = valuesBase.y; } else { stage[ridx + threadIdx.y*64] = validBase.y; stage_values[ridx + threadIdx.y*64] = valuesBase.y; } //Reuse value as index value = outputOffset; //Flush output, first 32 if(threadIdx.x >= curCount) value = rightOutputOffset-curCount; output[value] = stage [threadIdx.x + threadIdx.y*64]; valuesOut[value] = stage_values[threadIdx.x + threadIdx.y*64]; //2nd 32 value = outputOffset + blockDim.x; if(threadIdx.x + blockDim.x >= curCount) value = rightOutputOffset + blockDim.x - curCount; output[value] = stage [threadIdx.x + blockDim.x + threadIdx.y*64]; valuesOut[value] = stage_values[threadIdx.x + blockDim.x + threadIdx.y*64]; outputOffset += curCount; //Increase the output offset rightOutputOffset += 64 - curCount; //64 (32*2) since we do 2 items a time offSet += blockDim.x; //Step to the next N threads } //Block 0 handles any extra elements that couldn't be divided equally if(bid == 0) { //Here i use single element reads for ease of boundary conditions and steps offSet = sParam.extraOffset; outputOffset = counts[gridDim.x*blockDim.y]; rightOutputOffset = counts[gridDim.x*blockDim.y+1]; rightOutputOffset = rightOutputOffset + offSet - outputOffset; 
uint* valid2 = (uint*) valid; uint* srcValues2 = (uint*) srcValues; for(int i=0; i < sParam.extraElements; i += blockDim.x) { uint value = 0; uint srcValueItem = 0; if((offSet + i + tid) < (N)){ //Make sure we dont read more than there are items value = valid2[offSet + i + tid]; srcValueItem = srcValues2[offSet + i + tid]; } idx = hillisSteele5(&shmemSMSKV[48*threadIdx.y+16], curCount, !(value & (1u<<bitIdx)), threadIdx.x); ridx = threadIdx.x - idx; if((offSet + i + tid) < N) if(!(value & (1u<<bitIdx))) { output[idx + outputOffset] = value; valuesOut[idx + outputOffset] = srcValueItem; } else { output[ridx + rightOutputOffset] = value; valuesOut[ridx + rightOutputOffset] = srcValueItem; } outputOffset += curCount; //Increase the output offset rightOutputOffset += 32-curCount; //32 since we do only 1 at a time } }//end if bid==0 }//end sort_move_stage_key_value
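// ---------------------------------------------------------------------------
// Simplified illustration (not part of the original file): sort_count and
// sort_move_stage_key_value together implement one radix "split" pass - keys
// whose selected bit is 0 move, in order, to the front of the output and the
// remaining keys to the back. The single-block kernel below performs the same
// stable partition for up to one block of elements, using CUB's BlockScan
// instead of the hand-written staged scan; it is a conceptual sketch, not a
// drop-in replacement for the multi-block version above. Names are
// illustrative.
#include <cub/block/block_scan.cuh>

template <int BLOCK_THREADS>
__global__ void splitByBitDemo(const unsigned int *keysIn, unsigned int *keysOut,
                               int n, int bitIdx)
{
    typedef cub::BlockScan<int, BLOCK_THREADS> BlockScan;
    __shared__ typename BlockScan::TempStorage tempStorage;
    __shared__ int totalZeros;

    const int tid = threadIdx.x;
    const unsigned int key = (tid < n) ? keysIn[tid] : 0u;
    const int isZero = (tid < n) ? !((key >> bitIdx) & 1u) : 0;

    // Exclusive scan of the "bit is zero" flags gives every zero-key its
    // output slot; the aggregate is the total number of zero-keys.
    int zeroRank, numZeros;
    BlockScan(tempStorage).ExclusiveSum(isZero, zeroRank, numZeros);
    if (tid == 0) totalZeros = numZeros;
    __syncthreads();

    if (tid < n) {
        const int oneRank = tid - zeroRank;              // rank among one-keys
        const int dst = isZero ? zeroRank : totalZeros + oneRank;
        keysOut[dst] = key;
    }
}
// Possible launch (BLOCK_THREADS must be >= n):
//   splitByBitDemo<256><<<1, 256>>>(d_keysIn, d_keysOut, n, 0);
// ---------------------------------------------------------------------------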
ea8d9c2d2ba51f548221ab79b56c18f60d601d04.hip
// !!! This is a file automatically generated by hipify!!! #include "StadiumHash.cuh" #define TICKET_SIZE 4 #define NUM_POS_BITS 3 // log(32/TICKET_SIZE) #define NUM_INFO_BITS 3 // TICKET_SIZE - NUM_POS_BITS // Generic Hash Function template<typename keyT, typename valueT> template <uint nRounds, uint rShift, uint mulOp> __device__ uint StadiumHash<keyT, valueT>::hash(const keyT key) { keyT x = key; for (uint j = nRounds; j > 0; --j) { x = ((x >> rShift) ^ x) * mulOp + j; } return (uint)x; } // First Hash template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::initHash(keyT key) { uint hashed = hash<sizeof(keyT), 8, 0xE9D58A6B>(key); return __umulhi(hashed, tableSize); } // Second Hash template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::rehash(uint hashed, const keyT key) { uint h_2 = hash<sizeof(keyT), 8, 0x6E5B9D8A>(key); uint dh = hashed + 1 + __umulhi(h_2, tableSize - 1); return (dh >= tableSize) ? (dh - tableSize) : dh; } // Info Hash template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::infoHash(keyT key) { return hash<sizeof(keyT), 8, 0xCA34BE7D>(key) >> (32 - NUM_INFO_BITS); // mulOp was chosen randomly } // Allocation of the ticket board template<typename keyT, typename valueT> __host__ void StadiumHash<keyT, valueT>::allocTicketBoard() { const int ticketBoardSize = tableSize / (32 / TICKET_SIZE) + 1; // Allocate memory on device for ticket board hipError_t cudaStatus = hipMalloc((void**)&ticketBoard, ticketBoardSize * sizeof(uint)); checkStatus(cudaStatus); } // Clearing of the ticket board template<typename keyT, typename valueT> __host__ void StadiumHash<keyT, valueT>::clearTicketBoard() { const int ticketBoardSize = tableSize / (32 / TICKET_SIZE) + 1; // clear ticket board by filling all tickets with 1s hipError_t cudaStatus = hipMemset((void*)ticketBoard, 0xFF, ticketBoardSize * sizeof(uint)); checkStatus(cudaStatus); } // Deletion of the ticket board template<typename keyT, typename valueT> __host__ void StadiumHash<keyT, valueT>::freeTicketBoard() { hipFree((uint*) ticketBoard); } // Creates a mask containing info starting at infoStart and otherwise 1s template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::prepareTicket(uint info, uint infoStart) { uint mask = (1 << NUM_INFO_BITS) - 1; mask = ~(mask << infoStart); uint result = mask | (info << infoStart); return result; } // Extracts the info from a ticket template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::extractInfo(uint ticket, uint infoStart) { uint mask = (1 << NUM_INFO_BITS) - 1; mask = mask << infoStart; uint result = (mask & ticket) >> infoStart; return result; } // Calculates Ticket Board Index from the hashed key template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::getTbIndex(uint hashed) { return hashed >> NUM_POS_BITS; } // Calculates the Position of the ticket in the Ticket Board Entry from the hashed key template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::getPosInInt(uint hashed) { uint mask = (1 << NUM_POS_BITS) - 1; return (hashed & mask) << (5 - NUM_POS_BITS); } // Entry Reservation template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::tryBookASeat(uint tbIndex, uint posInInt) { uint permit = ((1 << (TICKET_SIZE - NUM_INFO_BITS)) - 1) << posInInt; uint auth = atomicAnd((uint*) (ticketBoard + tbIndex), ~permit); return (auth & permit) ? 
(~0) : 0; } // Entry Search template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::tryFindTheSeat(keyT key, uint hashed, uint tbIndex, uint posInInt, uint info, keyT* keys) { uint ticketHolder = ticketBoard[tbIndex]; uint permit = 1 << posInInt; // check if bucket is empty if (permit & ticketHolder) { return KEY_NOT_INSIDE_TABLE; } else { // get and compare info from ticket uint retrievedInfo = extractInfo(ticketHolder, posInInt + (TICKET_SIZE - NUM_INFO_BITS)); if (info != retrievedInfo) return 0; // compare keys return (keys[hashed] == key) ? (~0) : 0; } } // Inserts info into the ticket board template<typename keyT, typename valueT> __device__ void StadiumHash<keyT, valueT>::insertTicketInfo(uint info, uint tbIndex, uint posInInt) { uint prepTicket = prepareTicket(info, posInInt + 1); atomicAnd((uint*) (ticketBoard + tbIndex), prepTicket); } // Constructor template<typename keyT, typename valueT> StadiumHash<keyT, valueT>::StadiumHash(uint tableSize, uint ticketSize) { if ((sizeof(uint) * CHAR_BIT) % ticketSize != 0 || ticketSize == 1) { throw std::runtime_error("Ticket size for stadium hash has to be larger than 1 and devide 32 evenly!\n"); } this->tableSize = roundToNextPrime(tableSize); /* NOTE This version of stadium hashing doesn't use a configurable ticket size. Tests have shown that accessing the ticket size as a variable is slower than using a fixed defined ticket size. If flexibility is more important than performance, these existing variables can be used: this->ticketSize = ticketSize; this->numInfoBits = ticketSize - 1; this->numPosBits = log2((sizeof(uint) * CHAR_BIT) / ticketSize); */ } // Destructor template<typename keyT, typename valueT> StadiumHash<keyT, valueT>::~StadiumHash() { } // Inserts a given key value pair into the table template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::insert(keyT key, valueT value, keyT* keys, valueT* values) { // Initial Hash uint hashed = initHash(key); // Get ticket board index and the position in the integer from hashed value uint tbIndex = getTbIndex(hashed); uint posInInt = getPosInInt(hashed); uint tryCounter = 0; uint gotSeat; // loop until a bucket is succesfully reserved do { // try to reserve a free bucket gotSeat = tryBookASeat(tbIndex, posInInt); // check if bucket was already reserved if (!gotSeat) { // rehash the key with the second hash function hashed = rehash(hashed, key); // Get ticket board index and the position in the integer from hashed value tbIndex = getTbIndex(hashed); posInInt = getPosInInt(hashed); } // if the number of tries is bigger than the table size, the table is already full assert(++tryCounter < tableSize); //throw std::runtime_error("INSERT FAILED - Table is full!"); } while (!gotSeat); // Generate Info from key uint info = infoHash(key); // Insert info into the reserved bucket insertTicketInfo(info, tbIndex, posInInt); // Insert key value pair into the table keys[hashed] = key; values[hashed] = value; return hashed; } // searches fo a key in the table template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::find(keyT key, keyT* keys) { // Intial Hash uint hashed = initHash(key); // Generate Info from key uint info = infoHash(key); uint tryCounter = 0; uint seatFound; // loop until the entry with the searched key is found do { uint tbIndex = getTbIndex(hashed); uint posInInt = getPosInInt(hashed); // search for the key in the ticket board and table seatFound = tryFindTheSeat(key, hashed, tbIndex, posInInt, info, 
keys); // if no entry was found, rehash the key if (!seatFound) hashed = rehash(hashed, key); // if the number of tries is bigger than the table size, the key isn't included in the table if (++tryCounter == tableSize) seatFound = KEY_NOT_INSIDE_TABLE; } while (!seatFound && seatFound != KEY_NOT_INSIDE_TABLE); if (seatFound == KEY_NOT_INSIDE_TABLE) { // if key wasn't found in the table, return tableSize as index hashed = tableSize; } return hashed; } template class StadiumHash<uint32_t, uint32_t>; template class StadiumHash<uint64_t, uint32_t>;
ea8d9c2d2ba51f548221ab79b56c18f60d601d04.cu
#include "StadiumHash.cuh" #define TICKET_SIZE 4 #define NUM_POS_BITS 3 // log(32/TICKET_SIZE) #define NUM_INFO_BITS 3 // TICKET_SIZE - NUM_POS_BITS // Generic Hash Function template<typename keyT, typename valueT> template <uint nRounds, uint rShift, uint mulOp> __device__ uint StadiumHash<keyT, valueT>::hash(const keyT key) { keyT x = key; for (uint j = nRounds; j > 0; --j) { x = ((x >> rShift) ^ x) * mulOp + j; } return (uint)x; } // First Hash template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::initHash(keyT key) { uint hashed = hash<sizeof(keyT), 8, 0xE9D58A6B>(key); return __umulhi(hashed, tableSize); } // Second Hash template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::rehash(uint hashed, const keyT key) { uint h_2 = hash<sizeof(keyT), 8, 0x6E5B9D8A>(key); uint dh = hashed + 1 + __umulhi(h_2, tableSize - 1); return (dh >= tableSize) ? (dh - tableSize) : dh; } // Info Hash template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::infoHash(keyT key) { return hash<sizeof(keyT), 8, 0xCA34BE7D>(key) >> (32 - NUM_INFO_BITS); // mulOp was chosen randomly } // Allocation of the ticket board template<typename keyT, typename valueT> __host__ void StadiumHash<keyT, valueT>::allocTicketBoard() { const int ticketBoardSize = tableSize / (32 / TICKET_SIZE) + 1; // Allocate memory on device for ticket board cudaError_t cudaStatus = cudaMalloc((void**)&ticketBoard, ticketBoardSize * sizeof(uint)); checkStatus(cudaStatus); } // Clearing of the ticket board template<typename keyT, typename valueT> __host__ void StadiumHash<keyT, valueT>::clearTicketBoard() { const int ticketBoardSize = tableSize / (32 / TICKET_SIZE) + 1; // clear ticket board by filling all tickets with 1s cudaError_t cudaStatus = cudaMemset((void*)ticketBoard, 0xFF, ticketBoardSize * sizeof(uint)); checkStatus(cudaStatus); } // Deletion of the ticket board template<typename keyT, typename valueT> __host__ void StadiumHash<keyT, valueT>::freeTicketBoard() { cudaFree((uint*) ticketBoard); } // Creates a mask containing info starting at infoStart and otherwise 1s template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::prepareTicket(uint info, uint infoStart) { uint mask = (1 << NUM_INFO_BITS) - 1; mask = ~(mask << infoStart); uint result = mask | (info << infoStart); return result; } // Extracts the info from a ticket template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::extractInfo(uint ticket, uint infoStart) { uint mask = (1 << NUM_INFO_BITS) - 1; mask = mask << infoStart; uint result = (mask & ticket) >> infoStart; return result; } // Calculates Ticket Board Index from the hashed key template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::getTbIndex(uint hashed) { return hashed >> NUM_POS_BITS; } // Calculates the Position of the ticket in the Ticket Board Entry from the hashed key template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::getPosInInt(uint hashed) { uint mask = (1 << NUM_POS_BITS) - 1; return (hashed & mask) << (5 - NUM_POS_BITS); } // Entry Reservation template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::tryBookASeat(uint tbIndex, uint posInInt) { uint permit = ((1 << (TICKET_SIZE - NUM_INFO_BITS)) - 1) << posInInt; uint auth = atomicAnd((uint*) (ticketBoard + tbIndex), ~permit); return (auth & permit) ? 
(~0) : 0; } // Entry Search template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::tryFindTheSeat(keyT key, uint hashed, uint tbIndex, uint posInInt, uint info, keyT* keys) { uint ticketHolder = ticketBoard[tbIndex]; uint permit = 1 << posInInt; // check if bucket is empty if (permit & ticketHolder) { return KEY_NOT_INSIDE_TABLE; } else { // get and compare info from ticket uint retrievedInfo = extractInfo(ticketHolder, posInInt + (TICKET_SIZE - NUM_INFO_BITS)); if (info != retrievedInfo) return 0; // compare keys return (keys[hashed] == key) ? (~0) : 0; } } // Inserts info into the ticket board template<typename keyT, typename valueT> __device__ void StadiumHash<keyT, valueT>::insertTicketInfo(uint info, uint tbIndex, uint posInInt) { uint prepTicket = prepareTicket(info, posInInt + 1); atomicAnd((uint*) (ticketBoard + tbIndex), prepTicket); } // Constructor template<typename keyT, typename valueT> StadiumHash<keyT, valueT>::StadiumHash(uint tableSize, uint ticketSize) { if ((sizeof(uint) * CHAR_BIT) % ticketSize != 0 || ticketSize == 1) { throw std::runtime_error("Ticket size for stadium hash has to be larger than 1 and devide 32 evenly!\n"); } this->tableSize = roundToNextPrime(tableSize); /* NOTE This version of stadium hashing doesn't use a configurable ticket size. Tests have shown that accessing the ticket size as a variable is slower than using a fixed defined ticket size. If flexibility is more important than performance, these existing variables can be used: this->ticketSize = ticketSize; this->numInfoBits = ticketSize - 1; this->numPosBits = log2((sizeof(uint) * CHAR_BIT) / ticketSize); */ } // Destructor template<typename keyT, typename valueT> StadiumHash<keyT, valueT>::~StadiumHash() { } // Inserts a given key value pair into the table template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::insert(keyT key, valueT value, keyT* keys, valueT* values) { // Initial Hash uint hashed = initHash(key); // Get ticket board index and the position in the integer from hashed value uint tbIndex = getTbIndex(hashed); uint posInInt = getPosInInt(hashed); uint tryCounter = 0; uint gotSeat; // loop until a bucket is succesfully reserved do { // try to reserve a free bucket gotSeat = tryBookASeat(tbIndex, posInInt); // check if bucket was already reserved if (!gotSeat) { // rehash the key with the second hash function hashed = rehash(hashed, key); // Get ticket board index and the position in the integer from hashed value tbIndex = getTbIndex(hashed); posInInt = getPosInInt(hashed); } // if the number of tries is bigger than the table size, the table is already full assert(++tryCounter < tableSize); //throw std::runtime_error("INSERT FAILED - Table is full!"); } while (!gotSeat); // Generate Info from key uint info = infoHash(key); // Insert info into the reserved bucket insertTicketInfo(info, tbIndex, posInInt); // Insert key value pair into the table keys[hashed] = key; values[hashed] = value; return hashed; } // searches fo a key in the table template<typename keyT, typename valueT> __device__ uint StadiumHash<keyT, valueT>::find(keyT key, keyT* keys) { // Intial Hash uint hashed = initHash(key); // Generate Info from key uint info = infoHash(key); uint tryCounter = 0; uint seatFound; // loop until the entry with the searched key is found do { uint tbIndex = getTbIndex(hashed); uint posInInt = getPosInInt(hashed); // search for the key in the ticket board and table seatFound = tryFindTheSeat(key, hashed, tbIndex, posInInt, info, 
keys); // if no entry was found, rehash the key if (!seatFound) hashed = rehash(hashed, key); // if the number of tries is bigger than the table size, the key isn't included in the table if (++tryCounter == tableSize) seatFound = KEY_NOT_INSIDE_TABLE; } while (!seatFound && seatFound != KEY_NOT_INSIDE_TABLE); if (seatFound == KEY_NOT_INSIDE_TABLE) { // if key wasn't found in the table, return tableSize as index hashed = tableSize; } return hashed; } template class StadiumHash<uint32_t, uint32_t>; template class StadiumHash<uint64_t, uint32_t>;
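In the StadiumHash sources above, with TICKET_SIZE 4 each 32-bit ticket-board word packs eight tickets: bit 0 of a ticket is the availability flag that tryBookASeat clears atomically, and the three bits above it carry the info hash written by insertTicketInfo and read back by tryFindTheSeat. A small host-side sketch of that packing follows; the helpers bookSeat/writeInfo/readInfo are illustrative stand-ins (no atomics, plain read-modify-write) and are not part of StadiumHash.cuh.

#include <cstdint>
#include <cstdio>

constexpr unsigned kTicketBits = 4;   // one ticket = flag bit + 3 info bits
constexpr unsigned kInfoBits   = 3;

// Try to claim ticket `slot` (0..7) in `word`; returns true if it was still free.
bool bookSeat(uint32_t &word, unsigned slot)
{
    uint32_t freeBit = 1u << (slot * kTicketBits);
    bool wasFree = (word & freeBit) != 0;   // a cleared board is all 1s, i.e. everything free
    word &= ~freeBit;                       // clear the flag: seat is now taken
    return wasFree;
}

// Store the 3-bit info value next to the flag (same layout as prepareTicket/insertTicketInfo).
void writeInfo(uint32_t &word, unsigned slot, uint32_t info)
{
    unsigned start = slot * kTicketBits + 1;
    uint32_t mask  = ((1u << kInfoBits) - 1) << start;
    word = (word & ~mask) | ((info & ((1u << kInfoBits) - 1)) << start);
}

uint32_t readInfo(uint32_t word, unsigned slot)
{
    unsigned start = slot * kTicketBits + 1;
    return (word >> start) & ((1u << kInfoBits) - 1);
}

int main()
{
    uint32_t word = 0xFFFFFFFFu;            // freshly cleared ticket board
    if (bookSeat(word, 2)) writeInfo(word, 2, 5);
    printf("word=0x%08X info(slot 2)=%u booked-again=%d\n",
           (unsigned)word, (unsigned)readInfo(word, 2), (int)bookSeat(word, 2));
    return 0;
}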
e4a021b164e65e82148e54fde1057e26f58a143a.hip
// !!! This is a file automatically generated by hipify!!! #include "csr2dense_cuda.cuh" #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <hipsparse.h> #include <torch/extension.h> #define CHECK_CUDA(x) AT_ASSERTM(x.device().is_cuda(), #x " must be CUDA tensor") #define CHECK_INPUT(x) AT_ASSERTM(x, "Input mismatch") #define DISPATCH_CSR2DENSE_TYPES(TYPE, ...) \ [&] { \ switch (TYPE) { \ case torch::ScalarType::Float: { \ using scalar_t = float; \ const auto &cusparseXcsr2dense = hipsparseScsr2dense; \ return __VA_ARGS__(); \ } \ case torch::ScalarType::Double: { \ using scalar_t = double; \ const auto &cusparseXcsr2dense = hipsparseDcsr2dense; \ return __VA_ARGS__(); \ } \ default: \ AT_ERROR("Not implemented for '", toString(TYPE), "'"); \ } \ }() torch::Tensor csr2dense_cuda(torch::Tensor rowptr, torch::Tensor col, torch::Tensor val, torch::Tensor out) { CHECK_CUDA(rowptr); CHECK_CUDA(col); CHECK_CUDA(val); CHECK_CUDA(out); int M = out.size(0); int N = out.size(1); int ldA = out.stride(1); auto scalar_type = val.scalar_type(); AT_ASSERTM(rowptr.numel() - 1 == M, "Output size does not match sparse tensor size."); AT_ASSERTM(val.dtype() == out.dtype(), "Input and output data-types do not match"); AT_ASSERTM(out.stride(0) == 1, "Output matrix is not F-contiguous"); auto handle = at::cuda::getCurrentCUDASparseHandle(); hipsparseStatus_t status; c10::hip::HIPGuardMasqueradingAsCUDA g(rowptr.get_device()); // Convert indices to int rowptr = rowptr.toType(torch::kInt); col = col.toType(torch::kInt); // Creates default matrix descriptor (0-based and GENERAL matrix) hipsparseMatDescr_t descr; hipsparseCreateMatDescr(&descr); // Extract data pointers for the sparse matrix indices auto rowptr_data = rowptr.data_ptr<int>(); auto col_data = col.data_ptr<int>(); DISPATCH_CSR2DENSE_TYPES( scalar_type, [&] { // Extract data pointers for the dense matrix indices auto val_data = val.data_ptr<scalar_t>(); auto out_data = out.data_ptr<scalar_t>(); status = cusparseXcsr2dense(handle, M, N, descr, val_data, rowptr_data, col_data, out_data, ldA); if (status != HIPSPARSE_STATUS_SUCCESS) { hipsparseDestroyMatDescr(descr); AT_ERROR("cusparse csr2dense function failed with error code '", status, "'."); } }); hipsparseDestroyMatDescr(descr); return out; }
e4a021b164e65e82148e54fde1057e26f58a143a.cu
#include "csr2dense_cuda.cuh" #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <cusparse.h> #include <torch/extension.h> #define CHECK_CUDA(x) AT_ASSERTM(x.device().is_cuda(), #x " must be CUDA tensor") #define CHECK_INPUT(x) AT_ASSERTM(x, "Input mismatch") #define DISPATCH_CSR2DENSE_TYPES(TYPE, ...) \ [&] { \ switch (TYPE) { \ case torch::ScalarType::Float: { \ using scalar_t = float; \ const auto &cusparseXcsr2dense = cusparseScsr2dense; \ return __VA_ARGS__(); \ } \ case torch::ScalarType::Double: { \ using scalar_t = double; \ const auto &cusparseXcsr2dense = cusparseDcsr2dense; \ return __VA_ARGS__(); \ } \ default: \ AT_ERROR("Not implemented for '", toString(TYPE), "'"); \ } \ }() torch::Tensor csr2dense_cuda(torch::Tensor rowptr, torch::Tensor col, torch::Tensor val, torch::Tensor out) { CHECK_CUDA(rowptr); CHECK_CUDA(col); CHECK_CUDA(val); CHECK_CUDA(out); int M = out.size(0); int N = out.size(1); int ldA = out.stride(1); auto scalar_type = val.scalar_type(); AT_ASSERTM(rowptr.numel() - 1 == M, "Output size does not match sparse tensor size."); AT_ASSERTM(val.dtype() == out.dtype(), "Input and output data-types do not match"); AT_ASSERTM(out.stride(0) == 1, "Output matrix is not F-contiguous"); auto handle = at::cuda::getCurrentCUDASparseHandle(); cusparseStatus_t status; c10::cuda::CUDAGuard g(rowptr.get_device()); // Convert indices to int rowptr = rowptr.toType(torch::kInt); col = col.toType(torch::kInt); // Creates default matrix descriptor (0-based and GENERAL matrix) cusparseMatDescr_t descr; cusparseCreateMatDescr(&descr); // Extract data pointers for the sparse matrix indices auto rowptr_data = rowptr.data_ptr<int>(); auto col_data = col.data_ptr<int>(); DISPATCH_CSR2DENSE_TYPES( scalar_type, [&] { // Extract data pointers for the dense matrix indices auto val_data = val.data_ptr<scalar_t>(); auto out_data = out.data_ptr<scalar_t>(); status = cusparseXcsr2dense(handle, M, N, descr, val_data, rowptr_data, col_data, out_data, ldA); if (status != CUSPARSE_STATUS_SUCCESS) { cusparseDestroyMatDescr(descr); AT_ERROR("cusparse csr2dense function failed with error code '", status, "'."); } }); cusparseDestroyMatDescr(descr); return out; }
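csr2dense_cuda above delegates the conversion itself to cusparseXcsr2dense and only prepares the matrix descriptor, 32-bit index tensors, and the F-contiguous output. For reference, the effect of that conversion is the scatter shown below -- a naive one-thread-per-row kernel writing CSR values into a column-major dense matrix. This sketch is only an illustration of the semantics, not the cuSPARSE path the extension actually uses.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void csr2dense_naive(const int *rowptr, const int *col, const float *val,
                                float *dense, int M, int ldA)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= M) return;
    for (int i = rowptr[row]; i < rowptr[row + 1]; ++i)
        dense[row + col[i] * ldA] = val[i];   // column-major: element (row, c) lives at row + c*ldA
}

int main()
{
    // 2x3 matrix [[1 0 2], [0 3 0]] in CSR form.
    int   h_rowptr[] = {0, 2, 3};
    int   h_col[]    = {0, 2, 1};
    float h_val[]    = {1.f, 2.f, 3.f};
    const int M = 2, N = 3, ldA = M;

    int *d_rowptr, *d_col; float *d_val, *d_dense;
    cudaMalloc(&d_rowptr, sizeof(h_rowptr)); cudaMalloc(&d_col, sizeof(h_col));
    cudaMalloc(&d_val, sizeof(h_val));       cudaMalloc(&d_dense, M * N * sizeof(float));
    cudaMemcpy(d_rowptr, h_rowptr, sizeof(h_rowptr), cudaMemcpyHostToDevice);
    cudaMemcpy(d_col, h_col, sizeof(h_col), cudaMemcpyHostToDevice);
    cudaMemcpy(d_val, h_val, sizeof(h_val), cudaMemcpyHostToDevice);
    cudaMemset(d_dense, 0, M * N * sizeof(float));

    csr2dense_naive<<<1, 32>>>(d_rowptr, d_col, d_val, d_dense, M, ldA);

    float h_dense[M * N];
    cudaMemcpy(h_dense, d_dense, sizeof(h_dense), cudaMemcpyDeviceToHost);
    for (int r = 0; r < M; ++r) {             // print row by row for readability
        for (int c = 0; c < N; ++c) printf("%g ", h_dense[r + c * ldA]);
        printf("\n");
    }
    cudaFree(d_rowptr); cudaFree(d_col); cudaFree(d_val); cudaFree(d_dense);
    return 0;
}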
f1e498a92e654924f3f4e80e1f16a13ca3400747.hip
// !!! This is a file automatically generated by hipify!!! #include "luaT.h" #include "THH/THH.h" #include <hipfft.h> #include "fft_prod2.cu" #include "fill_hermitian.cu" #include "modulus.cu" #include "complexInterp.cu" #include "bias.cu" #include "crop.cu" #include "prod.cu" #include "graph_pool.cu" #include "fft_prod_module.cu" #include "cufft.cpp" static int prod_fprop_real(lua_State *L) { THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *weight = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); luaL_argcheck(L, input->nDimension == 3, 2, "input should be 3D tensor"); luaL_argcheck(L, output->nDimension == 3, 2, "output should be 3D tensor"); luaL_argcheck(L, weight->nDimension == 3, 2, "kernel should be 3D tensor"); long nMinibatch = input->size[0]; long nOutputMaps = weight->size[0]; long nInputMaps = weight->size[1]; long dim = input->size[2]; // raw pointers float *input_data = (float*)THCudaTensor_data(NULL,input); float *weight_data = (float*)THCudaTensor_data(NULL,weight); float *output_data = (float*)THCudaTensor_data(NULL,output); spectral_prod(input_data, weight_data, output_data, dim, nMinibatch, nInputMaps*dim, nOutputMaps*dim, nInputMaps, dim, dim, nOutputMaps, nInputMaps*dim, dim); return 0; } static int prod_bprop_real(lua_State *L) { THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *weight = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); luaL_argcheck(L, gradInput->nDimension == 3, 2, "gradInput should be 3D tensor"); luaL_argcheck(L, weight->nDimension == 3, 2, "weight should be 3D tensor"); luaL_argcheck(L, gradOutput->nDimension == 3, 2, "gradOutput should be 3D tensor"); long nMinibatch = gradInput->size[0]; long nOutputMaps = weight->size[0]; long nInputMaps = weight->size[1]; long dim = gradInput->size[2]; // raw pointers float *gradOutput_data = (float*)THCudaTensor_data(NULL,gradOutput); float *weight_data = (float*)THCudaTensor_data(NULL,weight); float *gradInput_data = (float*)THCudaTensor_data(NULL,gradInput); spectral_prod(gradOutput_data, weight_data, gradInput_data, dim, nMinibatch, nOutputMaps*dim, nInputMaps*dim, nOutputMaps, dim, dim*nInputMaps, nInputMaps, dim, dim); return 0; } static int prod_accgrad_real(lua_State *L) { THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradWeight = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); luaL_argcheck(L, input->nDimension == 3, 2, "input should be 3D tensor"); luaL_argcheck(L, gradOutput->nDimension == 3, 2, "gradOutput should be 3D tensor"); luaL_argcheck(L, gradWeight->nDimension == 3, 2, "gradWeight should be 3D tensor"); long nMinibatch = input->size[0]; long nOutputMaps = gradWeight->size[0]; long nInputMaps = gradWeight->size[1]; long dim = input->size[2]; // raw pointers float *input_data = (float*)THCudaTensor_data(NULL,input); float *gradOutput_data = (float*)THCudaTensor_data(NULL,gradOutput); float *gradWeight_data = (float*)THCudaTensor_data(NULL,gradWeight); spectral_prod(input_data, gradOutput_data, gradWeight_data, dim, nInputMaps, dim, dim, nMinibatch, nInputMaps*dim, nOutputMaps*dim, nOutputMaps, dim, nInputMaps*dim); return 0; } static int fill_hermitian(lua_State *L) { 
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); luaL_argcheck(L, THCudaTensor_isContiguous(NULL,input), 1, "input must be contiguous"); luaL_argcheck(L, THCudaTensor_isContiguous(NULL,output),2, "output must be contiguous"); luaL_argcheck(L, input->nDimension == 5, 2, "input should be 4D complex tensor"); luaL_argcheck(L, output->nDimension == 5, 2, "output should be 4D complex tensor"); luaL_argcheck(L, input->size[3] == output->size[3]/2+1, 2, "input must have N/2+1 columns"); long nMinibatch = input->size[0]; long nInputPlanes = input->size[1]; long nRows = output->size[2]; long nCols = output->size[3]; hipComplex *input_data = (hipComplex*)THCudaTensor_data(NULL,input); hipComplex *output_data = (hipComplex*)THCudaTensor_data(NULL,output); fill_hermitian_call(input_data, output_data, nMinibatch*nInputPlanes,nRows,nCols); return 0; } static const struct luaL_reg libspectralnet_init [] = { {"fft1d_r2c", fft1d_r2c}, {"fft1d_c2r", fft1d_c2r}, {"fft1d_c2c", fft1d_c2c}, {"fft2d_r2c", fft2d_r2c}, {"fft2d_c2r", fft2d_c2r}, {"fft2d_c2c", fft2d_c2c}, {"prod_fprop_real", prod_fprop_real}, {"prod_bprop_real", prod_bprop_real}, {"prod_accgrad_real", prod_accgrad_real}, {"prod_fprop_complex", prod_fprop_complex}, {"prod_bprop_complex", prod_bprop_complex}, {"prod_accgrad_complex",prod_accgrad_complex}, {"fill_hermitian",fill_hermitian}, {"modulus_updateGradInput",modulus_updateGradInput}, {"complexInterp_interpolate",complexInterp_interpolate}, {"bias_updateOutput", bias_updateOutput}, {"bias_accGradParameters", bias_accGradParameters}, {"crop_zeroborders",crop_zeroborders}, {"graph_pool_fprop", graph_pool_fprop}, {"graph_pool_bprop", graph_pool_bprop}, {NULL, NULL} }; LUA_EXTERNC int luaopen_libspectralnet(lua_State *L) { luaL_openlib(L,"libspectralnet",libspectralnet_init,0); return 1; }
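The fill_hermitian binding above expands an R2C spectrum that stores only N/2+1 columns into the full N-column complex array; the argument checks (input->size[3] == output->size[3]/2+1) reflect the conjugate symmetry of a real-input FFT. The kernel itself lives in fill_hermitian.cu, which is not shown here, so the sketch below is an assumed implementation of that fill for a single 2D plane, not the library's actual code.

#include <cuComplex.h>

// Full spectrum from the half-stored one: X[r][c] = conj(X[(nRows-r)%nRows][(nCols-c)%nCols]).
// Launch e.g.: dim3 block(16,16); dim3 grid((nCols+15)/16, (nRows+15)/16);
__global__ void fill_hermitian_2d(const cuComplex *half, cuComplex *full,
                                  int nRows, int nCols)
{
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= nRows || c >= nCols) return;
    int halfCols = nCols / 2 + 1;
    if (c < halfCols) {
        full[r * nCols + c] = half[r * halfCols + c];             // copy the stored half
    } else {
        int rs = (nRows - r) % nRows;                             // mirrored source row
        int cs = nCols - c;                                       // mirrored source column (< halfCols)
        full[r * nCols + c] = cuConjf(half[rs * halfCols + cs]);  // conjugate symmetry
    }
}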
f1e498a92e654924f3f4e80e1f16a13ca3400747.cu
#include "luaT.h" #include "THC/THC.h" #include <cufft.h> #include "fft_prod2.cu" #include "fill_hermitian.cu" #include "modulus.cu" #include "complexInterp.cu" #include "bias.cu" #include "crop.cu" #include "prod.cu" #include "graph_pool.cu" #include "fft_prod_module.cu" #include "cufft.cpp" static int prod_fprop_real(lua_State *L) { THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *weight = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); luaL_argcheck(L, input->nDimension == 3, 2, "input should be 3D tensor"); luaL_argcheck(L, output->nDimension == 3, 2, "output should be 3D tensor"); luaL_argcheck(L, weight->nDimension == 3, 2, "kernel should be 3D tensor"); long nMinibatch = input->size[0]; long nOutputMaps = weight->size[0]; long nInputMaps = weight->size[1]; long dim = input->size[2]; // raw pointers float *input_data = (float*)THCudaTensor_data(NULL,input); float *weight_data = (float*)THCudaTensor_data(NULL,weight); float *output_data = (float*)THCudaTensor_data(NULL,output); spectral_prod(input_data, weight_data, output_data, dim, nMinibatch, nInputMaps*dim, nOutputMaps*dim, nInputMaps, dim, dim, nOutputMaps, nInputMaps*dim, dim); return 0; } static int prod_bprop_real(lua_State *L) { THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *weight = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); luaL_argcheck(L, gradInput->nDimension == 3, 2, "gradInput should be 3D tensor"); luaL_argcheck(L, weight->nDimension == 3, 2, "weight should be 3D tensor"); luaL_argcheck(L, gradOutput->nDimension == 3, 2, "gradOutput should be 3D tensor"); long nMinibatch = gradInput->size[0]; long nOutputMaps = weight->size[0]; long nInputMaps = weight->size[1]; long dim = gradInput->size[2]; // raw pointers float *gradOutput_data = (float*)THCudaTensor_data(NULL,gradOutput); float *weight_data = (float*)THCudaTensor_data(NULL,weight); float *gradInput_data = (float*)THCudaTensor_data(NULL,gradInput); spectral_prod(gradOutput_data, weight_data, gradInput_data, dim, nMinibatch, nOutputMaps*dim, nInputMaps*dim, nOutputMaps, dim, dim*nInputMaps, nInputMaps, dim, dim); return 0; } static int prod_accgrad_real(lua_State *L) { THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradWeight = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); luaL_argcheck(L, input->nDimension == 3, 2, "input should be 3D tensor"); luaL_argcheck(L, gradOutput->nDimension == 3, 2, "gradOutput should be 3D tensor"); luaL_argcheck(L, gradWeight->nDimension == 3, 2, "gradWeight should be 3D tensor"); long nMinibatch = input->size[0]; long nOutputMaps = gradWeight->size[0]; long nInputMaps = gradWeight->size[1]; long dim = input->size[2]; // raw pointers float *input_data = (float*)THCudaTensor_data(NULL,input); float *gradOutput_data = (float*)THCudaTensor_data(NULL,gradOutput); float *gradWeight_data = (float*)THCudaTensor_data(NULL,gradWeight); spectral_prod(input_data, gradOutput_data, gradWeight_data, dim, nInputMaps, dim, dim, nMinibatch, nInputMaps*dim, nOutputMaps*dim, nOutputMaps, dim, nInputMaps*dim); return 0; } static int fill_hermitian(lua_State *L) { THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, 
"torch.CudaTensor"); THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); luaL_argcheck(L, THCudaTensor_isContiguous(NULL,input), 1, "input must be contiguous"); luaL_argcheck(L, THCudaTensor_isContiguous(NULL,output),2, "output must be contiguous"); luaL_argcheck(L, input->nDimension == 5, 2, "input should be 4D complex tensor"); luaL_argcheck(L, output->nDimension == 5, 2, "output should be 4D complex tensor"); luaL_argcheck(L, input->size[3] == output->size[3]/2+1, 2, "input must have N/2+1 columns"); long nMinibatch = input->size[0]; long nInputPlanes = input->size[1]; long nRows = output->size[2]; long nCols = output->size[3]; cuComplex *input_data = (cuComplex*)THCudaTensor_data(NULL,input); cuComplex *output_data = (cuComplex*)THCudaTensor_data(NULL,output); fill_hermitian_call(input_data, output_data, nMinibatch*nInputPlanes,nRows,nCols); return 0; } static const struct luaL_reg libspectralnet_init [] = { {"fft1d_r2c", fft1d_r2c}, {"fft1d_c2r", fft1d_c2r}, {"fft1d_c2c", fft1d_c2c}, {"fft2d_r2c", fft2d_r2c}, {"fft2d_c2r", fft2d_c2r}, {"fft2d_c2c", fft2d_c2c}, {"prod_fprop_real", prod_fprop_real}, {"prod_bprop_real", prod_bprop_real}, {"prod_accgrad_real", prod_accgrad_real}, {"prod_fprop_complex", prod_fprop_complex}, {"prod_bprop_complex", prod_bprop_complex}, {"prod_accgrad_complex",prod_accgrad_complex}, {"fill_hermitian",fill_hermitian}, {"modulus_updateGradInput",modulus_updateGradInput}, {"complexInterp_interpolate",complexInterp_interpolate}, {"bias_updateOutput", bias_updateOutput}, {"bias_accGradParameters", bias_accGradParameters}, {"crop_zeroborders",crop_zeroborders}, {"graph_pool_fprop", graph_pool_fprop}, {"graph_pool_bprop", graph_pool_bprop}, {NULL, NULL} }; LUA_EXTERNC int luaopen_libspectralnet(lua_State *L) { luaL_openlib(L,"libspectralnet",libspectralnet_init,0); return 1; }
e78898cf4d4065ac0d69ab4f8f1d0fa4c6fbee08.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2021 Xiaomi Corporation (authors: Fangjun Kuang) * * See LICENSE for clarification regarding multiple authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "c10/hip/HIPFunctions.h" #include "gtest/gtest.h" #include "k2/csrc/test_utils.h" // #include "k2/csrc/array.h" #include "k2/csrc/device_guard.h" #include "k2/csrc/pytorch_context.h" namespace k2 { // Use a separate function because there is a lambda function inside K2_EVAL(). static void TestImpl() { K2_LOG(INFO) << "Number of devices: " << c10::hip::device_count(); // Set the default device to 1 c10::hip::set_device(1); EXPECT_EQ(c10::hip::current_device(), 1); ContextPtr c = GetCudaContext(0); EXPECT_EQ(c->GetDeviceId(), 0); // the default device should still be 1 EXPECT_EQ(c10::hip::current_device(), 1); Array1<int32_t> a(c, "[1 2]"); EXPECT_EQ(a.Context()->GetDeviceId(), 0); // b uses the default device, which is 1 Array1<int32_t> b(GetCudaContext(), "[10 20]"); EXPECT_EQ(b.Context()->GetDeviceId(), 1); int32_t *a_data = a.Data(); int32_t *b_data = b.Data(); { DeviceGuard guard(0); // a is on device 0 K2_EVAL( a.Context(), 2, set_a, (int32_t i)->void { a_data[i] += 1; }); CheckArrayData(a, {2, 3}); } { DeviceGuard guard(1); // b is on device 1 K2_EVAL( b.Context(), 2, set_b, (int32_t i)->void { b_data[i] += 2; }); CheckArrayData(b, {12, 22}); } } TEST(PyTorchContext, GetCudaContext) { // skip this test is CUDA is not available if (!torch::cuda::is_available()) return; // skip it if there are less than two CUDA GPUs. if (c10::hip::device_count() < 2) return; TestImpl(); } } // namespace k2
e78898cf4d4065ac0d69ab4f8f1d0fa4c6fbee08.cu
/** * Copyright 2021 Xiaomi Corporation (authors: Fangjun Kuang) * * See LICENSE for clarification regarding multiple authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "c10/cuda/CUDAFunctions.h" #include "gtest/gtest.h" #include "k2/csrc/test_utils.h" // #include "k2/csrc/array.h" #include "k2/csrc/device_guard.h" #include "k2/csrc/pytorch_context.h" namespace k2 { // Use a separate function because there is a lambda function inside K2_EVAL(). static void TestImpl() { K2_LOG(INFO) << "Number of devices: " << c10::cuda::device_count(); // Set the default device to 1 c10::cuda::set_device(1); EXPECT_EQ(c10::cuda::current_device(), 1); ContextPtr c = GetCudaContext(0); EXPECT_EQ(c->GetDeviceId(), 0); // the default device should still be 1 EXPECT_EQ(c10::cuda::current_device(), 1); Array1<int32_t> a(c, "[1 2]"); EXPECT_EQ(a.Context()->GetDeviceId(), 0); // b uses the default device, which is 1 Array1<int32_t> b(GetCudaContext(), "[10 20]"); EXPECT_EQ(b.Context()->GetDeviceId(), 1); int32_t *a_data = a.Data(); int32_t *b_data = b.Data(); { DeviceGuard guard(0); // a is on device 0 K2_EVAL( a.Context(), 2, set_a, (int32_t i)->void { a_data[i] += 1; }); CheckArrayData(a, {2, 3}); } { DeviceGuard guard(1); // b is on device 1 K2_EVAL( b.Context(), 2, set_b, (int32_t i)->void { b_data[i] += 2; }); CheckArrayData(b, {12, 22}); } } TEST(PyTorchContext, GetCudaContext) { // skip this test is CUDA is not available if (!torch::cuda::is_available()) return; // skip it if there are less than two CUDA GPUs. if (c10::cuda::device_count() < 2) return; TestImpl(); } } // namespace k2
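The test above relies on k2's DeviceGuard to run work on device 0 while device 1 remains the caller's current device afterwards. Stripped of the k2/PyTorch context machinery, the pattern is a small RAII wrapper around cudaGetDevice/cudaSetDevice; the ScopedDevice class below is an illustrative stand-in, not k2's implementation.

#include <cuda_runtime.h>

class ScopedDevice {
 public:
  explicit ScopedDevice(int device) {
    cudaGetDevice(&prev_);                     // remember the caller's device
    cudaSetDevice(device);                     // make `device` current for this scope
  }
  ~ScopedDevice() { cudaSetDevice(prev_); }    // restore on scope exit
  ScopedDevice(const ScopedDevice &) = delete;
  ScopedDevice &operator=(const ScopedDevice &) = delete;

 private:
  int prev_ = 0;
};

// Usage: allocate on device 0 while device 1 stays the default afterwards.
void example() {
  cudaSetDevice(1);
  {
    ScopedDevice guard(0);
    void *p = nullptr;
    cudaMalloc(&p, 1024);                      // lands on device 0
    cudaFree(p);
  }
  // the current device is 1 again here
}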
c2364e1da5f076de0d25e5b7dcbedbfe20b8bb93.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // clang-format off {% set wdesc = "weighted" if weighted else "unweighted" %} #include "codegen/embedding_forward_template_helpers.cuh" using namespace fbgemm_gpu; using Tensor = at::Tensor; namespace nbit { /* Looping over the weight types is requires to generate all the C++ template declarations (not definitions) that will be invoked by the function `Tensor int_nbit_split_embedding*_codegen_forward_*_cuda(...)` later in the same generated source file. */ {% for emb_weight_type in ["FP32", "FP16", "FP8", "INT8", "INT4", "INT2"] %} template<typename index_t, typename output_t, size_t OutputRowsPerThread, size_t WarpsPerBlock, size_t InputRowsInFlight, size_t MinNum128BRows, size_t MaxNum128BRows, bool DeviceOnly> __launch_bounds__(WarpsPerBlock * kWarpSize) __global__ void {{ type_map[emb_weight_type].enum_name }}_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }}_kernel_small_L( const at::PackedTensorAccessor64<uint8_t, 1, at::RestrictPtrTraits> dev_weights, const at::PackedTensorAccessor64<uint8_t, 1, at::RestrictPtrTraits> uvm_weights, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> weights_placements, const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets, const at::PackedTensorAccessor32<uint8_t, 1, at::RestrictPtrTraits> weights_tys, {% if not nobag %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets, {% else %} const int64_t D, {% endif %} FixedDivisor fd_B, // FixedDivisor(div_round_up(B, OutputRowsPerThread)) const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> indices, const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> offsets, {% if not nobag %} const int64_t pooling_mode, {% endif %} const int64_t row_alignment, {% if weighted %} at::PackedTensorAccessor32<float, 1, at::RestrictPtrTraits> indice_weights, {% endif %} {% if type_map[emb_weight_type].enum_name == "FP8" %} const int exponent_bits, const int exponent_bias, {% endif %} at::PackedTensorAccessor32<output_t, 2, at::RestrictPtrTraits> output, // [B][total_D], const at::PackedTensorAccessor64<uint8_t, 2, at::RestrictPtrTraits> lxu_cache_weights, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> lxu_cache_locations ); {% endfor %} // for emb_weight_type in ["FP32", "FP16", "FP8", "INT8", "INT4", "INT2"] } Tensor int_nbit_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }}_cuda( Tensor dev_weights, Tensor uvm_weights, Tensor weights_placements, Tensor weights_offsets, Tensor weights_tys, {% if not nobag %} Tensor D_offsets, const int64_t total_D, {% else %} const int64_t D, {% endif %} const int64_t max_int2_D, const int64_t max_int4_D, const int64_t max_int8_D, const int64_t max_float16_D, const int64_t max_float32_D, Tensor indices, Tensor offsets, {% if not nobag %} const int64_t pooling_mode, {% endif %} const int64_t row_alignment, {% if weighted %} Tensor indice_weights, {% endif %} const int64_t output_dtype, Tensor lxu_cache_weights, Tensor lxu_cache_locations, const int64_t max_float8_D, const int64_t fp8_exponent_bits, const int64_t fp8_exponent_bias ) { TENSOR_ON_CUDA_GPU(dev_weights); TENSORS_ON_SAME_DEVICE(uvm_weights, dev_weights); 
TENSORS_ON_SAME_DEVICE(weights_placements, dev_weights); TENSORS_ON_SAME_DEVICE(weights_offsets, dev_weights); TENSORS_ON_SAME_DEVICE(weights_tys, dev_weights); {% if not nobag %} TENSORS_ON_SAME_DEVICE(D_offsets, dev_weights); {% endif %} TENSORS_ON_SAME_DEVICE(indices, dev_weights); TENSORS_ON_SAME_DEVICE(offsets, dev_weights); {% if weighted %} TENSORS_EMPTY_OR_ON_SAME_DEVICE(indice_weights, dev_weights); {% endif %} TENSORS_EMPTY_OR_ON_SAME_DEVICE(lxu_cache_weights, dev_weights); TENSORS_EMPTY_OR_ON_SAME_DEVICE(lxu_cache_locations, dev_weights); at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(dev_weights.get_device()); // kernels assume indices are contiguous. indices = indices.contiguous(); {% if not nobag %} const int32_t T = D_offsets.numel() - 1; {% else %} const int32_t total_L = indices.numel(); const int32_t T = weights_offsets.numel(); {% endif %} TORCH_CHECK(T > 0); // offsets = [B x T + 1] const int32_t B = (offsets.size(0) - 1) / T; TORCH_CHECK(B >= 0); {% if not nobag %} TORCH_CHECK(total_D > 0); {% else %} TORCH_CHECK(D > 0); {% endif %} Tensor output; const int kINT8QparamsBytes = 8; SparseType o_dtype = static_cast<SparseType>(output_dtype); TORCH_CHECK(o_dtype == SparseType::FP32 || o_dtype == SparseType::FP16 || o_dtype == SparseType::BF16 || o_dtype == SparseType::INT8); {% if not nobag %} int64_t total_adjusted_D = total_D; if (o_dtype == SparseType::INT8) { total_adjusted_D += T * kINT8QparamsBytes; } if (indices.numel() == 0) { output = at::zeros({B, total_adjusted_D}, dev_weights.options().dtype(getScalarType(o_dtype))); } else { output = at::empty({B, total_adjusted_D}, dev_weights.options().dtype(getScalarType(o_dtype))); } {% else %} int64_t adjusted_D = D; if (o_dtype == SparseType::INT8) { adjusted_D += T * kINT8QparamsBytes; } if (total_L == 0) { output = at::zeros({total_L, adjusted_D}, dev_weights.options().dtype(getScalarType(o_dtype))); } else { output = at::empty({total_L, adjusted_D}, dev_weights.options().dtype(getScalarType(o_dtype))); } {% endif %} if (B == 0 || indices.numel() == 0) { return output; } using index_t = int32_t; constexpr int32_t kWarpsPerBlock = 4; const auto device_only = lxu_cache_weights.numel() == 0 && uvm_weights.numel() == 0; #define Y(...) 
\ if (device_only) { \ X(true, __VA_ARGS__) \ } else { \ X(false, __VA_ARGS__) \ }; // launch 2-bit kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::INT2_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }hipLaunchKernelGGL((}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly>), \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \ output.packed_accessor32<output_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "int2_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_int2_D > 0) { auto max_int2_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_int2_D, SparseType::INT2, row_alignment), 128); TORCH_CHECK(max_int2_128b_rows <= 4); if (max_int2_128b_rows > 0) { Y(2, 16, 0, 1); } if (max_int2_128b_rows > 1) { Y(2, 8, 1, 2); } if (max_int2_128b_rows > 2) { Y(2, 8, 2, 4); } } })); #undef X // launch 4-bit kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::INT4_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }hipLaunchKernelGGL((}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly>), \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, 
at::RestrictPtrTraits>(), {% endif %} \ output.packed_accessor32<output_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "int4_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_int4_D > 0) { auto max_int4_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_int4_D, SparseType::INT4, row_alignment), 128); TORCH_CHECK(max_int4_128b_rows <= 8); if (max_int4_128b_rows > 0) { Y(4, 8, 0, 1); } if (max_int4_128b_rows > 1) { Y(2, 8, 1, 2); } if (max_int4_128b_rows > 2) { Y(1, 4, 2, 4); } if (max_int4_128b_rows > 4) { Y(1, 4, 4, 8); } } })); #undef X // launch 8-bit int kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::INT8_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }hipLaunchKernelGGL((}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly>), \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \ output.packed_accessor32<output_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "int8_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_int8_D > 0) { auto max_int8_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_int8_D, SparseType::INT8, row_alignment), 128); TORCH_CHECK(max_int8_128b_rows <= 16); if (max_int8_128b_rows > 0) { Y(2, 8, 0, 1); } if (max_int8_128b_rows > 1) { Y(2, 4, 1, 2); } if (max_int8_128b_rows > 2) { Y(2, 4, 2, 4); } if (max_int8_128b_rows > 4) { Y(2, 4, 4, 8); } if (max_int8_128b_rows > 8) { Y(2, 2, 8, 16); } } })); #undef X // launch 8-bit float kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::FP8_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }hipLaunchKernelGGL((}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly>), \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \ fp8_exponent_bits, \ fp8_exponent_bias, \ output.packed_accessor32<output_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "fp8_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_float8_D > 0) { auto max_fp8_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_float8_D, SparseType::FP8, row_alignment), 128); TORCH_CHECK(max_fp8_128b_rows <= 16); if (max_fp8_128b_rows > 0) { Y(2, 8, 0, 1); } if (max_fp8_128b_rows > 1) { Y(2, 4, 1, 2); } if (max_fp8_128b_rows > 2) { Y(2, 4, 2, 4); } if (max_fp8_128b_rows > 4) { Y(2, 4, 4, 8); } if (max_fp8_128b_rows > 8) { Y(2, 2, 4, 8); } } })); #undef X // launch 16-bit kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::FP16_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }hipLaunchKernelGGL((}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly>), \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \ output.packed_accessor32<output_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "fp16_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_float16_D > 0) { auto 
max_fp16_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_float16_D, SparseType::FP16, row_alignment), 128); TORCH_CHECK(max_fp16_128b_rows <= 32); if (max_fp16_128b_rows > 0) { Y(2, 8, 0, 2); } if (max_fp16_128b_rows > 2) { Y(2, 8, 2, 4); } if (max_fp16_128b_rows > 4) { Y(2, 4, 4, 8); } if (max_fp16_128b_rows > 8) { Y(2, 2, 8, 16); } if (max_fp16_128b_rows > 16) { Y(2, 1, 16, 32); } } })); #undef X // launch 32-bit kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::FP32_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }hipLaunchKernelGGL((}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly>), \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \ output.packed_accessor32<output_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "fp32_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_float32_D > 0) { auto max_fp32_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_float32_D, SparseType::FP32, row_alignment), 128); TORCH_CHECK(max_fp32_128b_rows <= 64); if (max_fp32_128b_rows > 0) { Y(2, 4, 0, 4); } if (max_fp32_128b_rows > 4) { Y(2, 2, 4, 16); } if (max_fp32_128b_rows > 16) { Y(1, 1, 16, 32); } if (max_fp32_128b_rows > 32) { Y(1, 1, 32, 64); } } })); #undef X return output; } // clang-format on
c2364e1da5f076de0d25e5b7dcbedbfe20b8bb93.cu
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // clang-format off {% set wdesc = "weighted" if weighted else "unweighted" %} #include "codegen/embedding_forward_template_helpers.cuh" using namespace fbgemm_gpu; using Tensor = at::Tensor; namespace nbit { /* Looping over the weight types is requires to generate all the C++ template declarations (not definitions) that will be invoked by the function `Tensor int_nbit_split_embedding*_codegen_forward_*_cuda(...)` later in the same generated source file. */ {% for emb_weight_type in ["FP32", "FP16", "FP8", "INT8", "INT4", "INT2"] %} template<typename index_t, typename output_t, size_t OutputRowsPerThread, size_t WarpsPerBlock, size_t InputRowsInFlight, size_t MinNum128BRows, size_t MaxNum128BRows, bool DeviceOnly> __launch_bounds__(WarpsPerBlock * kWarpSize) __global__ void {{ type_map[emb_weight_type].enum_name }}_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }}_kernel_small_L( const at::PackedTensorAccessor64<uint8_t, 1, at::RestrictPtrTraits> dev_weights, const at::PackedTensorAccessor64<uint8_t, 1, at::RestrictPtrTraits> uvm_weights, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> weights_placements, const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets, const at::PackedTensorAccessor32<uint8_t, 1, at::RestrictPtrTraits> weights_tys, {% if not nobag %} const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets, {% else %} const int64_t D, {% endif %} FixedDivisor fd_B, // FixedDivisor(div_round_up(B, OutputRowsPerThread)) const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> indices, const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> offsets, {% if not nobag %} const int64_t pooling_mode, {% endif %} const int64_t row_alignment, {% if weighted %} at::PackedTensorAccessor32<float, 1, at::RestrictPtrTraits> indice_weights, {% endif %} {% if type_map[emb_weight_type].enum_name == "FP8" %} const int exponent_bits, const int exponent_bias, {% endif %} at::PackedTensorAccessor32<output_t, 2, at::RestrictPtrTraits> output, // [B][total_D], const at::PackedTensorAccessor64<uint8_t, 2, at::RestrictPtrTraits> lxu_cache_weights, const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> lxu_cache_locations ); {% endfor %} // for emb_weight_type in ["FP32", "FP16", "FP8", "INT8", "INT4", "INT2"] } Tensor int_nbit_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }}_cuda( Tensor dev_weights, Tensor uvm_weights, Tensor weights_placements, Tensor weights_offsets, Tensor weights_tys, {% if not nobag %} Tensor D_offsets, const int64_t total_D, {% else %} const int64_t D, {% endif %} const int64_t max_int2_D, const int64_t max_int4_D, const int64_t max_int8_D, const int64_t max_float16_D, const int64_t max_float32_D, Tensor indices, Tensor offsets, {% if not nobag %} const int64_t pooling_mode, {% endif %} const int64_t row_alignment, {% if weighted %} Tensor indice_weights, {% endif %} const int64_t output_dtype, Tensor lxu_cache_weights, Tensor lxu_cache_locations, const int64_t max_float8_D, const int64_t fp8_exponent_bits, const int64_t fp8_exponent_bias ) { TENSOR_ON_CUDA_GPU(dev_weights); TENSORS_ON_SAME_DEVICE(uvm_weights, dev_weights); TENSORS_ON_SAME_DEVICE(weights_placements, dev_weights); TENSORS_ON_SAME_DEVICE(weights_offsets, 
dev_weights); TENSORS_ON_SAME_DEVICE(weights_tys, dev_weights); {% if not nobag %} TENSORS_ON_SAME_DEVICE(D_offsets, dev_weights); {% endif %} TENSORS_ON_SAME_DEVICE(indices, dev_weights); TENSORS_ON_SAME_DEVICE(offsets, dev_weights); {% if weighted %} TENSORS_EMPTY_OR_ON_SAME_DEVICE(indice_weights, dev_weights); {% endif %} TENSORS_EMPTY_OR_ON_SAME_DEVICE(lxu_cache_weights, dev_weights); TENSORS_EMPTY_OR_ON_SAME_DEVICE(lxu_cache_locations, dev_weights); at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(dev_weights.get_device()); // kernels assume indices are contiguous. indices = indices.contiguous(); {% if not nobag %} const int32_t T = D_offsets.numel() - 1; {% else %} const int32_t total_L = indices.numel(); const int32_t T = weights_offsets.numel(); {% endif %} TORCH_CHECK(T > 0); // offsets = [B x T + 1] const int32_t B = (offsets.size(0) - 1) / T; TORCH_CHECK(B >= 0); {% if not nobag %} TORCH_CHECK(total_D > 0); {% else %} TORCH_CHECK(D > 0); {% endif %} Tensor output; const int kINT8QparamsBytes = 8; SparseType o_dtype = static_cast<SparseType>(output_dtype); TORCH_CHECK(o_dtype == SparseType::FP32 || o_dtype == SparseType::FP16 || o_dtype == SparseType::BF16 || o_dtype == SparseType::INT8); {% if not nobag %} int64_t total_adjusted_D = total_D; if (o_dtype == SparseType::INT8) { total_adjusted_D += T * kINT8QparamsBytes; } if (indices.numel() == 0) { output = at::zeros({B, total_adjusted_D}, dev_weights.options().dtype(getScalarType(o_dtype))); } else { output = at::empty({B, total_adjusted_D}, dev_weights.options().dtype(getScalarType(o_dtype))); } {% else %} int64_t adjusted_D = D; if (o_dtype == SparseType::INT8) { adjusted_D += T * kINT8QparamsBytes; } if (total_L == 0) { output = at::zeros({total_L, adjusted_D}, dev_weights.options().dtype(getScalarType(o_dtype))); } else { output = at::empty({total_L, adjusted_D}, dev_weights.options().dtype(getScalarType(o_dtype))); } {% endif %} if (B == 0 || indices.numel() == 0) { return output; } using index_t = int32_t; constexpr int32_t kWarpsPerBlock = 4; const auto device_only = lxu_cache_weights.numel() == 0 && uvm_weights.numel() == 0; #define Y(...) 
\ if (device_only) { \ X(true, __VA_ARGS__) \ } else { \ X(false, __VA_ARGS__) \ }; // launch 2-bit kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::INT2_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly><<< \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ at::cuda::getCurrentCUDAStream()>>>( \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \ output.packed_accessor32<output_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "int2_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_int2_D > 0) { auto max_int2_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_int2_D, SparseType::INT2, row_alignment), 128); TORCH_CHECK(max_int2_128b_rows <= 4); if (max_int2_128b_rows > 0) { Y(2, 16, 0, 1); } if (max_int2_128b_rows > 1) { Y(2, 8, 1, 2); } if (max_int2_128b_rows > 2) { Y(2, 8, 2, 4); } } })); #undef X // launch 4-bit kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::INT4_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly><<< \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ at::cuda::getCurrentCUDAStream()>>>( \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \ output.packed_accessor32<output_t, 2, 
at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "int4_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_int4_D > 0) { auto max_int4_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_int4_D, SparseType::INT4, row_alignment), 128); TORCH_CHECK(max_int4_128b_rows <= 8); if (max_int4_128b_rows > 0) { Y(4, 8, 0, 1); } if (max_int4_128b_rows > 1) { Y(2, 8, 1, 2); } if (max_int4_128b_rows > 2) { Y(1, 4, 2, 4); } if (max_int4_128b_rows > 4) { Y(1, 4, 4, 8); } } })); #undef X // launch 8-bit int kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::INT8_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly><<< \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ at::cuda::getCurrentCUDAStream()>>>( \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \ output.packed_accessor32<output_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "int8_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_int8_D > 0) { auto max_int8_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_int8_D, SparseType::INT8, row_alignment), 128); TORCH_CHECK(max_int8_128b_rows <= 16); if (max_int8_128b_rows > 0) { Y(2, 8, 0, 1); } if (max_int8_128b_rows > 1) { Y(2, 4, 1, 2); } if (max_int8_128b_rows > 2) { Y(2, 4, 2, 4); } if (max_int8_128b_rows > 4) { Y(2, 4, 4, 8); } if (max_int8_128b_rows > 8) { Y(2, 2, 8, 16); } } })); #undef X // launch 8-bit float kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::FP8_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly><<< \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ at::cuda::getCurrentCUDAStream()>>>( \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, 
at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \ fp8_exponent_bits, \ fp8_exponent_bias, \ output.packed_accessor32<output_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "fp8_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_float8_D > 0) { auto max_fp8_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_float8_D, SparseType::FP8, row_alignment), 128); TORCH_CHECK(max_fp8_128b_rows <= 16); if (max_fp8_128b_rows > 0) { Y(2, 8, 0, 1); } if (max_fp8_128b_rows > 1) { Y(2, 4, 1, 2); } if (max_fp8_128b_rows > 2) { Y(2, 4, 2, 4); } if (max_fp8_128b_rows > 4) { Y(2, 4, 4, 8); } if (max_fp8_128b_rows > 8) { Y(2, 2, 4, 8); } } })); #undef X // launch 16-bit kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::FP16_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly><<< \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ at::cuda::getCurrentCUDAStream()>>>( \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \ output.packed_accessor32<output_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "fp16_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_float16_D > 0) { auto max_fp16_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_float16_D, SparseType::FP16, row_alignment), 128); TORCH_CHECK(max_fp16_128b_rows <= 32); if (max_fp16_128b_rows > 0) { Y(2, 
8, 0, 2); } if (max_fp16_128b_rows > 2) { Y(2, 8, 2, 4); } if (max_fp16_128b_rows > 4) { Y(2, 4, 4, 8); } if (max_fp16_128b_rows > 8) { Y(2, 2, 8, 16); } if (max_fp16_128b_rows > 16) { Y(2, 1, 16, 32); } } })); #undef X // launch 32-bit kernel #define X(DeviceOnly, OutputRowsPerThread, InputRowsInFlight, MinNum128BRows, MaxNum128BRows) \ nbit::FP32_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_{{ wdesc }}_kernel_small_L<index_t, output_t, OutputRowsPerThread, kWarpsPerBlock, InputRowsInFlight, MinNum128BRows, MaxNum128BRows, DeviceOnly><<< \ nbit::div_round_up(T * nbit::div_round_up(B, OutputRowsPerThread), kWarpsPerBlock), \ dim3(kWarpSize, kWarpsPerBlock), \ 0, \ at::cuda::getCurrentCUDAStream()>>>( \ dev_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ uvm_weights.packed_accessor64<uint8_t, 1, at::RestrictPtrTraits>(), \ weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), \ weights_tys.packed_accessor32<uint8_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(), \ {% else %} \ D, \ {% endif %} \ FixedDivisor(div_round_up(B, OutputRowsPerThread)), \ indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), \ {% if not nobag %} \ pooling_mode, \ {% endif %} \ row_alignment, \ {% if weighted %} indice_weights.packed_accessor32<float, 1, at::RestrictPtrTraits>(), {% endif %} \ output.packed_accessor32<output_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_weights.packed_accessor64<uint8_t, 2, at::RestrictPtrTraits>(), \ lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>() \ ); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ DISPATCH_OUTPUT_TYPES(output.scalar_type(), "fp32_split_embedding{{ "_nobag" if nobag else "" }}_codegen_forward_kernel", ([&] { if (max_float32_D > 0) { auto max_fp32_128b_rows = nbit::div_round_up(nbit::padded_row_size_in_bytes(max_float32_D, SparseType::FP32, row_alignment), 128); TORCH_CHECK(max_fp32_128b_rows <= 64); if (max_fp32_128b_rows > 0) { Y(2, 4, 0, 4); } if (max_fp32_128b_rows > 4) { Y(2, 2, 4, 16); } if (max_fp32_128b_rows > 16) { Y(1, 1, 16, 32); } if (max_fp32_128b_rows > 32) { Y(1, 1, 32, 64); } } })); #undef X return output; } // clang-format on
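// The X/Y macro ladders above choose a kernel specialization from the padded
// embedding row size measured in 128-byte units: each launched configuration is
// meant to handle rows whose 128B count falls in (MinNum128BRows, MaxNum128BRows].
// A minimal standalone sketch of that dispatch pattern, for illustration only;
// launch_config_sketch and dispatch_sketch are hypothetical names with no FBGEMM
// dependencies, and the configuration table mirrors the INT4 ladder above.
#include <cstdio>

template <int InputRowsInFlight, int MinNum128BRows, int MaxNum128BRows>
void launch_config_sketch(int rows_128b) {
  // A real launch would only process tables whose padded row size lies in
  // (MinNum128BRows, MaxNum128BRows] * 128 bytes; here we just report the choice.
  std::printf("rows_128b=%d -> config (%d, %d], %d input rows in flight\n",
              rows_128b, MinNum128BRows, MaxNum128BRows, InputRowsInFlight);
}

void dispatch_sketch(int max_row_bytes) {
  const int rows_128b = (max_row_bytes + 127) / 128;  // div_round_up(max_row_bytes, 128)
  if (rows_128b > 0) launch_config_sketch<8, 0, 1>(rows_128b);
  if (rows_128b > 1) launch_config_sketch<4, 1, 2>(rows_128b);
  if (rows_128b > 2) launch_config_sketch<4, 2, 4>(rows_128b);
  if (rows_128b > 4) launch_config_sketch<4, 4, 8>(rows_128b);
}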
7c915ee33e9de7c80a74b8ae08b6602c40efaffa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void reduction_interleaved_unrolling_blocks8_1(int * input, int * temp, int size) { int tid = threadIdx.x; //element index for this thread int index = blockDim.x * blockIdx.x * 8 + threadIdx.x; //local data pointer int * i_data = input + blockDim.x * blockIdx.x * 8; if ((index + 7 * blockDim.x) < size) { int a1 = input[index]; int a2 = input[index + blockDim.x]; int a3 = input[index + 2 * blockDim.x]; int a4 = input[index + 3 * blockDim.x]; int a5 = input[index + 4 * blockDim.x]; int a6 = input[index + 5 * blockDim.x]; int a7 = input[index + 6 * blockDim.x]; int a8 = input[index + 7 * blockDim.x]; input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8; } __syncthreads(); for (int offset = blockDim.x / 2; offset > 0; offset = offset / 2) { if (tid < offset) { i_data[tid] += i_data[tid + offset]; } __syncthreads(); } if (tid == 0) { temp[blockIdx.x] = i_data[0]; } }
7c915ee33e9de7c80a74b8ae08b6602c40efaffa.cu
#include "includes.h" __global__ void reduction_interleaved_unrolling_blocks8_1(int * input, int * temp, int size) { int tid = threadIdx.x; //element index for this thread int index = blockDim.x * blockIdx.x * 8 + threadIdx.x; //local data pointer int * i_data = input + blockDim.x * blockIdx.x * 8; if ((index + 7 * blockDim.x) < size) { int a1 = input[index]; int a2 = input[index + blockDim.x]; int a3 = input[index + 2 * blockDim.x]; int a4 = input[index + 3 * blockDim.x]; int a5 = input[index + 4 * blockDim.x]; int a6 = input[index + 5 * blockDim.x]; int a7 = input[index + 6 * blockDim.x]; int a8 = input[index + 7 * blockDim.x]; input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8; } __syncthreads(); for (int offset = blockDim.x / 2; offset > 0; offset = offset / 2) { if (tid < offset) { i_data[tid] += i_data[tid + offset]; } __syncthreads(); } if (tid == 0) { temp[blockIdx.x] = i_data[0]; } }
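// A minimal host-side driver sketch for the unrolled-by-8 reduction above. It
// assumes size is an exact multiple of 8 * blockDim.x (the kernel only
// pre-accumulates complete groups of eight segments) and that the per-block
// partial sums written to temp are finished on the CPU. Sizes and variable
// names here are illustrative, not part of the original file.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

int main() {
    const int block = 128;
    const int size = 1 << 22;            // multiple of 8 * block
    const int grid = size / (block * 8); // each block consumes 8 * block elements

    int *h_in = (int *)malloc(size * sizeof(int));
    int *h_tmp = (int *)malloc(grid * sizeof(int));
    long long ref = 0;
    for (int i = 0; i < size; i++) { h_in[i] = 1; ref += h_in[i]; }

    int *d_in, *d_tmp;
    cudaMalloc(&d_in, size * sizeof(int));
    cudaMalloc(&d_tmp, grid * sizeof(int));
    cudaMemcpy(d_in, h_in, size * sizeof(int), cudaMemcpyHostToDevice);

    reduction_interleaved_unrolling_blocks8_1<<<grid, block>>>(d_in, d_tmp, size);
    cudaMemcpy(h_tmp, d_tmp, grid * sizeof(int), cudaMemcpyDeviceToHost);

    long long gpu = 0;
    for (int i = 0; i < grid; i++) gpu += h_tmp[i]; // finish the reduction on the host
    printf("gpu sum = %lld, reference = %lld\n", gpu, ref);

    cudaFree(d_in); cudaFree(d_tmp); free(h_in); free(h_tmp);
    return 0;
}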
eea7146f06521598887b705224526073b1c57d0d.hip
// !!! This is a file automatically generated by hipify!!! // textureViewer // Simple GL texture viewer to preview 2D slices of textures produced by cudaNoise #define GL_GLEXT_PROTOTYPES #include <glut.h> #include <glext.h> #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> #include <iostream> #include <time.h> #include "cudanoise.cuh" #define DIM 512 uchar4 *devPtr; dim3 blocks(DIM / 16, DIM / 16); dim3 threads(16, 16); float zoom = 16.0f; int genSeed = 0; int selectedNoise = 0; GLuint bufferObj; cudaGraphicsResource *resource; __global__ void kernel(uchar4 *ptr, float zoomFactor, int samples, int seed, int noise) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float fx = x / (float)DIM; float fy = y / (float)DIM; fx += 74.824f; fy += 38.234f; float3 pos = make_float3(fx, fy, 0.0f); pos = cudaNoise::scaleVector(pos, zoomFactor); float acc = 0.0f; float du = 1.0f / ((float)DIM / zoomFactor); for (int i = 0; i < samples; i++) { float dx = cudaNoise::randomFloat(327482 + i * 2347 + seed) / (float)DIM * zoomFactor; float dy = cudaNoise::randomFloat(912472 + i * 118438 + seed) / (float)DIM * zoomFactor; float dz = cudaNoise::randomFloat(112348 + i * 68214 + seed) / (float)DIM * zoomFactor; float3 ditheredPos = make_float3(pos.x + dx, pos.y + dy, pos.z + dz); float val = 0.0f; switch (noise) { case(0): val = cudaNoise::perlinNoise(ditheredPos, 1.0f, seed); break; case(1): val = cudaNoise::simplexNoise(ditheredPos, 1.0f, seed); break; case(2): val = cudaNoise::worleyNoise(ditheredPos, 1.0f, seed, 300.1f, 4, 4, 1.0f); break; case(3): val = cudaNoise::repeaterPerlin(ditheredPos, 1.0f, seed, 128, 1.9f, 0.5f); break; case(4): val = cudaNoise::repeaterPerlinAbs(ditheredPos, 1.0f, seed, 128, 1.9f, 0.5f); break; case(5): val = cudaNoise::fractalSimplex(ditheredPos, 1.0f, seed, du, 512, 1.5f, 0.95f); break; case(6): val = cudaNoise::repeaterTurbulence(ditheredPos, 0.2f, 1.0f, seed, 0.8f, 32, cudaNoise::BASIS_PERLIN, cudaNoise::BASIS_PERLIN); break; case(7): val = cudaNoise::cubicValue(ditheredPos, 1.0f, seed); break; case(8): val = cudaNoise::spots(ditheredPos, 1.0f, seed, 0.1f, 0, 8, 1.0f, cudaNoise::SHAPE_STEP); break; } // val = cudaNoise::checker(ditheredPos, 1.0f, seed); // val = cudaNoise::discreteNoise(ditheredPos, 1.0f, 3478); // val = cudaNoise::linearValue(ditheredPos, 1.0f, seed); // val = cudaNoise::perlinNoise(ditheredPos, 1.0f, seed); // val = cudaNoise::simplexNoise(ditheredPos, 0.01f, seed); // val = cudaNoise::worleyNoise(ditheredPos, 1.0f, seed, 300.1f, 4, 4, 1.0f); // val = cudaNoise::repeater(ditheredPos, 1.0f, seed, 4, 1.5f, 0.75f, cudaNoise::BASIS_PERLIN); // val = cudaNoise::repeaterPerlin(ditheredPos, 1.0f, seed, 128, 1.9f, 0.5f); // val = cudaNoise::repeaterPerlinAbs(ditheredPos, 1.0f, seed, 128, 1.9f, 0.5f); // val = cudaNoise::repeaterSimplex(ditheredPos, 1.0f, seed, 128, 1.5f, 0.8f); // val = cudaNoise::repeaterSimplexAbs(ditheredPos, 1.0f, seed, 16, 1.5f, 0.8f); // val = cudaNoise::fractalSimplex(ditheredPos, 1.0f, seed, du, 512, 1.5f, 0.95f); // val = cudaNoise::turbulence(ditheredPos, 4.0f, 1.0f, seed, 0.02f, cudaNoise::BASIS_PERLIN, cudaNoise::BASIS_CHECKER); // val = cudaNoise::repeaterTurbulence(ditheredPos, 0.2f, 1.0f, seed, 0.8f, 32, CUDANOISE_PERLIN, CUDANOISE_PERLIN); // val = cudaNoise::cubicValue(ditheredPos, 1.0f, seed); // val = cudaNoise::fadedValue(ditheredPos, 1.0f); // val = cudaNoise::spots(ditheredPos, 1.0f, seed, 0.1f, 0, 8, 1.0f, cudaNoise::SHAPE_STEP); 
acc += val; } acc /= (float)samples; acc = cudaNoise::mapToUnsigned(acc); acc = cudaNoise::clamp(acc, 0.0f, 1.0f); unsigned char iVal = 255 * acc; ptr[offset].x = iVal; ptr[offset].y = iVal; ptr[offset].z = iVal; ptr[offset].w = 255; } void setSeed(int newSeed) { genSeed = newSeed; } void redrawTexture() { time_t startTime = clock(); hipGraphicsMapResources(1, &resource, NULL); kernel << < blocks, threads >> > (devPtr, zoom, 1, genSeed, selectedNoise); hipDeviceSynchronize(); hipGraphicsUnmapResources(1, &resource, NULL); time_t endTime = clock(); double time_spent = (double)(endTime - startTime) / CLOCKS_PER_SEC; printf("Time spent: %f\n", time_spent); glutPostRedisplay(); } static void idle_func(void) { } static void draw_func(void) { glDrawPixels(DIM, DIM, GL_RGBA, GL_UNSIGNED_BYTE, 0); glutSwapBuffers(); } static void key_func(unsigned char key, int x, int y) { switch (key) { // ESC to exit case 27: hipGraphicsUnregisterResource(resource); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0); glDeleteBuffers(1, &bufferObj); exit(0); break; // Plus to zoom in case 43: zoom *= 0.5f; redrawTexture(); break; // Minus to zoom out case 45: zoom *= 2.0f; redrawTexture(); break; // Dot to get the next noise function case 46: std::cout << "KEA" << std::endl; selectedNoise = (selectedNoise + 1) % 9; redrawTexture(); break; // Spacebar to get new seed case 32: clock_t t = clock(); unsigned int newSeed = (unsigned int)((double)t * 1000.0f); setSeed(newSeed); redrawTexture(); break; } } int main(int argc, char **argv) { hipDeviceProp_t prop; int dev; setSeed(time(NULL)); memset(&prop, 0, sizeof(hipDeviceProp_t)); prop.major = 1; prop.minor = 0; hipChooseDevice(&dev, &prop); hipGLSetGLDevice(dev); glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(DIM, DIM); glutCreateWindow("cudaNoise - Texture Viewer"); glGenBuffers(1, &bufferObj); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj); glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, DIM * DIM * 4, NULL, GL_DYNAMIC_DRAW_ARB); hipGraphicsGLRegisterBuffer(&resource, bufferObj, hipGraphicsMapFlagsNone); size_t size; hipGraphicsMapResources(1, &resource, NULL); hipGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resource); kernel << <blocks, threads >> > (devPtr, zoom, 1, genSeed, selectedNoise); hipGraphicsUnmapResources(1, &resource, NULL); glutIdleFunc(idle_func); glutKeyboardFunc(key_func); glutDisplayFunc(draw_func); glutMainLoop(); printf("\n\n"); }
eea7146f06521598887b705224526073b1c57d0d.cu
// textureViewer // Simple GL texture viewer to preview 2D slices of textures produced by cudaNoise #define GL_GLEXT_PROTOTYPES #include <glut.h> #include <glext.h> #include <cuda.h> #include <cuda_gl_interop.h> #include <iostream> #include <time.h> #include "cudanoise.cuh" #define DIM 512 uchar4 *devPtr; dim3 blocks(DIM / 16, DIM / 16); dim3 threads(16, 16); float zoom = 16.0f; int genSeed = 0; int selectedNoise = 0; GLuint bufferObj; cudaGraphicsResource *resource; __global__ void kernel(uchar4 *ptr, float zoomFactor, int samples, int seed, int noise) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float fx = x / (float)DIM; float fy = y / (float)DIM; fx += 74.824f; fy += 38.234f; float3 pos = make_float3(fx, fy, 0.0f); pos = cudaNoise::scaleVector(pos, zoomFactor); float acc = 0.0f; float du = 1.0f / ((float)DIM / zoomFactor); for (int i = 0; i < samples; i++) { float dx = cudaNoise::randomFloat(327482 + i * 2347 + seed) / (float)DIM * zoomFactor; float dy = cudaNoise::randomFloat(912472 + i * 118438 + seed) / (float)DIM * zoomFactor; float dz = cudaNoise::randomFloat(112348 + i * 68214 + seed) / (float)DIM * zoomFactor; float3 ditheredPos = make_float3(pos.x + dx, pos.y + dy, pos.z + dz); float val = 0.0f; switch (noise) { case(0): val = cudaNoise::perlinNoise(ditheredPos, 1.0f, seed); break; case(1): val = cudaNoise::simplexNoise(ditheredPos, 1.0f, seed); break; case(2): val = cudaNoise::worleyNoise(ditheredPos, 1.0f, seed, 300.1f, 4, 4, 1.0f); break; case(3): val = cudaNoise::repeaterPerlin(ditheredPos, 1.0f, seed, 128, 1.9f, 0.5f); break; case(4): val = cudaNoise::repeaterPerlinAbs(ditheredPos, 1.0f, seed, 128, 1.9f, 0.5f); break; case(5): val = cudaNoise::fractalSimplex(ditheredPos, 1.0f, seed, du, 512, 1.5f, 0.95f); break; case(6): val = cudaNoise::repeaterTurbulence(ditheredPos, 0.2f, 1.0f, seed, 0.8f, 32, cudaNoise::BASIS_PERLIN, cudaNoise::BASIS_PERLIN); break; case(7): val = cudaNoise::cubicValue(ditheredPos, 1.0f, seed); break; case(8): val = cudaNoise::spots(ditheredPos, 1.0f, seed, 0.1f, 0, 8, 1.0f, cudaNoise::SHAPE_STEP); break; } // val = cudaNoise::checker(ditheredPos, 1.0f, seed); // val = cudaNoise::discreteNoise(ditheredPos, 1.0f, 3478); // val = cudaNoise::linearValue(ditheredPos, 1.0f, seed); // val = cudaNoise::perlinNoise(ditheredPos, 1.0f, seed); // val = cudaNoise::simplexNoise(ditheredPos, 0.01f, seed); // val = cudaNoise::worleyNoise(ditheredPos, 1.0f, seed, 300.1f, 4, 4, 1.0f); // val = cudaNoise::repeater(ditheredPos, 1.0f, seed, 4, 1.5f, 0.75f, cudaNoise::BASIS_PERLIN); // val = cudaNoise::repeaterPerlin(ditheredPos, 1.0f, seed, 128, 1.9f, 0.5f); // val = cudaNoise::repeaterPerlinAbs(ditheredPos, 1.0f, seed, 128, 1.9f, 0.5f); // val = cudaNoise::repeaterSimplex(ditheredPos, 1.0f, seed, 128, 1.5f, 0.8f); // val = cudaNoise::repeaterSimplexAbs(ditheredPos, 1.0f, seed, 16, 1.5f, 0.8f); // val = cudaNoise::fractalSimplex(ditheredPos, 1.0f, seed, du, 512, 1.5f, 0.95f); // val = cudaNoise::turbulence(ditheredPos, 4.0f, 1.0f, seed, 0.02f, cudaNoise::BASIS_PERLIN, cudaNoise::BASIS_CHECKER); // val = cudaNoise::repeaterTurbulence(ditheredPos, 0.2f, 1.0f, seed, 0.8f, 32, CUDANOISE_PERLIN, CUDANOISE_PERLIN); // val = cudaNoise::cubicValue(ditheredPos, 1.0f, seed); // val = cudaNoise::fadedValue(ditheredPos, 1.0f); // val = cudaNoise::spots(ditheredPos, 1.0f, seed, 0.1f, 0, 8, 1.0f, cudaNoise::SHAPE_STEP); acc += val; } acc /= (float)samples; acc = 
cudaNoise::mapToUnsigned(acc); acc = cudaNoise::clamp(acc, 0.0f, 1.0f); unsigned char iVal = 255 * acc; ptr[offset].x = iVal; ptr[offset].y = iVal; ptr[offset].z = iVal; ptr[offset].w = 255; } void setSeed(int newSeed) { genSeed = newSeed; } void redrawTexture() { time_t startTime = clock(); cudaGraphicsMapResources(1, &resource, NULL); kernel << < blocks, threads >> > (devPtr, zoom, 1, genSeed, selectedNoise); cudaDeviceSynchronize(); cudaGraphicsUnmapResources(1, &resource, NULL); time_t endTime = clock(); double time_spent = (double)(endTime - startTime) / CLOCKS_PER_SEC; printf("Time spent: %f\n", time_spent); glutPostRedisplay(); } static void idle_func(void) { } static void draw_func(void) { glDrawPixels(DIM, DIM, GL_RGBA, GL_UNSIGNED_BYTE, 0); glutSwapBuffers(); } static void key_func(unsigned char key, int x, int y) { switch (key) { // ESC to exit case 27: cudaGraphicsUnregisterResource(resource); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0); glDeleteBuffers(1, &bufferObj); exit(0); break; // Plus to zoom in case 43: zoom *= 0.5f; redrawTexture(); break; // Minus to zoom out case 45: zoom *= 2.0f; redrawTexture(); break; // Dot to get the next noise function case 46: std::cout << "KEA" << std::endl; selectedNoise = (selectedNoise + 1) % 9; redrawTexture(); break; // Spacebar to get new seed case 32: clock_t t = clock(); unsigned int newSeed = (unsigned int)((double)t * 1000.0f); setSeed(newSeed); redrawTexture(); break; } } int main(int argc, char **argv) { cudaDeviceProp prop; int dev; setSeed(time(NULL)); memset(&prop, 0, sizeof(cudaDeviceProp)); prop.major = 1; prop.minor = 0; cudaChooseDevice(&dev, &prop); cudaGLSetGLDevice(dev); glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(DIM, DIM); glutCreateWindow("cudaNoise - Texture Viewer"); glGenBuffers(1, &bufferObj); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj); glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, DIM * DIM * 4, NULL, GL_DYNAMIC_DRAW_ARB); cudaGraphicsGLRegisterBuffer(&resource, bufferObj, cudaGraphicsMapFlagsNone); size_t size; cudaGraphicsMapResources(1, &resource, NULL); cudaGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resource); kernel << <blocks, threads >> > (devPtr, zoom, 1, genSeed, selectedNoise); cudaGraphicsUnmapResources(1, &resource, NULL); glutIdleFunc(idle_func); glutKeyboardFunc(key_func); glutDisplayFunc(draw_func); glutMainLoop(); printf("\n\n"); }
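// The viewer above ignores the return codes of the CUDA/GL interop calls. A
// minimal check helper is sketched below; CUDA_CHECK is a name introduced here
// for illustration, while every API call shown is one the file already uses.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                   \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Applied to the mapping done in redrawTexture():
//   CUDA_CHECK(cudaGraphicsMapResources(1, &resource, NULL));
//   kernel<<<blocks, threads>>>(devPtr, zoom, 1, genSeed, selectedNoise);
//   CUDA_CHECK(cudaGetLastError());
//   CUDA_CHECK(cudaDeviceSynchronize());
//   CUDA_CHECK(cudaGraphicsUnmapResources(1, &resource, NULL));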
02eb1b377b609d7fc26762e3cd75df52165eec77.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Open source copyright declaration based on BSD open source template: * http://www.opensource.org/licenses/bsd-license.php * * This file is part of the OP2 distribution. * * Copyright (c) 2011, Mike Giles and others. Please see the AUTHORS file in * the main source directory for a full list of copyright holders. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The name of Mike Giles may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Mike Giles ''AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Mike Giles BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define MPICH_IGNORE_CXX_SEEK #include <op_lib_mpi.h> #include <op_lib_c.h> __global__ void export_halo_gather(int* list, char * dat, int copy_size, int elem_size, char * export_buffer) { int id = blockIdx.x*blockDim.x+threadIdx.x; if (id<copy_size) { int off = 0; if (elem_size%16 == 0) { off += 16*(elem_size/16); for (int i = 0; i < elem_size/16; i++) { ((double2*)(export_buffer+id*elem_size))[i] = ((double2*)(dat+list[id]*elem_size))[i]; } } else if (elem_size%8 == 0) { off += 8*(elem_size/8); for (int i = 0; i < elem_size/8; i++) { ((double*)(export_buffer+id*elem_size))[i] = ((double*)(dat+list[id]*elem_size))[i]; } } for (int i = off;i<elem_size;i++) { export_buffer[id*elem_size+i]=dat[list[id]*elem_size+i]; } } } __global__ void export_halo_gather_soa(int* list, char * dat, int copy_size, int elem_size, char * export_buffer, int set_size, int dim) { int id = blockIdx.x*blockDim.x+threadIdx.x; int size_of = elem_size/dim; if (id<copy_size) { if (size_of == 8) { for (int i =0;i<dim;i++) { ((double*)(export_buffer+id*elem_size))[i] = ((double*)(dat+list[id]*size_of))[i*set_size]; } } else { for (int i =0;i<dim;i++) { for (int j =0;j<size_of;j++) { export_buffer[id*elem_size+i*size_of+j] = dat[list[id]*size_of+i*set_size*size_of+j]; } } } } } __global__ void import_halo_scatter_soa(int offset, char * dat, int copy_size, int elem_size, char * import_buffer, int set_size, int dim) { int id = blockIdx.x*blockDim.x+threadIdx.x; int size_of = elem_size/dim; if (id<copy_size) { if (size_of == 8) { for (int i = 0;i<dim;i++) { ((double*)(dat+(offset+id)*size_of))[i*set_size] = ((double*)(import_buffer+id*elem_size))[i]; } } else { for (int i =0;i<dim;i++) { for (int j =0;j<size_of;j++) { dat[(offset+id)*size_of+i*set_size*size_of+j] = import_buffer[id*elem_size+i*size_of+j]; } } } } } __global__ void import_halo_scatter_partial_soa(int* list, char * dat, int copy_size, int elem_size, char * import_buffer, int set_size, int dim) { int id = blockIdx.x*blockDim.x+threadIdx.x; int size_of = elem_size/dim; if (id<copy_size) { int element = list[id]; if (size_of == 8) { for (int i = 0;i<dim;i++) { ((double*)(dat+(element)*size_of))[i*set_size] = ((double*)(import_buffer+id*elem_size))[i]; } } else { for (int i =0;i<dim;i++) { for (int j =0;j<size_of;j++) { dat[(element)*size_of+i*set_size*size_of+j] = import_buffer[id*elem_size+i*size_of+j]; } } } } } __global__ void import_halo_scatter_partial(int* list, char * dat, int copy_size, int elem_size, char * import_buffer, int dim) { int id = blockIdx.x*blockDim.x+threadIdx.x; int size_of = elem_size/dim; if (id<copy_size) { int element = list[id]; if (size_of == 8) { for (int i = 0;i<dim;i++) { ((double*)(dat+element*elem_size))[i] = ((double*)(import_buffer+id*elem_size))[i]; } } else { for (int i =0;i<dim;i++) { for (int j =0;j<size_of;j++) { dat[element*elem_size+i*size_of+j] = import_buffer[id*elem_size+i*size_of+j]; } } } } } void gather_data_to_buffer(op_arg arg, halo_list exp_exec_list, halo_list exp_nonexec_list) { int threads = 192; int blocks = 1+((exp_exec_list->size-1)/192); if (strstr( arg.dat->type, ":soa")!= NULL || (OP_auto_soa && arg.dat->dim > 1)) { int set_size = arg.dat->set->size + arg.dat->set->exec_size + arg.dat->set->nonexec_size; hipLaunchKernelGGL(( export_halo_gather_soa), dim3(blocks),dim3(threads), 0, 0, export_exec_list_d[arg.dat->set->index], arg.data_d, exp_exec_list->size, arg.dat->size, arg.dat->buffer_d, set_size, arg.dat->dim ); int blocks2 = 1+((exp_nonexec_list->size-1)/192); hipLaunchKernelGGL(( 
export_halo_gather_soa), dim3(blocks2),dim3(threads), 0, 0, export_nonexec_list_d[arg.dat->set->index], arg.data_d, exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d+exp_exec_list->size*arg.dat->size, set_size, arg.dat->dim ); } else { hipLaunchKernelGGL(( export_halo_gather), dim3(blocks),dim3(threads), 0, 0, export_exec_list_d[arg.dat->set->index], arg.data_d, exp_exec_list->size, arg.dat->size, arg.dat->buffer_d); int blocks2 = 1+((exp_nonexec_list->size-1)/192); hipLaunchKernelGGL(( export_halo_gather), dim3(blocks2),dim3(threads), 0, 0, export_nonexec_list_d[arg.dat->set->index], arg.data_d, exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d+exp_exec_list->size*arg.dat->size); } } void gather_data_to_buffer_partial(op_arg arg, halo_list exp_nonexec_list) { int threads = 192; int blocks = 1+((exp_nonexec_list->size-1)/192); if (strstr( arg.dat->type, ":soa")!= NULL || (OP_auto_soa && arg.dat->dim > 1)) { int set_size = arg.dat->set->size + arg.dat->set->exec_size + arg.dat->set->nonexec_size; hipLaunchKernelGGL(( export_halo_gather_soa), dim3(blocks),dim3(threads), 0, 0, export_nonexec_list_partial_d[arg.map->index], arg.data_d, exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d, set_size, arg.dat->dim ); } else { hipLaunchKernelGGL(( export_halo_gather), dim3(blocks),dim3(threads), 0, 0, export_nonexec_list_partial_d[arg.map->index], arg.data_d, exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d); } } void scatter_data_from_buffer(op_arg arg) { int threads = 192; int blocks = 1+((arg.dat->set->exec_size-1)/192); if (strstr( arg.dat->type, ":soa")!= NULL || (OP_auto_soa && arg.dat->dim > 1)) { int set_size = arg.dat->set->size + arg.dat->set->exec_size + arg.dat->set->nonexec_size; int offset = arg.dat->set->size; int copy_size = arg.dat->set->exec_size; hipLaunchKernelGGL(( import_halo_scatter_soa), dim3(blocks),dim3(threads), 0, 0, offset, arg.data_d, copy_size, arg.dat->size, arg.dat->buffer_d_r, set_size, arg.dat->dim ); offset += arg.dat->set->exec_size; copy_size = arg.dat->set->nonexec_size; int blocks2 = 1+((arg.dat->set->nonexec_size-1)/192); hipLaunchKernelGGL(( import_halo_scatter_soa), dim3(blocks2),dim3(threads), 0, 0, offset, arg.data_d, copy_size, arg.dat->size, arg.dat->buffer_d_r+arg.dat->set->exec_size*arg.dat->size, set_size, arg.dat->dim ); } } void scatter_data_from_buffer_partial(op_arg arg) { int threads = 192; int blocks = 1+((OP_import_nonexec_permap[arg.map->index]->size-1)/192); if (strstr( arg.dat->type, ":soa" )!= NULL || (OP_auto_soa && arg.dat->dim > 1)) { int set_size = arg.dat->set->size + arg.dat->set->exec_size + arg.dat->set->nonexec_size; int init = OP_export_nonexec_permap[arg.map->index]->size; int copy_size = OP_import_nonexec_permap[arg.map->index]->size; hipLaunchKernelGGL(( import_halo_scatter_partial_soa), dim3(blocks),dim3(threads), 0, 0, import_nonexec_list_partial_d[arg.map->index], arg.data_d, copy_size, arg.dat->size, arg.dat->buffer_d+init*arg.dat->size, set_size, arg.dat->dim ); } else { int init = OP_export_nonexec_permap[arg.map->index]->size; int copy_size = OP_import_nonexec_permap[arg.map->index]->size; hipLaunchKernelGGL(( import_halo_scatter_partial), dim3(blocks),dim3(threads), 0, 0, import_nonexec_list_partial_d[arg.map->index], arg.data_d, copy_size, arg.dat->size, arg.dat->buffer_d+init*arg.dat->size, arg.dat->dim ); } }
02eb1b377b609d7fc26762e3cd75df52165eec77.cu
/* * Open source copyright declaration based on BSD open source template: * http://www.opensource.org/licenses/bsd-license.php * * This file is part of the OP2 distribution. * * Copyright (c) 2011, Mike Giles and others. Please see the AUTHORS file in * the main source directory for a full list of copyright holders. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The name of Mike Giles may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Mike Giles ''AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Mike Giles BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define MPICH_IGNORE_CXX_SEEK #include <op_lib_mpi.h> #include <op_lib_c.h> __global__ void export_halo_gather(int* list, char * dat, int copy_size, int elem_size, char * export_buffer) { int id = blockIdx.x*blockDim.x+threadIdx.x; if (id<copy_size) { int off = 0; if (elem_size%16 == 0) { off += 16*(elem_size/16); for (int i = 0; i < elem_size/16; i++) { ((double2*)(export_buffer+id*elem_size))[i] = ((double2*)(dat+list[id]*elem_size))[i]; } } else if (elem_size%8 == 0) { off += 8*(elem_size/8); for (int i = 0; i < elem_size/8; i++) { ((double*)(export_buffer+id*elem_size))[i] = ((double*)(dat+list[id]*elem_size))[i]; } } for (int i = off;i<elem_size;i++) { export_buffer[id*elem_size+i]=dat[list[id]*elem_size+i]; } } } __global__ void export_halo_gather_soa(int* list, char * dat, int copy_size, int elem_size, char * export_buffer, int set_size, int dim) { int id = blockIdx.x*blockDim.x+threadIdx.x; int size_of = elem_size/dim; if (id<copy_size) { if (size_of == 8) { for (int i =0;i<dim;i++) { ((double*)(export_buffer+id*elem_size))[i] = ((double*)(dat+list[id]*size_of))[i*set_size]; } } else { for (int i =0;i<dim;i++) { for (int j =0;j<size_of;j++) { export_buffer[id*elem_size+i*size_of+j] = dat[list[id]*size_of+i*set_size*size_of+j]; } } } } } __global__ void import_halo_scatter_soa(int offset, char * dat, int copy_size, int elem_size, char * import_buffer, int set_size, int dim) { int id = blockIdx.x*blockDim.x+threadIdx.x; int size_of = elem_size/dim; if (id<copy_size) { if (size_of == 8) { for (int i = 0;i<dim;i++) { ((double*)(dat+(offset+id)*size_of))[i*set_size] = ((double*)(import_buffer+id*elem_size))[i]; } } else { for (int i =0;i<dim;i++) { for (int j =0;j<size_of;j++) { dat[(offset+id)*size_of+i*set_size*size_of+j] = import_buffer[id*elem_size+i*size_of+j]; } } } } } __global__ void 
import_halo_scatter_partial_soa(int* list, char * dat, int copy_size, int elem_size, char * import_buffer, int set_size, int dim) { int id = blockIdx.x*blockDim.x+threadIdx.x; int size_of = elem_size/dim; if (id<copy_size) { int element = list[id]; if (size_of == 8) { for (int i = 0;i<dim;i++) { ((double*)(dat+(element)*size_of))[i*set_size] = ((double*)(import_buffer+id*elem_size))[i]; } } else { for (int i =0;i<dim;i++) { for (int j =0;j<size_of;j++) { dat[(element)*size_of+i*set_size*size_of+j] = import_buffer[id*elem_size+i*size_of+j]; } } } } } __global__ void import_halo_scatter_partial(int* list, char * dat, int copy_size, int elem_size, char * import_buffer, int dim) { int id = blockIdx.x*blockDim.x+threadIdx.x; int size_of = elem_size/dim; if (id<copy_size) { int element = list[id]; if (size_of == 8) { for (int i = 0;i<dim;i++) { ((double*)(dat+element*elem_size))[i] = ((double*)(import_buffer+id*elem_size))[i]; } } else { for (int i =0;i<dim;i++) { for (int j =0;j<size_of;j++) { dat[element*elem_size+i*size_of+j] = import_buffer[id*elem_size+i*size_of+j]; } } } } } void gather_data_to_buffer(op_arg arg, halo_list exp_exec_list, halo_list exp_nonexec_list) { int threads = 192; int blocks = 1+((exp_exec_list->size-1)/192); if (strstr( arg.dat->type, ":soa")!= NULL || (OP_auto_soa && arg.dat->dim > 1)) { int set_size = arg.dat->set->size + arg.dat->set->exec_size + arg.dat->set->nonexec_size; export_halo_gather_soa<<<blocks,threads>>>(export_exec_list_d[arg.dat->set->index], arg.data_d, exp_exec_list->size, arg.dat->size, arg.dat->buffer_d, set_size, arg.dat->dim ); int blocks2 = 1+((exp_nonexec_list->size-1)/192); export_halo_gather_soa<<<blocks2,threads>>>(export_nonexec_list_d[arg.dat->set->index], arg.data_d, exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d+exp_exec_list->size*arg.dat->size, set_size, arg.dat->dim ); } else { export_halo_gather<<<blocks,threads>>>(export_exec_list_d[arg.dat->set->index], arg.data_d, exp_exec_list->size, arg.dat->size, arg.dat->buffer_d); int blocks2 = 1+((exp_nonexec_list->size-1)/192); export_halo_gather<<<blocks2,threads>>>(export_nonexec_list_d[arg.dat->set->index], arg.data_d, exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d+exp_exec_list->size*arg.dat->size); } } void gather_data_to_buffer_partial(op_arg arg, halo_list exp_nonexec_list) { int threads = 192; int blocks = 1+((exp_nonexec_list->size-1)/192); if (strstr( arg.dat->type, ":soa")!= NULL || (OP_auto_soa && arg.dat->dim > 1)) { int set_size = arg.dat->set->size + arg.dat->set->exec_size + arg.dat->set->nonexec_size; export_halo_gather_soa<<<blocks,threads>>>(export_nonexec_list_partial_d[arg.map->index], arg.data_d, exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d, set_size, arg.dat->dim ); } else { export_halo_gather<<<blocks,threads>>>(export_nonexec_list_partial_d[arg.map->index], arg.data_d, exp_nonexec_list->size, arg.dat->size, arg.dat->buffer_d); } } void scatter_data_from_buffer(op_arg arg) { int threads = 192; int blocks = 1+((arg.dat->set->exec_size-1)/192); if (strstr( arg.dat->type, ":soa")!= NULL || (OP_auto_soa && arg.dat->dim > 1)) { int set_size = arg.dat->set->size + arg.dat->set->exec_size + arg.dat->set->nonexec_size; int offset = arg.dat->set->size; int copy_size = arg.dat->set->exec_size; import_halo_scatter_soa<<<blocks,threads>>>(offset, arg.data_d, copy_size, arg.dat->size, arg.dat->buffer_d_r, set_size, arg.dat->dim ); offset += arg.dat->set->exec_size; copy_size = arg.dat->set->nonexec_size; int blocks2 = 
1+((arg.dat->set->nonexec_size-1)/192); import_halo_scatter_soa<<<blocks2,threads>>>(offset, arg.data_d, copy_size, arg.dat->size, arg.dat->buffer_d_r+arg.dat->set->exec_size*arg.dat->size, set_size, arg.dat->dim ); } } void scatter_data_from_buffer_partial(op_arg arg) { int threads = 192; int blocks = 1+((OP_import_nonexec_permap[arg.map->index]->size-1)/192); if (strstr( arg.dat->type, ":soa" )!= NULL || (OP_auto_soa && arg.dat->dim > 1)) { int set_size = arg.dat->set->size + arg.dat->set->exec_size + arg.dat->set->nonexec_size; int init = OP_export_nonexec_permap[arg.map->index]->size; int copy_size = OP_import_nonexec_permap[arg.map->index]->size; import_halo_scatter_partial_soa<<<blocks,threads>>>(import_nonexec_list_partial_d[arg.map->index], arg.data_d, copy_size, arg.dat->size, arg.dat->buffer_d+init*arg.dat->size, set_size, arg.dat->dim ); } else { int init = OP_export_nonexec_permap[arg.map->index]->size; int copy_size = OP_import_nonexec_permap[arg.map->index]->size; import_halo_scatter_partial<<<blocks,threads>>>(import_nonexec_list_partial_d[arg.map->index], arg.data_d, copy_size, arg.dat->size, arg.dat->buffer_d+init*arg.dat->size, arg.dat->dim ); } }
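// The gather/scatter kernels above widen the per-element copy to 16- or 8-byte
// moves whenever the element size allows it. A standalone restatement of that
// copy strategy; copy_element is a name introduced here for illustration, and it
// assumes, like the kernels above, that 16-byte-divisible elements are also
// 16-byte aligned in memory.
__device__ inline void copy_element(char *dst, const char *src, int elem_size) {
  if (elem_size % 16 == 0) {
    for (int i = 0; i < elem_size / 16; i++)          // 16-byte vector copies
      ((double2 *)dst)[i] = ((const double2 *)src)[i];
  } else if (elem_size % 8 == 0) {
    for (int i = 0; i < elem_size / 8; i++)           // 8-byte copies
      ((double *)dst)[i] = ((const double *)src)[i];
  } else {
    for (int i = 0; i < elem_size; i++)               // byte-wise fallback
      dst[i] = src[i];
  }
}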
4e818db49c2b707165c9b280cf1c74d4db8b5fc4.hip
// !!! This is a file automatically generated by hipify!!! /** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/batch_triangular/Xsyrk_batch.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 2.0.0 * @author Ali Charara * @date 2017-11-13 **/ #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "rocblas.h" #include "kblas.h" #include "kblas_struct.h" #include "operators.h" #include "defs.h" #include "kblas_common.h" #include "batch_common.ch" //============================================================================================== #include "Xblas_core.ch" #include "Xhelper_funcs.ch" #include "Xsyrk_batch_drivers.cuh" //============================================================================================== //Non-Strided form // workspace needed: device pointers // A, B: host pointer to array of device pointers to device buffers int Xsyrk_batch_offset( kblasHandle_t handle, char uplo, char trans, const int m, const int n, const TYPE alpha, const TYPE** A, int A_row_off, int A_col_off, int lda, const TYPE beta, TYPE** B, int B_row_off, int B_col_off, int ldb, int batchCount){ return Xsyrk_batch_core(handle, uplo, trans, m, n, alpha, A, A_row_off, A_col_off, lda, beta, B, B_row_off, B_col_off, ldb, batchCount); } // workspace needed: device pointers // A, B: host pointer to array of device pointers to device buffers int kblas_syrk_batch(kblasHandle_t handle, char uplo, char trans, const int m, const int n, const TYPE alpha, const TYPE** A, int lda, const TYPE beta, TYPE** B, int ldb, int batchCount){ return Xsyrk_batch_core(handle, uplo, trans, m, n, alpha, A, 0, 0, lda, beta, B, 0, 0, ldb, batchCount); } extern "C" { // workspace needed: device pointers // A, B: host pointer to array of device pointers to device buffers int kblasXsyrk_batch(kblasHandle_t handle, char uplo, char trans, const int m, const int n, const TYPE alpha, const TYPE** A, int lda, const TYPE beta, TYPE** B, int ldb, int batchCount){ return Xsyrk_batch_core(handle, uplo, trans, m, n, alpha, A, 0, 0, lda, beta, B, 0, 0, ldb, batchCount); } }//extern "C" //============================================================================================== //Strided form // workspace needed: device pointers // A, B: host pointer to device buffers int kblas_syrk_batch( kblasHandle_t handle, char uplo, char trans, const int m, const int n, const TYPE alpha, const TYPE* A, int lda, long strideA, const TYPE beta, TYPE* B, int ldb, long strideB, int batchCount){ return Xsyrk_batch_strided_core(handle, uplo, trans, m, n, alpha, A, lda, strideA, beta, B, ldb, strideB, batchCount); } extern "C" { // workspace needed: device pointers // A, B: host pointer to device buffers int kblasXsyrk_batch_strided(kblasHandle_t handle, char uplo, char trans, const int m, const int n, const TYPE alpha, const TYPE* A, int lda, long strideA, const TYPE beta, TYPE* B, int ldb, long strideB, int batchCount){ return Xsyrk_batch_strided_core(handle, uplo, trans, m, n, alpha, A, lda, strideA, beta, B, ldb, strideB, batchCount); } }//extern C
4e818db49c2b707165c9b280cf1c74d4db8b5fc4.cu
/** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/batch_triangular/Xsyrk_batch.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 2.0.0 * @author Ali Charara * @date 2017-11-13 **/ #include <stdlib.h> #include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include "cublas_v2.h" #include "kblas.h" #include "kblas_struct.h" #include "operators.h" #include "defs.h" #include "kblas_common.h" #include "batch_common.ch" //============================================================================================== #include "Xblas_core.ch" #include "Xhelper_funcs.ch" #include "Xsyrk_batch_drivers.cuh" //============================================================================================== //Non-Strided form // workspace needed: device pointers // A, B: host pointer to array of device pointers to device buffers int Xsyrk_batch_offset( kblasHandle_t handle, char uplo, char trans, const int m, const int n, const TYPE alpha, const TYPE** A, int A_row_off, int A_col_off, int lda, const TYPE beta, TYPE** B, int B_row_off, int B_col_off, int ldb, int batchCount){ return Xsyrk_batch_core(handle, uplo, trans, m, n, alpha, A, A_row_off, A_col_off, lda, beta, B, B_row_off, B_col_off, ldb, batchCount); } // workspace needed: device pointers // A, B: host pointer to array of device pointers to device buffers int kblas_syrk_batch(kblasHandle_t handle, char uplo, char trans, const int m, const int n, const TYPE alpha, const TYPE** A, int lda, const TYPE beta, TYPE** B, int ldb, int batchCount){ return Xsyrk_batch_core(handle, uplo, trans, m, n, alpha, A, 0, 0, lda, beta, B, 0, 0, ldb, batchCount); } extern "C" { // workspace needed: device pointers // A, B: host pointer to array of device pointers to device buffers int kblasXsyrk_batch(kblasHandle_t handle, char uplo, char trans, const int m, const int n, const TYPE alpha, const TYPE** A, int lda, const TYPE beta, TYPE** B, int ldb, int batchCount){ return Xsyrk_batch_core(handle, uplo, trans, m, n, alpha, A, 0, 0, lda, beta, B, 0, 0, ldb, batchCount); } }//extern "C" //============================================================================================== //Strided form // workspace needed: device pointers // A, B: host pointer to device buffers int kblas_syrk_batch( kblasHandle_t handle, char uplo, char trans, const int m, const int n, const TYPE alpha, const TYPE* A, int lda, long strideA, const TYPE beta, TYPE* B, int ldb, long strideB, int batchCount){ return Xsyrk_batch_strided_core(handle, uplo, trans, m, n, alpha, A, lda, strideA, beta, B, ldb, strideB, batchCount); } extern "C" { // workspace needed: device pointers // A, B: host pointer to device buffers int kblasXsyrk_batch_strided(kblasHandle_t handle, char uplo, char trans, const int m, const int n, const TYPE alpha, const TYPE* A, int lda, long strideA, const TYPE beta, TYPE* B, int ldb, long strideB, int batchCount){ return Xsyrk_batch_strided_core(handle, uplo, trans, m, n, alpha, A, lda, strideA, beta, B, ldb, strideB, batchCount); } }//extern C
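// A hedged usage sketch for the strided interface declared above. TYPE is fixed
// by the KBLAS build (e.g. double); the handle setup via kblasCreate/kblasDestroy
// is assumed from the public kblas.h API, and all sizes, strides and device
// pointers below are illustrative placeholders, not values from this file.
//
//   kblasHandle_t handle;
//   kblasCreate(&handle);
//   // Usual syrk convention: B (m x m, lower) = alpha * A (m x n) * A^T + beta * B,
//   // repeated batchCount times with constant strides between problems.
//   kblasXsyrk_batch_strided(handle, 'L', 'N',
//                            m, n,
//                            alpha, d_A, ldda, strideA,
//                            beta,  d_B, lddb, strideB,
//                            batchCount);
//   kblasDestroy(&handle);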
8d15ab25b20dd5918e87ee5759eb42ae37e1a977.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>

int *a, *b;  // host data
int *c, *c2; // results (GPU and CPU)

//Cuda error checking - non mandatory
void cudaCheckError()
{
  hipError_t e = hipGetLastError();
  if (e != hipSuccess) {
    printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(e));
    exit(0);
  }
}

//GPU kernel: one thread per element of the N x N product, C = A * B + C
__global__ void saxpy(int *A, int *B, int *C, int N)
{
  // Flatten the 2D block into a global element index. blockIdx has to be part
  // of the index, otherwise every block computes (and races on) the same
  // blockDim.x * blockDim.y elements.
  int ci = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
  if (ci < N * N) {
    int ai = (ci / N) * N; // start of this element's row in A
    int bi = ci % N;       // start of this element's column in B
    int result = 0;
    for (int i = 0; i < N; i++) {
      result += A[ai] * B[bi];
      ai++;
      bi += N;
    }
    C[ci] = result + C[ci];
  }
}

//CPU function, N matrix width
void saxpy_h(int *A1, int *B1, int *C1, int N)
{
  int ai, bi, result;
  for (int i = 0; i < N * N; i++) {
    ai = i - i % N; // start of the row in A1 (i - i%N == row * N)
    bi = i % N;
    result = 0;
    for (int j = 0; j < N; j++) {
      result += A1[ai] * B1[bi]; // accumulate, matching the GPU kernel
      ai++;
      bi += N;
    }
    C1[i] = result + C1[i];
  }
}

int main(int argc, char **argv)
{
  printf("Begin \n");
  //Matrix side size
  int n = 2500;
  //memory allocation
  a = (int *) malloc(n*n*sizeof(int));
  b = (int *) malloc(n*n*sizeof(int));
  c = (int *) malloc(n*n*sizeof(int));
  c2 = (int *) malloc(n*n*sizeof(int));
  int *a_d, *b_d, *c_d;
  // Data filling; c and c2 are the accumulators and must start at zero
  for (int i = 0; i < n*n; i++)
    a[i] = i, b[i] = i, c[i] = 0, c2[i] = 0;
  printf("Allocating device memory on host..\n");
  //GPU memory allocation
  hipMalloc((void **) &a_d, n*n*sizeof(int));
  hipMalloc((void **) &b_d, n*n*sizeof(int));
  hipMalloc((void **) &c_d, n*n*sizeof(int));
  printf("Copying to device..\n");
  hipMemcpy(a_d, a, n*n*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(b_d, b, n*n*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(c_d, c, n*n*sizeof(int), hipMemcpyHostToDevice); // C must be initialized on the device as well
  dim3 threadsPerBlock(10, 10);
  dim3 numBlocks((n*n)/(10*10), 1);
  clock_t start_d = clock();
  printf("Doing GPU matrix saxpy\n");
  hipLaunchKernelGGL(saxpy, dim3(numBlocks), dim3(threadsPerBlock), 0, 0, a_d, b_d, c_d, n);
  cudaCheckError();
  //Wait for kernel call to finish
  hipDeviceSynchronize();
  clock_t end_d = clock();
  printf("Doing CPU matrix saxpy\n");
  clock_t start_h = clock();
  saxpy_h(a, b, c2, n);
  clock_t end_h = clock();
  //Time computing
  double time_d = (double)(end_d - start_d)/CLOCKS_PER_SEC;
  double time_h = (double)(end_h - start_h)/CLOCKS_PER_SEC;
  //Copying data back to host, this is a blocking call and will not start until all kernels are finished
  hipMemcpy(c, c_d, n*n*sizeof(int), hipMemcpyDeviceToHost);
  printf("n = %d \t GPU time = %fs \t CPU time = %fs\n", n, time_d, time_h);
  //Free GPU memory
  hipFree(a_d);
  hipFree(b_d);
  hipFree(c_d);
  return 0;
}
8d15ab25b20dd5918e87ee5759eb42ae37e1a977.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

int *a, *b;  // host data
int *c, *c2; // results (GPU and CPU)

//Cuda error checking - non mandatory
void cudaCheckError()
{
  cudaError_t e = cudaGetLastError();
  if (e != cudaSuccess) {
    printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e));
    exit(0);
  }
}

//GPU kernel: one thread per element of the N x N product, C = A * B + C
__global__ void saxpy(int *A, int *B, int *C, int N)
{
  // Flatten the 2D block into a global element index. blockIdx has to be part
  // of the index, otherwise every block computes (and races on) the same
  // blockDim.x * blockDim.y elements.
  int ci = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
  if (ci < N * N) {
    int ai = (ci / N) * N; // start of this element's row in A
    int bi = ci % N;       // start of this element's column in B
    int result = 0;
    for (int i = 0; i < N; i++) {
      result += A[ai] * B[bi];
      ai++;
      bi += N;
    }
    C[ci] = result + C[ci];
  }
}

//CPU function, N matrix width
void saxpy_h(int *A1, int *B1, int *C1, int N)
{
  int ai, bi, result;
  for (int i = 0; i < N * N; i++) {
    ai = i - i % N; // start of the row in A1 (i - i%N == row * N)
    bi = i % N;
    result = 0;
    for (int j = 0; j < N; j++) {
      result += A1[ai] * B1[bi]; // accumulate, matching the GPU kernel
      ai++;
      bi += N;
    }
    C1[i] = result + C1[i];
  }
}

int main(int argc, char **argv)
{
  printf("Begin \n");
  //Matrix side size
  int n = 2500;
  //memory allocation
  a = (int *) malloc(n*n*sizeof(int));
  b = (int *) malloc(n*n*sizeof(int));
  c = (int *) malloc(n*n*sizeof(int));
  c2 = (int *) malloc(n*n*sizeof(int));
  int *a_d, *b_d, *c_d;
  // Data filling; c and c2 are the accumulators and must start at zero
  for (int i = 0; i < n*n; i++)
    a[i] = i, b[i] = i, c[i] = 0, c2[i] = 0;
  printf("Allocating device memory on host..\n");
  //GPU memory allocation
  cudaMalloc((void **) &a_d, n*n*sizeof(int));
  cudaMalloc((void **) &b_d, n*n*sizeof(int));
  cudaMalloc((void **) &c_d, n*n*sizeof(int));
  printf("Copying to device..\n");
  cudaMemcpy(a_d, a, n*n*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(b_d, b, n*n*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(c_d, c, n*n*sizeof(int), cudaMemcpyHostToDevice); // C must be initialized on the device as well
  dim3 threadsPerBlock(10, 10);
  dim3 numBlocks((n*n)/(10*10), 1);
  clock_t start_d = clock();
  printf("Doing GPU matrix saxpy\n");
  saxpy<<<numBlocks, threadsPerBlock>>>(a_d, b_d, c_d, n);
  cudaCheckError();
  //Wait for kernel call to finish
  cudaDeviceSynchronize();
  clock_t end_d = clock();
  printf("Doing CPU matrix saxpy\n");
  clock_t start_h = clock();
  saxpy_h(a, b, c2, n);
  clock_t end_h = clock();
  //Time computing
  double time_d = (double)(end_d - start_d)/CLOCKS_PER_SEC;
  double time_h = (double)(end_h - start_h)/CLOCKS_PER_SEC;
  //Copying data back to host, this is a blocking call and will not start until all kernels are finished
  cudaMemcpy(c, c_d, n*n*sizeof(int), cudaMemcpyDeviceToHost);
  printf("n = %d \t GPU time = %fs \t CPU time = %fs\n", n, time_d, time_h);
  //Free GPU memory
  cudaFree(a_d);
  cudaFree(b_d);
  cudaFree(c_d);
  return 0;
}
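// With the GPU and CPU paths above computing the same product, a quick
// element-wise comparison (a hedged fragment meant for the end of main, before
// the buffers are freed) confirms they agree. Note that with a[i] = b[i] = i and
// n = 2500 the dot products overflow 32-bit int on both paths, so this is a
// consistency check rather than a check of meaningful values.
//
//   int mismatches = 0;
//   for (int i = 0; i < n * n; i++) {
//       if (c[i] != c2[i]) mismatches++;
//   }
//   printf("mismatches = %d\n", mismatches);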
f776cd0e1701b4e7f7ebb65d81502ce112b79bb6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kernel.h" #include "plugin.h" #include "gatherNMSOutputs.h" #include <vector> template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void gatherNMSOutputs_kernel( const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const int* indices, const T_SCORE* scores, const T_BBOX* bboxData, int* numDetections, T_BBOX* nmsedBoxes, T_BBOX* nmsedScores, T_BBOX* nmsedClasses, bool clipBoxes ) { if (keepTopK > topK) return; for (int i = blockIdx.x * nthds_per_cta + threadIdx.x; i < numImages * keepTopK; i += gridDim.x * nthds_per_cta) { const int imgId = i / keepTopK; const int detId = i % keepTopK; const int offset = imgId * numClasses * topK; const int index = indices[offset + detId]; const T_SCORE score = scores[offset + detId]; if (index == -1) { nmsedClasses[i] = -1; nmsedScores[i] = 0; nmsedBoxes[i * 4] = 0; nmsedBoxes[i * 4 + 1] = 0; nmsedBoxes[i * 4 + 2] = 0; nmsedBoxes[i * 4 + 3] = 0; } else { const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass)); const int bboxId = ((shareLocation ? (index % numPredsPerClass) : index % (numClasses * numPredsPerClass)) + bboxOffset) * 4; nmsedClasses[i] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label nmsedScores[i] = score; // confidence score // clipped bbox xmin nmsedBoxes[i * 4] = clipBoxes ? max(min(bboxData[bboxId], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId]; // clipped bbox ymin nmsedBoxes[i * 4 + 1] = clipBoxes ? max(min(bboxData[bboxId + 1], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 1]; // clipped bbox xmax nmsedBoxes[i * 4 + 2] = clipBoxes ? max(min(bboxData[bboxId + 2], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 2]; // clipped bbox ymax nmsedBoxes[i * 4 + 3] = clipBoxes ? 
max(min(bboxData[bboxId + 3], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 3]; atomicAdd(&numDetections[i / keepTopK], 1); } } } template <typename T_BBOX, typename T_SCORE> pluginStatus_t gatherNMSOutputs_gpu( hipStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const void* indices, const void* scores, const void* bboxData, void* numDetections, void* nmsedBoxes, void* nmsedScores, void* nmsedClasses, bool clipBoxes ) { hipMemsetAsync(numDetections, 0, numImages * sizeof(int), stream); const int BS = 32; const int GS = 32; hipLaunchKernelGGL(( gatherNMSOutputs_kernel<T_BBOX, T_SCORE, BS>), dim3(GS), dim3(BS), 0, stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, (int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData, (int*) numDetections, (T_BBOX*) nmsedBoxes, (T_BBOX*) nmsedScores, (T_BBOX*) nmsedClasses, clipBoxes ); CSC(hipGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } // gatherNMSOutputs LAUNCH CONFIG {{{ typedef pluginStatus_t (*nmsOutFunc)(hipStream_t, const bool, const int, const int, const int, const int, const int, const void*, const void*, const void*, void*, void*, void*, void*, bool); struct nmsOutLaunchConfig { DataType t_bbox; DataType t_score; nmsOutFunc function; nmsOutLaunchConfig(DataType t_bbox, DataType t_score) : t_bbox(t_bbox) , t_score(t_score) { } nmsOutLaunchConfig(DataType t_bbox, DataType t_score, nmsOutFunc function) : t_bbox(t_bbox) , t_score(t_score) , function(function) { } bool operator==(const nmsOutLaunchConfig& other) { return t_bbox == other.t_bbox && t_score == other.t_score; } }; using nvinfer1::DataType; static std::vector<nmsOutLaunchConfig> nmsOutFuncVec; bool nmsOutputInit() { nmsOutFuncVec.push_back(nmsOutLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, gatherNMSOutputs_gpu<float, float>)); return true; } static bool initialized = nmsOutputInit(); //}}} pluginStatus_t gatherNMSOutputs( hipStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const DataType DT_BBOX, const DataType DT_SCORE, const void* indices, const void* scores, const void* bboxData, void* numDetections, void* nmsedBoxes, void* nmsedScores, void* nmsedClasses, bool clipBoxes ) { nmsOutLaunchConfig lc = nmsOutLaunchConfig(DT_BBOX, DT_SCORE); for (unsigned i = 0; i < nmsOutFuncVec.size(); ++i) { if (lc == nmsOutFuncVec[i]) { DEBUG_PRINTF("gatherNMSOutputs kernel %d\n", i); return nmsOutFuncVec[i].function(stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, indices, scores, bboxData, numDetections, nmsedBoxes, nmsedScores, nmsedClasses, clipBoxes ); } } return STATUS_BAD_PARAM; }
f776cd0e1701b4e7f7ebb65d81502ce112b79bb6.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kernel.h" #include "plugin.h" #include "gatherNMSOutputs.h" #include <vector> template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void gatherNMSOutputs_kernel( const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const int* indices, const T_SCORE* scores, const T_BBOX* bboxData, int* numDetections, T_BBOX* nmsedBoxes, T_BBOX* nmsedScores, T_BBOX* nmsedClasses, bool clipBoxes ) { if (keepTopK > topK) return; for (int i = blockIdx.x * nthds_per_cta + threadIdx.x; i < numImages * keepTopK; i += gridDim.x * nthds_per_cta) { const int imgId = i / keepTopK; const int detId = i % keepTopK; const int offset = imgId * numClasses * topK; const int index = indices[offset + detId]; const T_SCORE score = scores[offset + detId]; if (index == -1) { nmsedClasses[i] = -1; nmsedScores[i] = 0; nmsedBoxes[i * 4] = 0; nmsedBoxes[i * 4 + 1] = 0; nmsedBoxes[i * 4 + 2] = 0; nmsedBoxes[i * 4 + 3] = 0; } else { const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass)); const int bboxId = ((shareLocation ? (index % numPredsPerClass) : index % (numClasses * numPredsPerClass)) + bboxOffset) * 4; nmsedClasses[i] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label nmsedScores[i] = score; // confidence score // clipped bbox xmin nmsedBoxes[i * 4] = clipBoxes ? max(min(bboxData[bboxId], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId]; // clipped bbox ymin nmsedBoxes[i * 4 + 1] = clipBoxes ? max(min(bboxData[bboxId + 1], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 1]; // clipped bbox xmax nmsedBoxes[i * 4 + 2] = clipBoxes ? max(min(bboxData[bboxId + 2], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 2]; // clipped bbox ymax nmsedBoxes[i * 4 + 3] = clipBoxes ? 
max(min(bboxData[bboxId + 3], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 3]; atomicAdd(&numDetections[i / keepTopK], 1); } } } template <typename T_BBOX, typename T_SCORE> pluginStatus_t gatherNMSOutputs_gpu( cudaStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const void* indices, const void* scores, const void* bboxData, void* numDetections, void* nmsedBoxes, void* nmsedScores, void* nmsedClasses, bool clipBoxes ) { cudaMemsetAsync(numDetections, 0, numImages * sizeof(int), stream); const int BS = 32; const int GS = 32; gatherNMSOutputs_kernel<T_BBOX, T_SCORE, BS><<<GS, BS, 0, stream>>>(shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, (int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData, (int*) numDetections, (T_BBOX*) nmsedBoxes, (T_BBOX*) nmsedScores, (T_BBOX*) nmsedClasses, clipBoxes ); CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } // gatherNMSOutputs LAUNCH CONFIG {{{ typedef pluginStatus_t (*nmsOutFunc)(cudaStream_t, const bool, const int, const int, const int, const int, const int, const void*, const void*, const void*, void*, void*, void*, void*, bool); struct nmsOutLaunchConfig { DataType t_bbox; DataType t_score; nmsOutFunc function; nmsOutLaunchConfig(DataType t_bbox, DataType t_score) : t_bbox(t_bbox) , t_score(t_score) { } nmsOutLaunchConfig(DataType t_bbox, DataType t_score, nmsOutFunc function) : t_bbox(t_bbox) , t_score(t_score) , function(function) { } bool operator==(const nmsOutLaunchConfig& other) { return t_bbox == other.t_bbox && t_score == other.t_score; } }; using nvinfer1::DataType; static std::vector<nmsOutLaunchConfig> nmsOutFuncVec; bool nmsOutputInit() { nmsOutFuncVec.push_back(nmsOutLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, gatherNMSOutputs_gpu<float, float>)); return true; } static bool initialized = nmsOutputInit(); //}}} pluginStatus_t gatherNMSOutputs( cudaStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const DataType DT_BBOX, const DataType DT_SCORE, const void* indices, const void* scores, const void* bboxData, void* numDetections, void* nmsedBoxes, void* nmsedScores, void* nmsedClasses, bool clipBoxes ) { nmsOutLaunchConfig lc = nmsOutLaunchConfig(DT_BBOX, DT_SCORE); for (unsigned i = 0; i < nmsOutFuncVec.size(); ++i) { if (lc == nmsOutFuncVec[i]) { DEBUG_PRINTF("gatherNMSOutputs kernel %d\n", i); return nmsOutFuncVec[i].function(stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, indices, scores, bboxData, numDetections, nmsedBoxes, nmsedScores, nmsedClasses, clipBoxes ); } } return STATUS_BAD_PARAM; }
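gatherNMSOutputs_kernel above is launched with a fixed 32-block, 32-thread configuration yet still covers all numImages * keepTopK detections because it uses a grid-stride loop: each thread advances by the total number of launched threads. A minimal sketch of that pattern follows; the names are illustrative only.

__global__ void gridStrideCopy(const float *in, float *out, int n)
{
    int stride = gridDim.x * blockDim.x; // total threads in the launch
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
        out[i] = in[i]; // per-element work goes here
}

// gridStrideCopy<<<32, 32>>>(d_in, d_out, n);  // n may be far larger than 32 * 32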
6ce3fbddd1e32688e6468449153545bebce39b98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2016 Kristofer Bjrnson * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** @file ChebyshevSolver.cu * * @author Kristofer Bjrnson */ #include "ChebyshevSolver.h" #include "HALinkedList.h" #include "TBTKMacros.h" #include "GPUResourceManager.h" #include "Streams.h" #include <hip/hip_complex.h> #include <cusparse_v2.h> #include <math.h> using namespace std; namespace TBTK{ complex<double> minus_one(-1., 0.); complex<double> one(1., 0.); complex<double> two(2., 0.); complex<double> zero(0., 0.); complex<double> i(0., 1.); __global__ void extractCoefficients( hipDoubleComplex *jResult, int basisSize, hipDoubleComplex *coefficients, int currentCoefficient, int *coefficientMap, int numCoefficients ){ int to = blockIdx.x*blockDim.x + threadIdx.x; if(to < basisSize && coefficientMap[to] != -1) coefficients[coefficientMap[to]*numCoefficients + currentCoefficient] = jResult[to]; } void ChebyshevSolver::calculateCoefficientsGPU( Index to, Index from, complex<double> *coefficients, int numCoefficients, double broadening ){ vector<Index> toVector; toVector.push_back(to); calculateCoefficientsGPU(toVector, from, coefficients, numCoefficients, broadening); } void ChebyshevSolver::calculateCoefficientsGPU( vector<Index> &to, Index from, complex<double> *coefficients, int numCoefficients, double broadening ){ TBTKAssert( model != NULL, "ChebyshevSolver::calculateCoefficientsGPU()", "Model not set", "Use ChebyshevSolver::setModel() to set model." ); TBTKAssert( scaleFactor > 0, "ChebyshevSolver::calculateCoefficientsGPU()", "Scale factor must be larger than zero.", "Use ChebyshevSolver::setScaleFactor() to set scale factor." 
); TBTKAssert( numCoefficients > 0, "ChebyshevSolver::calculateCoefficients()", "numCoefficients has to be larger than zero.", "" ); // int device = allocateDeviceGPU(); int device = GPUResourceManager::getInstance().allocateDevice(); TBTKAssert( hipSetDevice(device) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA set device error for device " << device << ".", "" ); AmplitudeSet *amplitudeSet = model->getAmplitudeSet(); int fromBasisIndex = amplitudeSet->getBasisIndex(from); int *coefficientMap = new int[amplitudeSet->getBasisSize()]; for(int n = 0; n < amplitudeSet->getBasisSize(); n++) coefficientMap[n] = -1; for(int n = 0; n < to.size(); n++) coefficientMap[amplitudeSet->getBasisIndex(to.at(n))] = n; if(isTalkative){ Streams::out << "ChebyshevSolver::calculateCoefficientsGPU\n"; Streams::out << "\tFrom Index: " << fromBasisIndex << "\n"; Streams::out << "\tBasis size: " << amplitudeSet->getBasisSize() << "\n"; Streams::out << "\tUsing damping: "; if(damping != NULL) Streams::out << "Yes\n"; else Streams::out << "No\n"; } complex<double> *jIn1 = new complex<double>[amplitudeSet->getBasisSize()]; complex<double> *jIn2 = new complex<double>[amplitudeSet->getBasisSize()]; complex<double> *jTemp = NULL; for(int n = 0; n < amplitudeSet->getBasisSize(); n++){ jIn1[n] = 0.; jIn2[n] = 0.; } //Set up initial state (|j0>) jIn1[fromBasisIndex] = 1.; for(int n = 0; n < amplitudeSet->getBasisSize(); n++) if(coefficientMap[n] != -1) coefficients[coefficientMap[n]*numCoefficients] = jIn1[n]; const int numHoppingAmplitudes = amplitudeSet->getNumMatrixElements(); const int *cooHARowIndices_host = amplitudeSet->getCOORowIndices(); const int *cooHAColIndices_host = amplitudeSet->getCOOColIndices(); const complex<double> *cooHAValues_host = amplitudeSet->getCOOValues(); //Initialize GPU complex<double> *jIn1_device; complex<double> *jIn2_device; int *cooHARowIndices_device; int *csrHARowIndices_device; int *cooHAColIndices_device; complex<double> *cooHAValues_device; complex<double> *coefficients_device; int *coefficientMap_device; complex<double> *damping_device = NULL; int totalMemoryRequirement = amplitudeSet->getBasisSize()*sizeof(complex<double>); totalMemoryRequirement += amplitudeSet->getBasisSize()*sizeof(complex<double>); totalMemoryRequirement += numHoppingAmplitudes*sizeof(int); totalMemoryRequirement += amplitudeSet->getBasisSize()*sizeof(int); totalMemoryRequirement += numHoppingAmplitudes*sizeof(int); totalMemoryRequirement += numHoppingAmplitudes*sizeof(complex<double>); totalMemoryRequirement += to.size()*numCoefficients*sizeof(complex<double>); totalMemoryRequirement += amplitudeSet->getBasisSize()*sizeof(int); if(damping != NULL) totalMemoryRequirement += amplitudeSet->getBasisSize()*sizeof(complex<double>); if(isTalkative){ Streams::out << "\tCUDA memory requirement: "; if(totalMemoryRequirement < 1024) Streams::out << totalMemoryRequirement/1024 << "B\n"; else if(totalMemoryRequirement < 1024*1024) Streams::out << totalMemoryRequirement/1024 << "KB\n"; else Streams::out << totalMemoryRequirement/1024/1024 << "MB\n"; } TBTKAssert( hipMalloc( (void**)&jIn1_device, amplitudeSet->getBasisSize()*sizeof(complex<double>) ) == hipSuccess, "ChebyshevSOlver::calculateCoefficientsGPU()", "CUDA malloc error while allocating jIn1_device.", "" ); TBTKAssert( hipMalloc( (void**)&jIn2_device, amplitudeSet->getBasisSize()*sizeof(complex<double>) ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating jIn2_device.", "" ); TBTKAssert( 
hipMalloc( (void**)&cooHARowIndices_device, numHoppingAmplitudes*sizeof(int) ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating cooHARowIndices_device.", "" ); TBTKAssert( hipMalloc( (void**)&csrHARowIndices_device, (amplitudeSet->getBasisSize()+1)*sizeof(int) ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating csrHARowIndices_device.", "" ); TBTKAssert( hipMalloc( (void**)&cooHAColIndices_device, numHoppingAmplitudes*sizeof(int) ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating cooHAColIndices_device.", "" ); TBTKAssert( hipMalloc( (void**)&cooHAValues_device, numHoppingAmplitudes*sizeof(complex<double>) ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating cooHAValues_device.", "" ) TBTKAssert( hipMalloc( (void**)&coefficients_device, to.size()*numCoefficients*sizeof(complex<double>) ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating coefficients_device.", "" ); TBTKAssert( hipMalloc( (void**)&coefficientMap_device, amplitudeSet->getBasisSize()*sizeof(int) ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating coefficientMap_device.", "" ); if(damping != NULL){ TBTKAssert( hipMalloc( (void**)&damping_device, amplitudeSet->getBasisSize()*sizeof(complex<double>) ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating damping_device.", "" ); } TBTKAssert( hipMemcpy( jIn1_device, jIn1, amplitudeSet->getBasisSize()*sizeof(complex<double>), hipMemcpyHostToDevice ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying jIn1.", "" ); TBTKAssert( hipMemcpy( jIn2_device, jIn2, amplitudeSet->getBasisSize()*sizeof(complex<double>), hipMemcpyHostToDevice ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying jIn2.", "" ); TBTKAssert( hipMemcpy( cooHARowIndices_device, cooHARowIndices_host, numHoppingAmplitudes*sizeof(int), hipMemcpyHostToDevice ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying cooHARowIndices.", "" ); TBTKAssert( hipMemcpy( cooHAColIndices_device, cooHAColIndices_host, numHoppingAmplitudes*sizeof(int), hipMemcpyHostToDevice ) == hipSuccess, "ChebyshevSolver::calculateCoefficients()", "CUDA memcpy error while copying cooHAColIndices.", "" ) TBTKAssert( hipMemcpy( cooHAValues_device, cooHAValues_host, numHoppingAmplitudes*sizeof(complex<double>), hipMemcpyHostToDevice ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying cooHAValues.", "" ); TBTKAssert( hipMemcpy( coefficients_device, coefficients, to.size()*numCoefficients*sizeof(complex<double>), hipMemcpyHostToDevice ) == hipSuccess, "ChebyshevSolver::calculateCoefficients()", "CUDA memcpy error while copying coefficients.", "" ) TBTKAssert( hipMemcpy( coefficientMap_device, coefficientMap, amplitudeSet->getBasisSize()*sizeof(int), hipMemcpyHostToDevice ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying coefficientMap.", "" ); if(damping != NULL){ TBTKAssert( hipMemcpy( damping_device, damping, amplitudeSet->getBasisSize()*sizeof(complex<double>), hipMemcpyHostToDevice ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying damping.", "" ); } 
hipsparseHandle_t handle = NULL; TBTKAssert( hipsparseCreate(&handle) == HIPSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE create error.", "" ); hipsparseMatDescr_t descr = NULL; TBTKAssert( hipsparseCreateMatDescr(&descr) == HIPSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE create matrix descriptor error.", "" ); TBTKAssert( hipsparseSetMatType( descr, HIPSPARSE_MATRIX_TYPE_GENERAL ) == HIPSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE set matrix type error.", "" ); TBTKAssert( hipsparseSetMatIndexBase( descr, HIPSPARSE_INDEX_BASE_ZERO ) == HIPSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE set matrix index base error.", "" ); TBTKAssert( hipsparseXcoo2csr( handle, cooHARowIndices_device, numHoppingAmplitudes, amplitudeSet->getBasisSize(), csrHARowIndices_device, HIPSPARSE_INDEX_BASE_ZERO ) == HIPSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE COO to CSR error.", "" ); //Calculate |j1> int block_size = 1024; int num_blocks = amplitudeSet->getBasisSize()/block_size + (amplitudeSet->getBasisSize()%block_size == 0 ? 0:1); if(isTalkative){ Streams::out << "\tCUDA Block size: " << block_size << "\n"; Streams::out << "\tCUDA Num blocks: " << num_blocks << "\n"; } complex<double> multiplier = one/scaleFactor; TBTKAssert( hipsparseZcsrmv( handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, amplitudeSet->getBasisSize(), amplitudeSet->getBasisSize(), numHoppingAmplitudes, (hipDoubleComplex*)&multiplier, descr, (hipDoubleComplex*)cooHAValues_device, csrHARowIndices_device, cooHAColIndices_device, (hipDoubleComplex*)jIn1_device, (hipDoubleComplex*)&zero, (hipDoubleComplex*)jIn2_device ) == HIPSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficentsGPU()", "Matrix-vector multiplication error.", "" ); hipLaunchKernelGGL(( extractCoefficients) , dim3(num_blocks), dim3(block_size) , 0, 0, (hipDoubleComplex*)jIn2_device, amplitudeSet->getBasisSize(), (hipDoubleComplex*)coefficients_device, 1, coefficientMap_device, numCoefficients); jTemp = jIn2_device; jIn2_device = jIn1_device; jIn1_device = jTemp; if(isTalkative) Streams::out << "\tProgress (100 coefficients per dot): "; //Iteratively calculate |jn> and corresponding Chebyshev coefficients. for(int n = 2; n < numCoefficients; n++){ multiplier = two/scaleFactor; TBTKAssert( hipsparseZcsrmv( handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, amplitudeSet->getBasisSize(), amplitudeSet->getBasisSize(), numHoppingAmplitudes, (hipDoubleComplex*)&multiplier, descr, (hipDoubleComplex*)cooHAValues_device, csrHARowIndices_device, cooHAColIndices_device, (hipDoubleComplex*)jIn1_device, (hipDoubleComplex*)&minus_one, (hipDoubleComplex*)jIn2_device ) == HIPSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "Matrix-vector multiplication error.", "" ); hipLaunchKernelGGL(( extractCoefficients) , dim3(num_blocks), dim3(block_size) , 0, 0, (hipDoubleComplex*)jIn2_device, amplitudeSet->getBasisSize(), (hipDoubleComplex*)coefficients_device, n, coefficientMap_device, numCoefficients); jTemp = jIn2_device; jIn2_device = jIn1_device; jIn1_device = jTemp; if(isTalkative){ if(n%100 == 0) Streams::out << "." 
<< flush; if(n%1000 == 0) Streams::out << " " << flush; } } if(isTalkative) Streams::out << "\n"; TBTKAssert( hipMemcpy( coefficients, coefficients_device, to.size()*numCoefficients*sizeof(complex<double>), hipMemcpyDeviceToHost ) == hipSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying coefficients.", "" ); TBTKAssert( hipsparseDestroyMatDescr(descr) == HIPSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE destroy matrix descriptor error.", "" ); descr = NULL; TBTKAssert( hipsparseDestroy(handle) == HIPSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE destroy error.", "" ); handle = NULL; delete [] jIn1; delete [] jIn2; delete [] coefficientMap; hipFree(jIn1_device); hipFree(jIn2_device); hipFree(cooHARowIndices_device); hipFree(csrHARowIndices_device); hipFree(cooHAColIndices_device); hipFree(cooHAValues_device); hipFree(coefficients_device); hipFree(coefficientMap_device); if(damping != NULL) hipFree(damping_device); // freeDeviceGPU(device); GPUResourceManager::getInstance().freeDevice(device); //Lorentzian convolution double lambda = broadening*numCoefficients; for(int n = 0; n < numCoefficients; n++) for(int c = 0; c < to.size(); c++) coefficients[n + c*numCoefficients] = coefficients[n + c*numCoefficients]*sinh(lambda*(1 - n/(double)numCoefficients))/sinh(lambda); } __global__ void calculateGreensFunction( hipDoubleComplex *greensFunction, hipDoubleComplex *coefficients, hipDoubleComplex *lookupTable, int numCoefficients, int energyResolution ){ int e = blockIdx.x*blockDim.x + threadIdx.x; if(e < energyResolution) for(int n = 0; n < numCoefficients; n++) greensFunction[e] = cuCadd(greensFunction[e], cuCmul(lookupTable[n*energyResolution + e], coefficients[n])); // greensFunction[e] += lookupTable[n*energyResolution + e]*coefficients[n]; } void ChebyshevSolver::loadLookupTableGPU(){ if(isTalkative) Streams::out << "CheyshevSolver::loadLookupTableGPU\n"; TBTKAssert( generatingFunctionLookupTable != NULL, "ChebyshevSolver::loadLookupTableGPU()", "Lookup table has not been generated.", "Call ChebyshevSolver::generateLokupTable() to generate lookup table." 
); TBTKAssert( generatingFunctionLookupTable_device == NULL, "ChebyshevSolver::loadLookupTableGPU()", "Lookup table already loaded.", "" ); complex<double> *generatingFunctionLookupTable_host = new complex<double>[lookupTableNumCoefficients*lookupTableResolution]; for(int n = 0; n < lookupTableNumCoefficients; n++) for(int e = 0; e < lookupTableResolution; e++) generatingFunctionLookupTable_host[n*lookupTableResolution + e] = generatingFunctionLookupTable[n][e]; int memoryRequirement = lookupTableNumCoefficients*lookupTableResolution*sizeof(complex<double>); if(isTalkative){ Streams::out << "\tCUDA memory requirement: "; if(memoryRequirement < 1024) Streams::out << memoryRequirement << "B\n"; else if(memoryRequirement < 1024*1024) Streams::out << memoryRequirement/1024 << "KB\n"; else Streams::out << memoryRequirement/1024/1024 << "MB\n"; } // generatingFunctionLookupTable_device = new complex<double>**[numDevices]; generatingFunctionLookupTable_device = new complex<double>**[GPUResourceManager::getInstance().getNumDevices()]; // for(int n = 0; n < numDevices; n++){ for(int n = 0; n < GPUResourceManager::getInstance().getNumDevices(); n++){ TBTKAssert( hipSetDevice(n) == hipSuccess, "ChebyshevSolver::loadLookupTableGPU()", "CUDA set device error for device " << n << ".", "" ); TBTKAssert( hipMalloc( (void**)&generatingFunctionLookupTable_device[n], lookupTableNumCoefficients*lookupTableResolution*sizeof(complex<double>) ) == hipSuccess, "ChebyshevSolver::loadLookupTableGPU()", "CUDA malloc error while allocating generatingFunctionLookupTable_device.", "" ); TBTKAssert( hipMemcpy( generatingFunctionLookupTable_device[n], generatingFunctionLookupTable_host, lookupTableNumCoefficients*lookupTableResolution*sizeof(complex<double>), hipMemcpyHostToDevice ) == hipSuccess, "ChebyshevSolver::loadLookupTableGPU()", "CUDA memcpy error while copying generatingFunctionLookupTable_device.", "" ); } delete [] generatingFunctionLookupTable_host; } void ChebyshevSolver::destroyLookupTableGPU(){ if(isTalkative) Streams::out << "ChebyshevSolver::destroyLookupTableGPU\n"; TBTKAssert( generatingFunctionLookupTable_device != NULL, "ChebyshevSolver::destroyLookupTableGPU()", "No lookup table loaded onto GPU.\n", "" ); // for(int n = 0; n < numDevices; n++){ for(int n = 0; n < GPUResourceManager::getInstance().getNumDevices(); n++){ hipFree(generatingFunctionLookupTable_device[n]); } delete [] generatingFunctionLookupTable_device; generatingFunctionLookupTable_device = NULL; } void ChebyshevSolver::generateGreensFunctionGPU( complex<double> *greensFunction, complex<double> *coefficients, GreensFunctionType type ){ // int device = allocateDeviceGPU(); int device = GPUResourceManager::getInstance().allocateDevice(); TBTKAssert( hipSetDevice(device) == hipSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA set device error for device " << device << ".", "" ); if(isTalkative) Streams::out << "ChebyshevSolver::generateGreensFunctionGPU\n"; TBTKAssert( generatingFunctionLookupTable_device != NULL, "ChebyshevSolver::generateGreensFunctionGPU()", "No lookup table loaded onto GPU.", "" ); TBTKAssert( type == GreensFunctionType::Retarded, "ChebyshevSolver::generateGreensFunctionGPU()", "Only evaluation of retarded Green's function is implemented for GPU so far.", "Use CPU evaluation instead." 
); for(int e = 0; e < lookupTableResolution; e++) greensFunction[e] = 0.; complex<double> *greensFunction_device; complex<double> *coefficients_device; TBTKAssert( hipMalloc( (void**)&greensFunction_device, lookupTableResolution*sizeof(complex<double>) ) == hipSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA malloc error while allocating greensFunction_device.", "" ); TBTKAssert( hipMalloc( (void**)&coefficients_device, lookupTableNumCoefficients*sizeof(complex<double>) ) == hipSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA malloc error while allocating coefficients_device.", "" ); TBTKAssert( hipMemcpy( greensFunction_device, greensFunction, lookupTableResolution*sizeof(complex<double>), hipMemcpyHostToDevice ) == hipSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA memcpy error while copying greensFunction.", "" ); TBTKAssert( hipMemcpy( coefficients_device, coefficients, lookupTableNumCoefficients*sizeof(complex<double>), hipMemcpyHostToDevice ) == hipSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA memcpy error while copying coefficients.", "" ); int block_size = 1024; int num_blocks = lookupTableResolution/block_size + (lookupTableResolution%block_size == 0 ? 0:1); if(isTalkative){ Streams::out << "\tCUDA Block size: " << block_size << "\n"; Streams::out << "\tCUDA Num blocks: " << num_blocks << "\n"; } hipLaunchKernelGGL(( calculateGreensFunction) , dim3(num_blocks), dim3(block_size), 0, 0, (hipDoubleComplex*)greensFunction_device, (hipDoubleComplex*)coefficients_device, (hipDoubleComplex*)generatingFunctionLookupTable_device[device], lookupTableNumCoefficients, lookupTableResolution); TBTKAssert( hipMemcpy( greensFunction, greensFunction_device, lookupTableResolution*sizeof(complex<double>), hipMemcpyDeviceToHost ) == hipSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA memcpy error while copying greensFunction_device.", "" ); hipFree(greensFunction_device); hipFree(coefficients_device); // freeDeviceGPU(device); GPUResourceManager::getInstance().freeDevice(device); } /*void ChebyshevSolver::createDeviceTableGPU(){ hipGetDeviceCount(&numDevices); Streams::out << "Num GPU devices: " << numDevices << "\n"; if(numDevices > 0){ busyDevices = new bool[numDevices]; for(int n = 0; n < numDevices; n++) busyDevices[n] = false; } } void ChebyshevSolver::destroyDeviceTableGPU(){ if(numDevices > 0) delete [] busyDevices; } int ChebyshevSolver::allocateDeviceGPU(){ int device = 0; bool done = false; while(!done){ omp_set_lock(&busyDevicesLock); #pragma omp flush { for(int n = 0; n < numDevices; n++){ if(!busyDevices[n]){ device = n; busyDevices[n] = true; done = true; break; } } } #pragma omp flush omp_unset_lock(&busyDevicesLock); } return device; } void ChebyshevSolver::freeDeviceGPU(int device){ omp_set_lock(&busyDevicesLock); #pragma omp flush { busyDevices[device] = false; } #pragma omp flush omp_unset_lock(&busyDevicesLock); }*/ }; //End of namespace TBTK
6ce3fbddd1e32688e6468449153545bebce39b98.cu
/* Copyright 2016 Kristofer Björnson * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** @file ChebyshevSolver.cu * * @author Kristofer Björnson */ #include "ChebyshevSolver.h" #include "HALinkedList.h" #include "TBTKMacros.h" #include "GPUResourceManager.h" #include "Streams.h" #include <cuComplex.h> #include <cusparse_v2.h> #include <math.h> using namespace std; namespace TBTK{ complex<double> minus_one(-1., 0.); complex<double> one(1., 0.); complex<double> two(2., 0.); complex<double> zero(0., 0.); complex<double> i(0., 1.); __global__ void extractCoefficients( cuDoubleComplex *jResult, int basisSize, cuDoubleComplex *coefficients, int currentCoefficient, int *coefficientMap, int numCoefficients ){ int to = blockIdx.x*blockDim.x + threadIdx.x; if(to < basisSize && coefficientMap[to] != -1) coefficients[coefficientMap[to]*numCoefficients + currentCoefficient] = jResult[to]; } void ChebyshevSolver::calculateCoefficientsGPU( Index to, Index from, complex<double> *coefficients, int numCoefficients, double broadening ){ vector<Index> toVector; toVector.push_back(to); calculateCoefficientsGPU(toVector, from, coefficients, numCoefficients, broadening); } void ChebyshevSolver::calculateCoefficientsGPU( vector<Index> &to, Index from, complex<double> *coefficients, int numCoefficients, double broadening ){ TBTKAssert( model != NULL, "ChebyshevSolver::calculateCoefficientsGPU()", "Model not set", "Use ChebyshevSolver::setModel() to set model." ); TBTKAssert( scaleFactor > 0, "ChebyshevSolver::calculateCoefficientsGPU()", "Scale factor must be larger than zero.", "Use ChebyshevSolver::setScaleFactor() to set scale factor." 
); TBTKAssert( numCoefficients > 0, "ChebyshevSolver::calculateCoefficients()", "numCoefficients has to be larger than zero.", "" ); // int device = allocateDeviceGPU(); int device = GPUResourceManager::getInstance().allocateDevice(); TBTKAssert( cudaSetDevice(device) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA set device error for device " << device << ".", "" ); AmplitudeSet *amplitudeSet = model->getAmplitudeSet(); int fromBasisIndex = amplitudeSet->getBasisIndex(from); int *coefficientMap = new int[amplitudeSet->getBasisSize()]; for(int n = 0; n < amplitudeSet->getBasisSize(); n++) coefficientMap[n] = -1; for(int n = 0; n < to.size(); n++) coefficientMap[amplitudeSet->getBasisIndex(to.at(n))] = n; if(isTalkative){ Streams::out << "ChebyshevSolver::calculateCoefficientsGPU\n"; Streams::out << "\tFrom Index: " << fromBasisIndex << "\n"; Streams::out << "\tBasis size: " << amplitudeSet->getBasisSize() << "\n"; Streams::out << "\tUsing damping: "; if(damping != NULL) Streams::out << "Yes\n"; else Streams::out << "No\n"; } complex<double> *jIn1 = new complex<double>[amplitudeSet->getBasisSize()]; complex<double> *jIn2 = new complex<double>[amplitudeSet->getBasisSize()]; complex<double> *jTemp = NULL; for(int n = 0; n < amplitudeSet->getBasisSize(); n++){ jIn1[n] = 0.; jIn2[n] = 0.; } //Set up initial state (|j0>) jIn1[fromBasisIndex] = 1.; for(int n = 0; n < amplitudeSet->getBasisSize(); n++) if(coefficientMap[n] != -1) coefficients[coefficientMap[n]*numCoefficients] = jIn1[n]; const int numHoppingAmplitudes = amplitudeSet->getNumMatrixElements(); const int *cooHARowIndices_host = amplitudeSet->getCOORowIndices(); const int *cooHAColIndices_host = amplitudeSet->getCOOColIndices(); const complex<double> *cooHAValues_host = amplitudeSet->getCOOValues(); //Initialize GPU complex<double> *jIn1_device; complex<double> *jIn2_device; int *cooHARowIndices_device; int *csrHARowIndices_device; int *cooHAColIndices_device; complex<double> *cooHAValues_device; complex<double> *coefficients_device; int *coefficientMap_device; complex<double> *damping_device = NULL; int totalMemoryRequirement = amplitudeSet->getBasisSize()*sizeof(complex<double>); totalMemoryRequirement += amplitudeSet->getBasisSize()*sizeof(complex<double>); totalMemoryRequirement += numHoppingAmplitudes*sizeof(int); totalMemoryRequirement += amplitudeSet->getBasisSize()*sizeof(int); totalMemoryRequirement += numHoppingAmplitudes*sizeof(int); totalMemoryRequirement += numHoppingAmplitudes*sizeof(complex<double>); totalMemoryRequirement += to.size()*numCoefficients*sizeof(complex<double>); totalMemoryRequirement += amplitudeSet->getBasisSize()*sizeof(int); if(damping != NULL) totalMemoryRequirement += amplitudeSet->getBasisSize()*sizeof(complex<double>); if(isTalkative){ Streams::out << "\tCUDA memory requirement: "; if(totalMemoryRequirement < 1024) Streams::out << totalMemoryRequirement/1024 << "B\n"; else if(totalMemoryRequirement < 1024*1024) Streams::out << totalMemoryRequirement/1024 << "KB\n"; else Streams::out << totalMemoryRequirement/1024/1024 << "MB\n"; } TBTKAssert( cudaMalloc( (void**)&jIn1_device, amplitudeSet->getBasisSize()*sizeof(complex<double>) ) == cudaSuccess, "ChebyshevSOlver::calculateCoefficientsGPU()", "CUDA malloc error while allocating jIn1_device.", "" ); TBTKAssert( cudaMalloc( (void**)&jIn2_device, amplitudeSet->getBasisSize()*sizeof(complex<double>) ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating jIn2_device.", "" ); TBTKAssert( 
cudaMalloc( (void**)&cooHARowIndices_device, numHoppingAmplitudes*sizeof(int) ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating cooHARowIndices_device.", "" ); TBTKAssert( cudaMalloc( (void**)&csrHARowIndices_device, (amplitudeSet->getBasisSize()+1)*sizeof(int) ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating csrHARowIndices_device.", "" ); TBTKAssert( cudaMalloc( (void**)&cooHAColIndices_device, numHoppingAmplitudes*sizeof(int) ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating cooHAColIndices_device.", "" ); TBTKAssert( cudaMalloc( (void**)&cooHAValues_device, numHoppingAmplitudes*sizeof(complex<double>) ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating cooHAValues_device.", "" ) TBTKAssert( cudaMalloc( (void**)&coefficients_device, to.size()*numCoefficients*sizeof(complex<double>) ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating coefficients_device.", "" ); TBTKAssert( cudaMalloc( (void**)&coefficientMap_device, amplitudeSet->getBasisSize()*sizeof(int) ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating coefficientMap_device.", "" ); if(damping != NULL){ TBTKAssert( cudaMalloc( (void**)&damping_device, amplitudeSet->getBasisSize()*sizeof(complex<double>) ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA malloc error while allocating damping_device.", "" ); } TBTKAssert( cudaMemcpy( jIn1_device, jIn1, amplitudeSet->getBasisSize()*sizeof(complex<double>), cudaMemcpyHostToDevice ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying jIn1.", "" ); TBTKAssert( cudaMemcpy( jIn2_device, jIn2, amplitudeSet->getBasisSize()*sizeof(complex<double>), cudaMemcpyHostToDevice ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying jIn2.", "" ); TBTKAssert( cudaMemcpy( cooHARowIndices_device, cooHARowIndices_host, numHoppingAmplitudes*sizeof(int), cudaMemcpyHostToDevice ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying cooHARowIndices.", "" ); TBTKAssert( cudaMemcpy( cooHAColIndices_device, cooHAColIndices_host, numHoppingAmplitudes*sizeof(int), cudaMemcpyHostToDevice ) == cudaSuccess, "ChebyshevSolver::calculateCoefficients()", "CUDA memcpy error while copying cooHAColIndices.", "" ) TBTKAssert( cudaMemcpy( cooHAValues_device, cooHAValues_host, numHoppingAmplitudes*sizeof(complex<double>), cudaMemcpyHostToDevice ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying cooHAValues.", "" ); TBTKAssert( cudaMemcpy( coefficients_device, coefficients, to.size()*numCoefficients*sizeof(complex<double>), cudaMemcpyHostToDevice ) == cudaSuccess, "ChebyshevSolver::calculateCoefficients()", "CUDA memcpy error while copying coefficients.", "" ) TBTKAssert( cudaMemcpy( coefficientMap_device, coefficientMap, amplitudeSet->getBasisSize()*sizeof(int), cudaMemcpyHostToDevice ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying coefficientMap.", "" ); if(damping != NULL){ TBTKAssert( cudaMemcpy( damping_device, damping, amplitudeSet->getBasisSize()*sizeof(complex<double>), cudaMemcpyHostToDevice ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while 
copying damping.", "" ); } cusparseHandle_t handle = NULL; TBTKAssert( cusparseCreate(&handle) == CUSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE create error.", "" ); cusparseMatDescr_t descr = NULL; TBTKAssert( cusparseCreateMatDescr(&descr) == CUSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE create matrix descriptor error.", "" ); TBTKAssert( cusparseSetMatType( descr, CUSPARSE_MATRIX_TYPE_GENERAL ) == CUSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE set matrix type error.", "" ); TBTKAssert( cusparseSetMatIndexBase( descr, CUSPARSE_INDEX_BASE_ZERO ) == CUSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE set matrix index base error.", "" ); TBTKAssert( cusparseXcoo2csr( handle, cooHARowIndices_device, numHoppingAmplitudes, amplitudeSet->getBasisSize(), csrHARowIndices_device, CUSPARSE_INDEX_BASE_ZERO ) == CUSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE COO to CSR error.", "" ); //Calculate |j1> int block_size = 1024; int num_blocks = amplitudeSet->getBasisSize()/block_size + (amplitudeSet->getBasisSize()%block_size == 0 ? 0:1); if(isTalkative){ Streams::out << "\tCUDA Block size: " << block_size << "\n"; Streams::out << "\tCUDA Num blocks: " << num_blocks << "\n"; } complex<double> multiplier = one/scaleFactor; TBTKAssert( cusparseZcsrmv( handle, CUSPARSE_OPERATION_NON_TRANSPOSE, amplitudeSet->getBasisSize(), amplitudeSet->getBasisSize(), numHoppingAmplitudes, (cuDoubleComplex*)&multiplier, descr, (cuDoubleComplex*)cooHAValues_device, csrHARowIndices_device, cooHAColIndices_device, (cuDoubleComplex*)jIn1_device, (cuDoubleComplex*)&zero, (cuDoubleComplex*)jIn2_device ) == CUSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficentsGPU()", "Matrix-vector multiplication error.", "" ); extractCoefficients <<< num_blocks, block_size >>> ((cuDoubleComplex*)jIn2_device, amplitudeSet->getBasisSize(), (cuDoubleComplex*)coefficients_device, 1, coefficientMap_device, numCoefficients); jTemp = jIn2_device; jIn2_device = jIn1_device; jIn1_device = jTemp; if(isTalkative) Streams::out << "\tProgress (100 coefficients per dot): "; //Iteratively calculate |jn> and corresponding Chebyshev coefficients. for(int n = 2; n < numCoefficients; n++){ multiplier = two/scaleFactor; TBTKAssert( cusparseZcsrmv( handle, CUSPARSE_OPERATION_NON_TRANSPOSE, amplitudeSet->getBasisSize(), amplitudeSet->getBasisSize(), numHoppingAmplitudes, (cuDoubleComplex*)&multiplier, descr, (cuDoubleComplex*)cooHAValues_device, csrHARowIndices_device, cooHAColIndices_device, (cuDoubleComplex*)jIn1_device, (cuDoubleComplex*)&minus_one, (cuDoubleComplex*)jIn2_device ) == CUSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "Matrix-vector multiplication error.", "" ); extractCoefficients <<< num_blocks, block_size >>> ((cuDoubleComplex*)jIn2_device, amplitudeSet->getBasisSize(), (cuDoubleComplex*)coefficients_device, n, coefficientMap_device, numCoefficients); jTemp = jIn2_device; jIn2_device = jIn1_device; jIn1_device = jTemp; if(isTalkative){ if(n%100 == 0) Streams::out << "." 
<< flush; if(n%1000 == 0) Streams::out << " " << flush; } } if(isTalkative) Streams::out << "\n"; TBTKAssert( cudaMemcpy( coefficients, coefficients_device, to.size()*numCoefficients*sizeof(complex<double>), cudaMemcpyDeviceToHost ) == cudaSuccess, "ChebyshevSolver::calculateCoefficientsGPU()", "CUDA memcpy error while copying coefficients.", "" ); TBTKAssert( cusparseDestroyMatDescr(descr) == CUSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE destroy matrix descriptor error.", "" ); descr = NULL; TBTKAssert( cusparseDestroy(handle) == CUSPARSE_STATUS_SUCCESS, "ChebyshevSolver::calculateCoefficientsGPU()", "cuSPARSE destroy error.", "" ); handle = NULL; delete [] jIn1; delete [] jIn2; delete [] coefficientMap; cudaFree(jIn1_device); cudaFree(jIn2_device); cudaFree(cooHARowIndices_device); cudaFree(csrHARowIndices_device); cudaFree(cooHAColIndices_device); cudaFree(cooHAValues_device); cudaFree(coefficients_device); cudaFree(coefficientMap_device); if(damping != NULL) cudaFree(damping_device); // freeDeviceGPU(device); GPUResourceManager::getInstance().freeDevice(device); //Lorentzian convolution double lambda = broadening*numCoefficients; for(int n = 0; n < numCoefficients; n++) for(int c = 0; c < to.size(); c++) coefficients[n + c*numCoefficients] = coefficients[n + c*numCoefficients]*sinh(lambda*(1 - n/(double)numCoefficients))/sinh(lambda); } __global__ void calculateGreensFunction( cuDoubleComplex *greensFunction, cuDoubleComplex *coefficients, cuDoubleComplex *lookupTable, int numCoefficients, int energyResolution ){ int e = blockIdx.x*blockDim.x + threadIdx.x; if(e < energyResolution) for(int n = 0; n < numCoefficients; n++) greensFunction[e] = cuCadd(greensFunction[e], cuCmul(lookupTable[n*energyResolution + e], coefficients[n])); // greensFunction[e] += lookupTable[n*energyResolution + e]*coefficients[n]; } void ChebyshevSolver::loadLookupTableGPU(){ if(isTalkative) Streams::out << "CheyshevSolver::loadLookupTableGPU\n"; TBTKAssert( generatingFunctionLookupTable != NULL, "ChebyshevSolver::loadLookupTableGPU()", "Lookup table has not been generated.", "Call ChebyshevSolver::generateLokupTable() to generate lookup table." 
); TBTKAssert( generatingFunctionLookupTable_device == NULL, "ChebyshevSolver::loadLookupTableGPU()", "Lookup table already loaded.", "" ); complex<double> *generatingFunctionLookupTable_host = new complex<double>[lookupTableNumCoefficients*lookupTableResolution]; for(int n = 0; n < lookupTableNumCoefficients; n++) for(int e = 0; e < lookupTableResolution; e++) generatingFunctionLookupTable_host[n*lookupTableResolution + e] = generatingFunctionLookupTable[n][e]; int memoryRequirement = lookupTableNumCoefficients*lookupTableResolution*sizeof(complex<double>); if(isTalkative){ Streams::out << "\tCUDA memory requirement: "; if(memoryRequirement < 1024) Streams::out << memoryRequirement << "B\n"; else if(memoryRequirement < 1024*1024) Streams::out << memoryRequirement/1024 << "KB\n"; else Streams::out << memoryRequirement/1024/1024 << "MB\n"; } // generatingFunctionLookupTable_device = new complex<double>**[numDevices]; generatingFunctionLookupTable_device = new complex<double>**[GPUResourceManager::getInstance().getNumDevices()]; // for(int n = 0; n < numDevices; n++){ for(int n = 0; n < GPUResourceManager::getInstance().getNumDevices(); n++){ TBTKAssert( cudaSetDevice(n) == cudaSuccess, "ChebyshevSolver::loadLookupTableGPU()", "CUDA set device error for device " << n << ".", "" ); TBTKAssert( cudaMalloc( (void**)&generatingFunctionLookupTable_device[n], lookupTableNumCoefficients*lookupTableResolution*sizeof(complex<double>) ) == cudaSuccess, "ChebyshevSolver::loadLookupTableGPU()", "CUDA malloc error while allocating generatingFunctionLookupTable_device.", "" ); TBTKAssert( cudaMemcpy( generatingFunctionLookupTable_device[n], generatingFunctionLookupTable_host, lookupTableNumCoefficients*lookupTableResolution*sizeof(complex<double>), cudaMemcpyHostToDevice ) == cudaSuccess, "ChebyshevSolver::loadLookupTableGPU()", "CUDA memcpy error while copying generatingFunctionLookupTable_device.", "" ); } delete [] generatingFunctionLookupTable_host; } void ChebyshevSolver::destroyLookupTableGPU(){ if(isTalkative) Streams::out << "ChebyshevSolver::destroyLookupTableGPU\n"; TBTKAssert( generatingFunctionLookupTable_device != NULL, "ChebyshevSolver::destroyLookupTableGPU()", "No lookup table loaded onto GPU.\n", "" ); // for(int n = 0; n < numDevices; n++){ for(int n = 0; n < GPUResourceManager::getInstance().getNumDevices(); n++){ cudaFree(generatingFunctionLookupTable_device[n]); } delete [] generatingFunctionLookupTable_device; generatingFunctionLookupTable_device = NULL; } void ChebyshevSolver::generateGreensFunctionGPU( complex<double> *greensFunction, complex<double> *coefficients, GreensFunctionType type ){ // int device = allocateDeviceGPU(); int device = GPUResourceManager::getInstance().allocateDevice(); TBTKAssert( cudaSetDevice(device) == cudaSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA set device error for device " << device << ".", "" ); if(isTalkative) Streams::out << "ChebyshevSolver::generateGreensFunctionGPU\n"; TBTKAssert( generatingFunctionLookupTable_device != NULL, "ChebyshevSolver::generateGreensFunctionGPU()", "No lookup table loaded onto GPU.", "" ); TBTKAssert( type == GreensFunctionType::Retarded, "ChebyshevSolver::generateGreensFunctionGPU()", "Only evaluation of retarded Green's function is implemented for GPU so far.", "Use CPU evaluation instead." 
); for(int e = 0; e < lookupTableResolution; e++) greensFunction[e] = 0.; complex<double> *greensFunction_device; complex<double> *coefficients_device; TBTKAssert( cudaMalloc( (void**)&greensFunction_device, lookupTableResolution*sizeof(complex<double>) ) == cudaSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA malloc error while allocating greensFunction_device.", "" ); TBTKAssert( cudaMalloc( (void**)&coefficients_device, lookupTableNumCoefficients*sizeof(complex<double>) ) == cudaSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA malloc error while allocating coefficients_device.", "" ); TBTKAssert( cudaMemcpy( greensFunction_device, greensFunction, lookupTableResolution*sizeof(complex<double>), cudaMemcpyHostToDevice ) == cudaSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA memcpy error while copying greensFunction.", "" ); TBTKAssert( cudaMemcpy( coefficients_device, coefficients, lookupTableNumCoefficients*sizeof(complex<double>), cudaMemcpyHostToDevice ) == cudaSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA memcpy error while copying coefficients.", "" ); int block_size = 1024; int num_blocks = lookupTableResolution/block_size + (lookupTableResolution%block_size == 0 ? 0:1); if(isTalkative){ Streams::out << "\tCUDA Block size: " << block_size << "\n"; Streams::out << "\tCUDA Num blocks: " << num_blocks << "\n"; } calculateGreensFunction <<< num_blocks, block_size>>> ((cuDoubleComplex*)greensFunction_device, (cuDoubleComplex*)coefficients_device, (cuDoubleComplex*)generatingFunctionLookupTable_device[device], lookupTableNumCoefficients, lookupTableResolution); TBTKAssert( cudaMemcpy( greensFunction, greensFunction_device, lookupTableResolution*sizeof(complex<double>), cudaMemcpyDeviceToHost ) == cudaSuccess, "ChebyshevSolver::generateGreensFunctionGPU()", "CUDA memcpy error while copying greensFunction_device.", "" ); cudaFree(greensFunction_device); cudaFree(coefficients_device); // freeDeviceGPU(device); GPUResourceManager::getInstance().freeDevice(device); } /*void ChebyshevSolver::createDeviceTableGPU(){ cudaGetDeviceCount(&numDevices); Streams::out << "Num GPU devices: " << numDevices << "\n"; if(numDevices > 0){ busyDevices = new bool[numDevices]; for(int n = 0; n < numDevices; n++) busyDevices[n] = false; } } void ChebyshevSolver::destroyDeviceTableGPU(){ if(numDevices > 0) delete [] busyDevices; } int ChebyshevSolver::allocateDeviceGPU(){ int device = 0; bool done = false; while(!done){ omp_set_lock(&busyDevicesLock); #pragma omp flush { for(int n = 0; n < numDevices; n++){ if(!busyDevices[n]){ device = n; busyDevices[n] = true; done = true; break; } } } #pragma omp flush omp_unset_lock(&busyDevicesLock); } return device; } void ChebyshevSolver::freeDeviceGPU(int device){ omp_set_lock(&busyDevicesLock); #pragma omp flush { busyDevices[device] = false; } #pragma omp flush omp_unset_lock(&busyDevicesLock); }*/ }; //End of namespace TBTK
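The coefficient loop in both versions above evaluates the Chebyshev recursion |j_n> = (2H/scaleFactor)|j_{n-1}> - |j_{n-2}> by calling csrmv with alpha = 2/scaleFactor and beta = -1 and then swapping the two state buffers, extracting one set of coefficients per iteration. The host-side sketch below restates that recursion for orientation; the dense applyH helper is an illustrative stand-in for the sparse matrix-vector product and is not part of the original solver.

#include <complex>
#include <vector>

using cplx = std::complex<double>;

// Illustrative dense stand-in for csrmv: y = alpha * H * x + beta * y.
static void applyH(const std::vector<cplx> &H, const std::vector<cplx> &x,
                   std::vector<cplx> &y, cplx alpha, cplx beta, int dim)
{
    for (int r = 0; r < dim; r++) {
        cplx acc = 0.;
        for (int c = 0; c < dim; c++)
            acc += H[r * dim + c] * x[c];
        y[r] = alpha * acc + beta * y[r];
    }
}

// j1 holds |j_{n-1}>, j2 holds |j_{n-2}> and receives |j_n>, mirroring the
// alpha = 2/scaleFactor, beta = -1 csrmv calls and the buffer swap above.
void chebyshevRecursion(const std::vector<cplx> &H, std::vector<cplx> &j1,
                        std::vector<cplx> &j2,
                        std::vector<std::vector<cplx>> &coefficients,
                        int numCoefficients, double scaleFactor, int dim)
{
    for (int n = 2; n < numCoefficients; n++) {
        applyH(H, j1, j2, cplx(2. / scaleFactor, 0.), cplx(-1., 0.), dim);
        for (int k = 0; k < dim; k++)
            coefficients[n][k] = j2[k]; // extractCoefficients does this on the device
        std::swap(j1, j2);
    }
}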
b77fa9ec4cd28a4550b94a059f576ab429a2cbca.hip
// !!! This is a file automatically generated by hipify!!!
#include "RTFEM/GPU/GPUMMMultiplication.cuh"

#include <rocblas.h>
#include <hip/hip_runtime.h>

#include <assert.h>
#include <stdexcept>

namespace rtfem {

template <class T>
GPUMMMultiplication<T>::GPUMMMultiplication(){}

template <class T>
GPUMMMultiplication<T>::~GPUMMMultiplication(){}

template <class T>
void GPUMMMultiplication<T>::Solve(const T* A, const T* B, T* C,
                                   T alpha, T beta,
                                   int m, int k, int n,
                                   MatrixOperation A_operation,
                                   MatrixOperation B_operation){
    hipError_t cuda_error;
    hipblasStatus_t status;
    hipblasHandle_t handle;

    auto GetOperation = [](const MatrixOperation& operation){
        switch(operation){
            case MatrixOperation::None:
                return HIPBLAS_OP_N;
            case MatrixOperation::Transpose:
                return HIPBLAS_OP_T;
            default:
                return HIPBLAS_OP_N;
        }
    };

    T *d_A = nullptr;
    T *d_B = nullptr;
    T *d_C = nullptr;

    cuda_error = hipMalloc((void **) &d_A, m * k * sizeof(*A));
    assert(hipSuccess == cuda_error);
    cuda_error = hipMalloc((void **) &d_B, k * n * sizeof(*B));
    assert(hipSuccess == cuda_error);
    cuda_error = hipMalloc((void **) &d_C, m * n * sizeof(*C));
    assert(hipSuccess == cuda_error);

    status = hipblasCreate(&handle);
    assert(HIPBLAS_STATUS_SUCCESS == status);

    status = hipblasSetMatrix(m, k, sizeof(*A), A, m, d_A, m);
    assert(HIPBLAS_STATUS_SUCCESS == status);
    status = hipblasSetMatrix(k, n, sizeof(*B), B, k, d_B, k);
    assert(HIPBLAS_STATUS_SUCCESS == status);
    status = hipblasSetMatrix(m, n, sizeof(*C), C, m, d_C, m);
    assert(HIPBLAS_STATUS_SUCCESS == status);

    status = hipblasDgemm(handle,
                          GetOperation(A_operation),
                          GetOperation(B_operation),
                          m, n, k,
                          &alpha,
                          d_A, m,
                          d_B, k,
                          &beta,
                          d_C, m);
    assert(HIPBLAS_STATUS_SUCCESS == status);

    status = hipblasGetMatrix(m, n, sizeof(*C), d_C, m, C, m);
    assert(HIPBLAS_STATUS_SUCCESS == status);

    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    hipblasDestroy(handle);
}

template<>
void GPUMMMultiplication<float>::Solve(const float *A, const float *B, float *C,
                                       float alpha, float beta,
                                       int m, int k, int n,
                                       MatrixOperation A_operation,
                                       MatrixOperation B_operation) {
    throw std::invalid_argument(
        "GPUMMMultiplication<float>::Solve not implemented");
}

template class GPUMMMultiplication<double>;
template class GPUMMMultiplication<float>;

}
b77fa9ec4cd28a4550b94a059f576ab429a2cbca.cu
#include "RTFEM/GPU/GPUMMMultiplication.cuh"

#include <cublas_v2.h>
#include <cuda_runtime.h>

#include <assert.h>
#include <stdexcept>

namespace rtfem {

template <class T>
GPUMMMultiplication<T>::GPUMMMultiplication(){}

template <class T>
GPUMMMultiplication<T>::~GPUMMMultiplication(){}

template <class T>
void GPUMMMultiplication<T>::Solve(const T* A, const T* B, T* C,
                                   T alpha, T beta,
                                   int m, int k, int n,
                                   MatrixOperation A_operation,
                                   MatrixOperation B_operation){
    cudaError_t cuda_error;
    cublasStatus_t status;
    cublasHandle_t handle;

    auto GetOperation = [](const MatrixOperation& operation){
        switch(operation){
            case MatrixOperation::None:
                return CUBLAS_OP_N;
            case MatrixOperation::Transpose:
                return CUBLAS_OP_T;
            default:
                return CUBLAS_OP_N;
        }
    };

    T *d_A = nullptr;
    T *d_B = nullptr;
    T *d_C = nullptr;

    cuda_error = cudaMalloc((void **) &d_A, m * k * sizeof(*A));
    assert(cudaSuccess == cuda_error);
    cuda_error = cudaMalloc((void **) &d_B, k * n * sizeof(*B));
    assert(cudaSuccess == cuda_error);
    cuda_error = cudaMalloc((void **) &d_C, m * n * sizeof(*C));
    assert(cudaSuccess == cuda_error);

    status = cublasCreate(&handle);
    assert(CUBLAS_STATUS_SUCCESS == status);

    status = cublasSetMatrix(m, k, sizeof(*A), A, m, d_A, m);
    assert(CUBLAS_STATUS_SUCCESS == status);
    status = cublasSetMatrix(k, n, sizeof(*B), B, k, d_B, k);
    assert(CUBLAS_STATUS_SUCCESS == status);
    status = cublasSetMatrix(m, n, sizeof(*C), C, m, d_C, m);
    assert(CUBLAS_STATUS_SUCCESS == status);

    status = cublasDgemm(handle,
                         GetOperation(A_operation),
                         GetOperation(B_operation),
                         m, n, k,
                         &alpha,
                         d_A, m,
                         d_B, k,
                         &beta,
                         d_C, m);
    assert(CUBLAS_STATUS_SUCCESS == status);

    status = cublasGetMatrix(m, n, sizeof(*C), d_C, m, C, m);
    assert(CUBLAS_STATUS_SUCCESS == status);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cublasDestroy(handle);
}

template<>
void GPUMMMultiplication<float>::Solve(const float *A, const float *B, float *C,
                                       float alpha, float beta,
                                       int m, int k, int n,
                                       MatrixOperation A_operation,
                                       MatrixOperation B_operation) {
    throw std::invalid_argument(
        "GPUMMMultiplication<float>::Solve not implemented");
}

template class GPUMMMultiplication<double>;
template class GPUMMMultiplication<float>;

}
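The float specialization in both files simply throws. If a single-precision path were wanted, it could mirror the double path with cublasSgemm; the sketch below is written under that assumption and is not part of the original source. cublasOperation_t is used directly instead of the project's MatrixOperation enum to keep the sketch self-contained, and asserts are used for status checks to match the style of the original.

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cassert>

// Hypothetical single-precision counterpart of the double Solve path above.
void SolveFloatSketch(const float *A, const float *B, float *C,
                      float alpha, float beta, int m, int k, int n,
                      cublasOperation_t opA, cublasOperation_t opB)
{
    float *d_A, *d_B, *d_C;
    assert(cudaMalloc((void **)&d_A, m * k * sizeof(float)) == cudaSuccess);
    assert(cudaMalloc((void **)&d_B, k * n * sizeof(float)) == cudaSuccess);
    assert(cudaMalloc((void **)&d_C, m * n * sizeof(float)) == cudaSuccess);

    cublasHandle_t handle;
    assert(cublasCreate(&handle) == CUBLAS_STATUS_SUCCESS);

    // Upload the column-major host matrices, same leading dimensions as the double path.
    assert(cublasSetMatrix(m, k, sizeof(float), A, m, d_A, m) == CUBLAS_STATUS_SUCCESS);
    assert(cublasSetMatrix(k, n, sizeof(float), B, k, d_B, k) == CUBLAS_STATUS_SUCCESS);
    assert(cublasSetMatrix(m, n, sizeof(float), C, m, d_C, m) == CUBLAS_STATUS_SUCCESS);

    // Single-precision GEMM: C = alpha * op(A) * op(B) + beta * C (column-major).
    assert(cublasSgemm(handle, opA, opB, m, n, k,
                       &alpha, d_A, m, d_B, k, &beta, d_C, m) == CUBLAS_STATUS_SUCCESS);

    assert(cublasGetMatrix(m, n, sizeof(float), d_C, m, C, m) == CUBLAS_STATUS_SUCCESS);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cublasDestroy(handle);
}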
bf4ac96a24c1bb0386ef08a93fdc02dbaf93b7e6.hip
// !!! This is a file automatically generated by hipify!!! /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include <array/ConstantDataBuffer.h> #include <array/DataTypeUtils.h> #include <array/ShapeDescriptor.h> #include <hip/hip_runtime.h> #include <exceptions/cuda_exception.h> #include <exceptions/datatype_exception.h> #include <helpers/ConstantShapeHelper.h> #include <helpers/CudaLaunchHelper.h> #include <helpers/DebugHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeBuilders.h> #include <legacy/NativeOpExecutioner.h> #include <loops/broadcasting.h> #include <loops/broadcasting_bool.h> #include <loops/broadcasting_int.h> #include <loops/indexreduce.h> #include <loops/pairwise_bool.h> #include <loops/pairwise_int.h> #include <loops/pairwise_transform.h> #include <loops/random.h> #include <loops/reduce3.h> #include <loops/reduce_bool.h> #include <loops/reduce_float.h> #include <loops/reduce_long.h> #include <loops/reduce_same.h> #include <loops/scalar.h> #include <loops/scalar_bool.h> #include <loops/scalar_int.h> #include <loops/special_kernels.h> #include <loops/summarystatsreduce.h> #include <loops/transform_any.h> #include <loops/transform_bool.h> #include <loops/transform_float.h> #include <loops/transform_same.h> #include <loops/transform_strict.h> #include <system/op_boilerplate.h> using namespace sd; /** * This is utility kernel, that updates given special buffer with proper values in device memory */ extern "C" SD_KERNEL void prepareShapeBuffer(int* dimension, int* maxDimension, sd::LongType* specialPointer, int rows, sd::DataType dataType) { sd::LongType tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid > 0) return; dimension[0] = 0; maxDimension[0] = 1; specialPointer[0] = 2; specialPointer[1] = rows; specialPointer[2] = 1; specialPointer[3] = 1; specialPointer[4] = 1; specialPointer[5] = 0; specialPointer[6] = 1; specialPointer[7] = 99; ArrayOptions::setDataType(specialPointer, dataType); // printf("special[0]: [%lld]\n", (long long) specialPointer[0]); // shape::printShapeInfoLinear("prepareShapeBuffer", specialPointer); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execPairwiseTransform(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if 
(shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (xType != zType && yType != zType) throw std::runtime_error( "NativeOpExecutioner::execPairwiseTransform requires Z operand to have either X or Y type"); if (lc == nullptr) throw std::runtime_error("NativeOpExecutioner::execPairwiseTransform: launch context cannot be nullptr !"); if (stream == nullptr) throw std::runtime_error("NativeOpExecutioner::execPairwiseTransform: CUDA stream cannot be nullptr !"); dim3 launchDims(256, 1024, 8192); #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR( xType, yType, zType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), SD_COMMON_TYPES, SD_COMMON_TYPES) #else BUILD_SINGLE_SELECTOR_THRICE( xType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), SD_COMMON_TYPES) #endif // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execPairwiseTransform failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execPairwiseBoolTransform(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isB(zType)) throw sd::datatype_exception::build("NativeOpExecutioner::execPairwiseBoolTransform wrong Z operand data type", sd::DataType::BOOL, zType); if (yType != xType) throw sd::datatype_exception::build( "NativeOpExecutioner::execPairwiseBoolTransform both operands must have same data type", xType, yType); dim3 launchDims(256, 1024, 16384); BUILD_DOUBLE_SELECTOR( xType, zType, functions::pairwise_transforms::PairWiseBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), SD_COMMON_TYPES, SD_BOOL_TYPES) // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execPairwiseBoolTransform failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execPairwiseIntTransform(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isZ(zType)) throw sd::datatype_exception::build("NativeOpExecutioner::execPairwiseIntTransform wrong Z operand data type", 
sd::DataType::BOOL, zType); if (yType != xType || zType != xType) throw sd::datatype_exception::build( "NativeOpExecutioner::execPairwiseIntTransform both operands must have same data type", xType, yType); dim3 launchDims(256, 1024, 16384); BUILD_SINGLE_SELECTOR( xType, functions::pairwise_transforms::PairWiseIntTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), SD_INTEGER_TYPES) // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execPairwiseIntTransform failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execSummaryStatsScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, bool biasCorrected) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); dim3 launchDims = dim3(256, SD_CUDA_BLOCK_SIZE, 1024); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); BUILD_DOUBLE_SELECTOR( xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execSummaryStatsScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execBroadcastBool(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isB(zType)) throw std::runtime_error("NativeOpExecutioner::execBroadcastBool requires Z operand to have BOOL type"); if (yType != xType) throw std::runtime_error("NativeOpExecutioner::execBroadcastBool requires both X & Y operands to have same type"); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F3B opNum:[%i]\n", opNum); dim3 launchDims(256, 256, 1024); BUILD_DOUBLE_SELECTOR( xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_BOOL_TYPES) // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcastBool failed", res); } //////////////////////////////////////////////////////////////////////// void 
NativeOpExecutioner::execBroadcastBool(sd::LaunchContext* lc, const int opNum, const void* hX, const sd::LongType* hXShapeInfo, const void* dX, const sd::LongType* dXShapeInfo, const void* hY, const sd::LongType* hYShapeInfo, const void* dY, const sd::LongType* dYShapeInfo, void* hZ, const sd::LongType* hZShapeInfo, void* dZ, const sd::LongType* dZShapeInfo, void* extraParams) { if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); dim3 launchDims; launchDims.y = SD_MAX_NUM_THREADS / 4; // threadsPerBlock launchDims.x = (shape::length(hZShapeInfo) + launchDims.y - 1) / launchDims.y; // blocksPerGrid launchDims.z = 1024; // shared memory BUILD_DOUBLE_SELECTOR( xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcastBool failed", res); } void NativeOpExecutioner::execInverseBroadcastBool( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isB(zType)) throw std::runtime_error("NativeOpExecutioner::execBroadcastBool requires Z operand to have BOOL type"); if (yType != xType) throw std::runtime_error("NativeOpExecutioner::execBroadcastBool requires both X & Y operands to have same type"); dim3 launchDims(256, 256, 1024); BUILD_DOUBLE_SELECTOR( xType, zType, functions::broadcast::BroadcastBool, ::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_BOOL_TYPES) // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execInverseBroadcastBool failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execBroadcastInt( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto 
zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isZ(zType)) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires Z operand to have INT type"); if (yType != xType || zType != xType) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires both X & Y operands to have same type"); dim3 launchDims(256, 256, 1024); BUILD_SINGLE_SELECTOR( xType, functions::broadcast::BroadcastInt, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_INTEGER_TYPES) // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcastBool failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execBroadcastInt(sd::LaunchContext* lc, const int opNum, const void* hX, const sd::LongType* hXShapeInfo, const void* dX, const sd::LongType* dXShapeInfo, const void* hY, const sd::LongType* hYShapeInfo, const void* dY, const sd::LongType* dYShapeInfo, void* hZ, const sd::LongType* hZShapeInfo, void* dZ, const sd::LongType* dZShapeInfo) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isZ(zType)) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires Z operand to have INT type"); if (yType != xType || zType != xType) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires both X & Y operands to have same type"); dim3 launchDims; launchDims.y = SD_MAX_NUM_THREADS / 4; // threadsPerBlock launchDims.x = (shape::length(hZShapeInfo) + launchDims.y - 1) / launchDims.y; // blocksPerGrid launchDims.z = 1024; // shared memory BUILD_SINGLE_SELECTOR(xType, functions::broadcast::BroadcastInt, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo), SD_INTEGER_TYPES) // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcastBool failed", res); } void NativeOpExecutioner::execInverseBroadcastInt( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isZ(zType)) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires Z operand to have INT type"); if (yType != xType || zType != xType) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires both X & Y operands to have same type"); if 
(sd::Environment::getInstance().isDebugAndVerbose()) printf("F3BI opNum:[%i]\n", opNum); dim3 launchDims(256, 256, 1024); BUILD_SINGLE_SELECTOR( xType, functions::broadcast::BroadcastInt, ::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_INTEGER_TYPES) // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execInverseBroadcastInt failed", res); } //////////////////////////////////////////////////////////////////////// /** * * @param opNum * @param dX * @param dXShapeInfo * @param dY * @param dYShapeInfo * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void NativeOpExecutioner::execBroadcast(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; dim3 launchDims(256, 256, 1024); #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR( xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_COMMON_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE( xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES); #endif // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcast failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execBroadcast(sd::LaunchContext* lc, const int opNum, const void* hX, const sd::LongType* hXShapeInfo, const void* dX, const sd::LongType* dXShapeInfo, const void* hY, const sd::LongType* hYShapeInfo, const void* dY, const sd::LongType* dYShapeInfo, void* hZ, const sd::LongType* hZShapeInfo, void* dZ, const sd::LongType* dZShapeInfo) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; dim3 launchDims; launchDims.y = SD_MAX_NUM_THREADS / 4; // threadsPerBlock launchDims.x = (shape::length(hZShapeInfo) + launchDims.y - 1) / launchDims.y; // blocksPerGrid launchDims.z = 1024; // shared memory #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo), SD_COMMON_TYPES, 
SD_COMMON_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE( xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo), SD_COMMON_TYPES); #endif // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcast failed", res); } void NativeOpExecutioner::execInverseBroadcast( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; dim3 launchDims(256, 256, 1024); #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR( xType, yType, zType, functions::broadcast::Broadcast, ::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_COMMON_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE( xType, functions::broadcast::Broadcast, ::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES); #endif // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execInverseBroadcast failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceSame(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("SF7 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != xType) throw datatype_exception::build( "NativeOpExecutioner::execReduceSame requires both X & Z operands to have same type", xType, zType); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 
1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension), SD_COMMON_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceSame failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceLong(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("LF7 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::INT64) throw datatype_exception::build("NativeOpExecutioner::execReduceLong wrong Z data type", sd::DataType::INT64, zType); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension), SD_COMMON_TYPES, SD_LONG_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceLong failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceBool(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("BF7 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::BOOL) throw std::runtime_error("NativeOpExecutioner::execReduceBool requires Z operand to have BOOL type"); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 
1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceBool failed", res); } //////////////////////////////////////////////////////////////////////// /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ void NativeOpExecutioner::execReduceFloat(sd::LaunchContext* lc, int opNum, const void* hX, const sd::LongType* hXShapeInfo, const void* dX, const sd::LongType* dXShapeInfo, void* extraParams, void* hZ, const sd::LongType* hZShapeInfo, void* dZ, const sd::LongType* dZShapeInfo, int* dimension, int dimensionLength) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F8 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, 256, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceFloat failed", res); } //////////////////////////////////////////////////////////////////////// /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void NativeOpExecutioner::execIndexReduce(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto allocationPointer = lc->getAllocationPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F2 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto numBlocks = shape::length(hZShapeInfo); auto tadLength = shape::length(hXShapeInfo) / numBlocks; dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, tadLength < SD_CUDA_BLOCK_SIZE ? 
tadLength : SD_CUDA_BLOCK_SIZE, 1024); if (zType != sd::DataType::INT64 && zType != sd::DataType::INT32) throw datatype_exception::build("NativeOpExecutioner::execIndexReduce requires Z operand to have INT32/INT64 type", zType); auto dz = reinterpret_cast<sd::LongType*>(dZ); BUILD_DOUBLE_SELECTOR( xType, zType, functions::indexreduce::IndexReduce, ::executeIndexReduce(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, shape::rank(hZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), SD_COMMON_TYPES, SD_INDEXING_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execIndexReduce failed", res); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams */ //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execIndexReduceScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { if (sd::Environment::getInstance().isDebug()) printf("F1 opNum:[%i]\n", opNum); auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto allocationPointer = lc->getAllocationPointer(); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); if (sd::Environment::getInstance().isDebugAndVerbose() && launchDims.x == 1) printf("AF1 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); // FIXME: we want Z to be one of integer types // if (!DataTypeUtils::isZ(zType)) // throw sd::datatype_exception("NativeOpExecutioner::execIndexReduceScalar requires Z operand to have one of // integer types") if (zType != sd::DataType::INT64 && zType != sd::DataType::INT32) throw sd::datatype_exception::build( "NativeOpExecutioner::execIndexReduceScalar requires Z operand to have INT32/INT64 data type", zType); auto dz = reinterpret_cast<sd::LongType*>(dZ); BUILD_DOUBLE_SELECTOR( xType, zType, functions::indexreduce::IndexReduce, ::executeIndexReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, 0, nullptr, 0, 1, allocationPointer, reductionPointer, nullptr, nullptr), SD_COMMON_TYPES, SD_INDEXING_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execIndexReduceScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceFloatScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 
1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceFloatScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceBoolScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::BOOL) throw std::runtime_error("NativeOpExecutioner::execReduceBoolScalar requires Z operand to have BOOL type"); auto xLength = shape::length(hXShapeInfo); auto blockWidth = SD_CUDA_BLOCK_SIZE; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceBoolScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceSameScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != xType) throw datatype_exception::build( "NativeOpExecutioner::execReduceSameScalar requires both X & Z operands to have same type", xType, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = SD_CUDA_BLOCK_SIZE; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 
1 : numBlocks, blockWidth, 1024); BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr), SD_COMMON_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceSameScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceLongScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::INT64) throw datatype_exception::build("NativeOpExecutioner::execReduceLongScalar wrong Z data type", sd::DataType::INT64, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = SD_CUDA_BLOCK_SIZE; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr), SD_COMMON_TYPES, SD_LONG_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceLongScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execTransformSame(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) { auto stream = lc->getCudaStream(); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo)) { return; } if (xType != zType) { throw std::runtime_error("NativeOpExecutioner::execTransformSame requires X & Z to have same type"); } dim3 launchDims(512, 512, 16384); BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformSame, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), SD_COMMON_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execTransformSame failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execTransformBool(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) { auto stream = lc->getCudaStream(); auto xRank = shape::rank(hXShapeInfo); auto zRank = 
shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo)) { return; } if (!DataTypeUtils::isB(zType)) { throw std::runtime_error("NativeOpExecutioner::execTransformBool requires Z to have same boolean type"); } dim3 launchDims(512, 512, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformBool, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execTransformBool failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execTransformAny(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, bool allowParallelism) { auto stream = lc->getCudaStream(); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo)) return; if (opNum == sd::transform::Assign && shape::order(hXShapeInfo) == shape::order(hZShapeInfo) && shape::order(hXShapeInfo) == 'c' && xType == zType && shape::elementWiseStride(hXShapeInfo) == 1 && shape::elementWiseStride(hZShapeInfo) == 1) { hipMemcpyAsync(dZ, dX, shape::length(hXShapeInfo) * sd::DataTypeUtils::sizeOfElement(xType), hipMemcpyDeviceToDevice, *stream); } else { dim3 launchDims(512, 512, 2048); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformAny, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), SD_COMMON_TYPES, SD_COMMON_TYPES); } // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execTransformAny failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execTransformStrict(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) { auto stream = lc->getCudaStream(); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo)) { return; } if (xType != zType || !DataTypeUtils::isR(xType)) { throw datatype_exception::build( "NativeOpExecutioner::execTransformStrict requires X & Z to have same floating point type", xType, zType); } dim3 launchDims(512, 512, 16384); BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw 
cuda_exception::build("execTransformStrict failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execTransformFloat(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo)) return; if (!DataTypeUtils::isR(zType)) throw datatype_exception::build("NativeOpExecutioner::execTransformFloat requires Z to have floating point type", zType); dim3 launchDims(512, 512, 2048); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execTransformFloat failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execSummaryStats(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, bool biasCorrected) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); dim3 launchDims = dim3(256, SD_CUDA_BLOCK_SIZE, 1024); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execSummaryStats requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR( xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execSummaryStats A failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execSummaryStats(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, bool biasCorrected) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); dim3 launchDims = dim3(256, SD_CUDA_BLOCK_SIZE, 1024); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execSummaryStats requires Z operand to have floating point data type", zType); 
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected, reductionPointer), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execSummaryStats B failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduce3(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto allocationPointer = lc->getAllocationPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto blockWidth = SD_CUDA_BLOCK_SIZE; auto numBlocks = CudaLaunchHelper::getReductionBlocks(shape::length(hXShapeInfo), blockWidth); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024); if (xType != yType) throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3 requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execReduce3 requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduce3 failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduce3(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* yTadOnlyShapeInfo, sd::LongType const* yTadOffsets) { if (shape::isScalar(hZShapeInfo)) { NativeOpExecutioner::execReduce3(lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo); return; } auto stream = lc->getCudaStream(); auto allocationPointer = lc->getAllocationPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (xType != yType) throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3 requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execReduce3 requires Z operand to have floating point data type", zType); auto numBlocks = 
shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_DOUBLE_SELECTOR( xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduce3 B failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduce3Scalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto allocationPointer = lc->getAllocationPointer(); auto reductionPointer = lc->getReductionPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto xLength = shape::length(hXShapeInfo); auto blockWidth = SD_CUDA_BLOCK_SIZE; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024); if (xType != yType) throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3Scalar requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execReduce3Scalar requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduce3Scalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalarBool(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void const* hScalar, sd::LongType const* hScalarShapeInfo, void const* dScalar, sd::LongType const* dScalarShapeInfo, void* extraParams, bool allowParallelism) { auto stream = lc->getCudaStream(); dim3 launchDims = dim3(256, 512, 8192); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; if (xType != yType) throw std::runtime_error("NativeOpExecutioner::execScalarBool requires X & Y to have same type"); if (!DataTypeUtils::isB(zType)) throw std::runtime_error("NativeOpExecutioner::execScalarBool requires Z operand to have BOOL type"); BUILD_DOUBLE_SELECTOR( xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: 
remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalarBool failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalarBool( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void const* hScalars, sd::LongType const* hScalarShapeInfo, void const* dScalars, sd::LongType const* dScalarShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); dim3 launchDims(256, 512, 8192); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; if (xType != yType) throw std::runtime_error("NativeOpExecutioner::execScalarBool requires X & Y to have same type"); if (!DataTypeUtils::isB(zType)) throw std::runtime_error("NativeOpExecutioner::execScalarBool requires Z operand to have BOOL type"); BUILD_DOUBLE_SELECTOR( xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalarBool B failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalarInt(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void const* hScalar, sd::LongType const* hScalarShapeInfo, void const* dScalar, sd::LongType const* dScalarShapeInfo, void* extraParams, bool allowParallelism) { auto stream = lc->getCudaStream(); dim3 launchDims = dim3(256, 512, 8192); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; if (xType != yType || zType != xType) throw std::runtime_error("NativeOpExecutioner::execScalarInt requires X & Y to have same type"); if (!DataTypeUtils::isZ(zType)) throw std::runtime_error("NativeOpExecutioner::execScalarInt requires Z operand to have INT type"); BUILD_SINGLE_SELECTOR( xType, functions::scalar::ScalarIntTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), SD_INTEGER_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalarInt failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalarInt( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, 
sd::LongType const* dZShapeInfo, void const* hScalars, sd::LongType const* hScalarShapeInfo, void const* dScalars, sd::LongType const* dScalarShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); dim3 launchDims(256, 512, 8192); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; if (xType != yType || zType != xType) throw std::runtime_error("NativeOpExecutioner::execScalarInt requires X & Y to have same type"); if (!DataTypeUtils::isZ(zType)) throw std::runtime_error("NativeOpExecutioner::execScalarInt requires Z operand to have INT type"); BUILD_SINGLE_SELECTOR( xType, functions::scalar::ScalarIntTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SD_INTEGER_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalarInt B failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void const* hScalar, sd::LongType const* hScalarShapeInfo, void const* dScalar, sd::LongType const* dScalarShapeInfo, void* extraParams, bool allowParallelism) { auto stream = lc->getCudaStream(); dim3 launchDims(256, 512, 8192); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), SD_COMMON_TYPES, SD_COMMON_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), SD_COMMON_TYPES); #endif // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void const* hScalars, sd::LongType const* hScalarShapeInfo, void const* dScalars, sd::LongType const* dScalarShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = 
sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; dim3 launchDims(256, 256, 16384); #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR( xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_COMMON_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE( xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES); #endif // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalar B failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execRandom(sd::LaunchContext* lc, int opNum, sd::Pointer stateHost, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraArguments) { auto stream = lc->getCudaStream(); auto sizeOf = sizeof(sd::graph::RandomGenerator); sd::Pointer stateDevice; hipError_t res = hipMalloc(reinterpret_cast<void**>(&stateDevice), sizeOf); checkCudaErrors(hipStreamSynchronize(*stream)); checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream)); dim3 launchDims = dim3(512, 512, 32768); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto rng = reinterpret_cast<sd::graph::RandomGenerator*>(stateHost); // functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, dZ, // dZShapeInfo, extraArguments), BUILD_SINGLE_SELECTOR(zType, functions::random::RandomFunction, ::executeCudaSingle(launchDims, stream, opNum, stateDevice, dZ, dZShapeInfo, extraArguments), SD_FLOAT_TYPES); res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execRandom X failed", res); hipFree(stateDevice); rng->rewindH(shape::length(hZShapeInfo)); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execRandom(sd::LaunchContext* lc, int opNum, sd::Pointer stateHost, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraArguments) { auto stream = lc->getCudaStream(); auto sizeOf = sizeof(sd::graph::RandomGenerator); sd::Pointer stateDevice; hipError_t res = hipMalloc(reinterpret_cast<void**>(&stateDevice), sizeOf); checkCudaErrors(hipStreamSynchronize(*stream)); checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream)); auto rng = reinterpret_cast<sd::graph::RandomGenerator*>(stateHost); dim3 launchDims = dim3(512, 512, 32768); auto xType = sd::ArrayOptions::dataType(hZShapeInfo); // functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, dX, // dXShapeInfo, dZ, dZShapeInfo, extraArguments); BUILD_SINGLE_SELECTOR( xType, functions::random::RandomFunction, ::executeCudaDouble(launchDims, stream, opNum, stateDevice, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments), SD_FLOAT_TYPES); res = hipStreamSynchronize(*stream); if (res != 0) throw 
cuda_exception::build("execRandom XY failed", res); hipFree(stateDevice); rng->rewindH(shape::length(hZShapeInfo)); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execRandom(sd::LaunchContext* lc, int opNum, sd::Pointer stateHost, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraArguments) { auto stream = lc->getCudaStream(); auto sizeOf = sizeof(sd::graph::RandomGenerator); sd::Pointer stateDevice; hipError_t res = hipMalloc(reinterpret_cast<void**>(&stateDevice), sizeOf); checkCudaErrors(hipStreamSynchronize(*stream)); checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream)); auto rng = reinterpret_cast<sd::graph::RandomGenerator*>(stateHost); dim3 launchDims = dim3(512, 512, 32768); auto xType = sd::ArrayOptions::dataType(hZShapeInfo); // functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, dX, // dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments); BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaTriple(launchDims, stream, opNum, stateDevice, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments), SD_FLOAT_TYPES); res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execRandom XYZ failed", res); hipFree(stateDevice); rng->rewindH(shape::length(hZShapeInfo)); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduce3All(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParamsVals, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* xTadShapeInfo, sd::LongType const* xOffsets, sd::LongType const* yTadShapeInfo, sd::LongType const* yOffsets) { auto stream = lc->getCudaStream(); auto allocationPointer = lc->getAllocationPointer(); auto reductionPointer = lc->getReductionPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("D119 opNum:[%i]\n", opNum); dim3 launchDims(shape::length(hZShapeInfo), SD_CUDA_BLOCK_SIZE / 2, 1024); if (sd::Environment::getInstance().isVerbose() && launchDims.x == 1) printf("AD119 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (yType != xType) throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3All both operands must have same data type", xType, yType); BUILD_DOUBLE_SELECTOR( xType, zType, functions::reduce3::Reduce3, ::execAll(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParamsVals, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduce3All failed", res); } //////////////////////////////////////////////////////////////////////// void 
NativeOpExecutioner::execReduce3TAD(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* yTadShapeInfo, sd::LongType const* yTadOffsets) { if (shape::isScalar(hZShapeInfo)) { NativeOpExecutioner::execReduce3(lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo); return; } auto stream = lc->getCudaStream(); auto allocationPointer = lc->getAllocationPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (xType != yType) throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3TAD requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execReduce3TAD requires Z operand to have floating point data type", zType); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_DOUBLE_SELECTOR( xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadShapeInfo, tadOffsets, yTadShapeInfo, yTadOffsets), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = hipStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduce3TAD failed", res); }
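Every executor in the file above ends the same way: dispatch to a typed kernel through a BUILD_*_SELECTOR macro, synchronize the stream (hipStreamSynchronize here, cudaStreamSynchronize in the CUDA twin below), and turn a non-zero status into a cuda_exception. The following is a minimal standalone sketch of that launch-then-check pattern only; it is not part of either file, the fillKernel and syncAndCheck names are mine, and it throws std::runtime_error because the project's cuda_exception helper is not available outside the codebase. It reuses the dim3 launchDims(blocks, threads, sharedMem) convention these files follow.

#include <cuda_runtime.h>

#include <stdexcept>
#include <string>

// Trivial stand-in for the real typed loops kernels.
__global__ void fillKernel(float* data, float value, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] = value;
}

// Mirrors the "auto res = cudaStreamSynchronize(*stream); if (res != 0) throw ..." tail
// that follows every BUILD_*_SELECTOR invocation in the executors.
static void syncAndCheck(cudaStream_t stream, const char* opName) {
  cudaError_t res = cudaStreamSynchronize(stream);
  if (res != cudaSuccess)
    throw std::runtime_error(std::string(opName) + " failed: " + cudaGetErrorString(res));
}

int main() {
  const int n = 1024;
  float* dX = nullptr;
  cudaStream_t stream;
  if (cudaStreamCreate(&stream) != cudaSuccess) return 1;
  if (cudaMalloc(&dX, n * sizeof(float)) != cudaSuccess) return 1;

  // Same convention as above: x = blocks, y = threads per block, z = shared memory bytes.
  dim3 launchDims(4, 256, 0);
  fillKernel<<<launchDims.x, launchDims.y, launchDims.z, stream>>>(dX, 1.0f, n);
  syncAndCheck(stream, "fillKernel");  // plays the role of the post-launch checks above

  cudaFree(dX);
  cudaStreamDestroy(stream);
  return 0;
}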
bf4ac96a24c1bb0386ef08a93fdc02dbaf93b7e6.cu
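The source below reaches its typed kernels through BUILD_SINGLE_SELECTOR / BUILD_DOUBLE_SELECTOR from system/op_boilerplate.h, which, roughly speaking, expand the runtime xType/zType values into concrete template instantiations. As a self-contained illustration of that idea only (the enum, PairwiseOp, and execSelector below are my own simplification, covering a few types rather than the full SD_COMMON_TYPES list):

#include <cstdio>
#include <stdexcept>

// Tiny stand-in for sd::DataType.
enum class DataType { FLOAT32, DOUBLE, INT32 };

// Stand-in for a typed functor like the PairWiseTransform loops class.
template <typename X, typename Z>
struct PairwiseOp {
  static void exec(const void* vx, void* vz, int n) {
    auto x = static_cast<const X*>(vx);
    auto z = static_cast<Z*>(vz);
    for (int i = 0; i < n; i++) z[i] = static_cast<Z>(x[i]);  // placeholder for the real kernel launch
  }
};

// Hand-rolled equivalent of a two-level selector: the outer switch picks X, the inner branch picks Z.
void execSelector(DataType xType, DataType zType, const void* x, void* z, int n) {
  switch (xType) {
    case DataType::FLOAT32:
      if (zType == DataType::FLOAT32)     PairwiseOp<float, float>::exec(x, z, n);
      else if (zType == DataType::DOUBLE) PairwiseOp<float, double>::exec(x, z, n);
      else throw std::runtime_error("unsupported Z type");
      break;
    case DataType::INT32:
      if (zType == DataType::FLOAT32)     PairwiseOp<int, float>::exec(x, z, n);
      else throw std::runtime_error("unsupported Z type");
      break;
    default:
      throw std::runtime_error("unsupported X type");
  }
}

int main() {
  float in[4] = {1.f, 2.f, 3.f, 4.f};
  double out[4] = {};
  execSelector(DataType::FLOAT32, DataType::DOUBLE, in, out, 4);
  std::printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);
  return 0;
}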
/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/
#include <array/ConstantDataBuffer.h>
#include <array/DataTypeUtils.h>
#include <array/ShapeDescriptor.h>
#include <cuda.h>
#include <exceptions/cuda_exception.h>
#include <exceptions/datatype_exception.h>
#include <helpers/ConstantShapeHelper.h>
#include <helpers/CudaLaunchHelper.h>
#include <helpers/DebugHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeBuilders.h>
#include <legacy/NativeOpExecutioner.h>
#include <loops/broadcasting.h>
#include <loops/broadcasting_bool.h>
#include <loops/broadcasting_int.h>
#include <loops/indexreduce.h>
#include <loops/pairwise_bool.h>
#include <loops/pairwise_int.h>
#include <loops/pairwise_transform.h>
#include <loops/random.h>
#include <loops/reduce3.h>
#include <loops/reduce_bool.h>
#include <loops/reduce_float.h>
#include <loops/reduce_long.h>
#include <loops/reduce_same.h>
#include <loops/scalar.h>
#include <loops/scalar_bool.h>
#include <loops/scalar_int.h>
#include <loops/special_kernels.h>
#include <loops/summarystatsreduce.h>
#include <loops/transform_any.h>
#include <loops/transform_bool.h>
#include <loops/transform_float.h>
#include <loops/transform_same.h>
#include <loops/transform_strict.h>
#include <system/op_boilerplate.h>

using namespace sd;

/**
 * This is a utility kernel that updates the given special buffer with proper values in device memory
 */
extern "C" SD_KERNEL void prepareShapeBuffer(int* dimension, int* maxDimension, sd::LongType* specialPointer,
                                             int rows, sd::DataType dataType) {
  sd::LongType tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid > 0) return;

  dimension[0] = 0;
  maxDimension[0] = 1;

  specialPointer[0] = 2;
  specialPointer[1] = rows;
  specialPointer[2] = 1;
  specialPointer[3] = 1;
  specialPointer[4] = 1;
  specialPointer[5] = 0;
  specialPointer[6] = 1;
  specialPointer[7] = 99;

  ArrayOptions::setDataType(specialPointer, dataType);

  // printf("special[0]: [%lld]\n", (long long) specialPointer[0]);
  // shape::printShapeInfoLinear("prepareShapeBuffer", specialPointer);
}

////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execPairwiseTransform(sd::LaunchContext* lc, int opNum, void const* hX,
                                                sd::LongType const* hXShapeInfo, void const* dX,
                                                sd::LongType const* dXShapeInfo, void const* hY,
                                                sd::LongType const* hYShapeInfo, void const* dY,
                                                sd::LongType const* dYShapeInfo, void* hZ,
                                                sd::LongType const* hZShapeInfo, void* dZ,
                                                sd::LongType const* dZShapeInfo, void* extraParams) {
  auto stream = lc->getCudaStream();

  auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
  auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
  auto zType = sd::ArrayOptions::dataType(hZShapeInfo);

  if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;

  if (xType != zType && yType
!= zType) throw std::runtime_error( "NativeOpExecutioner::execPairwiseTransform requires Z operand to have either X or Y type"); if (lc == nullptr) throw std::runtime_error("NativeOpExecutioner::execPairwiseTransform: launch context cannot be nullptr !"); if (stream == nullptr) throw std::runtime_error("NativeOpExecutioner::execPairwiseTransform: CUDA stream cannot be nullptr !"); dim3 launchDims(256, 1024, 8192); #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR( xType, yType, zType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), SD_COMMON_TYPES, SD_COMMON_TYPES) #else BUILD_SINGLE_SELECTOR_THRICE( xType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), SD_COMMON_TYPES) #endif // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execPairwiseTransform failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execPairwiseBoolTransform(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isB(zType)) throw sd::datatype_exception::build("NativeOpExecutioner::execPairwiseBoolTransform wrong Z operand data type", sd::DataType::BOOL, zType); if (yType != xType) throw sd::datatype_exception::build( "NativeOpExecutioner::execPairwiseBoolTransform both operands must have same data type", xType, yType); dim3 launchDims(256, 1024, 16384); BUILD_DOUBLE_SELECTOR( xType, zType, functions::pairwise_transforms::PairWiseBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), SD_COMMON_TYPES, SD_BOOL_TYPES) // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execPairwiseBoolTransform failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execPairwiseIntTransform(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isZ(zType)) throw sd::datatype_exception::build("NativeOpExecutioner::execPairwiseIntTransform wrong Z operand data type", sd::DataType::BOOL, zType); if (yType != xType || zType != xType) throw sd::datatype_exception::build( 
"NativeOpExecutioner::execPairwiseIntTransform both operands must have same data type", xType, yType); dim3 launchDims(256, 1024, 16384); BUILD_SINGLE_SELECTOR( xType, functions::pairwise_transforms::PairWiseIntTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), SD_INTEGER_TYPES) // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execPairwiseIntTransform failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execSummaryStatsScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, bool biasCorrected) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); dim3 launchDims = dim3(256, SD_CUDA_BLOCK_SIZE, 1024); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); BUILD_DOUBLE_SELECTOR( xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execSummaryStatsScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execBroadcastBool(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isB(zType)) throw std::runtime_error("NativeOpExecutioner::execBroadcastBool requires Z operand to have BOOL type"); if (yType != xType) throw std::runtime_error("NativeOpExecutioner::execBroadcastBool requires both X & Y operands to have same type"); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F3B opNum:[%i]\n", opNum); dim3 launchDims(256, 256, 1024); BUILD_DOUBLE_SELECTOR( xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_BOOL_TYPES) // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcastBool failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execBroadcastBool(sd::LaunchContext* lc, const int opNum, const void* hX, const sd::LongType* 
hXShapeInfo, const void* dX, const sd::LongType* dXShapeInfo, const void* hY, const sd::LongType* hYShapeInfo, const void* dY, const sd::LongType* dYShapeInfo, void* hZ, const sd::LongType* hZShapeInfo, void* dZ, const sd::LongType* dZShapeInfo, void* extraParams) { if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); dim3 launchDims; launchDims.y = SD_MAX_NUM_THREADS / 4; // threadsPerBlock launchDims.x = (shape::length(hZShapeInfo) + launchDims.y - 1) / launchDims.y; // blocksPerGrid launchDims.z = 1024; // shared memory BUILD_DOUBLE_SELECTOR( xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcastBool failed", res); } void NativeOpExecutioner::execInverseBroadcastBool( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isB(zType)) throw std::runtime_error("NativeOpExecutioner::execBroadcastBool requires Z operand to have BOOL type"); if (yType != xType) throw std::runtime_error("NativeOpExecutioner::execBroadcastBool requires both X & Y operands to have same type"); dim3 launchDims(256, 256, 1024); BUILD_DOUBLE_SELECTOR( xType, zType, functions::broadcast::BroadcastBool, ::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_BOOL_TYPES) // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execInverseBroadcastBool failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execBroadcastInt( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) 
return; if (!DataTypeUtils::isZ(zType)) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires Z operand to have INT type"); if (yType != xType || zType != xType) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires both X & Y operands to have same type"); dim3 launchDims(256, 256, 1024); BUILD_SINGLE_SELECTOR( xType, functions::broadcast::BroadcastInt, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_INTEGER_TYPES) // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcastBool failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execBroadcastInt(sd::LaunchContext* lc, const int opNum, const void* hX, const sd::LongType* hXShapeInfo, const void* dX, const sd::LongType* dXShapeInfo, const void* hY, const sd::LongType* hYShapeInfo, const void* dY, const sd::LongType* dYShapeInfo, void* hZ, const sd::LongType* hZShapeInfo, void* dZ, const sd::LongType* dZShapeInfo) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isZ(zType)) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires Z operand to have INT type"); if (yType != xType || zType != xType) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires both X & Y operands to have same type"); dim3 launchDims; launchDims.y = SD_MAX_NUM_THREADS / 4; // threadsPerBlock launchDims.x = (shape::length(hZShapeInfo) + launchDims.y - 1) / launchDims.y; // blocksPerGrid launchDims.z = 1024; // shared memory BUILD_SINGLE_SELECTOR(xType, functions::broadcast::BroadcastInt, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo), SD_INTEGER_TYPES) // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcastBool failed", res); } void NativeOpExecutioner::execInverseBroadcastInt( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; if (!DataTypeUtils::isZ(zType)) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires Z operand to have INT type"); if (yType != xType || zType != xType) throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires both X & Y operands to have same type"); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F3BI opNum:[%i]\n", opNum); dim3 launchDims(256, 256, 1024); BUILD_SINGLE_SELECTOR( xType, 
functions::broadcast::BroadcastInt, ::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_INTEGER_TYPES) // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execInverseBroadcastInt failed", res); } //////////////////////////////////////////////////////////////////////// /** * * @param opNum * @param dX * @param dXShapeInfo * @param dY * @param dYShapeInfo * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void NativeOpExecutioner::execBroadcast(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; dim3 launchDims(256, 256, 1024); #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR( xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_COMMON_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE( xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES); #endif // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcast failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execBroadcast(sd::LaunchContext* lc, const int opNum, const void* hX, const sd::LongType* hXShapeInfo, const void* dX, const sd::LongType* dXShapeInfo, const void* hY, const sd::LongType* hYShapeInfo, const void* dY, const sd::LongType* dYShapeInfo, void* hZ, const sd::LongType* hZShapeInfo, void* dZ, const sd::LongType* dZShapeInfo) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; dim3 launchDims; launchDims.y = SD_MAX_NUM_THREADS / 4; // threadsPerBlock launchDims.x = (shape::length(hZShapeInfo) + launchDims.y - 1) / launchDims.y; // blocksPerGrid launchDims.z = 1024; // shared memory #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo), SD_COMMON_TYPES, SD_COMMON_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE( xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, 
dYShapeInfo, dZ, dZShapeInfo), SD_COMMON_TYPES); #endif // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execBroadcast failed", res); } void NativeOpExecutioner::execInverseBroadcast( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return; dim3 launchDims(256, 256, 1024); #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR( xType, yType, zType, functions::broadcast::Broadcast, ::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_COMMON_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE( xType, functions::broadcast::Broadcast, ::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES); #endif // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execInverseBroadcast failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceSame(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("SF7 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != xType) throw datatype_exception::build( "NativeOpExecutioner::execReduceSame requires both X & Z operands to have same type", xType, zType); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 
1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension), SD_COMMON_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceSame failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceLong(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("LF7 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::INT64) throw datatype_exception::build("NativeOpExecutioner::execReduceLong wrong Z data type", sd::DataType::INT64, zType); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension), SD_COMMON_TYPES, SD_LONG_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceLong failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceBool(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("BF7 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::BOOL) throw std::runtime_error("NativeOpExecutioner::execReduceBool requires Z operand to have BOOL type"); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 
1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceBool failed", res); } //////////////////////////////////////////////////////////////////////// /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ void NativeOpExecutioner::execReduceFloat(sd::LaunchContext* lc, int opNum, const void* hX, const sd::LongType* hXShapeInfo, const void* dX, const sd::LongType* dXShapeInfo, void* extraParams, void* hZ, const sd::LongType* hZShapeInfo, void* dZ, const sd::LongType* dZShapeInfo, int* dimension, int dimensionLength) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F8 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, 256, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceFloat failed", res); } //////////////////////////////////////////////////////////////////////// /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void NativeOpExecutioner::execIndexReduce(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto allocationPointer = lc->getAllocationPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F2 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto numBlocks = shape::length(hZShapeInfo); auto tadLength = shape::length(hXShapeInfo) / numBlocks; dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, tadLength < SD_CUDA_BLOCK_SIZE ? 
tadLength : SD_CUDA_BLOCK_SIZE, 1024); if (zType != sd::DataType::INT64 && zType != sd::DataType::INT32) throw datatype_exception::build("NativeOpExecutioner::execIndexReduce requires Z operand to have INT32/INT64 type", zType); auto dz = reinterpret_cast<sd::LongType*>(dZ); BUILD_DOUBLE_SELECTOR( xType, zType, functions::indexreduce::IndexReduce, ::executeIndexReduce(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, shape::rank(hZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), SD_COMMON_TYPES, SD_INDEXING_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execIndexReduce failed", res); } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams */ //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execIndexReduceScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { if (sd::Environment::getInstance().isDebug()) printf("F1 opNum:[%i]\n", opNum); auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto allocationPointer = lc->getAllocationPointer(); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); if (sd::Environment::getInstance().isDebugAndVerbose() && launchDims.x == 1) printf("AF1 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); // FIXME: we want Z to be one of integer types // if (!DataTypeUtils::isZ(zType)) // throw sd::datatype_exception("NativeOpExecutioner::execIndexReduceScalar requires Z operand to have one of // integer types") if (zType != sd::DataType::INT64 && zType != sd::DataType::INT32) throw sd::datatype_exception::build( "NativeOpExecutioner::execIndexReduceScalar requires Z operand to have INT32/INT64 data type", zType); auto dz = reinterpret_cast<sd::LongType*>(dZ); BUILD_DOUBLE_SELECTOR( xType, zType, functions::indexreduce::IndexReduce, ::executeIndexReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, 0, nullptr, 0, 1, allocationPointer, reductionPointer, nullptr, nullptr), SD_COMMON_TYPES, SD_INDEXING_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execIndexReduceScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceFloatScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 
1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceFloatScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceBoolScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::BOOL) throw std::runtime_error("NativeOpExecutioner::execReduceBoolScalar requires Z operand to have BOOL type"); auto xLength = shape::length(hXShapeInfo); auto blockWidth = SD_CUDA_BLOCK_SIZE; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceBoolScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceSameScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != xType) throw datatype_exception::build( "NativeOpExecutioner::execReduceSameScalar requires both X & Z operands to have same type", xType, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = SD_CUDA_BLOCK_SIZE; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 
1 : numBlocks, blockWidth, 1024); BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr), SD_COMMON_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceSameScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduceLongScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::INT64) throw datatype_exception::build("NativeOpExecutioner::execReduceLongScalar wrong Z data type", sd::DataType::INT64, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = SD_CUDA_BLOCK_SIZE; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr), SD_COMMON_TYPES, SD_LONG_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduceLongScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execTransformSame(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) { auto stream = lc->getCudaStream(); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo)) { return; } if (xType != zType) { throw std::runtime_error("NativeOpExecutioner::execTransformSame requires X & Z to have same type"); } dim3 launchDims(512, 512, 16384); BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformSame, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), SD_COMMON_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execTransformSame failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execTransformBool(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) { auto stream = lc->getCudaStream(); auto xRank = shape::rank(hXShapeInfo); auto zRank = 
shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo)) { return; } if (!DataTypeUtils::isB(zType)) { throw std::runtime_error("NativeOpExecutioner::execTransformBool requires Z to have same boolean type"); } dim3 launchDims(512, 512, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformBool, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execTransformBool failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execTransformAny(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, bool allowParallelism) { auto stream = lc->getCudaStream(); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo)) return; if (opNum == sd::transform::Assign && shape::order(hXShapeInfo) == shape::order(hZShapeInfo) && shape::order(hXShapeInfo) == 'c' && xType == zType && shape::elementWiseStride(hXShapeInfo) == 1 && shape::elementWiseStride(hZShapeInfo) == 1) { cudaMemcpyAsync(dZ, dX, shape::length(hXShapeInfo) * sd::DataTypeUtils::sizeOfElement(xType), cudaMemcpyDeviceToDevice, *stream); } else { dim3 launchDims(512, 512, 2048); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformAny, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), SD_COMMON_TYPES, SD_COMMON_TYPES); } // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execTransformAny failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execTransformStrict(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) { auto stream = lc->getCudaStream(); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo)) { return; } if (xType != zType || !DataTypeUtils::isR(xType)) { throw datatype_exception::build( "NativeOpExecutioner::execTransformStrict requires X & Z to have same floating point type", xType, zType); } dim3 launchDims(512, 512, 16384); BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw 
cuda_exception::build("execTransformStrict failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execTransformFloat(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraParams, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo)) return; if (!DataTypeUtils::isR(zType)) throw datatype_exception::build("NativeOpExecutioner::execTransformFloat requires Z to have floating point type", zType); dim3 launchDims(512, 512, 2048); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execTransformFloat failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execSummaryStats(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, bool biasCorrected) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); dim3 launchDims = dim3(256, SD_CUDA_BLOCK_SIZE, 1024); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execSummaryStats requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR( xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execSummaryStats A failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execSummaryStats(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, bool biasCorrected) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); dim3 launchDims = dim3(256, SD_CUDA_BLOCK_SIZE, 1024); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execSummaryStats requires Z operand to have floating point data type", zType); 
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected, reductionPointer), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execSummaryStats B failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduce3(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto reductionPointer = lc->getReductionPointer(); auto allocationPointer = lc->getAllocationPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto blockWidth = SD_CUDA_BLOCK_SIZE; auto numBlocks = CudaLaunchHelper::getReductionBlocks(shape::length(hXShapeInfo), blockWidth); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024); if (xType != yType) throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3 requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execReduce3 requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduce3 failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduce3(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* yTadOnlyShapeInfo, sd::LongType const* yTadOffsets) { if (shape::isScalar(hZShapeInfo)) { NativeOpExecutioner::execReduce3(lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo); return; } auto stream = lc->getCudaStream(); auto allocationPointer = lc->getAllocationPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (xType != yType) throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3 requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execReduce3 requires Z operand to have floating point data type", zType); auto numBlocks = 
shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_DOUBLE_SELECTOR( xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduce3 B failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduce3Scalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo) { auto stream = lc->getCudaStream(); auto allocationPointer = lc->getAllocationPointer(); auto reductionPointer = lc->getReductionPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto xLength = shape::length(hXShapeInfo); auto blockWidth = SD_CUDA_BLOCK_SIZE; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024); if (xType != yType) throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3Scalar requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execReduce3Scalar requires Z operand to have floating point data type", zType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduce3Scalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalarBool(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void const* hScalar, sd::LongType const* hScalarShapeInfo, void const* dScalar, sd::LongType const* dScalarShapeInfo, void* extraParams, bool allowParallelism) { auto stream = lc->getCudaStream(); dim3 launchDims = dim3(256, 512, 8192); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; if (xType != yType) throw std::runtime_error("NativeOpExecutioner::execScalarBool requires X & Y to have same type"); if (!DataTypeUtils::isB(zType)) throw std::runtime_error("NativeOpExecutioner::execScalarBool requires Z operand to have BOOL type"); BUILD_DOUBLE_SELECTOR( xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: 
remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalarBool failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalarBool( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void const* hScalars, sd::LongType const* hScalarShapeInfo, void const* dScalars, sd::LongType const* dScalarShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); dim3 launchDims(256, 512, 8192); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; if (xType != yType) throw std::runtime_error("NativeOpExecutioner::execScalarBool requires X & Y to have same type"); if (!DataTypeUtils::isB(zType)) throw std::runtime_error("NativeOpExecutioner::execScalarBool requires Z operand to have BOOL type"); BUILD_DOUBLE_SELECTOR( xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_BOOL_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalarBool B failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalarInt(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void const* hScalar, sd::LongType const* hScalarShapeInfo, void const* dScalar, sd::LongType const* dScalarShapeInfo, void* extraParams, bool allowParallelism) { auto stream = lc->getCudaStream(); dim3 launchDims = dim3(256, 512, 8192); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; if (xType != yType || zType != xType) throw std::runtime_error("NativeOpExecutioner::execScalarInt requires X & Y to have same type"); if (!DataTypeUtils::isZ(zType)) throw std::runtime_error("NativeOpExecutioner::execScalarInt requires Z operand to have INT type"); BUILD_SINGLE_SELECTOR( xType, functions::scalar::ScalarIntTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), SD_INTEGER_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalarInt failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalarInt( sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, 
sd::LongType const* dZShapeInfo, void const* hScalars, sd::LongType const* hScalarShapeInfo, void const* dScalars, sd::LongType const* dScalarShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); dim3 launchDims(256, 512, 8192); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; if (xType != yType || zType != xType) throw std::runtime_error("NativeOpExecutioner::execScalarInt requires X & Y to have same type"); if (!DataTypeUtils::isZ(zType)) throw std::runtime_error("NativeOpExecutioner::execScalarInt requires Z operand to have INT type"); BUILD_SINGLE_SELECTOR( xType, functions::scalar::ScalarIntTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SD_INTEGER_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalarInt B failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void const* hScalar, sd::LongType const* hScalarShapeInfo, void const* dScalar, sd::LongType const* dScalarShapeInfo, void* extraParams, bool allowParallelism) { auto stream = lc->getCudaStream(); dim3 launchDims(256, 512, 8192); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), SD_COMMON_TYPES, SD_COMMON_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), SD_COMMON_TYPES); #endif // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalar failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void const* hScalars, sd::LongType const* hScalarShapeInfo, void const* dScalars, sd::LongType const* dScalarShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto stream = lc->getCudaStream(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = 
sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return; dim3 launchDims(256, 256, 16384); #ifdef SD_EXPERIMENTAL_ENABLED BUILD_PAIRWISE_SELECTOR( xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES, SD_COMMON_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE( xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SD_COMMON_TYPES); #endif // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execScalar B failed", res); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execRandom(sd::LaunchContext* lc, int opNum, sd::Pointer stateHost, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraArguments) { auto stream = lc->getCudaStream(); auto sizeOf = sizeof(sd::graph::RandomGenerator); sd::Pointer stateDevice; cudaError_t res = cudaMalloc(reinterpret_cast<void**>(&stateDevice), sizeOf); checkCudaErrors(cudaStreamSynchronize(*stream)); checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream)); dim3 launchDims = dim3(512, 512, 32768); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); auto rng = reinterpret_cast<sd::graph::RandomGenerator*>(stateHost); // functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, dZ, // dZShapeInfo, extraArguments), BUILD_SINGLE_SELECTOR(zType, functions::random::RandomFunction, ::executeCudaSingle(launchDims, stream, opNum, stateDevice, dZ, dZShapeInfo, extraArguments), SD_FLOAT_TYPES); res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execRandom X failed", res); cudaFree(stateDevice); rng->rewindH(shape::length(hZShapeInfo)); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execRandom(sd::LaunchContext* lc, int opNum, sd::Pointer stateHost, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraArguments) { auto stream = lc->getCudaStream(); auto sizeOf = sizeof(sd::graph::RandomGenerator); sd::Pointer stateDevice; cudaError_t res = cudaMalloc(reinterpret_cast<void**>(&stateDevice), sizeOf); checkCudaErrors(cudaStreamSynchronize(*stream)); checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream)); auto rng = reinterpret_cast<sd::graph::RandomGenerator*>(stateHost); dim3 launchDims = dim3(512, 512, 32768); auto xType = sd::ArrayOptions::dataType(hZShapeInfo); // functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, dX, // dXShapeInfo, dZ, dZShapeInfo, extraArguments); BUILD_SINGLE_SELECTOR( xType, functions::random::RandomFunction, ::executeCudaDouble(launchDims, stream, opNum, stateDevice, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments), SD_FLOAT_TYPES); res = cudaStreamSynchronize(*stream); if (res != 
0) throw cuda_exception::build("execRandom XY failed", res); cudaFree(stateDevice); rng->rewindH(shape::length(hZShapeInfo)); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execRandom(sd::LaunchContext* lc, int opNum, sd::Pointer stateHost, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, void* extraArguments) { auto stream = lc->getCudaStream(); auto sizeOf = sizeof(sd::graph::RandomGenerator); sd::Pointer stateDevice; cudaError_t res = cudaMalloc(reinterpret_cast<void**>(&stateDevice), sizeOf); checkCudaErrors(cudaStreamSynchronize(*stream)); checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream)); auto rng = reinterpret_cast<sd::graph::RandomGenerator*>(stateHost); dim3 launchDims = dim3(512, 512, 32768); auto xType = sd::ArrayOptions::dataType(hZShapeInfo); // functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, dX, // dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments); BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaTriple(launchDims, stream, opNum, stateDevice, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments), SD_FLOAT_TYPES); res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execRandom XYZ failed", res); cudaFree(stateDevice); rng->rewindH(shape::length(hZShapeInfo)); } //////////////////////////////////////////////////////////////////////// void NativeOpExecutioner::execReduce3All(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParamsVals, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* xTadShapeInfo, sd::LongType const* xOffsets, sd::LongType const* yTadShapeInfo, sd::LongType const* yOffsets) { auto stream = lc->getCudaStream(); auto allocationPointer = lc->getAllocationPointer(); auto reductionPointer = lc->getReductionPointer(); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("D119 opNum:[%i]\n", opNum); dim3 launchDims(shape::length(hZShapeInfo), SD_CUDA_BLOCK_SIZE / 2, 1024); if (sd::Environment::getInstance().isVerbose() && launchDims.x == 1) printf("AD119 opNum:[%i]\n", opNum); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (yType != xType) throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3All both operands must have same data type", xType, yType); BUILD_DOUBLE_SELECTOR( xType, zType, functions::reduce3::Reduce3, ::execAll(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParamsVals, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduce3All failed", res); } //////////////////////////////////////////////////////////////////////// void 
NativeOpExecutioner::execReduce3TAD(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void const* hY, sd::LongType const* hYShapeInfo, void const* dY, sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* yTadShapeInfo, sd::LongType const* yTadOffsets) { if (shape::isScalar(hZShapeInfo)) { NativeOpExecutioner::execReduce3(lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo); return; } auto stream = lc->getCudaStream(); auto allocationPointer = lc->getAllocationPointer(); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (xType != yType) throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3TAD requires Y operand to have X type", xType, yType); if (!DataTypeUtils::isR(zType)) throw sd::datatype_exception::build( "NativeOpExecutioner::execReduce3TAD requires Z operand to have floating point data type", zType); auto numBlocks = shape::length(hZShapeInfo); dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024); BUILD_DOUBLE_SELECTOR( xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadShapeInfo, tadOffsets, yTadShapeInfo, yTadOffsets), SD_COMMON_TYPES, SD_FLOAT_TYPES); // TODO: remove after the release auto res = cudaStreamSynchronize(*stream); if (res != 0) throw cuda_exception::build("execReduce3TAD failed", res); }
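// Illustrative sketch (not part of the file above): every exec* method above ends with the same
// "synchronize the stream, then turn a non-zero status into an exception" pattern, using the
// project's own cuda_exception::build helper. A minimal standalone CUDA version of that pattern,
// with a hypothetical helper name and std::runtime_error standing in for the project's exception
// type, could look like this.
#include <cuda_runtime.h>
#include <stdexcept>
#include <string>

// Hypothetical helper: wait for all work queued on `stream`, then throw if anything failed.
static void syncAndCheck(cudaStream_t stream, const char *opName) {
  cudaError_t res = cudaStreamSynchronize(stream);
  if (res != cudaSuccess) {
    // cudaGetErrorString converts the status code into a readable message
    throw std::runtime_error(std::string(opName) + " failed: " + cudaGetErrorString(res));
  }
}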
9c672262e9104f4469bf17c9e726cea1e58bde0e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * * * * This sample illustrates the usage of CUDA streams for overlapping * kernel execution with device/host memcopies. The kernel is used to * initialize an array to a specific value, after which the array is * copied to the host (CPU) memory. To increase performance, multiple * kernel/memcopy pairs are launched asynchronously, each pair in its * own stream. Devices with Compute Capability 1.1 can overlap a kernel * and a memcopy as long as they are issued in different streams. Kernels * are serialized. Thus, if n pairs are launched, streamed approach * can reduce the memcopy cost to the (1/n)th of a single copy of the entire * data set. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). * * */ const char *sSDKsample = "simpleStreams"; const char *sEventSyncMethod[] = { "hipEventDefault", "hipEventBlockingSync", "hipEventDisableTiming", NULL }; const char *sDeviceSyncMethod[] = { "hipDeviceScheduleAuto", "hipDeviceScheduleSpin", "hipDeviceScheduleYield", "INVALID", "hipDeviceScheduleBlockingSync", NULL }; // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> #ifndef WIN32 #include <sys/mman.h> // for mmap() / munmap() #endif // Macro to aligned up to the memory size in question #define MEMORY_ALIGNMENT 4096 #define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) ) __global__ void init_array(int *g_data, int *factor, int num_iterations) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i=0; i<num_iterations; i++) { g_data[idx] += *factor; // non-coalesced on purpose, to burn time } } bool correct_data(int *a, const int n, const int c) { for (int i = 0; i < n; i++) { if (a[i] != c) { printf("%d: %d %d\n", i, a[i], c); return false; } } return true; } inline void AllocateHostMemory(bool bPinGenericMemory, int **pp_a, int **ppAligned_a, int nbytes) { #if CUDART_VERSION >= 4000 #if !defined(__arm__) && !defined(__aarch64__) if (bPinGenericMemory) { // allocate a generic page-aligned chunk of system memory #ifdef WIN32 printf("> VirtualAlloc() allocating %4.2f Mbytes of (generic page-aligned system memory)\n", (float)nbytes/1048576.0f); *pp_a = (int *) VirtualAlloc(NULL, (nbytes + MEMORY_ALIGNMENT), MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); #else printf("> mmap() allocating %4.2f Mbytes (generic page-aligned system memory)\n", (float)nbytes/1048576.0f); *pp_a = (int *) mmap(NULL, (nbytes + MEMORY_ALIGNMENT), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0); #endif *ppAligned_a = (int *)ALIGN_UP(*pp_a, MEMORY_ALIGNMENT); printf("> hipHostRegister() registering %4.2f Mbytes of generic allocated system memory\n", (float)nbytes/1048576.0f); // pin allocate memory checkCudaErrors(hipHostRegister(*ppAligned_a, nbytes, 
hipHostRegisterMapped)); } else #endif #endif { printf("> hipHostMalloc() allocating %4.2f Mbytes of system memory\n", (float)nbytes/1048576.0f); // allocate host memory (pinned is required for achieve asynchronicity) checkCudaErrors(hipHostMalloc((void **)pp_a, nbytes)); *ppAligned_a = *pp_a; } } inline void FreeHostMemory(bool bPinGenericMemory, int **pp_a, int **ppAligned_a, int nbytes) { #if CUDART_VERSION >= 4000 #if !defined(__arm__) && !defined(__aarch64__) // CUDA 4.0 support pinning of generic host memory if (bPinGenericMemory) { // unpin and delete host memory checkCudaErrors(hipHostUnregister(*ppAligned_a)); #ifdef WIN32 VirtualFree(*pp_a, 0, MEM_RELEASE); #else munmap(*pp_a, nbytes); #endif } else #endif #endif { hipHostFree(*pp_a); } } static const char *sSyncMethod[] = { "0 (Automatic Blocking)", "1 (Spin Blocking)", "2 (Yield Blocking)", "3 (Undefined Blocking Method)", "4 (Blocking Sync Event) = low CPU utilization", NULL }; void printHelp() { printf("Usage: %s [options below]\n", sSDKsample); printf("\t--sync_method=n for CPU/GPU synchronization\n"); printf("\t n=%s\n", sSyncMethod[0]); printf("\t n=%s\n", sSyncMethod[1]); printf("\t n=%s\n", sSyncMethod[2]); printf("\t <Default> n=%s\n", sSyncMethod[4]); printf("\t--use_generic_memory (default) use generic page-aligned for system memory\n"); printf("\t--use_cuda_malloc_host (optional) use hipHostMalloc to allocate system memory\n"); } #if defined(__APPLE__) || defined(MACOSX) #define DEFAULT_PINNED_GENERIC_MEMORY false #else #define DEFAULT_PINNED_GENERIC_MEMORY true #endif int main(int argc, char **argv) { int cuda_device = 0; int nstreams = 4; // number of streams for CUDA calls int nreps = 10; // number of times each experiment is repeated int n = 16 * 1024 * 1024; // number of ints in the data set int nbytes = n * sizeof(int); // number of data bytes dim3 threads, blocks; // kernel launch configuration float elapsed_time, time_memcpy, time_kernel; // timing variables float scale_factor = 1.0f; // allocate generic memory and pin it laster instead of using hipHostMalloc() bool bPinGenericMemory = DEFAULT_PINNED_GENERIC_MEMORY; // we want this to be the default behavior int device_sync_method = hipDeviceScheduleBlockingSync; // by default we use BlockingSync int niterations; // number of iterations for the loop inside the kernel printf("[ %s ]\n\n", sSDKsample); if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printHelp(); return EXIT_SUCCESS; } if ((device_sync_method = getCmdLineArgumentInt(argc, (const char **)argv, "sync_method")) >= 0) { if (device_sync_method == 0 || device_sync_method == 1 || device_sync_method == 2 || device_sync_method == 4) { printf("Device synchronization method set to = %s\n", sSyncMethod[device_sync_method]); printf("Setting reps to 100 to demonstrate steady state\n"); nreps = 100; } else { printf("Invalid command line option sync_method=\"%d\"\n", device_sync_method); return EXIT_FAILURE; } } else { printHelp(); return EXIT_SUCCESS; } if (checkCmdLineFlag(argc, (const char **)argv, "use_generic_memory")) { #if defined(__APPLE__) || defined(MACOSX) bPinGenericMemory = false; // Generic Pinning of System Paged memory not currently supported on Mac OSX #else bPinGenericMemory = true; #endif } if (checkCmdLineFlag(argc, (const char **)argv, "use_cuda_malloc_host")) { bPinGenericMemory = false; } printf("\n> "); cuda_device = findCudaDevice(argc, (const char **)argv); // check the compute capability of the device int num_devices=0; checkCudaErrors(hipGetDeviceCount(&num_devices)); if 
(0==num_devices) { printf("your system does not have a CUDA capable device, waiving test...\n"); return EXIT_WAIVED; } // check if the command-line chosen device ID is within range, exit if not if (cuda_device >= num_devices) { printf("cuda_device=%d is invalid, must choose device ID between 0 and %d\n", cuda_device, num_devices-1); return EXIT_FAILURE; } checkCudaErrors(hipSetDevice(cuda_device)); // Checking for compute capabilities hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device)); niterations = 5; // Check if GPU can map host memory (Generic Method), if not then we override bPinGenericMemory to be false if (bPinGenericMemory) { printf("Device: <%s> canMapHostMemory: %s\n", deviceProp.name, deviceProp.canMapHostMemory ? "Yes" : "No"); if (deviceProp.canMapHostMemory == 0) { printf("Using hipHostMalloc, CUDA device does not support mapping of generic host memory\n"); bPinGenericMemory = false; } } // Anything that is less than 32 Cores will have scaled down workload scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f); n = (int)rint((float)n / scale_factor); printf("> CUDA Capable: SM %d.%d hardware\n", deviceProp.major, deviceProp.minor); printf("> %d Multiprocessor(s) x %d (Cores/Multiprocessor) = %d (Cores)\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); printf("> scale_factor = %1.4f\n", 1.0f/scale_factor); printf("> array_size = %d\n\n", n); // enable use of blocking sync, to reduce CPU usage printf("> Using CPU/GPU Device Synchronization method (%s)\n", sDeviceSyncMethod[device_sync_method]); checkCudaErrors(hipSetDeviceFlags(device_sync_method | (bPinGenericMemory ? hipDeviceMapHost : 0))); // allocate host memory int c = 5; // value to which the array will be initialized int *h_a = 0; // pointer to the array data in host memory int *hAligned_a = 0; // pointer to the array data in host memory (aligned to MEMORY_ALIGNMENT) // Allocate Host memory (could be using hipHostMalloc or VirtualAlloc/mmap if using the new CUDA 4.0 features AllocateHostMemory(bPinGenericMemory, &h_a, &hAligned_a, nbytes); // allocate device memory int *d_a = 0, *d_c = 0; // pointers to data and init value in the device memory checkCudaErrors(hipMalloc((void **)&d_a, nbytes)); checkCudaErrors(hipMemset(d_a, 0x0, nbytes)); checkCudaErrors(hipMalloc((void **)&d_c, sizeof(int))); checkCudaErrors(hipMemcpy(d_c, &c, sizeof(int), hipMemcpyHostToDevice)); printf("\nStarting Test\n"); // allocate and initialize an array of stream handles hipStream_t *streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); for (int i = 0; i < nstreams; i++) { checkCudaErrors(hipStreamCreate(&(streams[i]))); } // create CUDA event handles // use blocking sync hipEvent_t start_event, stop_event; int eventflags = ((device_sync_method == hipDeviceScheduleBlockingSync) ? 
hipEventBlockingSync: hipEventDefault); checkCudaErrors(hipEventCreateWithFlags(&start_event, eventflags)); checkCudaErrors(hipEventCreateWithFlags(&stop_event, eventflags)); // time memcopy from device checkCudaErrors(hipEventRecord(start_event, 0)); // record in stream-0, to ensure that all previous CUDA calls have completed checkCudaErrors(hipMemcpyAsync(hAligned_a, d_a, nbytes, hipMemcpyDeviceToHost, streams[0])); checkCudaErrors(hipEventRecord(stop_event, 0)); checkCudaErrors(hipEventSynchronize(stop_event)); // block until the event is actually recorded checkCudaErrors(hipEventElapsedTime(&time_memcpy, start_event, stop_event)); printf("memcopy:\t%.2f\n", time_memcpy); // time kernel threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); checkCudaErrors(hipEventRecord(start_event, 0)); hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[0], d_a, d_c, niterations); checkCudaErrors(hipEventRecord(stop_event, 0)); checkCudaErrors(hipEventSynchronize(stop_event)); checkCudaErrors(hipEventElapsedTime(&time_kernel, start_event, stop_event)); printf("kernel:\t\t%.2f\n", time_kernel); ////////////////////////////////////////////////////////////////////// // time non-streamed execution for reference threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); checkCudaErrors(hipEventRecord(start_event, 0)); for (int k = 0; k < nreps; k++) { hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, 0, d_a, d_c, niterations); checkCudaErrors(hipMemcpy(hAligned_a, d_a, nbytes, hipMemcpyDeviceToHost)); } checkCudaErrors(hipEventRecord(stop_event, 0)); checkCudaErrors(hipEventSynchronize(stop_event)); checkCudaErrors(hipEventElapsedTime(&elapsed_time, start_event, stop_event)); printf("non-streamed:\t%.2f\n", elapsed_time / nreps); ////////////////////////////////////////////////////////////////////// // time execution with nstreams streams threads=dim3(512,1); blocks=dim3(n/(nstreams*threads.x),1); memset(hAligned_a, 255, nbytes); // set host memory bits to all 1s, for testing correctness checkCudaErrors(hipMemset(d_a, 0, nbytes)); // set device memory to all 0s, for testing correctness checkCudaErrors(hipEventRecord(start_event, 0)); for (int k = 0; k < nreps; k++) { // asynchronously launch nstreams kernels, each operating on its own portion of data for (int i = 0; i < nstreams; i++) { hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[i], d_a + i *n / nstreams, d_c, niterations); } // asynchronously launch nstreams memcopies. 
Note that memcopy in stream x will only // commence executing when all previous CUDA calls in stream x have completed for (int i = 0; i < nstreams; i++) { checkCudaErrors(hipMemcpyAsync(hAligned_a + i * n / nstreams, d_a + i * n / nstreams, nbytes / nstreams, hipMemcpyDeviceToHost, streams[i])); } } checkCudaErrors(hipEventRecord(stop_event, 0)); checkCudaErrors(hipEventSynchronize(stop_event)); checkCudaErrors(hipEventElapsedTime(&elapsed_time, start_event, stop_event)); printf("%d streams:\t%.2f\n", nstreams, elapsed_time / nreps); // check whether the output is correct printf("-------------------------------\n"); bool bResults = correct_data(hAligned_a, n, c*nreps*niterations); // release resources for (int i = 0; i < nstreams; i++) { checkCudaErrors(hipStreamDestroy(streams[i])); } checkCudaErrors(hipEventDestroy(start_event)); checkCudaErrors(hipEventDestroy(stop_event)); // Free hipHostMalloc or Generic Host allocated memory (from CUDA 4.0) FreeHostMemory(bPinGenericMemory, &h_a, &hAligned_a, nbytes); checkCudaErrors(hipFree(d_a)); checkCudaErrors(hipFree(d_c)); return bResults ? EXIT_SUCCESS : EXIT_FAILURE; }
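// Illustrative sketch (not part of the file above): the sample times each phase by recording a
// pair of events into stream 0 and reading the elapsed time between them. A minimal,
// self-contained version of that timing pattern, written with the CUDA runtime names (the HIP
// file above uses the corresponding hipEvent* calls), might be:
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void busyKernel(int *data) { data[blockIdx.x * blockDim.x + threadIdx.x] += 1; }

int main() {
  const int n = 1 << 20;
  int *d = nullptr;
  cudaMalloc((void **)&d, n * sizeof(int));
  cudaMemset(d, 0, n * sizeof(int));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, 0);              // record in stream 0, after all prior work
  busyKernel<<<n / 256, 256>>>(d);        // the work being timed
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);             // block until `stop` has actually been recorded

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop); // elapsed GPU time in milliseconds
  printf("kernel: %.3f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d);
  return 0;
}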
9c672262e9104f4469bf17c9e726cea1e58bde0e.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * * * * This sample illustrates the usage of CUDA streams for overlapping * kernel execution with device/host memcopies. The kernel is used to * initialize an array to a specific value, after which the array is * copied to the host (CPU) memory. To increase performance, multiple * kernel/memcopy pairs are launched asynchronously, each pair in its * own stream. Devices with Compute Capability 1.1 can overlap a kernel * and a memcopy as long as they are issued in different streams. Kernels * are serialized. Thus, if n pairs are launched, streamed approach * can reduce the memcopy cost to the (1/n)th of a single copy of the entire * data set. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). * * 多流处理 */ const char *sSDKsample = "simpleStreams"; const char *sEventSyncMethod[] = { "cudaEventDefault", "cudaEventBlockingSync", "cudaEventDisableTiming", NULL }; const char *sDeviceSyncMethod[] = { "cudaDeviceScheduleAuto", "cudaDeviceScheduleSpin", "cudaDeviceScheduleYield", "INVALID", "cudaDeviceScheduleBlockingSync", NULL }; // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> #ifndef WIN32 #include <sys/mman.h> // for mmap() / munmap() #endif // Macro to aligned up to the memory size in question #define MEMORY_ALIGNMENT 4096 #define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) ) __global__ void init_array(int *g_data, int *factor, int num_iterations) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i=0; i<num_iterations; i++) { g_data[idx] += *factor; // non-coalesced on purpose, to burn time } } bool correct_data(int *a, const int n, const int c) { for (int i = 0; i < n; i++) { if (a[i] != c) { printf("%d: %d %d\n", i, a[i], c); return false; } } return true; } inline void AllocateHostMemory(bool bPinGenericMemory, int **pp_a, int **ppAligned_a, int nbytes) { #if CUDART_VERSION >= 4000 #if !defined(__arm__) && !defined(__aarch64__) if (bPinGenericMemory) { // allocate a generic page-aligned chunk of system memory #ifdef WIN32 printf("> VirtualAlloc() allocating %4.2f Mbytes of (generic page-aligned system memory)\n", (float)nbytes/1048576.0f); *pp_a = (int *) VirtualAlloc(NULL, (nbytes + MEMORY_ALIGNMENT), MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); #else printf("> mmap() allocating %4.2f Mbytes (generic page-aligned system memory)\n", (float)nbytes/1048576.0f); *pp_a = (int *) mmap(NULL, (nbytes + MEMORY_ALIGNMENT), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0); #endif *ppAligned_a = (int *)ALIGN_UP(*pp_a, MEMORY_ALIGNMENT); printf("> cudaHostRegister() registering %4.2f Mbytes of generic allocated system memory\n", (float)nbytes/1048576.0f); // pin allocate memory checkCudaErrors(cudaHostRegister(*ppAligned_a, nbytes, cudaHostRegisterMapped)); } else #endif #endif { printf("> 
cudaMallocHost() allocating %4.2f Mbytes of system memory\n", (float)nbytes/1048576.0f); // allocate host memory (pinned is required for achieve asynchronicity) checkCudaErrors(cudaMallocHost((void **)pp_a, nbytes)); *ppAligned_a = *pp_a; } } inline void FreeHostMemory(bool bPinGenericMemory, int **pp_a, int **ppAligned_a, int nbytes) { #if CUDART_VERSION >= 4000 #if !defined(__arm__) && !defined(__aarch64__) // CUDA 4.0 support pinning of generic host memory if (bPinGenericMemory) { // unpin and delete host memory checkCudaErrors(cudaHostUnregister(*ppAligned_a)); #ifdef WIN32 VirtualFree(*pp_a, 0, MEM_RELEASE); #else munmap(*pp_a, nbytes); #endif } else #endif #endif { cudaFreeHost(*pp_a); } } static const char *sSyncMethod[] = { "0 (Automatic Blocking)", "1 (Spin Blocking)", "2 (Yield Blocking)", "3 (Undefined Blocking Method)", "4 (Blocking Sync Event) = low CPU utilization", NULL }; void printHelp() { printf("Usage: %s [options below]\n", sSDKsample); printf("\t--sync_method=n for CPU/GPU synchronization\n"); printf("\t n=%s\n", sSyncMethod[0]); printf("\t n=%s\n", sSyncMethod[1]); printf("\t n=%s\n", sSyncMethod[2]); printf("\t <Default> n=%s\n", sSyncMethod[4]); printf("\t--use_generic_memory (default) use generic page-aligned for system memory\n"); printf("\t--use_cuda_malloc_host (optional) use cudaMallocHost to allocate system memory\n"); } #if defined(__APPLE__) || defined(MACOSX) #define DEFAULT_PINNED_GENERIC_MEMORY false #else #define DEFAULT_PINNED_GENERIC_MEMORY true #endif int main(int argc, char **argv) { int cuda_device = 0; int nstreams = 4; // number of streams for CUDA calls int nreps = 10; // number of times each experiment is repeated int n = 16 * 1024 * 1024; // number of ints in the data set int nbytes = n * sizeof(int); // number of data bytes dim3 threads, blocks; // kernel launch configuration float elapsed_time, time_memcpy, time_kernel; // timing variables float scale_factor = 1.0f; // allocate generic memory and pin it laster instead of using cudaHostAlloc() bool bPinGenericMemory = DEFAULT_PINNED_GENERIC_MEMORY; // we want this to be the default behavior int device_sync_method = cudaDeviceBlockingSync; // by default we use BlockingSync int niterations; // number of iterations for the loop inside the kernel printf("[ %s ]\n\n", sSDKsample); if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printHelp(); return EXIT_SUCCESS; } if ((device_sync_method = getCmdLineArgumentInt(argc, (const char **)argv, "sync_method")) >= 0) { if (device_sync_method == 0 || device_sync_method == 1 || device_sync_method == 2 || device_sync_method == 4) { printf("Device synchronization method set to = %s\n", sSyncMethod[device_sync_method]); printf("Setting reps to 100 to demonstrate steady state\n"); nreps = 100; } else { printf("Invalid command line option sync_method=\"%d\"\n", device_sync_method); return EXIT_FAILURE; } } else { printHelp(); return EXIT_SUCCESS; } if (checkCmdLineFlag(argc, (const char **)argv, "use_generic_memory")) { #if defined(__APPLE__) || defined(MACOSX) bPinGenericMemory = false; // Generic Pinning of System Paged memory not currently supported on Mac OSX #else bPinGenericMemory = true; #endif } if (checkCmdLineFlag(argc, (const char **)argv, "use_cuda_malloc_host")) { bPinGenericMemory = false; } printf("\n> "); cuda_device = findCudaDevice(argc, (const char **)argv); // check the compute capability of the device int num_devices=0; checkCudaErrors(cudaGetDeviceCount(&num_devices)); if (0==num_devices) { printf("your system does not have a CUDA 
capable device, waiving test...\n"); return EXIT_WAIVED; } // check if the command-line chosen device ID is within range, exit if not if (cuda_device >= num_devices) { printf("cuda_device=%d is invalid, must choose device ID between 0 and %d\n", cuda_device, num_devices-1); return EXIT_FAILURE; } checkCudaErrors(cudaSetDevice(cuda_device)); // Checking for compute capabilities cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device)); niterations = 5; // Check if GPU can map host memory (Generic Method), if not then we override bPinGenericMemory to be false if (bPinGenericMemory) { printf("Device: <%s> canMapHostMemory: %s\n", deviceProp.name, deviceProp.canMapHostMemory ? "Yes" : "No"); if (deviceProp.canMapHostMemory == 0) { printf("Using cudaMallocHost, CUDA device does not support mapping of generic host memory\n"); bPinGenericMemory = false; } } // Anything that is less than 32 Cores will have scaled down workload scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f); n = (int)rint((float)n / scale_factor); printf("> CUDA Capable: SM %d.%d hardware\n", deviceProp.major, deviceProp.minor); printf("> %d Multiprocessor(s) x %d (Cores/Multiprocessor) = %d (Cores)\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); printf("> scale_factor = %1.4f\n", 1.0f/scale_factor); printf("> array_size = %d\n\n", n); // enable use of blocking sync, to reduce CPU usage printf("> Using CPU/GPU Device Synchronization method (%s)\n", sDeviceSyncMethod[device_sync_method]); checkCudaErrors(cudaSetDeviceFlags(device_sync_method | (bPinGenericMemory ? cudaDeviceMapHost : 0))); // allocate host memory int c = 5; // value to which the array will be initialized int *h_a = 0; // pointer to the array data in host memory int *hAligned_a = 0; // pointer to the array data in host memory (aligned to MEMORY_ALIGNMENT) // Allocate Host memory (could be using cudaMallocHost or VirtualAlloc/mmap if using the new CUDA 4.0 features AllocateHostMemory(bPinGenericMemory, &h_a, &hAligned_a, nbytes); // allocate device memory int *d_a = 0, *d_c = 0; // pointers to data and init value in the device memory checkCudaErrors(cudaMalloc((void **)&d_a, nbytes)); checkCudaErrors(cudaMemset(d_a, 0x0, nbytes)); checkCudaErrors(cudaMalloc((void **)&d_c, sizeof(int))); checkCudaErrors(cudaMemcpy(d_c, &c, sizeof(int), cudaMemcpyHostToDevice)); printf("\nStarting Test\n"); // allocate and initialize an array of stream handles cudaStream_t *streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); for (int i = 0; i < nstreams; i++) { checkCudaErrors(cudaStreamCreate(&(streams[i]))); } // create CUDA event handles // use blocking sync cudaEvent_t start_event, stop_event; int eventflags = ((device_sync_method == cudaDeviceBlockingSync) ? 
cudaEventBlockingSync: cudaEventDefault); checkCudaErrors(cudaEventCreateWithFlags(&start_event, eventflags)); checkCudaErrors(cudaEventCreateWithFlags(&stop_event, eventflags)); // time memcopy from device checkCudaErrors(cudaEventRecord(start_event, 0)); // record in stream-0, to ensure that all previous CUDA calls have completed checkCudaErrors(cudaMemcpyAsync(hAligned_a, d_a, nbytes, cudaMemcpyDeviceToHost, streams[0])); checkCudaErrors(cudaEventRecord(stop_event, 0)); checkCudaErrors(cudaEventSynchronize(stop_event)); // block until the event is actually recorded checkCudaErrors(cudaEventElapsedTime(&time_memcpy, start_event, stop_event)); printf("memcopy:\t%.2f\n", time_memcpy); // time kernel threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); checkCudaErrors(cudaEventRecord(start_event, 0)); init_array<<<blocks, threads, 0, streams[0]>>>(d_a, d_c, niterations); checkCudaErrors(cudaEventRecord(stop_event, 0)); checkCudaErrors(cudaEventSynchronize(stop_event)); checkCudaErrors(cudaEventElapsedTime(&time_kernel, start_event, stop_event)); printf("kernel:\t\t%.2f\n", time_kernel); ////////////////////////////////////////////////////////////////////// // time non-streamed execution for reference threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); checkCudaErrors(cudaEventRecord(start_event, 0)); for (int k = 0; k < nreps; k++) { init_array<<<blocks, threads>>>(d_a, d_c, niterations); checkCudaErrors(cudaMemcpy(hAligned_a, d_a, nbytes, cudaMemcpyDeviceToHost)); } checkCudaErrors(cudaEventRecord(stop_event, 0)); checkCudaErrors(cudaEventSynchronize(stop_event)); checkCudaErrors(cudaEventElapsedTime(&elapsed_time, start_event, stop_event)); printf("non-streamed:\t%.2f\n", elapsed_time / nreps); ////////////////////////////////////////////////////////////////////// // time execution with nstreams streams threads=dim3(512,1); blocks=dim3(n/(nstreams*threads.x),1); memset(hAligned_a, 255, nbytes); // set host memory bits to all 1s, for testing correctness checkCudaErrors(cudaMemset(d_a, 0, nbytes)); // set device memory to all 0s, for testing correctness checkCudaErrors(cudaEventRecord(start_event, 0)); for (int k = 0; k < nreps; k++) { // asynchronously launch nstreams kernels, each operating on its own portion of data for (int i = 0; i < nstreams; i++) { init_array<<<blocks, threads, 0, streams[i]>>>(d_a + i *n / nstreams, d_c, niterations); } // asynchronously launch nstreams memcopies. Note that memcopy in stream x will only // commence executing when all previous CUDA calls in stream x have completed for (int i = 0; i < nstreams; i++) { checkCudaErrors(cudaMemcpyAsync(hAligned_a + i * n / nstreams, d_a + i * n / nstreams, nbytes / nstreams, cudaMemcpyDeviceToHost, streams[i])); } } checkCudaErrors(cudaEventRecord(stop_event, 0)); checkCudaErrors(cudaEventSynchronize(stop_event)); checkCudaErrors(cudaEventElapsedTime(&elapsed_time, start_event, stop_event)); printf("%d streams:\t%.2f\n", nstreams, elapsed_time / nreps); // check whether the output is correct printf("-------------------------------\n"); bool bResults = correct_data(hAligned_a, n, c*nreps*niterations); // release resources for (int i = 0; i < nstreams; i++) { checkCudaErrors(cudaStreamDestroy(streams[i])); } checkCudaErrors(cudaEventDestroy(start_event)); checkCudaErrors(cudaEventDestroy(stop_event)); // Free cudaMallocHost or Generic Host allocated memory (from CUDA 4.0) FreeHostMemory(bPinGenericMemory, &h_a, &hAligned_a, nbytes); checkCudaErrors(cudaFree(d_a)); checkCudaErrors(cudaFree(d_c)); return bResults ? 
EXIT_SUCCESS : EXIT_FAILURE; }
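// Illustrative sketch (not part of the file above): the core idea of the sample is to split one
// large array across several streams so each chunk's kernel and device-to-host copy can overlap
// with work queued in the other streams. A minimal version of that launch/copy loop, using pinned
// host memory (required for truly asynchronous copies), could look like this.
#include <cuda_runtime.h>

__global__ void addOne(int *g_data) { g_data[blockIdx.x * blockDim.x + threadIdx.x] += 1; }

int main() {
  const int nstreams = 4;
  const int n = 1 << 22;                       // total elements, divisible by nstreams
  const int chunk = n / nstreams;

  int *h = nullptr, *d = nullptr;
  cudaMallocHost((void **)&h, n * sizeof(int)); // pinned host buffer
  cudaMalloc((void **)&d, n * sizeof(int));
  cudaMemset(d, 0, n * sizeof(int));

  cudaStream_t streams[nstreams];
  for (int i = 0; i < nstreams; i++) cudaStreamCreate(&streams[i]);

  // Each stream processes and copies back its own chunk; chunks in different
  // streams may overlap on hardware that supports concurrent copy and execute.
  for (int i = 0; i < nstreams; i++) {
    addOne<<<chunk / 256, 256, 0, streams[i]>>>(d + i * chunk);
    cudaMemcpyAsync(h + i * chunk, d + i * chunk, chunk * sizeof(int),
                    cudaMemcpyDeviceToHost, streams[i]);
  }
  cudaDeviceSynchronize();                     // wait for all streams to finish

  for (int i = 0; i < nstreams; i++) cudaStreamDestroy(streams[i]);
  cudaFreeHost(h);
  cudaFree(d);
  return 0;
}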
c0543ea10e571a99dd4cb3c8c2f684bd31d46377.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#include "../common/common.h"
#include "../common/cuda_common.cuh"

// assume grid is 1D and block is 1D then nx = size
__global__ void sum_arrays_1Dgrid_1Dblock(float *a, float *b, float *c, int nx) {}

// assume grid is 2D and block is 2D then nx * ny = size
__global__ void sum_arrays_2Dgrid_2Dblock(float *a, float *b, float *c, int nx, int ny) {}

void sum_array_cpu(float *a, float *b, float *c, int size) {
  for (int i = 0; i < size; i++) {
    c[i] = a[i] + b[i];
  }
}

void run_sum_array_1d(int argc, char **argv) {
  printf("Running 1D grid \n");
  int size = 1 << 22;
  int block_size = 128;
  int nx, ny = 0;

  if (argc > 2) {
    size = 1 << atoi(argv[2]);
  }
  if (argc > 4) {
    block_size = 1 << atoi(argv[4]);
  }

  unsigned int byte_size = size * sizeof(float);
  printf("Input size : %d \n", size);

  float *h_a, *h_b, *h_out, *h_ref;
  h_a = (float *)malloc(byte_size);
  h_b = (float *)malloc(byte_size);
  h_out = (float *)malloc(byte_size);
  h_ref = (float *)malloc(byte_size);

  if (!h_a) {
    printf("host memory allocation error \n");
  }

  for (size_t i = 0; i < size; i++) {
    h_a[i] = i % 10;
    h_b[i] = i % 7;
  }
  sum_array_cpu(h_a, h_b, h_out, size);

  dim3 block(block_size);
  dim3 grid((size + block.x - 1) / block.x);

  printf("Kernel is launched with grid(%d, %d, %d) and block(%d, %d, %d) \n",
         grid.x, grid.y, grid.z, block.x, block.y, block.z);

  float *d_a, *d_b, *d_c;
  gpuErrchk(hipMalloc((void **)&d_a, byte_size));
  gpuErrchk(hipMalloc((void **)&d_b, byte_size));
  gpuErrchk(hipMalloc((void **)&d_c, byte_size));
  gpuErrchk(hipMemset(d_c, 0, byte_size));

  gpuErrchk(hipMemcpy(d_a, h_a, byte_size, hipMemcpyHostToDevice));
  gpuErrchk(hipMemcpy(d_b, h_b, byte_size, hipMemcpyHostToDevice));

  hipLaunchKernelGGL(( sum_arrays_1Dgrid_1Dblock), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, size);
  gpuErrchk(hipDeviceSynchronize());
  gpuErrchk(hipMemcpy(h_ref, d_c, byte_size, hipMemcpyDeviceToHost));

  compare_arrays_float(h_out, h_ref, size);

  hipFree(d_a);
  hipFree(d_b);
  hipFree(d_c);
  free(h_ref);
  free(h_out);
  free(h_a);
  free(h_b);
}

void run_sum_array_2d(int argc, char **argv) {}

// arguments :
// 1 - kernel (0:1D, 1:2D)
// 2 - input size (2 pow (x))
// 3 - for 2D kernel nx,
// 4 - block.x
// 5 - block.y
int main(int argc, char **argv) {
  printf("\n------------------------SUM ARRAY EXAMPLE FOR "
         "NVPROF-------------------------\n\n");
  if (argc > 1) {
    if (atoi(argv[1]) > 0) {
      run_sum_array_2d(argc, argv);
    } else {
      run_sum_array_1d(argc, argv);
    }
  } else {
    run_sum_array_1d(argc, argv);
  }
  return 0;
}
c0543ea10e571a99dd4cb3c8c2f684bd31d46377.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include "../common/common.h"
#include "../common/cuda_common.cuh"

// assume grid is 1D and block is 1D then nx = size
__global__ void sum_arrays_1Dgrid_1Dblock(float *a, float *b, float *c, int nx) {}

// assume grid is 2D and block is 2D then nx * ny = size
__global__ void sum_arrays_2Dgrid_2Dblock(float *a, float *b, float *c, int nx, int ny) {}

void sum_array_cpu(float *a, float *b, float *c, int size) {
  for (int i = 0; i < size; i++) {
    c[i] = a[i] + b[i];
  }
}

void run_sum_array_1d(int argc, char **argv) {
  printf("Running 1D grid \n");
  int size = 1 << 22;
  int block_size = 128;
  int nx, ny = 0;

  if (argc > 2) {
    size = 1 << atoi(argv[2]);
  }
  if (argc > 4) {
    block_size = 1 << atoi(argv[4]);
  }

  unsigned int byte_size = size * sizeof(float);
  printf("Input size : %d \n", size);

  float *h_a, *h_b, *h_out, *h_ref;
  h_a = (float *)malloc(byte_size);
  h_b = (float *)malloc(byte_size);
  h_out = (float *)malloc(byte_size);
  h_ref = (float *)malloc(byte_size);

  if (!h_a) {
    printf("host memory allocation error \n");
  }

  for (size_t i = 0; i < size; i++) {
    h_a[i] = i % 10;
    h_b[i] = i % 7;
  }
  sum_array_cpu(h_a, h_b, h_out, size);

  dim3 block(block_size);
  dim3 grid((size + block.x - 1) / block.x);

  printf("Kernel is launched with grid(%d, %d, %d) and block(%d, %d, %d) \n",
         grid.x, grid.y, grid.z, block.x, block.y, block.z);

  float *d_a, *d_b, *d_c;
  gpuErrchk(cudaMalloc((void **)&d_a, byte_size));
  gpuErrchk(cudaMalloc((void **)&d_b, byte_size));
  gpuErrchk(cudaMalloc((void **)&d_c, byte_size));
  gpuErrchk(cudaMemset(d_c, 0, byte_size));

  gpuErrchk(cudaMemcpy(d_a, h_a, byte_size, cudaMemcpyHostToDevice));
  gpuErrchk(cudaMemcpy(d_b, h_b, byte_size, cudaMemcpyHostToDevice));

  sum_arrays_1Dgrid_1Dblock<<<grid, block>>>(d_a, d_b, d_c, size);
  gpuErrchk(cudaDeviceSynchronize());
  gpuErrchk(cudaMemcpy(h_ref, d_c, byte_size, cudaMemcpyDeviceToHost));

  compare_arrays_float(h_out, h_ref, size);

  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  free(h_ref);
  free(h_out);
  free(h_a);
  free(h_b);
}

void run_sum_array_2d(int argc, char **argv) {}

// arguments :
// 1 - kernel (0:1D, 1:2D)
// 2 - input size (2 pow (x))
// 3 - for 2D kernel nx,
// 4 - block.x
// 5 - block.y
int main(int argc, char **argv) {
  printf("\n------------------------SUM ARRAY EXAMPLE FOR "
         "NVPROF-------------------------\n\n");
  if (argc > 1) {
    if (atoi(argv[1]) > 0) {
      run_sum_array_2d(argc, argv);
    } else {
      run_sum_array_1d(argc, argv);
    }
  } else {
    run_sum_array_1d(argc, argv);
  }
  return 0;
}
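// Illustrative sketch (not part of the files above): both the HIP and CUDA versions of this file
// leave the kernel bodies empty, so the device result stays zero and compare_arrays_float will
// report mismatches. One plausible body for the 1D case (an assumption, not the author's code) is
// the usual global-thread-index element-wise sum with a bounds check:
__global__ void sum_arrays_1Dgrid_1Dblock_example(float *a, float *b, float *c, int nx) {
  int gid = blockIdx.x * blockDim.x + threadIdx.x;  // global thread index across the 1D grid
  if (gid < nx) {                                   // guard the tail when nx is not a multiple of the block size
    c[gid] = a[gid] + b[gid];
  }
}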
3207d8d7076aa8a4474be1b4cd2ea3182c6a1fc4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <iostream> #include <ctype.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #define CEIL(a,b) ((a+b-1)/b) #define PI 3.1415926 #define EDGE 0 #define NOEDGE 255 #define DATAMB(bytes) (bytes/1024/1024) #define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0)) typedef unsigned char uch; typedef unsigned long ul; typedef unsigned int ui; uch *TheImg, *CopyImg; // Where images are stored in CPU int ThreshLo=50, ThreshHi=100; // "Edge" vs. "No Edge" thresholds // Where images and temporary results are stored in GPU uch *GPUImg, *GPUResultImg; double *GPUBWImg, *GPUGaussImg, *GPUGradient, *GPUTheta; struct ImgProp{ ui Hpixels; ui Vpixels; uch HeaderInfo[54]; ul Hbytes; } ip; #define IPHB ip.Hbytes #define IPH ip.Hpixels #define IPV ip.Vpixels #define IMAGESIZE (IPHB*IPV) #define IMAGEPIX (IPH*IPV) // Kernel that calculates a B&W image from an RGB image // resulting image has a double type for each pixel position __global__ void BWKernel(double *ImgBW, uch *ImgGPU, ui Hpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; double R, G, B; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); ui RowBytes = (Hpixels * 3 + 3) & (~3); ui MYrow = MYbid / BlkPerRow; ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol; ui MYpixIndex = MYrow * Hpixels + MYcol; B = (double)ImgGPU[MYsrcIndex]; G = (double)ImgGPU[MYsrcIndex + 1]; R = (double)ImgGPU[MYsrcIndex + 2]; ImgBW[MYpixIndex] = (R+G+B)/3.0; } __device__ double Gauss[5][5] = { { 2, 4, 5, 4, 2 }, { 4, 9, 12, 9, 4 }, { 5, 12, 15, 12, 5 }, { 4, 9, 12, 9, 4 }, { 2, 4, 5, 4, 2 } }; // Kernel that calculates a Gauss image from the B&W image // resulting image has a double type for each pixel position __global__ void GaussKernel(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; int row, col, indx, i, j; double G=0.00; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); int MYrow = MYbid / BlkPerRow; int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYpixIndex = MYrow * Hpixels + MYcol; if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)){ ImgGauss[MYpixIndex] = 0.0; return; }else{ G = 0.0; for (i = -2; i <= 2; i++){ for (j = -2; j <= 2; j++){ row = MYrow + i; col = MYcol + j; indx = row*Hpixels + col; G += (ImgBW[indx] * Gauss[i + 2][j + 2]); } } ImgGauss[MYpixIndex] = G / 159.00; } } __device__ double Gx[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }; __device__ double Gy[3][3] = { { -1, -2, -1 }, { 0, 0, 0 }, { 1, 2, 1 } }; // Kernel that calculates Gradient, Theta from the Gauss image // resulting image has a double type for each pixel position __global__ void SobelKernel(double *ImgGrad, double *ImgTheta, double *ImgGauss, ui Hpixels, ui Vpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; int row, col, indx, i, j; double GX,GY; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); int MYrow = MYbid / BlkPerRow; int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= 
Hpixels) return; // col out of range ui MYpixIndex = MYrow * Hpixels + MYcol; if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){ ImgGrad[MYpixIndex] = 0.0; ImgTheta[MYpixIndex] = 0.0; return; }else{ GX = 0.0; GY = 0.0; for (i = -1; i <= 1; i++){ for (j = -1; j <= 1; j++){ row = MYrow + i; col = MYcol + j; indx = row*Hpixels + col; GX += (ImgGauss[indx] * Gx[i + 1][j + 1]); GY += (ImgGauss[indx] * Gy[i + 1][j + 1]); } } ImgGrad[MYpixIndex] = sqrt(GX*GX + GY*GY); ImgTheta[MYpixIndex] = atan(GX / GY)*180.0 / PI; } } // Kernel that calculates the threshold image from Gradient, Theta // resulting image has an RGB for each pixel, same RGB for each pixel __global__ void ThresholdKernel(uch *ImgResult, double *ImgGrad, double *ImgTheta, ui Hpixels, ui Vpixels, ui ThreshLo, ui ThreshHi) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; unsigned char PIXVAL; double L, H, G, T; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); ui RowBytes = (Hpixels * 3 + 3) & (~3); int MYrow = MYbid / BlkPerRow; int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYresultIndex = MYrow * RowBytes + 3 * MYcol; ui MYpixIndex = MYrow * Hpixels + MYcol; if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){ ImgResult[MYresultIndex] = NOEDGE; ImgResult[MYresultIndex + 1] = NOEDGE; ImgResult[MYresultIndex + 2] = NOEDGE; return; }else{ L = (double)ThreshLo; H = (double)ThreshHi; G = ImgGrad[MYpixIndex]; PIXVAL = NOEDGE; if (G <= L){ // no edge PIXVAL = NOEDGE; }else if (G >= H){ // edge PIXVAL = EDGE; }else{ T = ImgTheta[MYpixIndex]; if ((T<-67.5) || (T>67.5)){ // Look at left and right: [row][col-1] and [row][col+1] PIXVAL = ((ImgGrad[MYpixIndex - 1]>H) || (ImgGrad[MYpixIndex + 1]>H)) ? EDGE : NOEDGE; } else if ((T >= -22.5) && (T <= 22.5)){ // Look at top and bottom: [row-1][col] and [row+1][col] PIXVAL = ((ImgGrad[MYpixIndex - Hpixels]>H) || (ImgGrad[MYpixIndex + Hpixels]>H)) ? EDGE : NOEDGE; } else if ((T>22.5) && (T <= 67.5)){ // Look at upper right, lower left: [row-1][col+1] and [row+1][col-1] PIXVAL = ((ImgGrad[MYpixIndex - Hpixels + 1]>H) || (ImgGrad[MYpixIndex + Hpixels - 1]>H)) ? EDGE : NOEDGE; } else if ((T >= -67.5) && (T<-22.5)){ // Look at upper left, lower right: [row-1][col-1] and [row+1][col+1] PIXVAL = ((ImgGrad[MYpixIndex - Hpixels - 1]>H) || (ImgGrad[MYpixIndex + Hpixels + 1]>H)) ? EDGE : NOEDGE; } } ImgResult[MYresultIndex] = PIXVAL; ImgResult[MYresultIndex + 1] = PIXVAL; ImgResult[MYresultIndex + 2] = PIXVAL; } } /* // helper function that wraps CUDA API calls, reports any error and exits void chkCUDAErr(hipError_t error_id) { if (error_id != hipSuccess){ printf("CUDA ERROR :::%\n", hipGetErrorString(error_id)); exit(EXIT_FAILURE); } } */ // Read a 24-bit/pixel BMP file into a 1D linear array. // Allocate memory to store the 1D image and return its pointer. 
uch *ReadBMPlin(char* fn) { static uch *Img; FILE* f = fopen(fn, "rb"); if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); } uch HeaderInfo[54]; fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width; int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height; int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes; //save header for re-use memcpy(ip.HeaderInfo, HeaderInfo,54); printf("\n Input File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); // allocate memory to store the main image (1 Dimensional array) Img = (uch *)malloc(IMAGESIZE); if (Img == NULL) return Img; // Cannot allocate memory // read the image from disk fread(Img, sizeof(uch), IMAGESIZE, f); fclose(f); return Img; } // Write the 1D linear-memory stored image into file. void WriteBMPlin(uch *Img, char* fn) { FILE* f = fopen(fn, "wb"); if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); } //write header fwrite(ip.HeaderInfo, sizeof(uch), 54, f); //write data fwrite(Img, sizeof(uch), IMAGESIZE, f); printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); fclose(f); } int main(int argc, char **argv) { // clock_t CPUStartTime, CPUEndTime, CPUElapsedTime; // GPU code run times float totalTime, totalKernelTime, tfrCPUtoGPU, tfrGPUtoCPU; float kernelExecTimeBW, kernelExecTimeGauss, kernelExecTimeSobel, kernelExecTimeThreshold; hipError_t cudaStatus; hipEvent_t time1, time2, time2BW, time2Gauss, time2Sobel, time3, time4; char InputFileName[255], OutputFileName[255], ProgName[255]; ui BlkPerRow, ThrPerBlk=256, NumBlocks; ui GPUDataTfrBW, GPUDataTfrGauss, GPUDataTfrSobel, GPUDataTfrThresh,GPUDataTfrKernel, GPUDataTfrTotal; hipDeviceProp_t GPUprop; void *GPUptr; // Pointer to the bulk-allocated GPU memory ul GPUtotalBufferSize; ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100]; strcpy(ProgName, "imedgeG"); switch (argc){ case 6: ThreshHi = atoi(argv[5]); case 5: ThreshLo = atoi(argv[4]); case 4: ThrPerBlk = atoi(argv[3]); case 3: strcpy(InputFileName, argv[1]); strcpy(OutputFileName, argv[2]); break; default: printf("\n\nUsage: %s InputFilename OutputFilename [ThrPerBlk] [ThreshLo] [ThreshHi]", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp 256", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp 256 50 100",ProgName); exit(EXIT_FAILURE); } if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) { printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk); exit(EXIT_FAILURE); } if ((ThreshLo<0) || (ThreshHi>255) || (ThreshLo>ThreshHi)){ printf("\nInvalid Thresholds: Threshold must be between [0...255] ...\n"); printf("\n\nNothing executed ... Exiting ...\n\n"); exit(EXIT_FAILURE); } // Create CPU memory to store the input and output images TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated if (TheImg == NULL){ printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } CopyImg = (uch *)malloc(IMAGESIZE); if (CopyImg == NULL){ printf("Cannot allocate memory for the input image...\n"); free(TheImg); exit(EXIT_FAILURE); } // Choose which GPU to run on, change this on a multi-GPU system. 
int NumGPUs = 0; hipGetDeviceCount(&NumGPUs); if (NumGPUs == 0){ printf("\nNo CUDA Device is available\n"); goto EXITERROR; } cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto EXITERROR; } hipGetDeviceProperties(&GPUprop, 0); SupportedKBlocks = (ui) GPUprop.maxGridSize[0] * (ui) GPUprop.maxGridSize[1] * (ui )GPUprop.maxGridSize[2]/1024; SupportedMBlocks = SupportedKBlocks / 1024; sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks>=5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks>=5) ? 'M':'K'); MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock; hipEventCreate(&time1); hipEventCreate(&time2); hipEventCreate(&time2BW); hipEventCreate(&time2Gauss); hipEventCreate(&time2Sobel); hipEventCreate(&time3); hipEventCreate(&time4); hipEventRecord(time1, 0); // Time stamp at the start of the GPU transfer // Allocate GPU buffer for the input and output images and the imtermediate results GPUtotalBufferSize = 4 * sizeof(double)*IMAGEPIX + 2 * sizeof(uch)*IMAGESIZE; cudaStatus = hipMalloc((void**)&GPUptr, GPUtotalBufferSize); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed! Can't allocate GPU memory"); goto EXITERROR; } GPUImg = (uch *)GPUptr; GPUResultImg = GPUImg + IMAGESIZE; GPUBWImg = (double *)(GPUResultImg + IMAGESIZE); GPUGaussImg = GPUBWImg + IMAGEPIX; GPUGradient = GPUGaussImg + IMAGEPIX; GPUTheta = GPUGradient + IMAGEPIX; // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(GPUImg, TheImg, IMAGESIZE, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy CPU to GPU failed!"); goto EXITCUDAERROR; } hipEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done //dim3 dimBlock(ThrPerBlk); //dim3 dimGrid(ip.Hpixels*BlkPerRow); BlkPerRow = CEIL(ip.Hpixels, ThrPerBlk); NumBlocks = IPV*BlkPerRow; // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. hipLaunchKernelGGL(( BWKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUBWImg, GPUImg, ip.Hpixels); if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR; hipEventRecord(time2BW, 0); // Time stamp after BW image calculation GPUDataTfrBW = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE; hipLaunchKernelGGL(( GaussKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUGaussImg, GPUBWImg, ip.Hpixels, ip.Vpixels); if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR; hipEventRecord(time2Gauss, 0); // Time stamp after Gauss image calculation GPUDataTfrGauss = 2*sizeof(double)*IMAGEPIX; hipLaunchKernelGGL(( SobelKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUGradient, GPUTheta, GPUGaussImg, ip.Hpixels, ip.Vpixels); if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR; hipEventRecord(time2Sobel, 0); // Time stamp after Gradient, Theta computation GPUDataTfrSobel = 3 * sizeof(double)*IMAGEPIX; hipLaunchKernelGGL(( ThresholdKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUResultImg, GPUGradient, GPUTheta, ip.Hpixels, ip.Vpixels, ThreshLo, ThreshHi); if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR; GPUDataTfrThresh = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE; GPUDataTfrKernel = GPUDataTfrBW + GPUDataTfrGauss + GPUDataTfrSobel + GPUDataTfrThresh; GPUDataTfrTotal = GPUDataTfrKernel + 2 * IMAGESIZE; hipEventRecord(time3, 0); // Copy output (results) from GPU buffer to host (CPU) memory. 
cudaStatus = hipMemcpy(CopyImg, GPUResultImg, IMAGESIZE, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy GPU to CPU failed!"); goto EXITCUDAERROR; } hipEventRecord(time4, 0); hipEventSynchronize(time1); hipEventSynchronize(time2); hipEventSynchronize(time2BW); hipEventSynchronize(time2Gauss); hipEventSynchronize(time2Sobel); hipEventSynchronize(time3); hipEventSynchronize(time4); hipEventElapsedTime(&totalTime, time1, time4); hipEventElapsedTime(&tfrCPUtoGPU, time1, time2); hipEventElapsedTime(&kernelExecTimeBW, time2, time2BW); hipEventElapsedTime(&kernelExecTimeGauss, time2BW, time2Gauss); hipEventElapsedTime(&kernelExecTimeSobel, time2Gauss, time2Sobel); hipEventElapsedTime(&kernelExecTimeThreshold, time2Sobel, time3); hipEventElapsedTime(&tfrGPUtoCPU, time3, time4); totalKernelTime = kernelExecTimeBW + kernelExecTimeGauss + kernelExecTimeSobel + kernelExecTimeThreshold; cudaStatus = hipDeviceSynchronize(); //checkError(hipGetLastError()); // screen for errors in kernel launches if (cudaStatus != hipSuccess) { fprintf(stderr, "\n Program failed after hipDeviceSynchronize()!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } WriteBMPlin(CopyImg, OutputFileName); // Write the flipped image back to disk printf("\n\n----------------------------------------------------------------------------\n"); printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n", GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk); printf("----------------------------------------------------------------------------\n"); printf("%s %s %s %u %d %d [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName, ThrPerBlk, ThreshLo, ThreshHi, NumBlocks, BlkPerRow); printf("----------------------------------------------------------------------------\n"); printf(" CPU->GPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE,tfrCPUtoGPU)); printf(" GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU)); printf("----------------------------------------------------------------------------\n"); printf(" BW Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeBW, DATAMB(GPUDataTfrBW), DATABW(GPUDataTfrBW, kernelExecTimeBW)); printf(" Gauss Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeGauss, DATAMB(GPUDataTfrGauss), DATABW(GPUDataTfrGauss, kernelExecTimeGauss)); printf(" Sobel Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeSobel, DATAMB(GPUDataTfrSobel), DATABW(GPUDataTfrSobel, kernelExecTimeSobel)); printf("Threshold Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeThreshold, DATAMB(GPUDataTfrThresh), DATABW(GPUDataTfrThresh, kernelExecTimeThreshold)); printf("----------------------------------------------------------------------------\n"); printf(" Total Kernel-only time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalKernelTime, DATAMB(GPUDataTfrKernel), DATABW(GPUDataTfrKernel, totalKernelTime)); printf(" Total time with I/O included =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalTime, DATAMB(GPUDataTfrTotal), DATABW(GPUDataTfrTotal, totalTime)); printf("----------------------------------------------------------------------------\n"); // Deallocate CPU, GPU memory and destroy events. 
hipFree(GPUptr); hipEventDestroy(time1); hipEventDestroy(time2); hipEventDestroy(time2BW); hipEventDestroy(time2Gauss); hipEventDestroy(time2Sobel); hipEventDestroy(time3); hipEventDestroy(time4); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Parallel Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } free(TheImg); free(CopyImg); return(EXIT_SUCCESS); KERNELERROR: fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus); EXITCUDAERROR: hipFree(GPUptr); EXITERROR: free(TheImg); free(CopyImg); return(EXIT_FAILURE); }
3207d8d7076aa8a4474be1b4cd2ea3182c6a1fc4.cu
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <iostream> #include <ctype.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda.h" #define CEIL(a,b) ((a+b-1)/b) #define PI 3.1415926 #define EDGE 0 #define NOEDGE 255 #define DATAMB(bytes) (bytes/1024/1024) #define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0)) typedef unsigned char uch; typedef unsigned long ul; typedef unsigned int ui; uch *TheImg, *CopyImg; // Where images are stored in CPU int ThreshLo=50, ThreshHi=100; // "Edge" vs. "No Edge" thresholds // Where images and temporary results are stored in GPU uch *GPUImg, *GPUResultImg; double *GPUBWImg, *GPUGaussImg, *GPUGradient, *GPUTheta; struct ImgProp{ ui Hpixels; ui Vpixels; uch HeaderInfo[54]; ul Hbytes; } ip; #define IPHB ip.Hbytes #define IPH ip.Hpixels #define IPV ip.Vpixels #define IMAGESIZE (IPHB*IPV) #define IMAGEPIX (IPH*IPV) // Kernel that calculates a B&W image from an RGB image // resulting image has a double type for each pixel position __global__ void BWKernel(double *ImgBW, uch *ImgGPU, ui Hpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; double R, G, B; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); ui RowBytes = (Hpixels * 3 + 3) & (~3); ui MYrow = MYbid / BlkPerRow; ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol; ui MYpixIndex = MYrow * Hpixels + MYcol; B = (double)ImgGPU[MYsrcIndex]; G = (double)ImgGPU[MYsrcIndex + 1]; R = (double)ImgGPU[MYsrcIndex + 2]; ImgBW[MYpixIndex] = (R+G+B)/3.0; } __device__ double Gauss[5][5] = { { 2, 4, 5, 4, 2 }, { 4, 9, 12, 9, 4 }, { 5, 12, 15, 12, 5 }, { 4, 9, 12, 9, 4 }, { 2, 4, 5, 4, 2 } }; // Kernel that calculates a Gauss image from the B&W image // resulting image has a double type for each pixel position __global__ void GaussKernel(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; int row, col, indx, i, j; double G=0.00; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); int MYrow = MYbid / BlkPerRow; int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYpixIndex = MYrow * Hpixels + MYcol; if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)){ ImgGauss[MYpixIndex] = 0.0; return; }else{ G = 0.0; for (i = -2; i <= 2; i++){ for (j = -2; j <= 2; j++){ row = MYrow + i; col = MYcol + j; indx = row*Hpixels + col; G += (ImgBW[indx] * Gauss[i + 2][j + 2]); } } ImgGauss[MYpixIndex] = G / 159.00; } } __device__ double Gx[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }; __device__ double Gy[3][3] = { { -1, -2, -1 }, { 0, 0, 0 }, { 1, 2, 1 } }; // Kernel that calculates Gradient, Theta from the Gauss image // resulting image has a double type for each pixel position __global__ void SobelKernel(double *ImgGrad, double *ImgTheta, double *ImgGauss, ui Hpixels, ui Vpixels) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; int row, col, indx, i, j; double GX,GY; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); int MYrow = MYbid / BlkPerRow; int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYpixIndex = MYrow * Hpixels + MYcol; 
if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){ ImgGrad[MYpixIndex] = 0.0; ImgTheta[MYpixIndex] = 0.0; return; }else{ GX = 0.0; GY = 0.0; for (i = -1; i <= 1; i++){ for (j = -1; j <= 1; j++){ row = MYrow + i; col = MYcol + j; indx = row*Hpixels + col; GX += (ImgGauss[indx] * Gx[i + 1][j + 1]); GY += (ImgGauss[indx] * Gy[i + 1][j + 1]); } } ImgGrad[MYpixIndex] = sqrt(GX*GX + GY*GY); ImgTheta[MYpixIndex] = atan(GX / GY)*180.0 / PI; } } // Kernel that calculates the threshold image from Gradient, Theta // resulting image has an RGB for each pixel, same RGB for each pixel __global__ void ThresholdKernel(uch *ImgResult, double *ImgGrad, double *ImgTheta, ui Hpixels, ui Vpixels, ui ThreshLo, ui ThreshHi) { ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; unsigned char PIXVAL; double L, H, G, T; //ui NumBlocks = gridDim.x; ui BlkPerRow = CEIL(Hpixels, ThrPerBlk); ui RowBytes = (Hpixels * 3 + 3) & (~3); int MYrow = MYbid / BlkPerRow; int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYresultIndex = MYrow * RowBytes + 3 * MYcol; ui MYpixIndex = MYrow * Hpixels + MYcol; if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){ ImgResult[MYresultIndex] = NOEDGE; ImgResult[MYresultIndex + 1] = NOEDGE; ImgResult[MYresultIndex + 2] = NOEDGE; return; }else{ L = (double)ThreshLo; H = (double)ThreshHi; G = ImgGrad[MYpixIndex]; PIXVAL = NOEDGE; if (G <= L){ // no edge PIXVAL = NOEDGE; }else if (G >= H){ // edge PIXVAL = EDGE; }else{ T = ImgTheta[MYpixIndex]; if ((T<-67.5) || (T>67.5)){ // Look at left and right: [row][col-1] and [row][col+1] PIXVAL = ((ImgGrad[MYpixIndex - 1]>H) || (ImgGrad[MYpixIndex + 1]>H)) ? EDGE : NOEDGE; } else if ((T >= -22.5) && (T <= 22.5)){ // Look at top and bottom: [row-1][col] and [row+1][col] PIXVAL = ((ImgGrad[MYpixIndex - Hpixels]>H) || (ImgGrad[MYpixIndex + Hpixels]>H)) ? EDGE : NOEDGE; } else if ((T>22.5) && (T <= 67.5)){ // Look at upper right, lower left: [row-1][col+1] and [row+1][col-1] PIXVAL = ((ImgGrad[MYpixIndex - Hpixels + 1]>H) || (ImgGrad[MYpixIndex + Hpixels - 1]>H)) ? EDGE : NOEDGE; } else if ((T >= -67.5) && (T<-22.5)){ // Look at upper left, lower right: [row-1][col-1] and [row+1][col+1] PIXVAL = ((ImgGrad[MYpixIndex - Hpixels - 1]>H) || (ImgGrad[MYpixIndex + Hpixels + 1]>H)) ? EDGE : NOEDGE; } } ImgResult[MYresultIndex] = PIXVAL; ImgResult[MYresultIndex + 1] = PIXVAL; ImgResult[MYresultIndex + 2] = PIXVAL; } } /* // helper function that wraps CUDA API calls, reports any error and exits void chkCUDAErr(cudaError_t error_id) { if (error_id != CUDA_SUCCESS){ printf("CUDA ERROR :::%\n", cudaGetErrorString(error_id)); exit(EXIT_FAILURE); } } */ // Read a 24-bit/pixel BMP file into a 1D linear array. // Allocate memory to store the 1D image and return its pointer. 
uch *ReadBMPlin(char* fn) { static uch *Img; FILE* f = fopen(fn, "rb"); if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); } uch HeaderInfo[54]; fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width; int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height; int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes; //save header for re-use memcpy(ip.HeaderInfo, HeaderInfo,54); printf("\n Input File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); // allocate memory to store the main image (1 Dimensional array) Img = (uch *)malloc(IMAGESIZE); if (Img == NULL) return Img; // Cannot allocate memory // read the image from disk fread(Img, sizeof(uch), IMAGESIZE, f); fclose(f); return Img; } // Write the 1D linear-memory stored image into file. void WriteBMPlin(uch *Img, char* fn) { FILE* f = fopen(fn, "wb"); if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); } //write header fwrite(ip.HeaderInfo, sizeof(uch), 54, f); //write data fwrite(Img, sizeof(uch), IMAGESIZE, f); printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); fclose(f); } int main(int argc, char **argv) { // clock_t CPUStartTime, CPUEndTime, CPUElapsedTime; // GPU code run times float totalTime, totalKernelTime, tfrCPUtoGPU, tfrGPUtoCPU; float kernelExecTimeBW, kernelExecTimeGauss, kernelExecTimeSobel, kernelExecTimeThreshold; cudaError_t cudaStatus; cudaEvent_t time1, time2, time2BW, time2Gauss, time2Sobel, time3, time4; char InputFileName[255], OutputFileName[255], ProgName[255]; ui BlkPerRow, ThrPerBlk=256, NumBlocks; ui GPUDataTfrBW, GPUDataTfrGauss, GPUDataTfrSobel, GPUDataTfrThresh,GPUDataTfrKernel, GPUDataTfrTotal; cudaDeviceProp GPUprop; void *GPUptr; // Pointer to the bulk-allocated GPU memory ul GPUtotalBufferSize; ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100]; strcpy(ProgName, "imedgeG"); switch (argc){ case 6: ThreshHi = atoi(argv[5]); case 5: ThreshLo = atoi(argv[4]); case 4: ThrPerBlk = atoi(argv[3]); case 3: strcpy(InputFileName, argv[1]); strcpy(OutputFileName, argv[2]); break; default: printf("\n\nUsage: %s InputFilename OutputFilename [ThrPerBlk] [ThreshLo] [ThreshHi]", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp 256", ProgName); printf("\n\nExample: %s Astronaut.bmp Output.bmp 256 50 100",ProgName); exit(EXIT_FAILURE); } if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) { printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk); exit(EXIT_FAILURE); } if ((ThreshLo<0) || (ThreshHi>255) || (ThreshLo>ThreshHi)){ printf("\nInvalid Thresholds: Threshold must be between [0...255] ...\n"); printf("\n\nNothing executed ... Exiting ...\n\n"); exit(EXIT_FAILURE); } // Create CPU memory to store the input and output images TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated if (TheImg == NULL){ printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } CopyImg = (uch *)malloc(IMAGESIZE); if (CopyImg == NULL){ printf("Cannot allocate memory for the input image...\n"); free(TheImg); exit(EXIT_FAILURE); } // Choose which GPU to run on, change this on a multi-GPU system. 
int NumGPUs = 0; cudaGetDeviceCount(&NumGPUs); if (NumGPUs == 0){ printf("\nNo CUDA Device is available\n"); goto EXITERROR; } cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto EXITERROR; } cudaGetDeviceProperties(&GPUprop, 0); SupportedKBlocks = (ui) GPUprop.maxGridSize[0] * (ui) GPUprop.maxGridSize[1] * (ui )GPUprop.maxGridSize[2]/1024; SupportedMBlocks = SupportedKBlocks / 1024; sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks>=5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks>=5) ? 'M':'K'); MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock; cudaEventCreate(&time1); cudaEventCreate(&time2); cudaEventCreate(&time2BW); cudaEventCreate(&time2Gauss); cudaEventCreate(&time2Sobel); cudaEventCreate(&time3); cudaEventCreate(&time4); cudaEventRecord(time1, 0); // Time stamp at the start of the GPU transfer // Allocate GPU buffer for the input and output images and the imtermediate results GPUtotalBufferSize = 4 * sizeof(double)*IMAGEPIX + 2 * sizeof(uch)*IMAGESIZE; cudaStatus = cudaMalloc((void**)&GPUptr, GPUtotalBufferSize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory"); goto EXITERROR; } GPUImg = (uch *)GPUptr; GPUResultImg = GPUImg + IMAGESIZE; GPUBWImg = (double *)(GPUResultImg + IMAGESIZE); GPUGaussImg = GPUBWImg + IMAGEPIX; GPUGradient = GPUGaussImg + IMAGEPIX; GPUTheta = GPUGradient + IMAGEPIX; // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy CPU to GPU failed!"); goto EXITCUDAERROR; } cudaEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done //dim3 dimBlock(ThrPerBlk); //dim3 dimGrid(ip.Hpixels*BlkPerRow); BlkPerRow = CEIL(ip.Hpixels, ThrPerBlk); NumBlocks = IPV*BlkPerRow; // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. BWKernel <<< NumBlocks, ThrPerBlk >>> (GPUBWImg, GPUImg, ip.Hpixels); if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR; cudaEventRecord(time2BW, 0); // Time stamp after BW image calculation GPUDataTfrBW = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE; GaussKernel <<< NumBlocks, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, ip.Hpixels, ip.Vpixels); if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR; cudaEventRecord(time2Gauss, 0); // Time stamp after Gauss image calculation GPUDataTfrGauss = 2*sizeof(double)*IMAGEPIX; SobelKernel <<< NumBlocks, ThrPerBlk >>> (GPUGradient, GPUTheta, GPUGaussImg, ip.Hpixels, ip.Vpixels); if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR; cudaEventRecord(time2Sobel, 0); // Time stamp after Gradient, Theta computation GPUDataTfrSobel = 3 * sizeof(double)*IMAGEPIX; ThresholdKernel <<< NumBlocks, ThrPerBlk >>> (GPUResultImg, GPUGradient, GPUTheta, ip.Hpixels, ip.Vpixels, ThreshLo, ThreshHi); if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR; GPUDataTfrThresh = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE; GPUDataTfrKernel = GPUDataTfrBW + GPUDataTfrGauss + GPUDataTfrSobel + GPUDataTfrThresh; GPUDataTfrTotal = GPUDataTfrKernel + 2 * IMAGESIZE; cudaEventRecord(time3, 0); // Copy output (results) from GPU buffer to host (CPU) memory. 
cudaStatus = cudaMemcpy(CopyImg, GPUResultImg, IMAGESIZE, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy GPU to CPU failed!"); goto EXITCUDAERROR; } cudaEventRecord(time4, 0); cudaEventSynchronize(time1); cudaEventSynchronize(time2); cudaEventSynchronize(time2BW); cudaEventSynchronize(time2Gauss); cudaEventSynchronize(time2Sobel); cudaEventSynchronize(time3); cudaEventSynchronize(time4); cudaEventElapsedTime(&totalTime, time1, time4); cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2); cudaEventElapsedTime(&kernelExecTimeBW, time2, time2BW); cudaEventElapsedTime(&kernelExecTimeGauss, time2BW, time2Gauss); cudaEventElapsedTime(&kernelExecTimeSobel, time2Gauss, time2Sobel); cudaEventElapsedTime(&kernelExecTimeThreshold, time2Sobel, time3); cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4); totalKernelTime = kernelExecTimeBW + kernelExecTimeGauss + kernelExecTimeSobel + kernelExecTimeThreshold; cudaStatus = cudaDeviceSynchronize(); //checkError(cudaGetLastError()); // screen for errors in kernel launches if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } WriteBMPlin(CopyImg, OutputFileName); // Write the flipped image back to disk printf("\n\n----------------------------------------------------------------------------\n"); printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n", GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk); printf("----------------------------------------------------------------------------\n"); printf("%s %s %s %u %d %d [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName, ThrPerBlk, ThreshLo, ThreshHi, NumBlocks, BlkPerRow); printf("----------------------------------------------------------------------------\n"); printf(" CPU->GPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE,tfrCPUtoGPU)); printf(" GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU)); printf("----------------------------------------------------------------------------\n"); printf(" BW Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeBW, DATAMB(GPUDataTfrBW), DATABW(GPUDataTfrBW, kernelExecTimeBW)); printf(" Gauss Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeGauss, DATAMB(GPUDataTfrGauss), DATABW(GPUDataTfrGauss, kernelExecTimeGauss)); printf(" Sobel Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeSobel, DATAMB(GPUDataTfrSobel), DATABW(GPUDataTfrSobel, kernelExecTimeSobel)); printf("Threshold Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeThreshold, DATAMB(GPUDataTfrThresh), DATABW(GPUDataTfrThresh, kernelExecTimeThreshold)); printf("----------------------------------------------------------------------------\n"); printf(" Total Kernel-only time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalKernelTime, DATAMB(GPUDataTfrKernel), DATABW(GPUDataTfrKernel, totalKernelTime)); printf(" Total time with I/O included =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalTime, DATAMB(GPUDataTfrTotal), DATABW(GPUDataTfrTotal, totalTime)); printf("----------------------------------------------------------------------------\n"); // Deallocate CPU, GPU memory and destroy events. 
cudaFree(GPUptr); cudaEventDestroy(time1); cudaEventDestroy(time2); cudaEventDestroy(time2BW); cudaEventDestroy(time2Gauss); cudaEventDestroy(time2Sobel); cudaEventDestroy(time3); cudaEventDestroy(time4); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Parallel Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } free(TheImg); free(CopyImg); return(EXIT_SUCCESS); KERNELERROR: fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus); EXITCUDAERROR: cudaFree(GPUptr); EXITERROR: free(TheImg); free(CopyImg); return(EXIT_FAILURE); }
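The launch geometry shared by the four kernels above is one-dimensional: BlkPerRow = CEIL(Hpixels, ThrPerBlk) blocks cover a single pixel row and NumBlocks = Vpixels * BlkPerRow covers the whole image, which is why every kernel recomputes MYrow/MYcol from the flat block index and returns when MYcol >= Hpixels. A small standalone sketch of that arithmetic (the 640 x 480 image size and ThrPerBlk = 256 are made-up illustration values, not taken from the files above):

// Minimal sketch of the imedgeG launch arithmetic (illustrative values only).
#include <cstdio>
#define CEIL(a,b) ((a+b-1)/b)   // same macro as in the files above

int main() {
    unsigned int Hpixels = 640, Vpixels = 480, ThrPerBlk = 256;  // assumed image/block size
    unsigned int BlkPerRow = CEIL(Hpixels, ThrPerBlk);           // (640+255)/256 = 3
    unsigned int NumBlocks = Vpixels * BlkPerRow;                // 480*3 = 1440 blocks
    unsigned int idlePerRow = BlkPerRow * ThrPerBlk - Hpixels;   // 128 threads per row hit the "col out of range" guard
    printf("BlkPerRow=%u NumBlocks=%u idle threads per row=%u\n", BlkPerRow, NumBlocks, idlePerRow);
    return 0;
}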
9371516357f40596c3a4ccda5025b0cd64e36248.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <float.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdio>
#include <helper_cuda.h>
#include <helper_string.h>

#define MAX_DEPTH 24
#define INSERTION_SORT 32
#define MAXIMUM_VALUE 1000000.0f
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )

void HandleError( hipError_t err, const char *file, int line ) {
    //
    // Handle and report on CUDA errors.
    //
    if ( err != hipSuccess ) {
        printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
        exit( EXIT_FAILURE );
    }
}

void checkCUDAError( const char *msg, bool exitOnError ) {
    //
    // Check cuda error and print result if appropriate.
    //
    hipError_t err = hipGetLastError();
    if( hipSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err) );
        if (exitOnError) {
            exit(-1);
        }
    }
}

void cleanupCuda( void ) {
    //
    // Clean up CUDA resources.
    //
    //
    // Explicitly cleans up all runtime-related resources associated with the
    // calling host thread.
    //
    HANDLE_ERROR( hipDeviceReset() );
}

__device__ float device_pow( float x, float y ) {
    //
    // Calculate x^y on the GPU.
    //
    return pow( x, y );
}

//
// PLACE GPU KERNELS HERE - BEGIN
//

////////////////////////////////////////////////////////////////////////////////
// Selection sort used when depth gets too big or the number of elements drops
// below a threshold.
////////////////////////////////////////////////////////////////////////////////
__device__ void selection_sort(float *data, int left, int right) {
    for (int i = left ; i <= right ; ++i) {
        float min_val = data[i];
        int min_idx = i;

        // Find the smallest value in the range [left, right].
        for (int j = i+1 ; j <= right ; ++j) {
            float val_j = data[j];
            if (val_j < min_val) {
                min_idx = j;
                min_val = val_j;
            }
        }

        // Swap the values.
        if (i != min_idx) {
            data[min_idx] = data[i];
            data[i] = min_val;
        }
    }
}

////////////////////////////////////////////////////////////////////////////////
// Very basic quicksort algorithm, recursively launching the next level.
////////////////////////////////////////////////////////////////////////////////
__global__ void cdp_simple_quicksort(float *data, int left, int right, int depth) {
    // If we're too deep or there are few elements left, we fall back to the
    // selection sort above...
    if (depth >= MAX_DEPTH || right-left <= INSERTION_SORT) {
        selection_sort(data, left, right);
        return;
    }

    float *lptr = data+left;
    float *rptr = data+right;
    float pivot = data[right];

    // Do the partitioning.
    while (lptr <= rptr) {
        // Find the next left- and right-hand values to swap
        // (the array holds floats, so read them as floats).
        float lval = *lptr;
        float rval = *rptr;

        // Move the left pointer as long as the pointed element is smaller than the pivot.
        while (lval < pivot) {
            lptr++;
            lval = *lptr;
        }

        // Move the right pointer as long as the pointed element is larger than the pivot.
        while (rval > pivot) {
            rptr--;
            rval = *rptr;
        }

        // If the swap points are valid, do the swap!
        if (lptr <= rptr) {
            *lptr++ = rval;
            *rptr-- = lval;
        }
    }

    // Now the recursive part
    int nright = rptr - data;
    int nleft = lptr - data;

    // Launch a new block to sort the left part.
    if (left < (rptr-data)) {
        hipStream_t s;
        hipStreamCreateWithFlags(&s, hipStreamNonBlocking);
        hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, s , data, left, nright, depth+1);
        hipStreamDestroy(s);
    }

    // Launch a new block to sort the right part.
    if ((lptr-data) < right) {
        hipStream_t s1;
        hipStreamCreateWithFlags(&s1, hipStreamNonBlocking);
        hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, s1 , data, nleft, right, depth+1);
        hipStreamDestroy(s1);
    }
}

//
// PLACE GPU KERNELS HERE - END
//

////////////////////////////////////////////////////////////////////////////////
// Call the quicksort kernel from the host.
////////////////////////////////////////////////////////////////////////////////
void run_qsort(float *data, unsigned int nitems) {
    // Prepare CDP for the max depth 'MAX_DEPTH'.
    checkCudaErrors(hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, MAX_DEPTH));

    // Launch on device
    int left = 0;
    int right = nitems-1;
    std::cout << "Launching kernel on the GPU" << std::endl;
    hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1) , 0, 0, data, left, right, 0);
    checkCudaErrors(hipDeviceSynchronize());
}

void printArray(float arr[], int size) {
    int i;
    printf( "array size is %d\n", size);
    for (i = 0; i < size; i++)
        printf( "%f ", arr[i]);
}

void checkResult(float array[], int size) {
    float temp = 0;
    bool checkResult = true;
    for (int i=0; i < size; i++) {
        if (temp > array[i]) {
            checkResult = false;
            break;
        }
        temp = array[i];
    }
    if (checkResult) {
        printf( "Result sorted correct\n");
    } else {
        printf( "Result sorted wrong\n");
    }
}

int main( int argc, char* argv[] ) {
    //
    // Determine min, max, mean, mode and standard deviation of array
    //
    unsigned int array_size, seed, i;
    struct timeval start, end;
    double runtime;

    if( argc < 3 ) {
        printf( "Format: quickSort_gpu <size of array> <random seed>\n" );
        printf( "Arguments:\n" );
        printf( " size of array - This is the size of the array to be generated and processed\n" );
        printf( " random seed - This integer will be used to seed the random number\n" );
        printf( " generator that will generate the contents of the array\n" );
        printf( " to be processed\n" );
        exit( 1 );
    }

    //
    // Get the size of the array to process.
    //
    array_size = atoi( argv[1] );

    //
    // Get the seed to be used
    //
    seed = atoi( argv[2] );

    //
    // Make sure that CUDA resources get cleaned up on exit.
    //
    atexit( cleanupCuda );

    //
    // Record the start time.
    //
    gettimeofday( &start, NULL );

    //
    // Allocate the array to be populated.
    //
    float *array = (float *) malloc( array_size * sizeof( float ) );

    //
    // Seed the random number generator and populate the array with its values.
    //
    srand( seed );
    for( i = 0; i < array_size; i++ )
        array[i] = ( (float) rand() / (float) RAND_MAX ) * MAXIMUM_VALUE;

    float *dev_array;
    // Allocate GPU memory.
    checkCudaErrors(hipMalloc((void **)&dev_array, array_size * sizeof(float)));
    checkCudaErrors(hipMemcpy(dev_array, array, array_size * sizeof(float), hipMemcpyHostToDevice));

    // Execute
    run_qsort(dev_array, array_size);

    float *results = new float[array_size];
    checkCudaErrors(hipMemcpy(results, dev_array, array_size*sizeof(float), hipMemcpyDeviceToHost));

    //
    // Record the end time.
    //
    gettimeofday( &end, NULL );

    //
    // Calculate the runtime.
    //
    runtime = ( ( end.tv_sec - start.tv_sec ) * 1000.0 ) + ( ( end.tv_usec - start.tv_usec ) / 1000.0 );

    //
    // Output discoveries from the array.
    //
    printf( "Statistics for array ( %d, %d ):\n", array_size, seed );
    //printArray(array, array_size);
    printf( "\n------------\n" );
    //printArray(results, array_size);
    checkResult(results, array_size);
    printf( "Processing Time: %4.4f milliseconds\n", runtime );

    //
    // Free the allocated array.
    //
    free( array );
    hipFree(dev_array);

    // hipDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling hipDeviceReset causes all profile data to be
    // flushed before the application exits
    hipDeviceReset();

    return 0;
}
9371516357f40596c3a4ccda5025b0cd64e36248.cu
#include <stdio.h>
#include <float.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <cuda.h>
#include <iostream>
#include <cstdio>
#include <helper_cuda.h>
#include <helper_string.h>

#define MAX_DEPTH 24
#define INSERTION_SORT 32
#define MAXIMUM_VALUE 1000000.0f
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )

void HandleError( cudaError_t err, const char *file, int line ) {
    //
    // Handle and report on CUDA errors.
    //
    if ( err != cudaSuccess ) {
        printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
        exit( EXIT_FAILURE );
    }
}

void checkCUDAError( const char *msg, bool exitOnError ) {
    //
    // Check cuda error and print result if appropriate.
    //
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err) );
        if (exitOnError) {
            exit(-1);
        }
    }
}

void cleanupCuda( void ) {
    //
    // Clean up CUDA resources.
    //
    //
    // Explicitly cleans up all runtime-related resources associated with the
    // calling host thread.
    //
    HANDLE_ERROR( cudaThreadExit() );
}

__device__ float device_pow( float x, float y ) {
    //
    // Calculate x^y on the GPU.
    //
    return pow( x, y );
}

//
// PLACE GPU KERNELS HERE - BEGIN
//

////////////////////////////////////////////////////////////////////////////////
// Selection sort used when depth gets too big or the number of elements drops
// below a threshold.
////////////////////////////////////////////////////////////////////////////////
__device__ void selection_sort(float *data, int left, int right) {
    for (int i = left ; i <= right ; ++i) {
        float min_val = data[i];
        int min_idx = i;

        // Find the smallest value in the range [left, right].
        for (int j = i+1 ; j <= right ; ++j) {
            float val_j = data[j];
            if (val_j < min_val) {
                min_idx = j;
                min_val = val_j;
            }
        }

        // Swap the values.
        if (i != min_idx) {
            data[min_idx] = data[i];
            data[i] = min_val;
        }
    }
}

////////////////////////////////////////////////////////////////////////////////
// Very basic quicksort algorithm, recursively launching the next level.
////////////////////////////////////////////////////////////////////////////////
__global__ void cdp_simple_quicksort(float *data, int left, int right, int depth) {
    // If we're too deep or there are few elements left, we fall back to the
    // selection sort above...
    if (depth >= MAX_DEPTH || right-left <= INSERTION_SORT) {
        selection_sort(data, left, right);
        return;
    }

    float *lptr = data+left;
    float *rptr = data+right;
    float pivot = data[right];

    // Do the partitioning.
    while (lptr <= rptr) {
        // Find the next left- and right-hand values to swap
        // (the array holds floats, so read them as floats).
        float lval = *lptr;
        float rval = *rptr;

        // Move the left pointer as long as the pointed element is smaller than the pivot.
        while (lval < pivot) {
            lptr++;
            lval = *lptr;
        }

        // Move the right pointer as long as the pointed element is larger than the pivot.
        while (rval > pivot) {
            rptr--;
            rval = *rptr;
        }

        // If the swap points are valid, do the swap!
        if (lptr <= rptr) {
            *lptr++ = rval;
            *rptr-- = lval;
        }
    }

    // Now the recursive part
    int nright = rptr - data;
    int nleft = lptr - data;

    // Launch a new block to sort the left part.
    if (left < (rptr-data)) {
        cudaStream_t s;
        cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
        cdp_simple_quicksort<<< 1, 1, 0, s >>>(data, left, nright, depth+1);
        cudaStreamDestroy(s);
    }

    // Launch a new block to sort the right part.
    if ((lptr-data) < right) {
        cudaStream_t s1;
        cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
        cdp_simple_quicksort<<< 1, 1, 0, s1 >>>(data, nleft, right, depth+1);
        cudaStreamDestroy(s1);
    }
}

//
// PLACE GPU KERNELS HERE - END
//

////////////////////////////////////////////////////////////////////////////////
// Call the quicksort kernel from the host.
////////////////////////////////////////////////////////////////////////////////
void run_qsort(float *data, unsigned int nitems) {
    // Prepare CDP for the max depth 'MAX_DEPTH'.
    checkCudaErrors(cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, MAX_DEPTH));

    // Launch on device
    int left = 0;
    int right = nitems-1;
    std::cout << "Launching kernel on the GPU" << std::endl;
    cdp_simple_quicksort<<< 1, 1 >>>(data, left, right, 0);
    checkCudaErrors(cudaDeviceSynchronize());
}

void printArray(float arr[], int size) {
    int i;
    printf( "array size is %d\n", size);
    for (i = 0; i < size; i++)
        printf( "%f ", arr[i]);
}

void checkResult(float array[], int size) {
    float temp = 0;
    bool checkResult = true;
    for (int i=0; i < size; i++) {
        if (temp > array[i]) {
            checkResult = false;
            break;
        }
        temp = array[i];
    }
    if (checkResult) {
        printf( "Result sorted correct\n");
    } else {
        printf( "Result sorted wrong\n");
    }
}

int main( int argc, char* argv[] ) {
    //
    // Determine min, max, mean, mode and standard deviation of array
    //
    unsigned int array_size, seed, i;
    struct timeval start, end;
    double runtime;

    if( argc < 3 ) {
        printf( "Format: quickSort_gpu <size of array> <random seed>\n" );
        printf( "Arguments:\n" );
        printf( " size of array - This is the size of the array to be generated and processed\n" );
        printf( " random seed - This integer will be used to seed the random number\n" );
        printf( " generator that will generate the contents of the array\n" );
        printf( " to be processed\n" );
        exit( 1 );
    }

    //
    // Get the size of the array to process.
    //
    array_size = atoi( argv[1] );

    //
    // Get the seed to be used
    //
    seed = atoi( argv[2] );

    //
    // Make sure that CUDA resources get cleaned up on exit.
    //
    atexit( cleanupCuda );

    //
    // Record the start time.
    //
    gettimeofday( &start, NULL );

    //
    // Allocate the array to be populated.
    //
    float *array = (float *) malloc( array_size * sizeof( float ) );

    //
    // Seed the random number generator and populate the array with its values.
    //
    srand( seed );
    for( i = 0; i < array_size; i++ )
        array[i] = ( (float) rand() / (float) RAND_MAX ) * MAXIMUM_VALUE;

    float *dev_array;
    // Allocate GPU memory.
    checkCudaErrors(cudaMalloc((void **)&dev_array, array_size * sizeof(float)));
    checkCudaErrors(cudaMemcpy(dev_array, array, array_size * sizeof(float), cudaMemcpyHostToDevice));

    // Execute
    run_qsort(dev_array, array_size);

    float *results = new float[array_size];
    checkCudaErrors(cudaMemcpy(results, dev_array, array_size*sizeof(float), cudaMemcpyDeviceToHost));

    //
    // Record the end time.
    //
    gettimeofday( &end, NULL );

    //
    // Calculate the runtime.
    //
    runtime = ( ( end.tv_sec - start.tv_sec ) * 1000.0 ) + ( ( end.tv_usec - start.tv_usec ) / 1000.0 );

    //
    // Output discoveries from the array.
    //
    printf( "Statistics for array ( %d, %d ):\n", array_size, seed );
    //printArray(array, array_size);
    printf( "\n------------\n" );
    //printArray(results, array_size);
    checkResult(results, array_size);
    printf( "Processing Time: %4.4f milliseconds\n", runtime );

    //
    // Free the allocated array.
    //
    free( array );
    cudaFree(dev_array);

    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    cudaDeviceReset();

    return 0;
}
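Since cdp_simple_quicksort relaunches itself from device code, both files above depend on dynamic parallelism: the .cu version only builds with relocatable device code, the device runtime library, and a compute architecture of sm_35 or newer, and helper_cuda.h comes from the CUDA samples' common headers. A rough build line (the samples include path and the output name are placeholders, not taken from the document):

nvcc -rdc=true -arch=sm_70 -I<path-to-cuda-samples>/Common 9371516357f40596c3a4ccda5025b0cd64e36248.cu -o cdp_quicksort -lcudadevrt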
f2b50199d78f1b625fa94934f085bba5a8e86082.hip
// !!! This is a file automatically generated by hipify!!! /*********************************************** * # Copyright 2011. Thuy Diem Nguyen * # Contact: [email protected] * # * # GPL 3.0 applies. * # * ************************************************/ #include "genMain.h" #include "genKernel.h" #include <thrust/host_vector.h> void writeVectorToFile_GPU(thrust::host_vector< thrust::pair<unsigned int, unsigned int> > h_pairVector, thrust::host_vector< float > h_distVector, string outFileName, unsigned long long count, int fileId) { FILE * distFile; distFile = fopen(outFileName.c_str(), "wb"); if (distFile == NULL){ printf("cannot open distFile: %s\n", outFileName.c_str()); exit(-1); } float distArray[BUF_SIZE]; int h = 0; thrust::pair<unsigned int, unsigned int> aPair; for (unsigned int i = 0; i < count; ++i) { distArray[h] = h_distVector[i]; ++h; if (h == BUF_SIZE) { fwrite(distArray, sizeof(float), BUF_SIZE, distFile); h = 0; } } if (h > 0) { fwrite(distArray, sizeof(float), h, distFile); h = 0; } fclose(distFile); } void computeGenDist_CUDA(FILE* inFile, string outFileName, READ* &readArray, int numReads, int maxLen, float threshold, int band) { int i, maxDigitalLen = 0; int EOFTag = 0; int numPairs = 0; unsigned long long totalNumPairs = 0, count = 0; int fileId = 0; // check for the number of available GPUs //printf("blockSize: %d, gridSize: %d\n", (int)BLOCK_SIZE, (int)GRID_SIZE); maxDigitalLen = (int) (maxLen / SUBMAT_DIM) + 2; if (maxDigitalLen % 16 != 0) maxDigitalLen += 16 - (maxDigitalLen % 16); // padding //printf("maxDigitalLen: %d\n", maxDigitalLen); ushort *binaryReadArray; binaryReadArray = (ushort*) malloc(numReads * maxDigitalLen * sizeof(ushort)); // Digitialize a sequence into bit storage format(on host, in an array of unsigned UINT). 
convertToBinary(readArray, numReads, binaryReadArray, maxDigitalLen); // clean up readArray for (i = 0; i < numReads; ++i) readArray[i].release(); free(readArray); // Allocate space for the pair id array int *h_pairArray; int *d_pairArray; checkCudaErrors( hipHostMalloc((void**)&h_pairArray, NUM_PAIRS * 2 * sizeof(int)) ); checkCudaErrors( hipMalloc((void**)&d_pairArray, NUM_PAIRS * 2 * sizeof(int)) ); // Allocate memory block for the Needleman-Wunsch distance array float *h_distArray; float *d_distArray; checkCudaErrors( hipHostMalloc((void**)&h_distArray, NUM_PAIRS * sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&d_distArray, NUM_PAIRS * sizeof(float)) ); // determine gridSize and blockSize size_t threadsPerBlock(BLOCK_SIZE); size_t blocksPerGrid(GRID_SIZE); int offset, chunkSize, numPairsPerChunk; // use hipArray to store tupleArraySet hipChannelFormatDesc channelDesc=hipCreateChannelDesc<ushort>(); hipArray *seqCuArray; size_t width, height; width = maxDigitalLen*16; height = numReads/16; if ( (numReads&15) != 0) ++height; //cout << "2D texture: width " << width << " height: " << height << endl; checkCudaErrors( hipMallocArray(&seqCuArray, &channelDesc, width, height) ); checkCudaErrors( hipMemcpyToArray(seqCuArray, 0, 0, binaryReadArray, numReads * maxDigitalLen * sizeof(ushort), hipMemcpyHostToDevice) ); checkCudaErrors( hipBindTextureToArray(getSeqTexRef(), seqCuArray, channelDesc) ); free(binaryReadArray); hipStream_t stream[NUM_STREAMS]; for (i = 0; i < NUM_STREAMS; ++i) checkCudaErrors( hipStreamCreate(&stream[i]) ); size_t maxNumPairs = numReads*128; if (maxNumPairs > MAX_NUM_PAIRS) maxNumPairs = MAX_NUM_PAIRS; thrust::host_vector< float > h_distVector (maxNumPairs); thrust::host_vector< thrust::pair<unsigned int, unsigned int> > h_pairVector (maxNumPairs * 2); float dist; // obtain file size: while (!EOFTag) { numPairs = loadPairs(inFile, h_pairArray, EOFTag); numPairsPerChunk = (numPairs + NUM_STREAMS - 1) / NUM_STREAMS; if (numPairsPerChunk % 16 != 0) numPairsPerChunk += 16 - (numPairsPerChunk % 16); for (i = 0; i < NUM_STREAMS; ++i) { offset = i * numPairsPerChunk; if (i < NUM_STREAMS - 1) chunkSize = numPairsPerChunk; else chunkSize = numPairs - offset; checkCudaErrors( hipMemcpyAsync(d_pairArray+offset*2, h_pairArray+offset*2, chunkSize * sizeof(int) * 2, hipMemcpyHostToDevice, stream[i]) ); } for (i = 0; i < NUM_STREAMS; ++i) { offset = i * numPairsPerChunk; if (i < NUM_STREAMS - 1) chunkSize = numPairsPerChunk; else chunkSize = numPairs - offset; if (band > 1) launchGenKernel_band(stream[i], blocksPerGrid, threadsPerBlock, d_pairArray+offset*2, d_distArray+offset, maxDigitalLen, chunkSize, band); else launchGenKernel_full(stream[i], blocksPerGrid, threadsPerBlock, d_pairArray+offset*2, d_distArray+offset, maxDigitalLen, chunkSize); } for (i = 0; i < NUM_STREAMS; ++i) { offset = i * numPairsPerChunk; if (i < NUM_STREAMS - 1) chunkSize = numPairsPerChunk; else chunkSize = numPairs - offset; // copy results from device to host checkCudaErrors( hipMemcpyAsync(h_distArray+offset, d_distArray+offset, chunkSize * sizeof(float), hipMemcpyDeviceToHost, stream[i]) ); } hipDeviceSynchronize(); for (i = 0; i < numPairs; i++) { dist = h_distArray[i]; if (dist < threshold || fabs(dist-threshold) < EPSILON) { h_pairVector[count] = thrust::make_pair(h_pairArray[i*2], h_pairArray[i*2+1]); h_distVector[count] = dist; ++count; } } if (count >= maxNumPairs) { //h_pairVector.resize(count); //h_distVector.resize(count); writeVectorToFile_GPU(h_pairVector, h_distVector, outFileName, 
count, fileId); //h_pairVector.resize(maxNumPairs * 2); //h_distVector.resize(maxNumPairs * 2); ++ fileId; totalNumPairs += count; count = 0; } } if (count > 0) { h_pairVector.resize(count); h_distVector.resize(count); writeVectorToFile_GPU(h_pairVector, h_distVector, outFileName, count, fileId); totalNumPairs += count; } //printf("totalNumPairs: %llu\n", totalNumPairs); printf("%llu\n", totalNumPairs); for (i = 0; i < NUM_STREAMS; ++i) checkCudaErrors( hipStreamDestroy(stream[i]) ); checkCudaErrors( hipHostFree(h_distArray) ); checkCudaErrors( hipHostFree(h_pairArray) ); checkCudaErrors( hipFree(d_pairArray) ); checkCudaErrors( hipFree(d_distArray) ); // clean up device variables checkCudaErrors( hipUnbindTexture(getSeqTexRef()) ); checkCudaErrors( hipFreeArray(seqCuArray) ); }
f2b50199d78f1b625fa94934f085bba5a8e86082.cu
/*********************************************** * # Copyright 2011. Thuy Diem Nguyen * # Contact: [email protected] * # * # GPL 3.0 applies. * # * ************************************************/ #include "genMain.h" #include "genKernel.h" #include <thrust/host_vector.h> void writeVectorToFile_GPU(thrust::host_vector< thrust::pair<unsigned int, unsigned int> > h_pairVector, thrust::host_vector< float > h_distVector, string outFileName, unsigned long long count, int fileId) { FILE * distFile; distFile = fopen(outFileName.c_str(), "wb"); if (distFile == NULL){ printf("cannot open distFile: %s\n", outFileName.c_str()); exit(-1); } float distArray[BUF_SIZE]; int h = 0; thrust::pair<unsigned int, unsigned int> aPair; for (unsigned int i = 0; i < count; ++i) { distArray[h] = h_distVector[i]; ++h; if (h == BUF_SIZE) { fwrite(distArray, sizeof(float), BUF_SIZE, distFile); h = 0; } } if (h > 0) { fwrite(distArray, sizeof(float), h, distFile); h = 0; } fclose(distFile); } void computeGenDist_CUDA(FILE* inFile, string outFileName, READ* &readArray, int numReads, int maxLen, float threshold, int band) { int i, maxDigitalLen = 0; int EOFTag = 0; int numPairs = 0; unsigned long long totalNumPairs = 0, count = 0; int fileId = 0; // check for the number of available GPUs //printf("blockSize: %d, gridSize: %d\n", (int)BLOCK_SIZE, (int)GRID_SIZE); maxDigitalLen = (int) (maxLen / SUBMAT_DIM) + 2; if (maxDigitalLen % 16 != 0) maxDigitalLen += 16 - (maxDigitalLen % 16); // padding //printf("maxDigitalLen: %d\n", maxDigitalLen); ushort *binaryReadArray; binaryReadArray = (ushort*) malloc(numReads * maxDigitalLen * sizeof(ushort)); // Digitialize a sequence into bit storage format(on host, in an array of unsigned UINT). convertToBinary(readArray, numReads, binaryReadArray, maxDigitalLen); // clean up readArray for (i = 0; i < numReads; ++i) readArray[i].release(); free(readArray); // Allocate space for the pair id array int *h_pairArray; int *d_pairArray; checkCudaErrors( cudaMallocHost((void**)&h_pairArray, NUM_PAIRS * 2 * sizeof(int)) ); checkCudaErrors( cudaMalloc((void**)&d_pairArray, NUM_PAIRS * 2 * sizeof(int)) ); // Allocate memory block for the Needleman-Wunsch distance array float *h_distArray; float *d_distArray; checkCudaErrors( cudaMallocHost((void**)&h_distArray, NUM_PAIRS * sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&d_distArray, NUM_PAIRS * sizeof(float)) ); // determine gridSize and blockSize size_t threadsPerBlock(BLOCK_SIZE); size_t blocksPerGrid(GRID_SIZE); int offset, chunkSize, numPairsPerChunk; // use cudaArray to store tupleArraySet cudaChannelFormatDesc channelDesc=cudaCreateChannelDesc<ushort>(); cudaArray *seqCuArray; size_t width, height; width = maxDigitalLen*16; height = numReads/16; if ( (numReads&15) != 0) ++height; //cout << "2D texture: width " << width << " height: " << height << endl; checkCudaErrors( cudaMallocArray(&seqCuArray, &channelDesc, width, height) ); checkCudaErrors( cudaMemcpyToArray(seqCuArray, 0, 0, binaryReadArray, numReads * maxDigitalLen * sizeof(ushort), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaBindTextureToArray(getSeqTexRef(), seqCuArray, channelDesc) ); free(binaryReadArray); cudaStream_t stream[NUM_STREAMS]; for (i = 0; i < NUM_STREAMS; ++i) checkCudaErrors( cudaStreamCreate(&stream[i]) ); size_t maxNumPairs = numReads*128; if (maxNumPairs > MAX_NUM_PAIRS) maxNumPairs = MAX_NUM_PAIRS; thrust::host_vector< float > h_distVector (maxNumPairs); thrust::host_vector< thrust::pair<unsigned int, unsigned int> > h_pairVector (maxNumPairs * 2); 
float dist; // obtain file size: while (!EOFTag) { numPairs = loadPairs(inFile, h_pairArray, EOFTag); numPairsPerChunk = (numPairs + NUM_STREAMS - 1) / NUM_STREAMS; if (numPairsPerChunk % 16 != 0) numPairsPerChunk += 16 - (numPairsPerChunk % 16); for (i = 0; i < NUM_STREAMS; ++i) { offset = i * numPairsPerChunk; if (i < NUM_STREAMS - 1) chunkSize = numPairsPerChunk; else chunkSize = numPairs - offset; checkCudaErrors( cudaMemcpyAsync(d_pairArray+offset*2, h_pairArray+offset*2, chunkSize * sizeof(int) * 2, cudaMemcpyHostToDevice, stream[i]) ); } for (i = 0; i < NUM_STREAMS; ++i) { offset = i * numPairsPerChunk; if (i < NUM_STREAMS - 1) chunkSize = numPairsPerChunk; else chunkSize = numPairs - offset; if (band > 1) launchGenKernel_band(stream[i], blocksPerGrid, threadsPerBlock, d_pairArray+offset*2, d_distArray+offset, maxDigitalLen, chunkSize, band); else launchGenKernel_full(stream[i], blocksPerGrid, threadsPerBlock, d_pairArray+offset*2, d_distArray+offset, maxDigitalLen, chunkSize); } for (i = 0; i < NUM_STREAMS; ++i) { offset = i * numPairsPerChunk; if (i < NUM_STREAMS - 1) chunkSize = numPairsPerChunk; else chunkSize = numPairs - offset; // copy results from device to host checkCudaErrors( cudaMemcpyAsync(h_distArray+offset, d_distArray+offset, chunkSize * sizeof(float), cudaMemcpyDeviceToHost, stream[i]) ); } cudaDeviceSynchronize(); for (i = 0; i < numPairs; i++) { dist = h_distArray[i]; if (dist < threshold || fabs(dist-threshold) < EPSILON) { h_pairVector[count] = thrust::make_pair(h_pairArray[i*2], h_pairArray[i*2+1]); h_distVector[count] = dist; ++count; } } if (count >= maxNumPairs) { //h_pairVector.resize(count); //h_distVector.resize(count); writeVectorToFile_GPU(h_pairVector, h_distVector, outFileName, count, fileId); //h_pairVector.resize(maxNumPairs * 2); //h_distVector.resize(maxNumPairs * 2); ++ fileId; totalNumPairs += count; count = 0; } } if (count > 0) { h_pairVector.resize(count); h_distVector.resize(count); writeVectorToFile_GPU(h_pairVector, h_distVector, outFileName, count, fileId); totalNumPairs += count; } //printf("totalNumPairs: %llu\n", totalNumPairs); printf("%llu\n", totalNumPairs); for (i = 0; i < NUM_STREAMS; ++i) checkCudaErrors( cudaStreamDestroy(stream[i]) ); checkCudaErrors( cudaFreeHost(h_distArray) ); checkCudaErrors( cudaFreeHost(h_pairArray) ); checkCudaErrors( cudaFree(d_pairArray) ); checkCudaErrors( cudaFree(d_distArray) ); // clean up device variables checkCudaErrors( cudaUnbindTexture(getSeqTexRef()) ); checkCudaErrors( cudaFreeArray(seqCuArray) ); }
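The host loop above splits each batch of pairs across NUM_STREAMS streams, rounding every chunk up to a multiple of 16 and handing the remainder to the last stream. A standalone sketch of just that index arithmetic (the pair count 1000 and stream count 4 are made-up values; the real NUM_PAIRS and NUM_STREAMS come from headers that are not shown here):

// Minimal sketch of the per-stream chunking used above (illustrative values only).
#include <cstdio>

int main() {
    int numPairs = 1000, numStreams = 4;                                  // assumed values
    int numPairsPerChunk = (numPairs + numStreams - 1) / numStreams;      // 250
    if (numPairsPerChunk % 16 != 0)
        numPairsPerChunk += 16 - (numPairsPerChunk % 16);                 // padded up to 256
    for (int i = 0; i < numStreams; ++i) {
        int offset = i * numPairsPerChunk;
        int chunkSize = (i < numStreams - 1) ? numPairsPerChunk : numPairs - offset;
        printf("stream %d: offset=%d chunkSize=%d\n", i, offset, chunkSize);  // 0/256, 256/256, 512/256, 768/232
    }
    return 0;
}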
f44e04dafb55539e90ac8f3ea18750d40a8d452c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>

__global__ void swap(int *M, int mat_size)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int N = mat_size;

    if((i < N) && (j < N) && (j%2 == 0) && (j != N - 1))
    {
        int tmp = M[i * mat_size + j];
        M[i * mat_size + j] = M[i*mat_size + j + 1];
        M[i * mat_size + j + 1] = tmp;
    }
    __syncthreads();
}

__global__ void reflect(int *M, int mat_size)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int N = mat_size;

    if((i < N) && (j < N) && (i > j))
    {
        M[j*mat_size + i] = M[i*mat_size + j];
    }
    __syncthreads();
}
f44e04dafb55539e90ac8f3ea18750d40a8d452c.cu
#include<stdio.h>

__global__ void swap(int *M, int mat_size)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int N = mat_size;

    if((i < N) && (j < N) && (j%2 == 0) && (j != N - 1))
    {
        int tmp = M[i * mat_size + j];
        M[i * mat_size + j] = M[i*mat_size + j + 1];
        M[i * mat_size + j + 1] = tmp;
    }
    __syncthreads();
}

__global__ void reflect(int *M, int mat_size)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int N = mat_size;

    if((i < N) && (j < N) && (i > j))
    {
        M[j*mat_size + i] = M[i*mat_size + j];
    }
    __syncthreads();
}
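Neither version of this file ships a host driver, so the following is a hedged sketch of how the swap and reflect kernels above might be launched on a row-major N x N matrix. The 16x16 block shape and the matrix size are illustrative assumptions; compile it together with the kernels (appended to the same .cu, or in a separate file with nvcc -rdc=true).

// Usage sketch for the swap/reflect kernels above (CUDA version). The launch
// configuration is an assumption, not taken from the original source.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void swap(int *M, int mat_size);     // defined in the file above
__global__ void reflect(int *M, int mat_size);  // defined in the file above

int main()
{
    const int N = 8;
    int h_M[N * N];
    for (int i = 0; i < N * N; ++i) h_M[i] = i;

    int *d_M;
    cudaMalloc((void**)&d_M, N * N * sizeof(int));
    cudaMemcpy(d_M, h_M, N * N * sizeof(int), cudaMemcpyHostToDevice);

    dim3 block(16, 16);
    dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);

    swap<<<grid, block>>>(d_M, N);      // swap each even column j with column j+1 in every row
    reflect<<<grid, block>>>(d_M, N);   // mirror the lower triangle onto the upper triangle

    cudaMemcpy(h_M, d_M, N * N * sizeof(int), cudaMemcpyDeviceToHost);
    printf("M[0][0..3] = %d %d %d %d\n", h_M[0], h_M[1], h_M[2], h_M[3]);

    cudaFree(d_M);
    return 0;
}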
0112bdf31da7d65c1026fdc3a9c207681399d1b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "src/fastertransformer/kernels/image_shift_partition_kernels.h" #include "src/fastertransformer/kernels/reduce_kernel_utils.cuh" namespace fastertransformer { /******************* invokeNormalizeForFMHA ***********************/ // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on size_per_head for q & k [-, -, -, 0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) // grid(2*num_head, seqlen, batch) // block((size_per_head/2 + 31)/31*32) __global__ void normalize_for_FMHA_kernel( half2* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, float factor_of_fmha) { const int batch_seqlen_id = blockIdx.z * seqlen + blockIdx.y; const int head_id = blockIdx.x / 2; const int qkv_id = blockIdx.x % 2; const int size_id = threadIdx.x; const size_t input_idx = ((batch_seqlen_id * num_head + head_id) * 3 + qkv_id) * size_per_head / 2 + size_id; const float logit_scale = ((qkv_id == 0) ? float(logit_scales[head_id]) * factor_of_fmha : 1.0f); const half2 zero = {half(0.0f), half(0.0f)}; const bool flag = size_id < size_per_head / 2; half2 input_val = (flag) ? data[input_idx] : zero; float2 input_val_float2 = __half22float2(input_val); __shared__ float norm_factor; const float local_sum = input_val_float2.x * input_val_float2.x + input_val_float2.y * input_val_float2.y; const float local_sum_all = blockDim.x <= 32 ? 
warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrtf(local_sum_all + 1e-6) * logit_scale; } __syncthreads(); input_val_float2.x *= norm_factor; input_val_float2.y *= norm_factor; input_val = __float22half2_rn(input_val_float2); if (flag) { data[input_idx] = input_val; } } // This kernel is designed for size_per_head = 32, typical case in swin // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on size_per_head for q & k [-, -, -, 0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) // grid(batch*seqlen*num_head/(LOOP*HEADS_PER_WARP*WARPS_PER_BLOCK)) // block(32*WARPS_PER_BLOCK) each warp load 64*LOOP*HEADS_PER_WARP elements (LOOP*HEADS_PER_WARP heads of q&k) and do // normlization template<int LOOP, int HEADS_PER_WARP, int WARPS_PER_BLOCK> __global__ void normalize_for_FMHA_headz32_kernel( half2* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, float factor_of_fmha) { __shared__ half2 data_shm[LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP * 32]; __shared__ float norm_factor[LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP * 2]; const int batch_seqlen_head_offset = blockIdx.x * LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP; const int seqlen_head_offset = batch_seqlen_head_offset % (seqlen * num_head); const int tid = threadIdx.x; const int warp_id = tid / 32; const int tid_in_warp = tid % 32; const int HEADS_PER_WARP_x_WARPS_PER_BLOCK = HEADS_PER_WARP * WARPS_PER_BLOCK; // load from gmem to smem #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int head_offset = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP; #pragma unroll for (int head_i = 0; head_i < HEADS_PER_WARP; head_i++) { // one warp loads one head (32 threads load 32 half2) const size_t input_idx = ((batch_seqlen_head_offset + head_offset + head_i) * 3) * size_per_head / 2 + tid_in_warp; // we need to ensure no out of memory address when launch kernel. 
const half2 input_val = data[input_idx]; const int shm_idx = (head_offset + head_i) * 32 + tid_in_warp; data_shm[shm_idx] = input_val; } } __syncthreads(); // we use one warp to deal with HEADS_PER_WARP heads at one time, // so one thread deals with part of one single head at one time float local_sums[LOOP]; const int threads_per_head = 32 / HEADS_PER_WARP; // each head has 32 half2 const int half2Size_per_thread = 32 / threads_per_head; const int head_in_warp = tid_in_warp / threads_per_head; const int id_offset_in_head = tid_in_warp % threads_per_head; const int size_offset_in_head = half2Size_per_thread * id_offset_in_head; const int head_offset_of_warp = warp_id * HEADS_PER_WARP + head_in_warp; #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { float local_sum = 0.0f; const int shm_offset = (loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp) * 32 + size_offset_in_head; #pragma unroll for (int size_i = 0; size_i < half2Size_per_thread; size_i++) { const int shm_idx = shm_offset + size_i; const float2 tmp = __half22float2(data_shm[shm_idx]); local_sum += tmp.x * tmp.x + tmp.y * tmp.y; } local_sums[loop_i] = local_sum; } const int threads_per_head_2 = threads_per_head / 2; const bool is_q = id_offset_in_head < threads_per_head_2; #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int seqlen_head_id = seqlen_head_offset + loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp; const int head_id = seqlen_head_id % num_head; float local_sum = local_sums[loop_i]; #pragma unroll for (int i = 1; i < threads_per_head_2; i <<= 1) { local_sum += __shfl_xor_sync(FINAL_MASK, local_sum, i, 32); } if (id_offset_in_head % threads_per_head_2 == 0) { const float logit_scale = is_q ? float(logit_scales[head_id]) * factor_of_fmha : 1.f; const int norm_factor_idx = 2 * (loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp) + id_offset_in_head / threads_per_head_2; norm_factor[norm_factor_idx] = rsqrtf(local_sum + 1e-6) * logit_scale; } } __syncthreads(); // normalize and store to gmem #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int head_offset = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP; #pragma unroll for (int head_i = 0; head_i < HEADS_PER_WARP; head_i++) { // we need to ensure no out of memory address when launch kernel, one warp deals with one head const size_t output_idx = ((batch_seqlen_head_offset + head_offset + head_i) * 3) * size_per_head / 2 + tid_in_warp; const int head_idx = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP + head_i; const int shm_idx = (head_idx)*32 + tid_in_warp; const int norm_factor_idx = 2 * (head_idx) + tid_in_warp / 16; float norm_factor_ = norm_factor[norm_factor_idx]; half2 input_val = data_shm[shm_idx]; float2 input_val_float = __half22float2(input_val); input_val_float.x *= norm_factor_; input_val_float.y *= norm_factor_; data[output_idx] = __float22half2_rn(input_val_float); } } } #define NORMALIZE_FMHA_HEAD32_MACRO(LOOP, HEADS_PER_WARP, WARPS_PER_BLOCK) \ dim3 grid(batch* seqlen_num_head / (LOOP * HEADS_PER_WARP * WARPS_PER_BLOCK)); \ dim3 block(32 * WARPS_PER_BLOCK); \ hipLaunchKernelGGL(( normalize_for_FMHA_headz32_kernel<LOOP, HEADS_PER_WARP, WARPS_PER_BLOCK>), dim3(grid), dim3(block), 0, stream, \ (half2*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, sqrt(size_per_head)); // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on 
size_per_head for q & k [-, -, -, 0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) template<typename T> void invokeNormalizeForFMHA( T* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream) { if (std::is_same<T, half>::value) { if (size_per_head == 32) { const int seqlen_num_head = seqlen * num_head; // LOOP = 2, HEADS_PER_WARP = 4, WARPS_PER_BLOCK = 2 if (seqlen_num_head % (2 * 4 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(2, 4, 2) } // LOOP = 1, HEADS_PER_WARP = 4, WARPS_PER_BLOCK = 2 else if (seqlen_num_head % (1 * 4 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 4, 2) } // LOOP = 1, HEADS_PER_WARP = 2, WARPS_PER_BLOCK = 2 else if (seqlen_num_head % (1 * 2 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 2, 2) } // LOOP = 1, HEADS_PER_WARP = 2, WARPS_PER_BLOCK = 1 else if (seqlen_num_head % (1 * 2 * 1) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 2, 1) } // LOOP = 1, HEADS_PER_WARP = 1, WARPS_PER_BLOCK = 1 else { NORMALIZE_FMHA_HEAD32_MACRO(1, 1, 1) } } else if (size_per_head % 2 == 0) { dim3 grid(2 * num_head, seqlen, batch); dim3 block((size_per_head / 2 + 31) / 32 * 32); hipLaunchKernelGGL(( normalize_for_FMHA_kernel), dim3(grid), dim3(block), 0, stream, (half2*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, sqrt(size_per_head)); } else { printf("[ERROR][invokeNormalizeForFMHA] only supports size_per_head %% 2 == 0!\n"); exit(-1); } } else { printf("[ERROR][invokeNormalizeForFMHA] only supports half I/O!\n"); exit(-1); } } #undef NORMALIZE_FMHA_HEAD32_MACRO template void invokeNormalizeForFMHA<half>( half* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream); template void invokeNormalizeForFMHA<float>(float* data, const float* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream); #ifdef ENABLE_BF16 template void invokeNormalizeForFMHA<__nv_bfloat16>(__nv_bfloat16* data, const __nv_bfloat16* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream); #endif // This kernel is designed for size_per_head = 32, typical case in swin // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on size_per_head for q & k [-, -, -, 0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) // grid(batch*seqlen*num_head/(LOOP*HEADS_PER_WARP*WARPS_PER_BLOCK)) // block(32*WARPS_PER_BLOCK) each warp load 64*LOOP*HEADS_PER_WARP elements (LOOP*HEADS_PER_WARP heads of q&k) and do // normlization template<int LOOP, int HEADS_PER_WARP, int WARPS_PER_BLOCK> __global__ void normalize_for_FMHA_headz32_INT8_kernel(char2* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, const float query_deQ_scale, const float key_deQ_scale, const float query_Q_scale, const float key_Q_scale) { __shared__ char2 data_shm[LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP * 32]; __shared__ float norm_factor[LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP * 2]; const int batch_seqlen_head_offset = blockIdx.x * LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP; const int seqlen_head_offset = batch_seqlen_head_offset % (seqlen * num_head); const int tid = threadIdx.x; const int warp_id = tid / 32; const int tid_in_warp = tid % 32; const int HEADS_PER_WARP_x_WARPS_PER_BLOCK = 
HEADS_PER_WARP * WARPS_PER_BLOCK; // load from gmem to smem #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int head_offset = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP; #pragma unroll for (int head_i = 0; head_i < HEADS_PER_WARP; head_i++) { // one warp loads one head (32 threads load 32 half2) const size_t input_idx = ((batch_seqlen_head_offset + head_offset + head_i) * 3) * size_per_head / 2 + tid_in_warp; // we need to ensure no out of memory address when launch kernel. const char2 input_val = data[input_idx]; const int shm_idx = (head_offset + head_i) * 32 + tid_in_warp; data_shm[shm_idx] = input_val; } } __syncthreads(); // we use one warp to deal with HEADS_PER_WARP heads at one time, // so one thread deals with part of one single head at one time float local_sums[LOOP]; const int threads_per_head = 32 / HEADS_PER_WARP; // each head has 32 half2 const int half2Size_per_thread = 32 / threads_per_head; const int head_in_warp = tid_in_warp / threads_per_head; const int id_offset_in_head = tid_in_warp % threads_per_head; const int size_offset_in_head = half2Size_per_thread * id_offset_in_head; const int head_offset_of_warp = warp_id * HEADS_PER_WARP + head_in_warp; const int threads_per_head_2 = threads_per_head / 2; const bool is_q = id_offset_in_head < threads_per_head_2; float deQ_scale = is_q ? query_deQ_scale : key_deQ_scale; float Q_scale = is_q ? query_Q_scale : key_Q_scale; #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { float local_sum = 0.0f; const int shm_offset = (loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp) * 32 + size_offset_in_head; #pragma unroll for (int size_i = 0; size_i < half2Size_per_thread; size_i++) { const int shm_idx = shm_offset + size_i; const char2 tmp = data_shm[shm_idx]; float2 tmpFloat; tmpFloat.x = static_cast<float>(tmp.x) * deQ_scale; tmpFloat.y = static_cast<float>(tmp.y) * deQ_scale; local_sum += tmpFloat.x * tmpFloat.x + tmpFloat.y * tmpFloat.y; } local_sums[loop_i] = local_sum; } #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int seqlen_head_id = seqlen_head_offset + loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp; const int head_id = seqlen_head_id % num_head; float local_sum = local_sums[loop_i]; #pragma unroll for (int i = 1; i < threads_per_head_2; i <<= 1) { local_sum += __shfl_xor_sync(FINAL_MASK, local_sum, i, 32); } if (id_offset_in_head % threads_per_head_2 == 0) { const float logit_scale = is_q ? float(logit_scales[head_id]) : 1.f; const int norm_factor_idx = 2 * (loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp) + id_offset_in_head / threads_per_head_2; norm_factor[norm_factor_idx] = rsqrtf(local_sum + 1e-6) * logit_scale; } } __syncthreads(); // normalize and store to gmem #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int head_offset = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP; #pragma unroll for (int head_i = 0; head_i < HEADS_PER_WARP; head_i++) { // we need to ensure no out of memory address when launch kernel, one warp deals with one head const size_t output_idx = ((batch_seqlen_head_offset + head_offset + head_i) * 3) * size_per_head / 2 + tid_in_warp; const int head_idx = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP + head_i; const int shm_idx = (head_idx)*32 + tid_in_warp; const int norm_factor_idx = 2 * (head_idx) + tid_in_warp / 16; deQ_scale = (tid_in_warp < 16) ? 
query_deQ_scale : key_deQ_scale; Q_scale = (tid_in_warp < 16) ? query_Q_scale : key_Q_scale; float norm_factor_ = norm_factor[norm_factor_idx]; char2 input_val = data_shm[shm_idx]; float2 input_val_float; input_val_float.x = static_cast<float>(input_val.x) * deQ_scale * norm_factor_ * Q_scale; input_val_float.y = static_cast<float>(input_val.y) * deQ_scale * norm_factor_ * Q_scale; input_val.x = float_to_int8_rn(input_val_float.x); input_val.y = float_to_int8_rn(input_val_float.y); data[output_idx] = input_val; } } } // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on size_per_head for q & k [-, -, -, 0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) // grid(2*num_head, seqlen, batch) // block((size_per_head/2 + 31)/31*32) __global__ void normalize_for_FMHA_INT8_kernel(char2* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, const float query_deQ_scale, const float key_deQ_scale, const float query_Q_scale, const float key_Q_scale) { const int batch_seqlen_id = blockIdx.z * seqlen + blockIdx.y; const int head_id = blockIdx.x / 2; const int qkv_id = blockIdx.x % 2; const int size_id = threadIdx.x; const size_t input_idx = ((batch_seqlen_id * num_head + head_id) * 3 + qkv_id) * size_per_head / 2 + size_id; const float logit_scale = ((qkv_id == 0) ? float(logit_scales[head_id]) : 1.0f); const float deQ_scale = (qkv_id == 0) ? query_deQ_scale : key_deQ_scale; const float Q_scale = (qkv_id == 0) ? query_Q_scale : key_Q_scale; float2 input_val_float2 = {0.0f, 0.0f}; const bool flag = size_id < size_per_head / 2; if (flag) { char2 input_data = data[input_idx]; input_val_float2.x = static_cast<float>(input_data.x) * deQ_scale; input_val_float2.y = static_cast<float>(input_data.y) * deQ_scale; } __shared__ float norm_factor; const float local_sum = input_val_float2.x * input_val_float2.x + input_val_float2.y * input_val_float2.y; const float local_sum_all = blockDim.x <= 32 ? 
warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrtf(local_sum_all + 1e-6) * logit_scale * Q_scale; } __syncthreads(); if (flag) { char2 output_val; output_val.x = float_to_int8_rn(input_val_float2.x * norm_factor); output_val.y = float_to_int8_rn(input_val_float2.y * norm_factor); data[input_idx] = output_val; } } #define NORMALIZE_FMHA_HEAD32_MACRO(LOOP, HEADS_PER_WARP, WARPS_PER_BLOCK) \ dim3 grid(batch* seqlen_num_head / (LOOP * HEADS_PER_WARP * WARPS_PER_BLOCK)); \ dim3 block(32 * WARPS_PER_BLOCK); \ hipLaunchKernelGGL(( normalize_for_FMHA_headz32_INT8_kernel<LOOP, HEADS_PER_WARP, WARPS_PER_BLOCK>) \ , dim3(grid), dim3(block), 0, stream, (char2*)data, \ (const half*)logit_scales, \ batch, \ seqlen, \ num_head, \ size_per_head, \ query_deQ_scale, \ key_deQ_scale, \ query_Q_scale, \ key_Q_scale); // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on size_per_head for q & k [-, -, -, 0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) template<typename T> void invokeNormalizeForFMHA(int8_t* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream, const float query_deQ_scale, const float key_deQ_scale, const float query_Q_scale, const float key_Q_scale) { if (std::is_same<T, half>::value) { if (size_per_head == 32) { const int seqlen_num_head = seqlen * num_head; if (seqlen_num_head % (2 * 4 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(2, 4, 2) } else if (seqlen_num_head % (1 * 4 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 4, 2) } else if (seqlen_num_head % (1 * 2 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 2, 2) } else if (seqlen_num_head % (1 * 2 * 1) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 2, 1) } else { NORMALIZE_FMHA_HEAD32_MACRO(1, 1, 1) } } else if (size_per_head % 2 == 0) { dim3 grid(2 * num_head, seqlen, batch); dim3 block((size_per_head / 2 + 31) / 32 * 32); hipLaunchKernelGGL(( normalize_for_FMHA_INT8_kernel), dim3(grid), dim3(block), 0, stream, (char2*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, query_deQ_scale, key_deQ_scale, query_Q_scale, key_Q_scale); } else { printf("[ERROR][invokeNormalizeForFMHA(INT8 version)] only supports size_per_head %% 2 == 0!\n"); exit(-1); } } else { printf("[ERROR][invokeNormalizeForFMHA(INT8 version)] only supports half Input!\n"); exit(-1); } } #undef NORMALIZE_FMHA_HEAD32_MACRO template void invokeNormalizeForFMHA<float>(int8_t* data, const float* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream, const float query_deQ_scale, const float key_deQ_scale, const float query_Q_scale, const float key_Q_scale); template void invokeNormalizeForFMHA<half>(int8_t* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream, const float query_deQ_scale, const float key_deQ_scale, const float query_Q_scale, const float key_Q_scale); /******************* invokeNormalize ***********************/ // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale) // grid(seqlen, num_head, batch) // block((size_per_head + 31)/31*32) // TODO : the trick of normalize_for_FMHA_headz32_kernel can be used here template<typename T, typename T2> __global__ void normalize_kernel(T* data, const T2* logit_scales, int batch, int seqlen, int 
num_head, int size_per_head) { const int batch_id = blockIdx.z; const int head_id = blockIdx.y; const int seq_id = blockIdx.x; const int size_id = threadIdx.x; const int input_idx = ((batch_id * num_head + head_id) * seqlen + seq_id) * size_per_head + size_id; const float logit_scale = (logit_scales != NULL) ? static_cast<float>(logit_scales[head_id]) : 1.0f; const bool flag = size_id < size_per_head; float input_val = (flag) ? static_cast<float>(data[input_idx]) : 0.0f; __shared__ float norm_factor; const float local_sum = input_val * input_val; const float local_sum_all = blockDim.x <= 32 ? warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrt(local_sum_all + 1e-6) * logit_scale; } __syncthreads(); input_val *= norm_factor; if (flag) { data[input_idx] = input_val; } } // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale/sqrt(size_per_head)) // grid(seqlen, num_head, batch) // block((size_per_head/2 + 31)/31*32) // TODO : the trick of normalize_for_FMHA_headz32_kernel can be used here template<> __global__ void normalize_kernel(half2* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head) { const int batch_id = blockIdx.z; const int head_id = blockIdx.y; const int seq_id = blockIdx.x; const int size_id = threadIdx.x; const int input_idx = ((batch_id * num_head + head_id) * seqlen + seq_id) * size_per_head / 2 + size_id; const float logit_scale = (logit_scales != NULL) ? float(logit_scales[head_id]) : 1.0f; const half2 zero = {half(0.0f), half(0.0f)}; const bool flag = size_id < size_per_head / 2; half2 input_val = (flag) ? data[input_idx] : zero; float2 input_val_float2 = __half22float2(input_val); __shared__ float norm_factor; const float local_sum = input_val_float2.x * input_val_float2.x + input_val_float2.y * input_val_float2.y; const float local_sum_all = blockDim.x <= 32 ? warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrtf(local_sum_all + 1e-6) * logit_scale; } __syncthreads(); input_val_float2.x *= norm_factor; input_val_float2.y *= norm_factor; input_val = __float22half2_rn(input_val_float2); if (flag) { data[input_idx] = input_val; } } extern __shared__ char normalize_kernel_v2_shm[]; // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale) // grid(batch*num_head*seqlen/(HEADS_PER_WARP*WARPS_PER_BLOCK)) // block(32*WARPS_PER_BLOCK) // size_per_head % ELEMENT_PER_LDG == 0 template<typename T_IO, typename T, int HEADS_PER_WARP, int WARPS_PER_BLOCK, int ELEMENT_PER_LDG> __global__ void normalize_kernel_v2(T_IO* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int threads_per_head = 32 / HEADS_PER_WARP; const int lane_id = tidx % 32; const int warp_id = tidx / 32; const int head_offset_of_grid = bidx * WARPS_PER_BLOCK * HEADS_PER_WARP; const int head_offset_of_block = warp_id * HEADS_PER_WARP; const int head_id_in_warp = lane_id / threads_per_head; const int head_id = head_offset_of_grid + head_offset_of_block + head_id_in_warp; const int num_head_id = (head_id / seqlen) % num_head; const int ldg_per_head = size_per_head / ELEMENT_PER_LDG; const float logit_scale = (logit_scales != NULL) ? 
static_cast<float>(logit_scales[num_head_id]) : 1.0f; T_IO* normalize_shm = (T_IO*)normalize_kernel_v2_shm; __shared__ float norm_factor[WARPS_PER_BLOCK * HEADS_PER_WARP]; // one factor for one head const int input_offset = head_offset_of_grid * ldg_per_head; // load from gmem to smem #pragma unroll for (int i = tidx; i < HEADS_PER_WARP * WARPS_PER_BLOCK * ldg_per_head; i += blockDim.x) { const int input_idx = input_offset + i; normalize_shm[i] = data[input_idx]; } __syncthreads(); // local sum float local_sum = 0.0f; const int elements_per_thread = (size_per_head + threads_per_head - 1) / threads_per_head; const int thread_id_in_head = tidx % threads_per_head; const int size_offset_in_head = elements_per_thread * thread_id_in_head; const int shm_offset = (head_offset_of_block + head_id_in_warp) * size_per_head + size_offset_in_head; const T* shm_ptr = (const T*)normalize_shm; #pragma unroll for (int size_i = 0; size_i < elements_per_thread && (size_i + size_offset_in_head < size_per_head); size_i++) { const int shm_idx = shm_offset + size_i; const float tmp = static_cast<float>(shm_ptr[shm_idx]); local_sum += tmp * tmp; } // reduction to get norm_factor #pragma unroll for (int i = 1; i < threads_per_head; i <<= 1) { local_sum += __shfl_xor_sync(FINAL_MASK, local_sum, i, 32); } if (thread_id_in_head == 0) { const int norm_factor_idx = head_offset_of_block + head_id_in_warp; norm_factor[norm_factor_idx] = rsqrtf(local_sum + 1e-6) * logit_scale; } __syncthreads(); // normalize and sts to gmem #pragma unroll for (int i = tidx; i < HEADS_PER_WARP * WARPS_PER_BLOCK * ldg_per_head; i += blockDim.x) { const int norm_factor_idx = i / ldg_per_head; const float norm_factor_val = norm_factor[norm_factor_idx]; T_IO val = normalize_shm[i]; T* val_ptr = (T*)(&val); #pragma unroll for (int ei = 0; ei < ELEMENT_PER_LDG; ei++) { val_ptr[ei] = T(static_cast<float>(val_ptr[ei]) * norm_factor_val); } const int input_idx = input_offset + i; data[input_idx] = val; } } #define NORMALIZE_MACRO(HEADS_PER_WARP_, T_4, T_2, T) \ dim3 grid(total_head_count / (HEADS_PER_WARP_ * WARPS_PER_BLOCK)); \ dim3 block(32 * WARPS_PER_BLOCK); \ const int shm_size = HEADS_PER_WARP_ * WARPS_PER_BLOCK * size_per_head * sizeof(T); \ if (size_per_head % 4 == 0) { \ hipLaunchKernelGGL(( normalize_kernel_v2<T_4, T, HEADS_PER_WARP_, WARPS_PER_BLOCK, 4>), dim3(grid), dim3(block), shm_size, stream, \ (T_4*)data, (const T*)logit_scales, batch, seqlen, num_head, size_per_head); \ } \ else if (size_per_head % 2 == 0) { \ hipLaunchKernelGGL(( normalize_kernel_v2<T_2, T, HEADS_PER_WARP_, WARPS_PER_BLOCK, 2>), dim3(grid), dim3(block), shm_size, stream, \ (T_2*)data, (const T*)logit_scales, batch, seqlen, num_head, size_per_head); \ } \ else { \ hipLaunchKernelGGL(( normalize_kernel_v2<T, T, HEADS_PER_WARP_, WARPS_PER_BLOCK, 1>), dim3(grid), dim3(block), shm_size, stream, \ (T*)data, (const T*)logit_scales, batch, seqlen, num_head, size_per_head); \ } // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale/sqrt(size_per_head)) template<typename T> void invokeNormalize( T* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream) { const int WARPS_PER_BLOCK = 4; const int total_head_count = batch * num_head * seqlen; if (std::is_same<T, float>::value) { // WARPS_PER_BLOCK = 4, HEADS_PER_WARP = 4 if (total_head_count % (WARPS_PER_BLOCK * 4) == 0) { NORMALIZE_MACRO(4, float4, float2, float); } // WARPS_PER_BLOCK = 4, HEAD_PER_WARPS = 2 else if 
(total_head_count % (WARPS_PER_BLOCK * 2) == 0) { NORMALIZE_MACRO(2, float4, float2, float); } // WARPS_PER_BLOCK = 4, HEAD_PER_WARPS = 1 else if (total_head_count % WARPS_PER_BLOCK == 0) { NORMALIZE_MACRO(1, float4, float2, float); } else { dim3 grid(seqlen, num_head, batch); dim3 block((size_per_head + 31) / 32 * 32); hipLaunchKernelGGL(( normalize_kernel), dim3(grid), dim3(block), 0, stream, (float*)data, (const float*)logit_scales, batch, seqlen, num_head, size_per_head); } } else if (std::is_same<T, half>::value) { // WARPS_PER_BLOCK = 4, HEADS_PER_WARP = 4 if (total_head_count % (WARPS_PER_BLOCK * 4) == 0) { NORMALIZE_MACRO(4, half4, half2, half); } // WARPS_PER_BLOCK = 4, HEAD_PER_WARPS = 2 else if (total_head_count % (WARPS_PER_BLOCK * 2) == 0) { NORMALIZE_MACRO(2, half4, half2, half); } // WARPS_PER_BLOCK = 4, HEAD_PER_WARPS = 1 else if (total_head_count % WARPS_PER_BLOCK == 0) { NORMALIZE_MACRO(1, half4, half2, half); } else { if (size_per_head % 2 == 0) { dim3 grid(seqlen, num_head, batch); dim3 block((size_per_head / 2 + 31) / 32 * 32); hipLaunchKernelGGL(( normalize_kernel), dim3(grid), dim3(block), 0, stream, (half2*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head); } else { dim3 grid(seqlen, num_head, batch); dim3 block((size_per_head + 31) / 32 * 32); hipLaunchKernelGGL(( normalize_kernel), dim3(grid), dim3(block), 0, stream, (half*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head); } } } #ifdef ENABLE_BF16 else { dim3 grid(seqlen, num_head, batch); dim3 block((size_per_head + 31) / 32 * 32); hipLaunchKernelGGL(( normalize_kernel), dim3(grid), dim3(block), 0, stream, data, logit_scales, batch, seqlen, num_head, size_per_head); } #endif } #undef NORMALIZE_MACRO template void invokeNormalize<half>( half* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream); template void invokeNormalize<float>(float* data, const float* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream); #ifdef ENABLE_BF16 template void invokeNormalize<__nv_bfloat16>(__nv_bfloat16* data, const __nv_bfloat16* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream); #endif /******************* invokeNormalize ***********************/ // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale) // grid(seqlen, num_head, batch) // block((size_per_head + 31)/31*32) template<typename T> __global__ void normalize_kernel(int8_t* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head, const float deQ_scale, const float Q_scale) { const int batch_id = blockIdx.z; const int head_id = blockIdx.y; const int seq_id = blockIdx.x; const int size_id = threadIdx.x; const int input_idx = ((batch_id * num_head + head_id) * seqlen + seq_id) * size_per_head + size_id; const float logit_scale = (logit_scales != NULL) ? static_cast<float>(logit_scales[head_id]) : 1.0f; const bool flag = size_id < size_per_head; float input_val = (flag) ? static_cast<float>(data[input_idx]) * deQ_scale : 0.0f; __shared__ float norm_factor; const float local_sum = input_val * input_val; const float local_sum_all = blockDim.x <= 32 ? 
warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrt(local_sum_all + 1e-6) * logit_scale; } __syncthreads(); input_val *= norm_factor; if (flag) { data[input_idx] = float_to_int8_rn(input_val * Q_scale); } } // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale/sqrt(size_per_head)) // grid(seqlen, num_head, batch) // block((size_per_head/2 + 31)/31*32) template<> __global__ void normalize_kernel(int8_t* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, const float deQ_scale, const float Q_scale) { const int batch_id = blockIdx.z; const int head_id = blockIdx.y; const int seq_id = blockIdx.x; const int size_id = threadIdx.x; const int input_idx = ((batch_id * num_head + head_id) * seqlen + seq_id) * size_per_head / 2 + size_id; const float logit_scale = (logit_scales != NULL) ? float(logit_scales[head_id]) : 1.0f; float2 input_val_float2 = {0.0f, 0.0f}; const bool flag = size_id < size_per_head / 2; char2* dataPtr = (char2*)data; if (flag) { char2 dataTmp = dataPtr[input_idx]; input_val_float2.x = static_cast<float>(dataTmp.x) * deQ_scale; input_val_float2.y = static_cast<float>(dataTmp.y) * deQ_scale; } __shared__ float norm_factor; const float local_sum = input_val_float2.x * input_val_float2.x + input_val_float2.y * input_val_float2.y; const float local_sum_all = blockDim.x <= 32 ? warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrtf(local_sum_all + 1e-6) * logit_scale; } __syncthreads(); input_val_float2.x *= norm_factor; input_val_float2.y *= norm_factor; if (flag) { char2 dataTmp; dataTmp.x = float_to_int8_rn(input_val_float2.x * Q_scale); dataTmp.y = float_to_int8_rn(input_val_float2.y * Q_scale); dataPtr[input_idx] = dataTmp; } } // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale) // grid(batch*num_head*seqlen/(HEADS_PER_WARP*WARPS_PER_BLOCK)) // block(32*WARPS_PER_BLOCK) // size_per_head % ELEMENT_PER_LDG == 0 template<int HEADS_PER_WARP, int WARPS_PER_BLOCK, int ELEMENT_PER_LDG> __global__ void normalize_kernel_v2(char4* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, const float deQ_scale, const float Q_scale) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int threads_per_head = 32 / HEADS_PER_WARP; const int lane_id = tidx % 32; const int warp_id = tidx / 32; const int head_offset_of_grid = bidx * WARPS_PER_BLOCK * HEADS_PER_WARP; const int head_offset_of_block = warp_id * HEADS_PER_WARP; const int head_id_in_warp = lane_id / threads_per_head; const int head_id = head_offset_of_grid + head_offset_of_block + head_id_in_warp; const int num_head_id = (head_id / seqlen) % num_head; const int ldg_per_head = size_per_head / ELEMENT_PER_LDG; const float logit_scale = (logit_scales != NULL) ? 
static_cast<float>(logit_scales[num_head_id]) : 1.0f; char4* normalize_shm = (char4*)normalize_kernel_v2_shm; __shared__ float norm_factor[WARPS_PER_BLOCK * HEADS_PER_WARP]; // one factor for one head const int input_offset = head_offset_of_grid * ldg_per_head; // load from gmem to smem #pragma unroll for (int i = tidx; i < HEADS_PER_WARP * WARPS_PER_BLOCK * ldg_per_head; i += blockDim.x) { const int input_idx = input_offset + i; normalize_shm[i] = data[input_idx]; } __syncthreads(); // local sum float local_sum = 0.0f; const int elements_per_thread = (size_per_head + threads_per_head - 1) / threads_per_head; const int thread_id_in_head = tidx % threads_per_head; const int size_offset_in_head = elements_per_thread * thread_id_in_head; const int shm_offset = (head_offset_of_block + head_id_in_warp) * size_per_head + size_offset_in_head; const int8_t* shm_ptr = (const int8_t*)normalize_shm; #pragma unroll for (int size_i = 0; size_i < elements_per_thread && (size_i + size_offset_in_head < size_per_head); size_i++) { const int shm_idx = shm_offset + size_i; const float tmp = static_cast<float>(shm_ptr[shm_idx]) * deQ_scale; local_sum += tmp * tmp; } // reduction to get norm_factor #pragma unroll for (int i = 1; i < threads_per_head; i <<= 1) { local_sum += __shfl_xor_sync(FINAL_MASK, local_sum, i, 32); } if (thread_id_in_head == 0) { const int norm_factor_idx = head_offset_of_block + head_id_in_warp; norm_factor[norm_factor_idx] = rsqrtf(local_sum + 1e-6) * logit_scale; } __syncthreads(); // normalize and sts to gmem #pragma unroll for (int i = tidx; i < HEADS_PER_WARP * WARPS_PER_BLOCK * ldg_per_head; i += blockDim.x) { const int norm_factor_idx = i / ldg_per_head; const float norm_factor_val = norm_factor[norm_factor_idx]; char4 val = normalize_shm[i]; int8_t* val_ptr = (int8_t*)(&val); #pragma unroll for (int ei = 0; ei < ELEMENT_PER_LDG; ei++) { val_ptr[ei] = float_to_int8_rn(static_cast<float>(val_ptr[ei]) * deQ_scale * norm_factor_val * Q_scale); } const int input_idx = input_offset + i; data[input_idx] = val; } } #define NORMALIZE_MACRO \ const int shm_size = WARPS_PER_BLOCK * HEADS_PER_WARP * size_per_head; \ dim3 grid(total_head_count / (WARPS_PER_BLOCK * HEADS_PER_WARP)); \ dim3 block(32 * WARPS_PER_BLOCK); \ hipLaunchKernelGGL(( normalize_kernel_v2<HEADS_PER_WARP, WARPS_PER_BLOCK, 4>), dim3(grid), dim3(block), shm_size, stream, \ (char4*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, deQ_scale, Q_scale); // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale/sqrt(size_per_head)) template<typename T> void invokeNormalize(int8_t* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream, const float deQ_scale, const float Q_scale) { if (std::is_same<T, half>::value) { if (size_per_head % 4 == 0) { const int HEADS_PER_WARP = 4; const int total_head_count = seqlen * num_head * batch; if (total_head_count % (HEADS_PER_WARP * 4) == 0) { const int WARPS_PER_BLOCK = 4; NORMALIZE_MACRO } else if (total_head_count % (HEADS_PER_WARP * 2) == 0) { const int WARPS_PER_BLOCK = 2; NORMALIZE_MACRO } else if (total_head_count % (HEADS_PER_WARP * 1) == 0) { const int WARPS_PER_BLOCK = 1; NORMALIZE_MACRO } else { dim3 grid(seqlen, num_head, batch); dim3 block((size_per_head / 2 + 31) / 32 * 32); hipLaunchKernelGGL(( normalize_kernel), dim3(grid), dim3(block), 0, stream, (int8_t*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, deQ_scale, Q_scale); } 
} else if (size_per_head % 2 == 0) { dim3 grid(seqlen, num_head, batch); dim3 block((size_per_head / 2 + 31) / 32 * 32); hipLaunchKernelGGL(( normalize_kernel), dim3(grid), dim3(block), 0, stream, (int8_t*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, deQ_scale, Q_scale); } else { printf("[ERROR][invokeNormalize(INT8 version)] only supports size_per_head %% 2 == 0!\n"); exit(-1); } } else { printf("[ERROR][invokeNormalize(INT8 version)] only supports [T=half] !\n"); exit(-1); } } #undef NORMALIZE_MACRO template void invokeNormalize<half>(int8_t* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream, const float deQ_scale, const float Q_scale); template void invokeNormalize<float>(int8_t* data, const float* logit_scales, int batch, int seqlen, int num_head, int size_per_head, hipStream_t stream, const float deQ_scale, const float Q_scale); } // namespace fastertransformer
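For reference, every kernel in the file above applies the same per-head operation: L2-normalize the head's vector with a 1e-6 epsilon inside the rsqrt, then multiply by that head's logit_scale (times sqrt(size_per_head) for q in the FMHA variants, so that the 1/sqrt(d) applied inside the fused attention kernel cancels; the INT8 variants additionally de-quantize before and re-quantize after). The short host-side sketch below restates only that math outside the FasterTransformer API; the function name and the test values are illustrative assumptions.

// Host-side reference for the per-head normalization the kernels above perform
// on the GPU. normalize_head_reference and the sizes below are illustrative and
// not part of the FasterTransformer API; quantization scales are ignored here.
#include <cmath>
#include <cstdio>
#include <vector>

void normalize_head_reference(float* head, int size_per_head, float logit_scale)
{
    float sum_sq = 0.0f;
    for (int i = 0; i < size_per_head; ++i)
        sum_sq += head[i] * head[i];
    // Same formula as the kernels: rsqrt(sum + 1e-6) * logit_scale.
    const float norm_factor = 1.0f / std::sqrt(sum_sq + 1e-6f) * logit_scale;
    for (int i = 0; i < size_per_head; ++i)
        head[i] *= norm_factor;
}

int main()
{
    const int size_per_head = 32;
    const float logit_scale = 1.25f;                            // stands in for logit_scales[head_id]
    const float fmha_factor = std::sqrt((float)size_per_head);  // extra factor applied to q in the FMHA path

    std::vector<float> q(size_per_head, 0.5f);
    std::vector<float> k(size_per_head, -0.25f);

    normalize_head_reference(q.data(), size_per_head, logit_scale * fmha_factor);
    normalize_head_reference(k.data(), size_per_head, 1.0f);    // k gets no logit scale in the FMHA path

    printf("q[0] = %f, k[0] = %f\n", q[0], k[0]);
    return 0;
}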
0112bdf31da7d65c1026fdc3a9c207681399d1b7.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "src/fastertransformer/kernels/image_shift_partition_kernels.h" #include "src/fastertransformer/kernels/reduce_kernel_utils.cuh" namespace fastertransformer { /******************* invokeNormalizeForFMHA ***********************/ // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on size_per_head for q & k [-, -, -, 0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) // grid(2*num_head, seqlen, batch) // block((size_per_head/2 + 31)/31*32) __global__ void normalize_for_FMHA_kernel( half2* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, float factor_of_fmha) { const int batch_seqlen_id = blockIdx.z * seqlen + blockIdx.y; const int head_id = blockIdx.x / 2; const int qkv_id = blockIdx.x % 2; const int size_id = threadIdx.x; const size_t input_idx = ((batch_seqlen_id * num_head + head_id) * 3 + qkv_id) * size_per_head / 2 + size_id; const float logit_scale = ((qkv_id == 0) ? float(logit_scales[head_id]) * factor_of_fmha : 1.0f); const half2 zero = {half(0.0f), half(0.0f)}; const bool flag = size_id < size_per_head / 2; half2 input_val = (flag) ? data[input_idx] : zero; float2 input_val_float2 = __half22float2(input_val); __shared__ float norm_factor; const float local_sum = input_val_float2.x * input_val_float2.x + input_val_float2.y * input_val_float2.y; const float local_sum_all = blockDim.x <= 32 ? 
warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrtf(local_sum_all + 1e-6) * logit_scale; } __syncthreads(); input_val_float2.x *= norm_factor; input_val_float2.y *= norm_factor; input_val = __float22half2_rn(input_val_float2); if (flag) { data[input_idx] = input_val; } } // This kernel is designed for size_per_head = 32, typical case in swin // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on size_per_head for q & k [-, -, -, 0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) // grid(batch*seqlen*num_head/(LOOP*HEADS_PER_WARP*WARPS_PER_BLOCK)) // block(32*WARPS_PER_BLOCK) each warp load 64*LOOP*HEADS_PER_WARP elements (LOOP*HEADS_PER_WARP heads of q&k) and do // normlization template<int LOOP, int HEADS_PER_WARP, int WARPS_PER_BLOCK> __global__ void normalize_for_FMHA_headz32_kernel( half2* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, float factor_of_fmha) { __shared__ half2 data_shm[LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP * 32]; __shared__ float norm_factor[LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP * 2]; const int batch_seqlen_head_offset = blockIdx.x * LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP; const int seqlen_head_offset = batch_seqlen_head_offset % (seqlen * num_head); const int tid = threadIdx.x; const int warp_id = tid / 32; const int tid_in_warp = tid % 32; const int HEADS_PER_WARP_x_WARPS_PER_BLOCK = HEADS_PER_WARP * WARPS_PER_BLOCK; // load from gmem to smem #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int head_offset = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP; #pragma unroll for (int head_i = 0; head_i < HEADS_PER_WARP; head_i++) { // one warp loads one head (32 threads load 32 half2) const size_t input_idx = ((batch_seqlen_head_offset + head_offset + head_i) * 3) * size_per_head / 2 + tid_in_warp; // we need to ensure no out of memory address when launch kernel. 
const half2 input_val = data[input_idx]; const int shm_idx = (head_offset + head_i) * 32 + tid_in_warp; data_shm[shm_idx] = input_val; } } __syncthreads(); // we use one warp to deal with HEADS_PER_WARP heads at one time, // so one thread deals with part of one single head at one time float local_sums[LOOP]; const int threads_per_head = 32 / HEADS_PER_WARP; // each head has 32 half2 const int half2Size_per_thread = 32 / threads_per_head; const int head_in_warp = tid_in_warp / threads_per_head; const int id_offset_in_head = tid_in_warp % threads_per_head; const int size_offset_in_head = half2Size_per_thread * id_offset_in_head; const int head_offset_of_warp = warp_id * HEADS_PER_WARP + head_in_warp; #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { float local_sum = 0.0f; const int shm_offset = (loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp) * 32 + size_offset_in_head; #pragma unroll for (int size_i = 0; size_i < half2Size_per_thread; size_i++) { const int shm_idx = shm_offset + size_i; const float2 tmp = __half22float2(data_shm[shm_idx]); local_sum += tmp.x * tmp.x + tmp.y * tmp.y; } local_sums[loop_i] = local_sum; } const int threads_per_head_2 = threads_per_head / 2; const bool is_q = id_offset_in_head < threads_per_head_2; #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int seqlen_head_id = seqlen_head_offset + loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp; const int head_id = seqlen_head_id % num_head; float local_sum = local_sums[loop_i]; #pragma unroll for (int i = 1; i < threads_per_head_2; i <<= 1) { local_sum += __shfl_xor_sync(FINAL_MASK, local_sum, i, 32); } if (id_offset_in_head % threads_per_head_2 == 0) { const float logit_scale = is_q ? float(logit_scales[head_id]) * factor_of_fmha : 1.f; const int norm_factor_idx = 2 * (loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp) + id_offset_in_head / threads_per_head_2; norm_factor[norm_factor_idx] = rsqrtf(local_sum + 1e-6) * logit_scale; } } __syncthreads(); // normalize and store to gmem #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int head_offset = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP; #pragma unroll for (int head_i = 0; head_i < HEADS_PER_WARP; head_i++) { // we need to ensure no out of memory address when launch kernel, one warp deals with one head const size_t output_idx = ((batch_seqlen_head_offset + head_offset + head_i) * 3) * size_per_head / 2 + tid_in_warp; const int head_idx = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP + head_i; const int shm_idx = (head_idx)*32 + tid_in_warp; const int norm_factor_idx = 2 * (head_idx) + tid_in_warp / 16; float norm_factor_ = norm_factor[norm_factor_idx]; half2 input_val = data_shm[shm_idx]; float2 input_val_float = __half22float2(input_val); input_val_float.x *= norm_factor_; input_val_float.y *= norm_factor_; data[output_idx] = __float22half2_rn(input_val_float); } } } #define NORMALIZE_FMHA_HEAD32_MACRO(LOOP, HEADS_PER_WARP, WARPS_PER_BLOCK) \ dim3 grid(batch* seqlen_num_head / (LOOP * HEADS_PER_WARP * WARPS_PER_BLOCK)); \ dim3 block(32 * WARPS_PER_BLOCK); \ normalize_for_FMHA_headz32_kernel<LOOP, HEADS_PER_WARP, WARPS_PER_BLOCK><<<grid, block, 0, stream>>>( \ (half2*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, sqrt(size_per_head)); // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on size_per_head for q & k [-, -, -, 
0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) template<typename T> void invokeNormalizeForFMHA( T* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream) { if (std::is_same<T, half>::value) { if (size_per_head == 32) { const int seqlen_num_head = seqlen * num_head; // LOOP = 2, HEADS_PER_WARP = 4, WARPS_PER_BLOCK = 2 if (seqlen_num_head % (2 * 4 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(2, 4, 2) } // LOOP = 1, HEADS_PER_WARP = 4, WARPS_PER_BLOCK = 2 else if (seqlen_num_head % (1 * 4 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 4, 2) } // LOOP = 1, HEADS_PER_WARP = 2, WARPS_PER_BLOCK = 2 else if (seqlen_num_head % (1 * 2 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 2, 2) } // LOOP = 1, HEADS_PER_WARP = 2, WARPS_PER_BLOCK = 1 else if (seqlen_num_head % (1 * 2 * 1) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 2, 1) } // LOOP = 1, HEADS_PER_WARP = 1, WARPS_PER_BLOCK = 1 else { NORMALIZE_FMHA_HEAD32_MACRO(1, 1, 1) } } else if (size_per_head % 2 == 0) { dim3 grid(2 * num_head, seqlen, batch); dim3 block((size_per_head / 2 + 31) / 32 * 32); normalize_for_FMHA_kernel<<<grid, block, 0, stream>>>( (half2*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, sqrt(size_per_head)); } else { printf("[ERROR][invokeNormalizeForFMHA] only supports size_per_head %% 2 == 0!\n"); exit(-1); } } else { printf("[ERROR][invokeNormalizeForFMHA] only supports half I/O!\n"); exit(-1); } } #undef NORMALIZE_FMHA_HEAD32_MACRO template void invokeNormalizeForFMHA<half>( half* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream); template void invokeNormalizeForFMHA<float>(float* data, const float* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream); #ifdef ENABLE_BF16 template void invokeNormalizeForFMHA<__nv_bfloat16>(__nv_bfloat16* data, const __nv_bfloat16* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream); #endif // This kernel is designed for size_per_head = 32, typical case in swin // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on size_per_head for q & k [-, -, -, 0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) // grid(batch*seqlen*num_head/(LOOP*HEADS_PER_WARP*WARPS_PER_BLOCK)) // block(32*WARPS_PER_BLOCK) each warp load 64*LOOP*HEADS_PER_WARP elements (LOOP*HEADS_PER_WARP heads of q&k) and do // normlization template<int LOOP, int HEADS_PER_WARP, int WARPS_PER_BLOCK> __global__ void normalize_for_FMHA_headz32_INT8_kernel(char2* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, const float query_deQ_scale, const float key_deQ_scale, const float query_Q_scale, const float key_Q_scale) { __shared__ char2 data_shm[LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP * 32]; __shared__ float norm_factor[LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP * 2]; const int batch_seqlen_head_offset = blockIdx.x * LOOP * WARPS_PER_BLOCK * HEADS_PER_WARP; const int seqlen_head_offset = batch_seqlen_head_offset % (seqlen * num_head); const int tid = threadIdx.x; const int warp_id = tid / 32; const int tid_in_warp = tid % 32; const int HEADS_PER_WARP_x_WARPS_PER_BLOCK = HEADS_PER_WARP * WARPS_PER_BLOCK; // load from gmem to smem 
#pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int head_offset = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP; #pragma unroll for (int head_i = 0; head_i < HEADS_PER_WARP; head_i++) { // one warp loads one head (32 threads load 32 half2) const size_t input_idx = ((batch_seqlen_head_offset + head_offset + head_i) * 3) * size_per_head / 2 + tid_in_warp; // we need to ensure no out of memory address when launch kernel. const char2 input_val = data[input_idx]; const int shm_idx = (head_offset + head_i) * 32 + tid_in_warp; data_shm[shm_idx] = input_val; } } __syncthreads(); // we use one warp to deal with HEADS_PER_WARP heads at one time, // so one thread deals with part of one single head at one time float local_sums[LOOP]; const int threads_per_head = 32 / HEADS_PER_WARP; // each head has 32 half2 const int half2Size_per_thread = 32 / threads_per_head; const int head_in_warp = tid_in_warp / threads_per_head; const int id_offset_in_head = tid_in_warp % threads_per_head; const int size_offset_in_head = half2Size_per_thread * id_offset_in_head; const int head_offset_of_warp = warp_id * HEADS_PER_WARP + head_in_warp; const int threads_per_head_2 = threads_per_head / 2; const bool is_q = id_offset_in_head < threads_per_head_2; float deQ_scale = is_q ? query_deQ_scale : key_deQ_scale; float Q_scale = is_q ? query_Q_scale : key_Q_scale; #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { float local_sum = 0.0f; const int shm_offset = (loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp) * 32 + size_offset_in_head; #pragma unroll for (int size_i = 0; size_i < half2Size_per_thread; size_i++) { const int shm_idx = shm_offset + size_i; const char2 tmp = data_shm[shm_idx]; float2 tmpFloat; tmpFloat.x = static_cast<float>(tmp.x) * deQ_scale; tmpFloat.y = static_cast<float>(tmp.y) * deQ_scale; local_sum += tmpFloat.x * tmpFloat.x + tmpFloat.y * tmpFloat.y; } local_sums[loop_i] = local_sum; } #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int seqlen_head_id = seqlen_head_offset + loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp; const int head_id = seqlen_head_id % num_head; float local_sum = local_sums[loop_i]; #pragma unroll for (int i = 1; i < threads_per_head_2; i <<= 1) { local_sum += __shfl_xor_sync(FINAL_MASK, local_sum, i, 32); } if (id_offset_in_head % threads_per_head_2 == 0) { const float logit_scale = is_q ? float(logit_scales[head_id]) : 1.f; const int norm_factor_idx = 2 * (loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + head_offset_of_warp) + id_offset_in_head / threads_per_head_2; norm_factor[norm_factor_idx] = rsqrtf(local_sum + 1e-6) * logit_scale; } } __syncthreads(); // normalize and store to gmem #pragma unroll for (int loop_i = 0; loop_i < LOOP; loop_i++) { const int head_offset = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP; #pragma unroll for (int head_i = 0; head_i < HEADS_PER_WARP; head_i++) { // we need to ensure no out of memory address when launch kernel, one warp deals with one head const size_t output_idx = ((batch_seqlen_head_offset + head_offset + head_i) * 3) * size_per_head / 2 + tid_in_warp; const int head_idx = loop_i * HEADS_PER_WARP_x_WARPS_PER_BLOCK + warp_id * HEADS_PER_WARP + head_i; const int shm_idx = (head_idx)*32 + tid_in_warp; const int norm_factor_idx = 2 * (head_idx) + tid_in_warp / 16; deQ_scale = (tid_in_warp < 16) ? query_deQ_scale : key_deQ_scale; Q_scale = (tid_in_warp < 16) ? 
query_Q_scale : key_Q_scale; float norm_factor_ = norm_factor[norm_factor_idx]; char2 input_val = data_shm[shm_idx]; float2 input_val_float; input_val_float.x = static_cast<float>(input_val.x) * deQ_scale * norm_factor_ * Q_scale; input_val_float.y = static_cast<float>(input_val.y) * deQ_scale * norm_factor_ * Q_scale; input_val.x = float_to_int8_rn(input_val_float.x); input_val.y = float_to_int8_rn(input_val_float.y); data[output_idx] = input_val; } } } // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on size_per_head for q & k [-, -, -, 0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) // grid(2*num_head, seqlen, batch) // block((size_per_head/2 + 31)/31*32) __global__ void normalize_for_FMHA_INT8_kernel(char2* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, const float query_deQ_scale, const float key_deQ_scale, const float query_Q_scale, const float key_Q_scale) { const int batch_seqlen_id = blockIdx.z * seqlen + blockIdx.y; const int head_id = blockIdx.x / 2; const int qkv_id = blockIdx.x % 2; const int size_id = threadIdx.x; const size_t input_idx = ((batch_seqlen_id * num_head + head_id) * 3 + qkv_id) * size_per_head / 2 + size_id; const float logit_scale = ((qkv_id == 0) ? float(logit_scales[head_id]) : 1.0f); const float deQ_scale = (qkv_id == 0) ? query_deQ_scale : key_deQ_scale; const float Q_scale = (qkv_id == 0) ? query_Q_scale : key_Q_scale; float2 input_val_float2 = {0.0f, 0.0f}; const bool flag = size_id < size_per_head / 2; if (flag) { char2 input_data = data[input_idx]; input_val_float2.x = static_cast<float>(input_data.x) * deQ_scale; input_val_float2.y = static_cast<float>(input_data.y) * deQ_scale; } __shared__ float norm_factor; const float local_sum = input_val_float2.x * input_val_float2.x + input_val_float2.y * input_val_float2.y; const float local_sum_all = blockDim.x <= 32 ? 
warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrtf(local_sum_all + 1e-6) * logit_scale * Q_scale; } __syncthreads(); if (flag) { char2 output_val; output_val.x = float_to_int8_rn(input_val_float2.x * norm_factor); output_val.y = float_to_int8_rn(input_val_float2.y * norm_factor); data[input_idx] = output_val; } } #define NORMALIZE_FMHA_HEAD32_MACRO(LOOP, HEADS_PER_WARP, WARPS_PER_BLOCK) \ dim3 grid(batch* seqlen_num_head / (LOOP * HEADS_PER_WARP * WARPS_PER_BLOCK)); \ dim3 block(32 * WARPS_PER_BLOCK); \ normalize_for_FMHA_headz32_INT8_kernel<LOOP, HEADS_PER_WARP, WARPS_PER_BLOCK> \ <<<grid, block, 0, stream>>>((char2*)data, \ (const half*)logit_scales, \ batch, \ seqlen, \ num_head, \ size_per_head, \ query_deQ_scale, \ key_deQ_scale, \ query_Q_scale, \ key_Q_scale); // input should be the qkv for trt fmha kernels with shape of [batch, seqlen, num_head, 3, size_per_head] // do normlization on size_per_head for q & k [-, -, -, 0, *] && [-, -, -, 1, *] && *(logit_scale/sqrt(size_per_head)) // for q (since fmha will divide sqrt(size_per_head), we multiple sqrt(size_per_head) first) template<typename T> void invokeNormalizeForFMHA(int8_t* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream, const float query_deQ_scale, const float key_deQ_scale, const float query_Q_scale, const float key_Q_scale) { if (std::is_same<T, half>::value) { if (size_per_head == 32) { const int seqlen_num_head = seqlen * num_head; if (seqlen_num_head % (2 * 4 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(2, 4, 2) } else if (seqlen_num_head % (1 * 4 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 4, 2) } else if (seqlen_num_head % (1 * 2 * 2) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 2, 2) } else if (seqlen_num_head % (1 * 2 * 1) == 0) { NORMALIZE_FMHA_HEAD32_MACRO(1, 2, 1) } else { NORMALIZE_FMHA_HEAD32_MACRO(1, 1, 1) } } else if (size_per_head % 2 == 0) { dim3 grid(2 * num_head, seqlen, batch); dim3 block((size_per_head / 2 + 31) / 32 * 32); normalize_for_FMHA_INT8_kernel<<<grid, block, 0, stream>>>((char2*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, query_deQ_scale, key_deQ_scale, query_Q_scale, key_Q_scale); } else { printf("[ERROR][invokeNormalizeForFMHA(INT8 version)] only supports size_per_head %% 2 == 0!\n"); exit(-1); } } else { printf("[ERROR][invokeNormalizeForFMHA(INT8 version)] only supports half Input!\n"); exit(-1); } } #undef NORMALIZE_FMHA_HEAD32_MACRO template void invokeNormalizeForFMHA<float>(int8_t* data, const float* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream, const float query_deQ_scale, const float key_deQ_scale, const float query_Q_scale, const float key_Q_scale); template void invokeNormalizeForFMHA<half>(int8_t* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream, const float query_deQ_scale, const float key_deQ_scale, const float query_Q_scale, const float key_Q_scale); /******************* invokeNormalize ***********************/ // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale) // grid(seqlen, num_head, batch) // block((size_per_head + 31)/31*32) // TODO : the trick of normalize_for_FMHA_headz32_kernel can be used here template<typename T, typename T2> __global__ void normalize_kernel(T* data, const T2* logit_scales, int batch, int seqlen, int num_head, int size_per_head) { const int batch_id = blockIdx.z; 
const int head_id = blockIdx.y; const int seq_id = blockIdx.x; const int size_id = threadIdx.x; const int input_idx = ((batch_id * num_head + head_id) * seqlen + seq_id) * size_per_head + size_id; const float logit_scale = (logit_scales != NULL) ? static_cast<float>(logit_scales[head_id]) : 1.0f; const bool flag = size_id < size_per_head; float input_val = (flag) ? static_cast<float>(data[input_idx]) : 0.0f; __shared__ float norm_factor; const float local_sum = input_val * input_val; const float local_sum_all = blockDim.x <= 32 ? warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrt(local_sum_all + 1e-6) * logit_scale; } __syncthreads(); input_val *= norm_factor; if (flag) { data[input_idx] = input_val; } } // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale/sqrt(size_per_head)) // grid(seqlen, num_head, batch) // block((size_per_head/2 + 31)/31*32) // TODO : the trick of normalize_for_FMHA_headz32_kernel can be used here template<> __global__ void normalize_kernel(half2* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head) { const int batch_id = blockIdx.z; const int head_id = blockIdx.y; const int seq_id = blockIdx.x; const int size_id = threadIdx.x; const int input_idx = ((batch_id * num_head + head_id) * seqlen + seq_id) * size_per_head / 2 + size_id; const float logit_scale = (logit_scales != NULL) ? float(logit_scales[head_id]) : 1.0f; const half2 zero = {half(0.0f), half(0.0f)}; const bool flag = size_id < size_per_head / 2; half2 input_val = (flag) ? data[input_idx] : zero; float2 input_val_float2 = __half22float2(input_val); __shared__ float norm_factor; const float local_sum = input_val_float2.x * input_val_float2.x + input_val_float2.y * input_val_float2.y; const float local_sum_all = blockDim.x <= 32 ? warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrtf(local_sum_all + 1e-6) * logit_scale; } __syncthreads(); input_val_float2.x *= norm_factor; input_val_float2.y *= norm_factor; input_val = __float22half2_rn(input_val_float2); if (flag) { data[input_idx] = input_val; } } extern __shared__ char normalize_kernel_v2_shm[]; // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale) // grid(batch*num_head*seqlen/(HEADS_PER_WARP*WARPS_PER_BLOCK)) // block(32*WARPS_PER_BLOCK) // size_per_head % ELEMENT_PER_LDG == 0 template<typename T_IO, typename T, int HEADS_PER_WARP, int WARPS_PER_BLOCK, int ELEMENT_PER_LDG> __global__ void normalize_kernel_v2(T_IO* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int threads_per_head = 32 / HEADS_PER_WARP; const int lane_id = tidx % 32; const int warp_id = tidx / 32; const int head_offset_of_grid = bidx * WARPS_PER_BLOCK * HEADS_PER_WARP; const int head_offset_of_block = warp_id * HEADS_PER_WARP; const int head_id_in_warp = lane_id / threads_per_head; const int head_id = head_offset_of_grid + head_offset_of_block + head_id_in_warp; const int num_head_id = (head_id / seqlen) % num_head; const int ldg_per_head = size_per_head / ELEMENT_PER_LDG; const float logit_scale = (logit_scales != NULL) ? 
static_cast<float>(logit_scales[num_head_id]) : 1.0f; T_IO* normalize_shm = (T_IO*)normalize_kernel_v2_shm; __shared__ float norm_factor[WARPS_PER_BLOCK * HEADS_PER_WARP]; // one factor for one head const int input_offset = head_offset_of_grid * ldg_per_head; // load from gmem to smem #pragma unroll for (int i = tidx; i < HEADS_PER_WARP * WARPS_PER_BLOCK * ldg_per_head; i += blockDim.x) { const int input_idx = input_offset + i; normalize_shm[i] = data[input_idx]; } __syncthreads(); // local sum float local_sum = 0.0f; const int elements_per_thread = (size_per_head + threads_per_head - 1) / threads_per_head; const int thread_id_in_head = tidx % threads_per_head; const int size_offset_in_head = elements_per_thread * thread_id_in_head; const int shm_offset = (head_offset_of_block + head_id_in_warp) * size_per_head + size_offset_in_head; const T* shm_ptr = (const T*)normalize_shm; #pragma unroll for (int size_i = 0; size_i < elements_per_thread && (size_i + size_offset_in_head < size_per_head); size_i++) { const int shm_idx = shm_offset + size_i; const float tmp = static_cast<float>(shm_ptr[shm_idx]); local_sum += tmp * tmp; } // reduction to get norm_factor #pragma unroll for (int i = 1; i < threads_per_head; i <<= 1) { local_sum += __shfl_xor_sync(FINAL_MASK, local_sum, i, 32); } if (thread_id_in_head == 0) { const int norm_factor_idx = head_offset_of_block + head_id_in_warp; norm_factor[norm_factor_idx] = rsqrtf(local_sum + 1e-6) * logit_scale; } __syncthreads(); // normalize and sts to gmem #pragma unroll for (int i = tidx; i < HEADS_PER_WARP * WARPS_PER_BLOCK * ldg_per_head; i += blockDim.x) { const int norm_factor_idx = i / ldg_per_head; const float norm_factor_val = norm_factor[norm_factor_idx]; T_IO val = normalize_shm[i]; T* val_ptr = (T*)(&val); #pragma unroll for (int ei = 0; ei < ELEMENT_PER_LDG; ei++) { val_ptr[ei] = T(static_cast<float>(val_ptr[ei]) * norm_factor_val); } const int input_idx = input_offset + i; data[input_idx] = val; } } #define NORMALIZE_MACRO(HEADS_PER_WARP_, T_4, T_2, T) \ dim3 grid(total_head_count / (HEADS_PER_WARP_ * WARPS_PER_BLOCK)); \ dim3 block(32 * WARPS_PER_BLOCK); \ const int shm_size = HEADS_PER_WARP_ * WARPS_PER_BLOCK * size_per_head * sizeof(T); \ if (size_per_head % 4 == 0) { \ normalize_kernel_v2<T_4, T, HEADS_PER_WARP_, WARPS_PER_BLOCK, 4><<<grid, block, shm_size, stream>>>( \ (T_4*)data, (const T*)logit_scales, batch, seqlen, num_head, size_per_head); \ } \ else if (size_per_head % 2 == 0) { \ normalize_kernel_v2<T_2, T, HEADS_PER_WARP_, WARPS_PER_BLOCK, 2><<<grid, block, shm_size, stream>>>( \ (T_2*)data, (const T*)logit_scales, batch, seqlen, num_head, size_per_head); \ } \ else { \ normalize_kernel_v2<T, T, HEADS_PER_WARP_, WARPS_PER_BLOCK, 1><<<grid, block, shm_size, stream>>>( \ (T*)data, (const T*)logit_scales, batch, seqlen, num_head, size_per_head); \ } // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale/sqrt(size_per_head)) template<typename T> void invokeNormalize( T* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream) { const int WARPS_PER_BLOCK = 4; const int total_head_count = batch * num_head * seqlen; if (std::is_same<T, float>::value) { // WARPS_PER_BLOCK = 4, HEADS_PER_WARP = 4 if (total_head_count % (WARPS_PER_BLOCK * 4) == 0) { NORMALIZE_MACRO(4, float4, float2, float); } // WARPS_PER_BLOCK = 4, HEAD_PER_WARPS = 2 else if (total_head_count % (WARPS_PER_BLOCK * 2) == 0) { NORMALIZE_MACRO(2, float4, float2, float); 
} // WARPS_PER_BLOCK = 4, HEAD_PER_WARPS = 1 else if (total_head_count % WARPS_PER_BLOCK == 0) { NORMALIZE_MACRO(1, float4, float2, float); } else { dim3 grid(seqlen, num_head, batch); dim3 block((size_per_head + 31) / 32 * 32); normalize_kernel<<<grid, block, 0, stream>>>( (float*)data, (const float*)logit_scales, batch, seqlen, num_head, size_per_head); } } else if (std::is_same<T, half>::value) { // WARPS_PER_BLOCK = 4, HEADS_PER_WARP = 4 if (total_head_count % (WARPS_PER_BLOCK * 4) == 0) { NORMALIZE_MACRO(4, half4, half2, half); } // WARPS_PER_BLOCK = 4, HEAD_PER_WARPS = 2 else if (total_head_count % (WARPS_PER_BLOCK * 2) == 0) { NORMALIZE_MACRO(2, half4, half2, half); } // WARPS_PER_BLOCK = 4, HEAD_PER_WARPS = 1 else if (total_head_count % WARPS_PER_BLOCK == 0) { NORMALIZE_MACRO(1, half4, half2, half); } else { if (size_per_head % 2 == 0) { dim3 grid(seqlen, num_head, batch); dim3 block((size_per_head / 2 + 31) / 32 * 32); normalize_kernel<<<grid, block, 0, stream>>>( (half2*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head); } else { dim3 grid(seqlen, num_head, batch); dim3 block((size_per_head + 31) / 32 * 32); normalize_kernel<<<grid, block, 0, stream>>>( (half*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head); } } } #ifdef ENABLE_BF16 else { dim3 grid(seqlen, num_head, batch); dim3 block((size_per_head + 31) / 32 * 32); normalize_kernel<<<grid, block, 0, stream>>>(data, logit_scales, batch, seqlen, num_head, size_per_head); } #endif } #undef NORMALIZE_MACRO template void invokeNormalize<half>( half* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream); template void invokeNormalize<float>(float* data, const float* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream); #ifdef ENABLE_BF16 template void invokeNormalize<__nv_bfloat16>(__nv_bfloat16* data, const __nv_bfloat16* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream); #endif /******************* invokeNormalize ***********************/ // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale) // grid(seqlen, num_head, batch) // block((size_per_head + 31)/31*32) template<typename T> __global__ void normalize_kernel(int8_t* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head, const float deQ_scale, const float Q_scale) { const int batch_id = blockIdx.z; const int head_id = blockIdx.y; const int seq_id = blockIdx.x; const int size_id = threadIdx.x; const int input_idx = ((batch_id * num_head + head_id) * seqlen + seq_id) * size_per_head + size_id; const float logit_scale = (logit_scales != NULL) ? static_cast<float>(logit_scales[head_id]) : 1.0f; const bool flag = size_id < size_per_head; float input_val = (flag) ? static_cast<float>(data[input_idx]) * deQ_scale : 0.0f; __shared__ float norm_factor; const float local_sum = input_val * input_val; const float local_sum_all = blockDim.x <= 32 ? 
warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrt(local_sum_all + 1e-6) * logit_scale; } __syncthreads(); input_val *= norm_factor; if (flag) { data[input_idx] = float_to_int8_rn(input_val * Q_scale); } } // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale/sqrt(size_per_head)) // grid(seqlen, num_head, batch) // block((size_per_head/2 + 31)/31*32) template<> __global__ void normalize_kernel(int8_t* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, const float deQ_scale, const float Q_scale) { const int batch_id = blockIdx.z; const int head_id = blockIdx.y; const int seq_id = blockIdx.x; const int size_id = threadIdx.x; const int input_idx = ((batch_id * num_head + head_id) * seqlen + seq_id) * size_per_head / 2 + size_id; const float logit_scale = (logit_scales != NULL) ? float(logit_scales[head_id]) : 1.0f; float2 input_val_float2 = {0.0f, 0.0f}; const bool flag = size_id < size_per_head / 2; char2* dataPtr = (char2*)data; if (flag) { char2 dataTmp = dataPtr[input_idx]; input_val_float2.x = static_cast<float>(dataTmp.x) * deQ_scale; input_val_float2.y = static_cast<float>(dataTmp.y) * deQ_scale; } __shared__ float norm_factor; const float local_sum = input_val_float2.x * input_val_float2.x + input_val_float2.y * input_val_float2.y; const float local_sum_all = blockDim.x <= 32 ? warpReduceSum(local_sum) : blockReduceSum(local_sum); if (threadIdx.x == 0) { norm_factor = rsqrtf(local_sum_all + 1e-6) * logit_scale; } __syncthreads(); input_val_float2.x *= norm_factor; input_val_float2.y *= norm_factor; if (flag) { char2 dataTmp; dataTmp.x = float_to_int8_rn(input_val_float2.x * Q_scale); dataTmp.y = float_to_int8_rn(input_val_float2.y * Q_scale); dataPtr[input_idx] = dataTmp; } } // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale) // grid(batch*num_head*seqlen/(HEADS_PER_WARP*WARPS_PER_BLOCK)) // block(32*WARPS_PER_BLOCK) // size_per_head % ELEMENT_PER_LDG == 0 template<int HEADS_PER_WARP, int WARPS_PER_BLOCK, int ELEMENT_PER_LDG> __global__ void normalize_kernel_v2(char4* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, const float deQ_scale, const float Q_scale) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int threads_per_head = 32 / HEADS_PER_WARP; const int lane_id = tidx % 32; const int warp_id = tidx / 32; const int head_offset_of_grid = bidx * WARPS_PER_BLOCK * HEADS_PER_WARP; const int head_offset_of_block = warp_id * HEADS_PER_WARP; const int head_id_in_warp = lane_id / threads_per_head; const int head_id = head_offset_of_grid + head_offset_of_block + head_id_in_warp; const int num_head_id = (head_id / seqlen) % num_head; const int ldg_per_head = size_per_head / ELEMENT_PER_LDG; const float logit_scale = (logit_scales != NULL) ? 
static_cast<float>(logit_scales[num_head_id]) : 1.0f; char4* normalize_shm = (char4*)normalize_kernel_v2_shm; __shared__ float norm_factor[WARPS_PER_BLOCK * HEADS_PER_WARP]; // one factor for one head const int input_offset = head_offset_of_grid * ldg_per_head; // load from gmem to smem #pragma unroll for (int i = tidx; i < HEADS_PER_WARP * WARPS_PER_BLOCK * ldg_per_head; i += blockDim.x) { const int input_idx = input_offset + i; normalize_shm[i] = data[input_idx]; } __syncthreads(); // local sum float local_sum = 0.0f; const int elements_per_thread = (size_per_head + threads_per_head - 1) / threads_per_head; const int thread_id_in_head = tidx % threads_per_head; const int size_offset_in_head = elements_per_thread * thread_id_in_head; const int shm_offset = (head_offset_of_block + head_id_in_warp) * size_per_head + size_offset_in_head; const int8_t* shm_ptr = (const int8_t*)normalize_shm; #pragma unroll for (int size_i = 0; size_i < elements_per_thread && (size_i + size_offset_in_head < size_per_head); size_i++) { const int shm_idx = shm_offset + size_i; const float tmp = static_cast<float>(shm_ptr[shm_idx]) * deQ_scale; local_sum += tmp * tmp; } // reduction to get norm_factor #pragma unroll for (int i = 1; i < threads_per_head; i <<= 1) { local_sum += __shfl_xor_sync(FINAL_MASK, local_sum, i, 32); } if (thread_id_in_head == 0) { const int norm_factor_idx = head_offset_of_block + head_id_in_warp; norm_factor[norm_factor_idx] = rsqrtf(local_sum + 1e-6) * logit_scale; } __syncthreads(); // normalize and sts to gmem #pragma unroll for (int i = tidx; i < HEADS_PER_WARP * WARPS_PER_BLOCK * ldg_per_head; i += blockDim.x) { const int norm_factor_idx = i / ldg_per_head; const float norm_factor_val = norm_factor[norm_factor_idx]; char4 val = normalize_shm[i]; int8_t* val_ptr = (int8_t*)(&val); #pragma unroll for (int ei = 0; ei < ELEMENT_PER_LDG; ei++) { val_ptr[ei] = float_to_int8_rn(static_cast<float>(val_ptr[ei]) * deQ_scale * norm_factor_val * Q_scale); } const int input_idx = input_offset + i; data[input_idx] = val; } } #define NORMALIZE_MACRO \ const int shm_size = WARPS_PER_BLOCK * HEADS_PER_WARP * size_per_head; \ dim3 grid(total_head_count / (WARPS_PER_BLOCK * HEADS_PER_WARP)); \ dim3 block(32 * WARPS_PER_BLOCK); \ normalize_kernel_v2<HEADS_PER_WARP, WARPS_PER_BLOCK, 4><<<grid, block, shm_size, stream>>>( \ (char4*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, deQ_scale, Q_scale); // input should be [batch, num_head, seqlen, size_per_head] // do normlization on size_per_head && *(logit_scale/sqrt(size_per_head)) template<typename T> void invokeNormalize(int8_t* data, const T* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream, const float deQ_scale, const float Q_scale) { if (std::is_same<T, half>::value) { if (size_per_head % 4 == 0) { const int HEADS_PER_WARP = 4; const int total_head_count = seqlen * num_head * batch; if (total_head_count % (HEADS_PER_WARP * 4) == 0) { const int WARPS_PER_BLOCK = 4; NORMALIZE_MACRO } else if (total_head_count % (HEADS_PER_WARP * 2) == 0) { const int WARPS_PER_BLOCK = 2; NORMALIZE_MACRO } else if (total_head_count % (HEADS_PER_WARP * 1) == 0) { const int WARPS_PER_BLOCK = 1; NORMALIZE_MACRO } else { dim3 grid(seqlen, num_head, batch); dim3 block((size_per_head / 2 + 31) / 32 * 32); normalize_kernel<<<grid, block, 0, stream>>>((int8_t*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, deQ_scale, Q_scale); } } else if (size_per_head % 2 == 0) { dim3 grid(seqlen, 
num_head, batch); dim3 block((size_per_head / 2 + 31) / 32 * 32); normalize_kernel<<<grid, block, 0, stream>>>( (int8_t*)data, (const half*)logit_scales, batch, seqlen, num_head, size_per_head, deQ_scale, Q_scale); } else { printf("[ERROR][invokeNormalize(INT8 version)] only supports size_per_head %% 2 == 0!\n"); exit(-1); } } else { printf("[ERROR][invokeNormalize(INT8 version)] only supports [T=half] !\n"); exit(-1); } } #undef NORMALIZE_MACRO template void invokeNormalize<half>(int8_t* data, const half* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream, const float deQ_scale, const float Q_scale); template void invokeNormalize<float>(int8_t* data, const float* logit_scales, int batch, int seqlen, int num_head, int size_per_head, cudaStream_t stream, const float deQ_scale, const float Q_scale); } // namespace fastertransformer
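The invokeNormalize kernels above all reduce to the same per-vector operation: each length size_per_head vector is scaled by rsqrt(sum of squares + 1e-6) times an optional per-head logit scale, with the INT8 paths additionally dequantizing before and requantizing after. Below is a minimal CPU reference of that math for the [batch, num_head, seqlen, size_per_head] layout; normalize_reference is a hypothetical validation helper, not part of FasterTransformer.

#include <cmath>
#include <cstddef>

void normalize_reference(float* data, const float* logit_scales,
                         int batch, int seqlen, int num_head, int size_per_head)
{
    for (int b = 0; b < batch; ++b)
        for (int h = 0; h < num_head; ++h)
            for (int s = 0; s < seqlen; ++s) {
                // same offset arithmetic as the kernels: ((b * num_head + h) * seqlen + s) * size_per_head
                float* vec = data + ((static_cast<std::size_t>(b) * num_head + h) * seqlen + s) * size_per_head;
                float sum_sq = 0.f;
                for (int i = 0; i < size_per_head; ++i)
                    sum_sq += vec[i] * vec[i];
                const float logit_scale = (logit_scales != nullptr) ? logit_scales[h] : 1.f;
                const float norm_factor = logit_scale / std::sqrt(sum_sq + 1e-6f);
                for (int i = 0; i < size_per_head; ++i)
                    vec[i] *= norm_factor;
            }
}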
ca111cc3b573c6deba67aaadcfc8946962710e51.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <rocblas.h> // clang-format off #include "tensors/gpu/prod.h" #include "tensors/gpu/backend.h" #include "tensors/gpu/cuda_helpers.h" // clang-format on namespace marian { namespace gpu { void Prod(marian::Tensor C, const marian::Tensor& A, const marian::Tensor& B, bool transA, bool transB, float beta, float scalar) { hipSetDevice(C->getDevice().no); float alpha = scalar; size_t m = A->shape().elements() / A->shape().back(); size_t k = A->shape().back(); if(transA) std::swap(m, k); size_t l = B->shape().elements() / B->shape().back(); size_t n = B->shape().back(); if(transB) std::swap(l, n); size_t lda = A->shape().back(); size_t ldb = B->shape().back(); size_t ldc = B->shape().back(); if(transB) ldc = B->shape().elements() / B->shape().back(); hipblasOperation_t opA = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N; hipblasOperation_t opB = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N; auto cublasHandle = std::static_pointer_cast<gpu::Backend>(C->getBackend()) ->getCublasHandle(); #if TORCH_HIP_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); #endif hipblasSgemm(cublasHandle, opB, opA, n, m, k, &alpha, B->data(), ldb, A->data(), lda, &beta, C->data(), ldc); #if TORCH_HIP_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH); #endif } __global__ void gAddBias(float* out, const float* bias, size_t length, size_t cols) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { size_t index2 = index % cols; out[index] += bias[index2]; } } } void AddBias(marian::Tensor C, const marian::Tensor bias) { hipSetDevice(C->getDevice().no); int length = C->shape().elements(); int cols = bias->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); hipLaunchKernelGGL(( gAddBias), dim3(blocks), dim3(threads), 0, 0, C->data(), bias->data(), length, cols); hipStreamSynchronize(0); } void ProdWithBias(marian::Tensor C, const marian::Tensor& A, const marian::Tensor& B, const marian::Tensor& bias, bool transA, bool transB, float beta, float scalar) { marian::gpu::Prod(C, A, B, transA, transB, beta, scalar); marian::gpu::AddBias(C, bias); } void ProdBatched(marian::Tensor C, Ptr<Allocator> allocator, const marian::Tensor A, const marian::Tensor B, bool transA, bool transB, float beta, float scalar) { hipSetDevice(C->getDevice().no); float alpha = scalar; size_t batchA = A->shape().elements() / (A->shape()[-1] * A->shape()[-2]); size_t batchB = B->shape().elements() / (B->shape()[-1] * B->shape()[-2]); size_t m = A->shape()[-2]; size_t k = A->shape()[-1]; if(transA) std::swap(m, k); size_t l = B->shape()[-2]; size_t n = B->shape()[-1]; if(transB) std::swap(l, n); size_t lda = A->shape()[-1]; size_t ldb = B->shape()[-1]; size_t ldc = B->shape()[-1]; if(transB) ldc = B->shape()[-2]; hipblasOperation_t opA = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N; hipblasOperation_t opB = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N; auto cublasHandle = std::static_pointer_cast<gpu::Backend>(C->getBackend()) ->getCublasHandle(); int strideA = batchA == 1 ? 0 : m * k; int strideB = batchB == 1 ? 
0 : n * k; int strideC = n * m; int batchC = ::max(batchA, batchB); std::vector<const float*> aptr; std::vector<const float*> bptr; std::vector<float*> cptr; for(int i = 0; i < batchC; i++) { aptr.push_back(A->data() + (i % batchA) * strideA); bptr.push_back(B->data() + (i % batchB) * strideB); cptr.push_back(C->data() + i * strideC); } auto mp_aptr = allocator->alloc<const float*>(aptr.size()); CudaCopy(aptr.data(), aptr.data() + aptr.size(), mp_aptr->data<const float*>()); auto mp_bptr = allocator->alloc<const float*>(bptr.size()); CudaCopy(bptr.data(), bptr.data() + bptr.size(), mp_bptr->data<const float*>()); auto mp_cptr = allocator->alloc<float*>(cptr.size()); CudaCopy(cptr.data(), cptr.data() + cptr.size(), mp_cptr->data<float*>()); #if TORCH_HIP_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); #endif hipblasSgemmBatched(cublasHandle, opB, opA, n, m, k, &alpha, mp_bptr->data<const float*>(), ldb, mp_aptr->data<const float*>(), lda, &beta, mp_cptr->data<float*>(), ldc, batchC); #if TORCH_HIP_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH); #endif allocator->free(mp_aptr); allocator->free(mp_bptr); allocator->free(mp_cptr); } } }
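Prod above computes a row-major C = op(A) * op(B) with a column-major BLAS by using the identity C^T = op(B)^T * op(A)^T, which is why the call passes (opB, opA, n, m, k) and hands B to the BLAS before A. A standalone sketch of the same trick for the non-transposed case; gemm_row_major is a hypothetical helper, not part of Marian.

#include <hipblas.h>

// Row-major C[m x n] = A[m x k] * B[k x n] via column-major hipblasSgemm:
// interpret the row-major buffers as column-major transposes and compute C^T = B^T * A^T.
void gemm_row_major(hipblasHandle_t handle,
                    const float* A, const float* B, float* C,
                    int m, int n, int k)
{
    const float alpha = 1.0f, beta = 0.0f;
    hipblasSgemm(handle,
                 HIPBLAS_OP_N, HIPBLAS_OP_N,
                 n, m, k,      // dimensions of C^T = B^T * A^T
                 &alpha,
                 B, n,         // B^T is n x k, leading dimension n
                 A, k,         // A^T is k x m, leading dimension k
                 &beta,
                 C, n);        // C^T is n x m, leading dimension n
}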
ca111cc3b573c6deba67aaadcfc8946962710e51.cu
#include <cublas_v2.h> // clang-format off #include "tensors/gpu/prod.h" #include "tensors/gpu/backend.h" #include "tensors/gpu/cuda_helpers.h" // clang-format on namespace marian { namespace gpu { void Prod(marian::Tensor C, const marian::Tensor& A, const marian::Tensor& B, bool transA, bool transB, float beta, float scalar) { cudaSetDevice(C->getDevice().no); float alpha = scalar; size_t m = A->shape().elements() / A->shape().back(); size_t k = A->shape().back(); if(transA) std::swap(m, k); size_t l = B->shape().elements() / B->shape().back(); size_t n = B->shape().back(); if(transB) std::swap(l, n); size_t lda = A->shape().back(); size_t ldb = B->shape().back(); size_t ldc = B->shape().back(); if(transB) ldc = B->shape().elements() / B->shape().back(); cublasOperation_t opA = transA ? CUBLAS_OP_T : CUBLAS_OP_N; cublasOperation_t opB = transB ? CUBLAS_OP_T : CUBLAS_OP_N; auto cublasHandle = std::static_pointer_cast<gpu::Backend>(C->getBackend()) ->getCublasHandle(); #if CUDA_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); #endif cublasSgemm(cublasHandle, opB, opA, n, m, k, &alpha, B->data(), ldb, A->data(), lda, &beta, C->data(), ldc); #if CUDA_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH); #endif } __global__ void gAddBias(float* out, const float* bias, size_t length, size_t cols) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { size_t index2 = index % cols; out[index] += bias[index2]; } } } void AddBias(marian::Tensor C, const marian::Tensor bias) { cudaSetDevice(C->getDevice().no); int length = C->shape().elements(); int cols = bias->shape().elements(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); gAddBias<<<blocks, threads>>>(C->data(), bias->data(), length, cols); cudaStreamSynchronize(0); } void ProdWithBias(marian::Tensor C, const marian::Tensor& A, const marian::Tensor& B, const marian::Tensor& bias, bool transA, bool transB, float beta, float scalar) { marian::gpu::Prod(C, A, B, transA, transB, beta, scalar); marian::gpu::AddBias(C, bias); } void ProdBatched(marian::Tensor C, Ptr<Allocator> allocator, const marian::Tensor A, const marian::Tensor B, bool transA, bool transB, float beta, float scalar) { cudaSetDevice(C->getDevice().no); float alpha = scalar; size_t batchA = A->shape().elements() / (A->shape()[-1] * A->shape()[-2]); size_t batchB = B->shape().elements() / (B->shape()[-1] * B->shape()[-2]); size_t m = A->shape()[-2]; size_t k = A->shape()[-1]; if(transA) std::swap(m, k); size_t l = B->shape()[-2]; size_t n = B->shape()[-1]; if(transB) std::swap(l, n); size_t lda = A->shape()[-1]; size_t ldb = B->shape()[-1]; size_t ldc = B->shape()[-1]; if(transB) ldc = B->shape()[-2]; cublasOperation_t opA = transA ? CUBLAS_OP_T : CUBLAS_OP_N; cublasOperation_t opB = transB ? CUBLAS_OP_T : CUBLAS_OP_N; auto cublasHandle = std::static_pointer_cast<gpu::Backend>(C->getBackend()) ->getCublasHandle(); int strideA = batchA == 1 ? 0 : m * k; int strideB = batchB == 1 ? 
0 : n * k; int strideC = n * m; int batchC = std::max(batchA, batchB); std::vector<const float*> aptr; std::vector<const float*> bptr; std::vector<float*> cptr; for(int i = 0; i < batchC; i++) { aptr.push_back(A->data() + (i % batchA) * strideA); bptr.push_back(B->data() + (i % batchB) * strideB); cptr.push_back(C->data() + i * strideC); } auto mp_aptr = allocator->alloc<const float*>(aptr.size()); CudaCopy(aptr.data(), aptr.data() + aptr.size(), mp_aptr->data<const float*>()); auto mp_bptr = allocator->alloc<const float*>(bptr.size()); CudaCopy(bptr.data(), bptr.data() + bptr.size(), mp_bptr->data<const float*>()); auto mp_cptr = allocator->alloc<float*>(cptr.size()); CudaCopy(cptr.data(), cptr.data() + cptr.size(), mp_cptr->data<float*>()); #if CUDA_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); #endif cublasSgemmBatched(cublasHandle, opB, opA, n, m, k, &alpha, mp_bptr->data<const float*>(), ldb, mp_aptr->data<const float*>(), lda, &beta, mp_cptr->data<float*>(), ldc, batchC); #if CUDA_VERSION >= 9000 cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH); #endif allocator->free(mp_aptr); allocator->free(mp_bptr); allocator->free(mp_cptr); } } }
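ProdBatched stages host-side pointer arrays onto the device and calls cublasSgemmBatched. Because strideA/strideB/strideC already describe a regular layout (with stride 0 expressing broadcast of a single A or B), the same result could also be obtained from the strided-batched entry point without any pointer arrays. A hedged sketch of that alternative, keeping the file's operand-swap convention for row-major data; this is not what ProdBatched actually does.

#include <cublas_v2.h>

// Batched row-major C_i[m x n] = A_i[m x k] * B_i[k x n], no transposes.
// strideA or strideB may be 0 to reuse one A or B across the whole batch,
// mirroring the (i % batchA) / (i % batchB) indexing above.
void prod_batched_strided(cublasHandle_t handle,
                          const float* A, const float* B, float* C,
                          int m, int n, int k,
                          long long strideA, long long strideB,
                          int batchCount, float alpha = 1.f, float beta = 0.f)
{
    cublasSgemmStridedBatched(handle,
                              CUBLAS_OP_N, CUBLAS_OP_N,
                              n, m, k,                       // per-batch C^T = B^T * A^T
                              &alpha,
                              B, n, strideB,
                              A, k, strideA,
                              &beta,
                              C, n, (long long)m * n,        // contiguous output batches
                              batchCount);
}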
824189685a005fa875cd1397512d90a76acb91a2.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <exception> #include <sstream> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/for_each.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include "NVStrings.h" #include "NVStringsImpl.h" #include "../custring_view.cuh" #include "../regex/regex.cuh" #include "../unicode/is_flags.h" #include "../util.h" // Like the other regex functors, this one has two modes: size/count calculation // and then the operation itself (findall). This minimizes the inlining of // the regex code while not causing divergence. Makes the code a bit messy // but build times are reduced by half since only one regex find() is inlined. template<size_t stack_size> struct findall_record_fn { dreprog* prog; custring_view_array d_strings; int* d_counts; int* d_sizes; bool bcompute_size_only{true}; char** d_buffers; custring_view_array* d_rows; __device__ void operator()(unsigned int idx) { custring_view* dstr = d_strings[idx]; if( !dstr ) return; u_char data1[stack_size], data2[stack_size]; prog->set_stack_mem(data1,data2); if( !bcompute_size_only && (d_counts[idx] < 1) ) return; char* buffer = nullptr; custring_view_array drow = nullptr; if( !bcompute_size_only ) { buffer = (char*)d_buffers[idx]; drow = d_rows[idx]; } int nbytes = 0, nchars = (int)dstr->chars_count(); int spos = 0, rows_idx = 0, find_count = 0; while( spos <= nchars ) { int epos = nchars; if( prog->find(idx,dstr,spos,epos) <=0 ) break; if( bcompute_size_only ) { unsigned int bytes = (dstr->byte_offset_for(epos)-dstr->byte_offset_for(spos)); unsigned int size = custring_view::alloc_size(bytes,(epos-spos)); nbytes += ALIGN_SIZE(size); ++find_count; } else { custring_view* str = dstr->substr((unsigned)spos,(unsigned)(epos-spos),1,buffer); drow[rows_idx++] = str; buffer += ALIGN_SIZE(str->alloc_size()); } spos = epos > spos ? 
epos : spos + 1; } if( bcompute_size_only ) { d_sizes[idx] = nbytes; d_counts[idx] = find_count; } } }; // for each string, return substring(s) which match specified pattern int NVStrings::findall_record( const char* pattern, std::vector<NVStrings*>& results ) { if( pattern==0 ) return -1; unsigned int count = size(); if( count==0 ) return 0; auto execpol = rmm::exec_policy(0); // compile regex into device object const char32_t* ptn32 = to_char32(pattern); dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags()); delete ptn32; // allocate regex working memory if necessary int regex_insts = prog->inst_counts(); if( regex_insts > MAX_STACK_INSTS ) { if( !prog->alloc_relists(count) ) { std::ostringstream message; message << "nvstrings::findall_record: number of instructions (" << prog->inst_counts() << ") "; message << "and number of strings (" << count << ") "; message << "exceeds available memory"; dreprog::destroy(prog); throw std::invalid_argument(message.str()); } } // compute counts of each match and size of the buffers custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> sizes(count,0); int* d_sizes = sizes.data().get(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) ) thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, d_sizes}); else if( regex_insts <= 100 ) thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, d_sizes}); else thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, d_sizes}); hipDeviceSynchronize(); // // create rows of buffers thrust::host_vector<int> hcounts(counts); // copies counts from device thrust::host_vector<custring_view_array> hrows(count,nullptr); thrust::host_vector<char*> hbuffers(count,nullptr); for( unsigned int idx=0; idx < count; ++idx ) { int rcount = hcounts[idx]; NVStrings* row = new NVStrings(rcount); results.push_back(row); if( rcount==0 ) continue; hrows[idx] = row->pImpl->getStringsPtr(); int size = sizes[idx]; char* d_buffer = device_alloc<char>(size,0); row->pImpl->setMemoryBuffer(d_buffer,size); hbuffers[idx] = d_buffer; } // copy substrings into buffers rmm::device_vector<custring_view_array> rows(hrows); // copies hrows to device custring_view_array* d_rows = rows.data().get(); rmm::device_vector<char*> buffers(hbuffers); // copies hbuffers to device char** d_buffers = buffers.data().get(); if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) ) thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows}); else if( regex_insts <= 100 ) thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows}); else thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows}); // printCudaError(hipDeviceSynchronize(),"nvs-findall_record"); dreprog::destroy(prog); return (int)results.size(); }
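findall_record runs the same findall_record_fn functor twice: a first pass with bcompute_size_only left at true fills d_counts and d_sizes, the host then allocates one buffer per row, and a second pass writes the substrings into those buffers. A stripped-down sketch of that size-then-fill idiom with a toy functor; all names here are hypothetical and the payload is made up, only the two-pass structure mirrors the NVStrings code.

#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/execution_policy.h>

struct size_then_fill_fn {
    const int* input;
    int*       sizes;
    bool       size_only{true};
    int*       output{nullptr};   // preallocated by the host before pass 2

    __device__ void operator()(unsigned idx) const {
        int produced = input[idx] % 4;   // pretend each row yields 0..3 results (assumes non-negative input)
        if (size_only) { sizes[idx] = produced; return; }
        for (int i = 0; i < produced; ++i)
            output[idx * 4 + i] = input[idx] + i;   // pass 2: write into reserved space
    }
};

void run_two_pass(const int* d_input, int* d_sizes, int* d_output, unsigned count)
{
    auto first = thrust::make_counting_iterator<unsigned>(0);
    thrust::for_each_n(thrust::device, first, count,
                       size_then_fill_fn{d_input, d_sizes});                    // pass 1: sizes only
    // ...host reads d_sizes and allocates d_output accordingly...
    thrust::for_each_n(thrust::device, first, count,
                       size_then_fill_fn{d_input, d_sizes, false, d_output});   // pass 2: fill
}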
824189685a005fa875cd1397512d90a76acb91a2.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <exception> #include <sstream> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/for_each.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include "NVStrings.h" #include "NVStringsImpl.h" #include "../custring_view.cuh" #include "../regex/regex.cuh" #include "../unicode/is_flags.h" #include "../util.h" // Like the other regex functors, this one has two modes: size/count calculation // and then the operation itself (findall). This minimizes the inlining of // the regex code while not causing divergence. Makes the code a bit messy // but build times are reduced by half since only one regex find() is inlined. template<size_t stack_size> struct findall_record_fn { dreprog* prog; custring_view_array d_strings; int* d_counts; int* d_sizes; bool bcompute_size_only{true}; char** d_buffers; custring_view_array* d_rows; __device__ void operator()(unsigned int idx) { custring_view* dstr = d_strings[idx]; if( !dstr ) return; u_char data1[stack_size], data2[stack_size]; prog->set_stack_mem(data1,data2); if( !bcompute_size_only && (d_counts[idx] < 1) ) return; char* buffer = nullptr; custring_view_array drow = nullptr; if( !bcompute_size_only ) { buffer = (char*)d_buffers[idx]; drow = d_rows[idx]; } int nbytes = 0, nchars = (int)dstr->chars_count(); int spos = 0, rows_idx = 0, find_count = 0; while( spos <= nchars ) { int epos = nchars; if( prog->find(idx,dstr,spos,epos) <=0 ) break; if( bcompute_size_only ) { unsigned int bytes = (dstr->byte_offset_for(epos)-dstr->byte_offset_for(spos)); unsigned int size = custring_view::alloc_size(bytes,(epos-spos)); nbytes += ALIGN_SIZE(size); ++find_count; } else { custring_view* str = dstr->substr((unsigned)spos,(unsigned)(epos-spos),1,buffer); drow[rows_idx++] = str; buffer += ALIGN_SIZE(str->alloc_size()); } spos = epos > spos ? 
epos : spos + 1; } if( bcompute_size_only ) { d_sizes[idx] = nbytes; d_counts[idx] = find_count; } } }; // for each string, return substring(s) which match specified pattern int NVStrings::findall_record( const char* pattern, std::vector<NVStrings*>& results ) { if( pattern==0 ) return -1; unsigned int count = size(); if( count==0 ) return 0; auto execpol = rmm::exec_policy(0); // compile regex into device object const char32_t* ptn32 = to_char32(pattern); dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags()); delete ptn32; // allocate regex working memory if necessary int regex_insts = prog->inst_counts(); if( regex_insts > MAX_STACK_INSTS ) { if( !prog->alloc_relists(count) ) { std::ostringstream message; message << "nvstrings::findall_record: number of instructions (" << prog->inst_counts() << ") "; message << "and number of strings (" << count << ") "; message << "exceeds available memory"; dreprog::destroy(prog); throw std::invalid_argument(message.str()); } } // compute counts of each match and size of the buffers custring_view_array d_strings = pImpl->getStringsPtr(); rmm::device_vector<int> sizes(count,0); int* d_sizes = sizes.data().get(); rmm::device_vector<int> counts(count,0); int* d_counts = counts.data().get(); if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) ) thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, d_sizes}); else if( regex_insts <= 100 ) thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, d_sizes}); else thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, d_sizes}); cudaDeviceSynchronize(); // // create rows of buffers thrust::host_vector<int> hcounts(counts); // copies counts from device thrust::host_vector<custring_view_array> hrows(count,nullptr); thrust::host_vector<char*> hbuffers(count,nullptr); for( unsigned int idx=0; idx < count; ++idx ) { int rcount = hcounts[idx]; NVStrings* row = new NVStrings(rcount); results.push_back(row); if( rcount==0 ) continue; hrows[idx] = row->pImpl->getStringsPtr(); int size = sizes[idx]; char* d_buffer = device_alloc<char>(size,0); row->pImpl->setMemoryBuffer(d_buffer,size); hbuffers[idx] = d_buffer; } // copy substrings into buffers rmm::device_vector<custring_view_array> rows(hrows); // copies hrows to device custring_view_array* d_rows = rows.data().get(); rmm::device_vector<char*> buffers(hbuffers); // copies hbuffers to device char** d_buffers = buffers.data().get(); if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) ) thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows}); else if( regex_insts <= 100 ) thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows}); else thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, findall_record_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, d_sizes, false, d_buffers, d_rows}); // printCudaError(cudaDeviceSynchronize(),"nvs-findall_record"); dreprog::destroy(prog); return (int)results.size(); }
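The regex functor above is instantiated at three stack sizes (RX_STACK_SMALL/MEDIUM/LARGE) and the host picks one instantiation from the number of compiled instructions, so only the chosen variant pays for its per-thread scratch memory. The same dispatch-by-template-parameter pattern in isolation, with hypothetical byte sizes and a toy payload that stands in for the regex engine.

#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/execution_policy.h>

template <size_t STACK_BYTES>
struct scratch_fn {
    unsigned char* out;
    __device__ void operator()(unsigned idx) const {
        unsigned char scratch[STACK_BYTES];   // per-thread working memory, sized at compile time
        for (size_t i = 0; i < STACK_BYTES; ++i)
            scratch[i] = static_cast<unsigned char>(idx + i);
        out[idx] = scratch[STACK_BYTES - 1];  // keep the buffer observable
    }
};

void dispatch_by_size(int insts, unsigned char* d_out, unsigned count)
{
    auto first = thrust::make_counting_iterator<unsigned>(0);
    if (insts <= 10)
        thrust::for_each_n(thrust::device, first, count, scratch_fn<128>{d_out});
    else if (insts <= 100)
        thrust::for_each_n(thrust::device, first, count, scratch_fn<1024>{d_out});
    else
        thrust::for_each_n(thrust::device, first, count, scratch_fn<4096>{d_out});
}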
8ca952f1817d2277128bee85ceee8830557df202.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/unary/unary_red.h" #include "cunumeric/unary/unary_red_template.inl" #include "cunumeric/cuda_help.h" namespace cunumeric { using namespace Legion; template <typename T> static constexpr T div_and_ceil(T value, T divider) { return std::max<T>((value + divider - 1) / divider, 1); } static constexpr coord_t WARP_SIZE = 32; // This helper class is to compute the shape of thread blocks for reduction kernels. // The strategy is to parallelize on dimensions, from the outermost one to the innermost, // that are not being collapsed, thereby having threads work on independet lanes of // reductions as much as possible. In case where the non-collapsing dimensions don't // have enough elements to be assigned to the threads, we also parallelize on // the collapsing domain. One exceptional case to this strategy is where the collapsing // dimension is the innermost one, in which case we prefer that dimension to the others // in order to enjoy wrap coalescing. The maximum degree of such parallelism woudl be 32, // which is the size of a wrap. template <int32_t DIM> struct ThreadBlock { void initialize(const Rect<DIM>& domain, int32_t collapsed_dim) { auto remaining = static_cast<coord_t>(THREADS_PER_BLOCK); Point<DIM> domain_extents; for (int32_t idx = 0; idx < DIM; ++idx) domain_extents[idx] = domain.hi[idx] - domain.lo[idx] + 1; // If the innermost dimension is being collapsed, we assign at least one warp to it // for warp coalsecing. 
if (collapsed_dim == DIM - 1) { auto extent = std::min<coord_t>(WARP_SIZE, domain_extents[collapsed_dim]); extents_[collapsed_dim] = extent; remaining = std::max<coord_t>(remaining / extent, 1); } // Then, we compute how many threads there should be along aech dimension, // excluding the one being collapsed for (int32_t idx = DIM - 1; idx >= 0; --idx) { if (idx == collapsed_dim) continue; auto extent = ::min(remaining, domain_extents[idx]); extents_[idx] = extent; remaining = std::max<coord_t>(remaining / extent, 1); } // Finally, we determine degree of parallelism for the collapsed dimension if we didn't above if (collapsed_dim != DIM - 1) extents_[collapsed_dim] = ::min(remaining, domain_extents[collapsed_dim]); // Cache the aggregate number of threads per increment in each dimension, // which later will be used for de-linearization of a thread id num_threads_ = 1; for (int32_t idx = DIM - 1; idx >= 0; --idx) { pitches_[idx] = num_threads_; num_threads_ *= extents_[idx]; } } // Compute a relative coordiate of a given thread __host__ __device__ Point<DIM> point(coord_t tid) const { Point<DIM> p; for (int32_t dim = 0; dim < DIM; ++dim) { p[dim] = tid / pitches_[dim]; tid = tid % pitches_[dim]; } return p; } // Total number of threads size_t num_threads_; // Number of threads along each dimension Point<DIM> extents_; // Aggregate number of threads per increment in each dimension Point<DIM> pitches_; }; // This class represents a set of concurrent thread blocks. Concurrent thread blocks form // hyperplanes in N-dimensional integer lattice such that the collapsed dimension is normal to them. // The size of thread blocks is determined by the maximum number of CTAs for a given kernel; // the number of concurrent thread blocks is the minimum number of hyperplanes whose aggregate // volume exceeds the maximum number of CTAs. template <int32_t DIM> struct ThreadBlocks { void initialize(const Rect<DIM>& domain, int32_t collapsed_dim) { collapsed_dim_ = collapsed_dim; block_.initialize(domain, collapsed_dim); for (int32_t idx = 0; idx < DIM; ++idx) { auto domain_extent = domain.hi[idx] - domain.lo[idx] + 1; extents_[idx] = div_and_ceil(domain_extent, block_.extents_[idx]); } // We want the collapsed dimension to be the outermost one when // de-linearizing the block id. dim_order_[0] = collapsed_dim_; for (int32_t dim = 0, idx = 1; dim < DIM; ++dim) if (dim != collapsed_dim_) dim_order_[idx++] = dim; // Compute the aggregate number of blocks per increment in each dimension coord_t num_blocks = 1; for (int32_t idx = DIM - 1; idx >= 0; --idx) { auto dim = dim_order_[idx]; pitches_[dim] = num_blocks; num_blocks *= extents_[dim]; } // For now we say all blocks can run concurrent. 
num_blocks_ = num_blocks; // Also compute the stride on the collapsed dimension collapsed_dim_stride_ = extents_[collapsed_dim_] * block_.extents_[collapsed_dim_]; } // De-linearized the linearized block id and thread it into an N-dimensional point __host__ __device__ Point<DIM> point(coord_t bid, coord_t tid, const Point<DIM>& origin) const { Point<DIM> p = origin; for (int32_t dim : dim_order_) { p[dim] += (bid / pitches_[dim]) * block_.extents_[dim]; bid = bid % pitches_[dim]; } p += block_.point(tid); return p; } void compute_maximum_concurrency(const void* func) { int32_t num_ctas = 0; hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_ctas, func, num_threads(), 0); size_t plane_size = pitches_[collapsed_dim_]; // Calculate the number of planes whose volume barely exceeds the maximum number of CTAs size_t max_num_concurrent_planes = std::max<size_t>(div_and_ceil<size_t>(num_ctas, plane_size), 1); // Then we update the number of concurrent thread blocks and the stride on the collapsed // dimension num_blocks_ = plane_size * max_num_concurrent_planes; collapsed_dim_stride_ = max_num_concurrent_planes * block_.extents_[collapsed_dim_]; } __host__ __device__ inline void next_point(Point<DIM>& point) const { point[collapsed_dim_] += collapsed_dim_stride_; } constexpr size_t num_blocks() const { return num_blocks_; } constexpr size_t num_threads() const { return block_.num_threads_; } // List of dimensions, from the outermost one to the innermost int32_t dim_order_[DIM]; int32_t collapsed_dim_; coord_t collapsed_dim_stride_; // Shape of each thread block ThreadBlock<DIM> block_; // Number of thread blocks along each dimension Point<DIM> extents_; // Aggregate number of thread blocks per increment in each dimension Point<DIM> pitches_; // Number of concurrent thread blocks size_t num_blocks_; }; template <int32_t DIM> std::ostream& operator<<(std::ostream& os, const ThreadBlock<DIM>& block) { os << "ThreadBlock(extents: " << block.extents_ << ", pitches: " << block.pitches_ << ")"; return os; } template <int32_t DIM> std::ostream& operator<<(std::ostream& os, const ThreadBlocks<DIM>& blocks) { os << "ThreadBlocks(" << blocks.block_ << ", extents: " << blocks.extents_ << ", pitches: " << blocks.pitches_ << ", num concurrent blocks: " << blocks.num_blocks_ << ", dim order: {"; for (int32_t dim : blocks.dim_order_) os << dim << ", "; os << "})"; return os; } template <typename REDOP, typename CTOR, typename LHS, typename RHS, int32_t DIM> static __device__ __forceinline__ Point<DIM> local_reduce(CTOR ctor, LHS& result, AccessorRO<RHS, DIM> in, LHS identity, const ThreadBlocks<DIM>& blocks, const Rect<DIM>& domain, int32_t collapsed_dim) { const coord_t tid = threadIdx.x; const coord_t bid = blockIdx.x; Point<DIM> point = blocks.point(bid, tid, domain.lo); if (!domain.contains(point)) return point; while (point[collapsed_dim] <= domain.hi[collapsed_dim]) { LHS value = ctor(point, in[point], collapsed_dim); REDOP::template fold<true>(result, value); blocks.next_point(point); } #if __CUDA_ARCH__ >= 700 // If we're collapsing the innermost dimension, we perform some optimization // with shared memory to reduce memory traffic due to atomic updates if (collapsed_dim == DIM - 1) { __shared__ uint8_t shmem[THREADS_PER_BLOCK * sizeof(LHS)]; LHS* trampoline = reinterpret_cast<LHS*>(shmem); // Check for the case where all the threads in the same warp have // the same x value in which case they're all going to conflict // so instead we do a warp-level reduction so just one thread ends // up doing the full 
atomic coord_t bucket = 0; for (int32_t dim = DIM - 2; dim >= 0; --dim) bucket = bucket * (domain.hi[dim + 1] - domain.lo[dim + 1] + 1) + point[dim]; const uint32_t same_mask = __match_any_sync(0xffffffff, bucket); int32_t laneid; asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid)); const uint32_t active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid)); if (active_mask) { // Store our data into shared trampoline[tid] = result; // Make sure all the threads in the warp are done writing __syncwarp(active_mask); // Have the lowest thread in each mask pull in the values int32_t lowest_index = -1; for (int32_t i = 0; i < warpSize; i++) if (same_mask & (1 << i)) { if (lowest_index == -1) { if (i != laneid) { // We're not the lowest thread in the warp for // this value so we're done, set the value back // to identity to ensure that we don't try to // perform the reduction out to memory result = identity; break; } else // Make sure we don't do this test again lowest_index = i; // It was already our value, so just keep going } else { // Pull in the value from shared memory const int32_t index = tid + i - laneid; REDOP::template fold<true>(result, trampoline[index]); } } } } #endif return point; } template <typename REDOP, typename CTOR, typename LHS, typename RHS, int32_t DIM> static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) reduce_with_rw_acc(AccessorRW<LHS, DIM> out, AccessorRO<RHS, DIM> in, LHS identity, ThreadBlocks<DIM> blocks, Rect<DIM> domain, int32_t collapsed_dim) { auto result = identity; auto point = local_reduce<REDOP, CTOR, LHS, RHS, DIM>( CTOR{}, result, in, identity, blocks, domain, collapsed_dim); if (result != identity) REDOP::template fold<false>(out[point], result); } template <typename REDOP, typename CTOR, typename LHS, typename RHS, int32_t DIM> static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) reduce_with_rd_acc(AccessorRD<REDOP, false, DIM> out, AccessorRO<RHS, DIM> in, LHS identity, ThreadBlocks<DIM> blocks, Rect<DIM> domain, int32_t collapsed_dim) { auto result = identity; auto point = local_reduce<REDOP, CTOR, LHS, RHS, DIM>( CTOR{}, result, in, identity, blocks, domain, collapsed_dim); if (result != identity) out.reduce(point, result); } template <UnaryRedCode OP_CODE, LegateTypeCode CODE, int DIM> struct UnaryRedImplBody<VariantKind::GPU, OP_CODE, CODE, DIM> { using OP = UnaryRedOp<OP_CODE, CODE>; using LG_OP = typename OP::OP; using VAL = legate_type_of<CODE>; using CTOR = ValueConstructor<VAL, DIM>; void operator()(AccessorRD<LG_OP, false, DIM> lhs, AccessorRO<VAL, DIM> rhs, const Rect<DIM>& rect, const Pitches<DIM - 1>& pitches, int collapsed_dim, size_t volume) const { auto Kernel = reduce_with_rd_acc<LG_OP, CTOR, VAL, VAL, DIM>; ThreadBlocks<DIM> blocks; blocks.initialize(rect, collapsed_dim); blocks.compute_maximum_concurrency(reinterpret_cast<const void*>(Kernel)); hipLaunchKernelGGL(( Kernel), dim3(blocks.num_blocks()), dim3(blocks.num_threads()), 0, 0, lhs, rhs, LG_OP::identity, blocks, rect, collapsed_dim); } void operator()(AccessorRW<VAL, DIM> lhs, AccessorRO<VAL, DIM> rhs, const Rect<DIM>& rect, const Pitches<DIM - 1>& pitches, int collapsed_dim, size_t volume) const { auto Kernel = reduce_with_rw_acc<LG_OP, CTOR, VAL, VAL, DIM>; ThreadBlocks<DIM> blocks; blocks.initialize(rect, collapsed_dim); blocks.compute_maximum_concurrency(reinterpret_cast<const void*>(Kernel)); hipLaunchKernelGGL(( Kernel), dim3(blocks.num_blocks()), dim3(blocks.num_threads()), 0, 0, lhs, rhs, LG_OP::identity, blocks, 
rect, collapsed_dim); } }; template <UnaryRedCode OP_CODE, LegateTypeCode CODE, int DIM> struct ArgRedImplBody<VariantKind::GPU, OP_CODE, CODE, DIM> { using OP = UnaryRedOp<OP_CODE, CODE>; using LG_OP = typename OP::OP; using RHS = legate_type_of<CODE>; using LHS = Argval<RHS>; using CTOR = ArgvalConstructor<RHS, DIM>; void operator()(AccessorRD<LG_OP, false, DIM> lhs, AccessorRO<RHS, DIM> rhs, const Rect<DIM>& rect, const Pitches<DIM - 1>& pitches, int collapsed_dim, size_t volume) const { auto Kernel = reduce_with_rd_acc<LG_OP, CTOR, LHS, RHS, DIM>; ThreadBlocks<DIM> blocks; blocks.initialize(rect, collapsed_dim); blocks.compute_maximum_concurrency(reinterpret_cast<const void*>(Kernel)); hipLaunchKernelGGL(( Kernel), dim3(blocks.num_blocks()), dim3(blocks.num_threads()), 0, 0, lhs, rhs, LG_OP::identity, blocks, rect, collapsed_dim); } void operator()(AccessorRW<LHS, DIM> lhs, AccessorRO<RHS, DIM> rhs, const Rect<DIM>& rect, const Pitches<DIM - 1>& pitches, int collapsed_dim, size_t volume) const { auto Kernel = reduce_with_rw_acc<LG_OP, CTOR, LHS, RHS, DIM>; ThreadBlocks<DIM> blocks; blocks.initialize(rect, collapsed_dim); blocks.compute_maximum_concurrency(reinterpret_cast<const void*>(Kernel)); hipLaunchKernelGGL(( Kernel), dim3(blocks.num_blocks()), dim3(blocks.num_threads()), 0, 0, lhs, rhs, LG_OP::identity, blocks, rect, collapsed_dim); } }; /*static*/ void UnaryRedTask::gpu_variant(TaskContext& context) { unary_red_template<VariantKind::GPU>(context); } } // namespace cunumeric
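ThreadBlock::point() and ThreadBlocks::point() both de-linearize an id with precomputed pitches: pitches[d] is the number of ids consumed per unit step in dimension d, so dividing from the outermost dimension inward recovers the coordinates. The same arithmetic as a small host-side sketch for a fixed 3-D extent; delinearize is a hypothetical helper, not part of cuNumeric.

#include <array>

std::array<int, 3> delinearize(int id, const std::array<int, 3>& extents)
{
    std::array<int, 3> pitches{}, point{};
    int stride = 1;
    for (int d = 2; d >= 0; --d) {   // innermost dimension varies fastest
        pitches[d] = stride;
        stride *= extents[d];
    }
    for (int d = 0; d < 3; ++d) {    // peel off coordinates from the outside in
        point[d] = id / pitches[d];
        id       = id % pitches[d];
    }
    return point;
}
// Example: delinearize(17, {2, 3, 4}) == {1, 1, 1}, since pitches are {12, 4, 1} and 17 = 1*12 + 1*4 + 1*1.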
8ca952f1817d2277128bee85ceee8830557df202.cu
/* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/unary/unary_red.h" #include "cunumeric/unary/unary_red_template.inl" #include "cunumeric/cuda_help.h" namespace cunumeric { using namespace Legion; template <typename T> static constexpr T div_and_ceil(T value, T divider) { return std::max<T>((value + divider - 1) / divider, 1); } static constexpr coord_t WARP_SIZE = 32; // This helper class is to compute the shape of thread blocks for reduction kernels. // The strategy is to parallelize on dimensions, from the outermost one to the innermost, // that are not being collapsed, thereby having threads work on independet lanes of // reductions as much as possible. In case where the non-collapsing dimensions don't // have enough elements to be assigned to the threads, we also parallelize on // the collapsing domain. One exceptional case to this strategy is where the collapsing // dimension is the innermost one, in which case we prefer that dimension to the others // in order to enjoy wrap coalescing. The maximum degree of such parallelism woudl be 32, // which is the size of a wrap. template <int32_t DIM> struct ThreadBlock { void initialize(const Rect<DIM>& domain, int32_t collapsed_dim) { auto remaining = static_cast<coord_t>(THREADS_PER_BLOCK); Point<DIM> domain_extents; for (int32_t idx = 0; idx < DIM; ++idx) domain_extents[idx] = domain.hi[idx] - domain.lo[idx] + 1; // If the innermost dimension is being collapsed, we assign at least one warp to it // for warp coalsecing. if (collapsed_dim == DIM - 1) { auto extent = std::min<coord_t>(WARP_SIZE, domain_extents[collapsed_dim]); extents_[collapsed_dim] = extent; remaining = std::max<coord_t>(remaining / extent, 1); } // Then, we compute how many threads there should be along aech dimension, // excluding the one being collapsed for (int32_t idx = DIM - 1; idx >= 0; --idx) { if (idx == collapsed_dim) continue; auto extent = std::min(remaining, domain_extents[idx]); extents_[idx] = extent; remaining = std::max<coord_t>(remaining / extent, 1); } // Finally, we determine degree of parallelism for the collapsed dimension if we didn't above if (collapsed_dim != DIM - 1) extents_[collapsed_dim] = std::min(remaining, domain_extents[collapsed_dim]); // Cache the aggregate number of threads per increment in each dimension, // which later will be used for de-linearization of a thread id num_threads_ = 1; for (int32_t idx = DIM - 1; idx >= 0; --idx) { pitches_[idx] = num_threads_; num_threads_ *= extents_[idx]; } } // Compute a relative coordiate of a given thread __host__ __device__ Point<DIM> point(coord_t tid) const { Point<DIM> p; for (int32_t dim = 0; dim < DIM; ++dim) { p[dim] = tid / pitches_[dim]; tid = tid % pitches_[dim]; } return p; } // Total number of threads size_t num_threads_; // Number of threads along each dimension Point<DIM> extents_; // Aggregate number of threads per increment in each dimension Point<DIM> pitches_; }; // This class represents a set of concurrent thread blocks. 
Concurrent thread blocks form // hyperplanes in N-dimensional integer lattice such that the collapsed dimension is normal to them. // The size of thread blocks is determined by the maximum number of CTAs for a given kernel; // the number of concurrent thread blocks is the minimum number of hyperplanes whose aggregate // volume exceeds the maximum number of CTAs. template <int32_t DIM> struct ThreadBlocks { void initialize(const Rect<DIM>& domain, int32_t collapsed_dim) { collapsed_dim_ = collapsed_dim; block_.initialize(domain, collapsed_dim); for (int32_t idx = 0; idx < DIM; ++idx) { auto domain_extent = domain.hi[idx] - domain.lo[idx] + 1; extents_[idx] = div_and_ceil(domain_extent, block_.extents_[idx]); } // We want the collapsed dimension to be the outermost one when // de-linearizing the block id. dim_order_[0] = collapsed_dim_; for (int32_t dim = 0, idx = 1; dim < DIM; ++dim) if (dim != collapsed_dim_) dim_order_[idx++] = dim; // Compute the aggregate number of blocks per increment in each dimension coord_t num_blocks = 1; for (int32_t idx = DIM - 1; idx >= 0; --idx) { auto dim = dim_order_[idx]; pitches_[dim] = num_blocks; num_blocks *= extents_[dim]; } // For now we say all blocks can run concurrent. num_blocks_ = num_blocks; // Also compute the stride on the collapsed dimension collapsed_dim_stride_ = extents_[collapsed_dim_] * block_.extents_[collapsed_dim_]; } // De-linearized the linearized block id and thread it into an N-dimensional point __host__ __device__ Point<DIM> point(coord_t bid, coord_t tid, const Point<DIM>& origin) const { Point<DIM> p = origin; for (int32_t dim : dim_order_) { p[dim] += (bid / pitches_[dim]) * block_.extents_[dim]; bid = bid % pitches_[dim]; } p += block_.point(tid); return p; } void compute_maximum_concurrency(const void* func) { int32_t num_ctas = 0; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_ctas, func, num_threads(), 0); size_t plane_size = pitches_[collapsed_dim_]; // Calculate the number of planes whose volume barely exceeds the maximum number of CTAs size_t max_num_concurrent_planes = std::max<size_t>(div_and_ceil<size_t>(num_ctas, plane_size), 1); // Then we update the number of concurrent thread blocks and the stride on the collapsed // dimension num_blocks_ = plane_size * max_num_concurrent_planes; collapsed_dim_stride_ = max_num_concurrent_planes * block_.extents_[collapsed_dim_]; } __host__ __device__ inline void next_point(Point<DIM>& point) const { point[collapsed_dim_] += collapsed_dim_stride_; } constexpr size_t num_blocks() const { return num_blocks_; } constexpr size_t num_threads() const { return block_.num_threads_; } // List of dimensions, from the outermost one to the innermost int32_t dim_order_[DIM]; int32_t collapsed_dim_; coord_t collapsed_dim_stride_; // Shape of each thread block ThreadBlock<DIM> block_; // Number of thread blocks along each dimension Point<DIM> extents_; // Aggregate number of thread blocks per increment in each dimension Point<DIM> pitches_; // Number of concurrent thread blocks size_t num_blocks_; }; template <int32_t DIM> std::ostream& operator<<(std::ostream& os, const ThreadBlock<DIM>& block) { os << "ThreadBlock(extents: " << block.extents_ << ", pitches: " << block.pitches_ << ")"; return os; } template <int32_t DIM> std::ostream& operator<<(std::ostream& os, const ThreadBlocks<DIM>& blocks) { os << "ThreadBlocks(" << blocks.block_ << ", extents: " << blocks.extents_ << ", pitches: " << blocks.pitches_ << ", num concurrent blocks: " << blocks.num_blocks_ << ", dim order: {"; for 
(int32_t dim : blocks.dim_order_) os << dim << ", "; os << "})"; return os; } template <typename REDOP, typename CTOR, typename LHS, typename RHS, int32_t DIM> static __device__ __forceinline__ Point<DIM> local_reduce(CTOR ctor, LHS& result, AccessorRO<RHS, DIM> in, LHS identity, const ThreadBlocks<DIM>& blocks, const Rect<DIM>& domain, int32_t collapsed_dim) { const coord_t tid = threadIdx.x; const coord_t bid = blockIdx.x; Point<DIM> point = blocks.point(bid, tid, domain.lo); if (!domain.contains(point)) return point; while (point[collapsed_dim] <= domain.hi[collapsed_dim]) { LHS value = ctor(point, in[point], collapsed_dim); REDOP::template fold<true>(result, value); blocks.next_point(point); } #if __CUDA_ARCH__ >= 700 // If we're collapsing the innermost dimension, we perform some optimization // with shared memory to reduce memory traffic due to atomic updates if (collapsed_dim == DIM - 1) { __shared__ uint8_t shmem[THREADS_PER_BLOCK * sizeof(LHS)]; LHS* trampoline = reinterpret_cast<LHS*>(shmem); // Check for the case where all the threads in the same warp have // the same x value in which case they're all going to conflict // so instead we do a warp-level reduction so just one thread ends // up doing the full atomic coord_t bucket = 0; for (int32_t dim = DIM - 2; dim >= 0; --dim) bucket = bucket * (domain.hi[dim + 1] - domain.lo[dim + 1] + 1) + point[dim]; const uint32_t same_mask = __match_any_sync(0xffffffff, bucket); int32_t laneid; asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid)); const uint32_t active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid)); if (active_mask) { // Store our data into shared trampoline[tid] = result; // Make sure all the threads in the warp are done writing __syncwarp(active_mask); // Have the lowest thread in each mask pull in the values int32_t lowest_index = -1; for (int32_t i = 0; i < warpSize; i++) if (same_mask & (1 << i)) { if (lowest_index == -1) { if (i != laneid) { // We're not the lowest thread in the warp for // this value so we're done, set the value back // to identity to ensure that we don't try to // perform the reduction out to memory result = identity; break; } else // Make sure we don't do this test again lowest_index = i; // It was already our value, so just keep going } else { // Pull in the value from shared memory const int32_t index = tid + i - laneid; REDOP::template fold<true>(result, trampoline[index]); } } } } #endif return point; } template <typename REDOP, typename CTOR, typename LHS, typename RHS, int32_t DIM> static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) reduce_with_rw_acc(AccessorRW<LHS, DIM> out, AccessorRO<RHS, DIM> in, LHS identity, ThreadBlocks<DIM> blocks, Rect<DIM> domain, int32_t collapsed_dim) { auto result = identity; auto point = local_reduce<REDOP, CTOR, LHS, RHS, DIM>( CTOR{}, result, in, identity, blocks, domain, collapsed_dim); if (result != identity) REDOP::template fold<false>(out[point], result); } template <typename REDOP, typename CTOR, typename LHS, typename RHS, int32_t DIM> static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) reduce_with_rd_acc(AccessorRD<REDOP, false, DIM> out, AccessorRO<RHS, DIM> in, LHS identity, ThreadBlocks<DIM> blocks, Rect<DIM> domain, int32_t collapsed_dim) { auto result = identity; auto point = local_reduce<REDOP, CTOR, LHS, RHS, DIM>( CTOR{}, result, in, identity, blocks, domain, collapsed_dim); if (result != identity) out.reduce(point, result); } template <UnaryRedCode OP_CODE, LegateTypeCode CODE, int 
DIM> struct UnaryRedImplBody<VariantKind::GPU, OP_CODE, CODE, DIM> { using OP = UnaryRedOp<OP_CODE, CODE>; using LG_OP = typename OP::OP; using VAL = legate_type_of<CODE>; using CTOR = ValueConstructor<VAL, DIM>; void operator()(AccessorRD<LG_OP, false, DIM> lhs, AccessorRO<VAL, DIM> rhs, const Rect<DIM>& rect, const Pitches<DIM - 1>& pitches, int collapsed_dim, size_t volume) const { auto Kernel = reduce_with_rd_acc<LG_OP, CTOR, VAL, VAL, DIM>; ThreadBlocks<DIM> blocks; blocks.initialize(rect, collapsed_dim); blocks.compute_maximum_concurrency(reinterpret_cast<const void*>(Kernel)); Kernel<<<blocks.num_blocks(), blocks.num_threads()>>>( lhs, rhs, LG_OP::identity, blocks, rect, collapsed_dim); } void operator()(AccessorRW<VAL, DIM> lhs, AccessorRO<VAL, DIM> rhs, const Rect<DIM>& rect, const Pitches<DIM - 1>& pitches, int collapsed_dim, size_t volume) const { auto Kernel = reduce_with_rw_acc<LG_OP, CTOR, VAL, VAL, DIM>; ThreadBlocks<DIM> blocks; blocks.initialize(rect, collapsed_dim); blocks.compute_maximum_concurrency(reinterpret_cast<const void*>(Kernel)); Kernel<<<blocks.num_blocks(), blocks.num_threads()>>>( lhs, rhs, LG_OP::identity, blocks, rect, collapsed_dim); } }; template <UnaryRedCode OP_CODE, LegateTypeCode CODE, int DIM> struct ArgRedImplBody<VariantKind::GPU, OP_CODE, CODE, DIM> { using OP = UnaryRedOp<OP_CODE, CODE>; using LG_OP = typename OP::OP; using RHS = legate_type_of<CODE>; using LHS = Argval<RHS>; using CTOR = ArgvalConstructor<RHS, DIM>; void operator()(AccessorRD<LG_OP, false, DIM> lhs, AccessorRO<RHS, DIM> rhs, const Rect<DIM>& rect, const Pitches<DIM - 1>& pitches, int collapsed_dim, size_t volume) const { auto Kernel = reduce_with_rd_acc<LG_OP, CTOR, LHS, RHS, DIM>; ThreadBlocks<DIM> blocks; blocks.initialize(rect, collapsed_dim); blocks.compute_maximum_concurrency(reinterpret_cast<const void*>(Kernel)); Kernel<<<blocks.num_blocks(), blocks.num_threads()>>>( lhs, rhs, LG_OP::identity, blocks, rect, collapsed_dim); } void operator()(AccessorRW<LHS, DIM> lhs, AccessorRO<RHS, DIM> rhs, const Rect<DIM>& rect, const Pitches<DIM - 1>& pitches, int collapsed_dim, size_t volume) const { auto Kernel = reduce_with_rw_acc<LG_OP, CTOR, LHS, RHS, DIM>; ThreadBlocks<DIM> blocks; blocks.initialize(rect, collapsed_dim); blocks.compute_maximum_concurrency(reinterpret_cast<const void*>(Kernel)); Kernel<<<blocks.num_blocks(), blocks.num_threads()>>>( lhs, rhs, LG_OP::identity, blocks, rect, collapsed_dim); } }; /*static*/ void UnaryRedTask::gpu_variant(TaskContext& context) { unary_red_template<VariantKind::GPU>(context); } } // namespace cunumeric
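The compute_maximum_concurrency() member above sizes the grid from an occupancy query. The standalone sketch below (not cuNumeric code; the kernel and the 128-thread block size are placeholders I chose) shows that query in isolation: cudaOccupancyMaxActiveBlocksPerMultiprocessor reports resident CTAs per SM, which can then be scaled by the SM count for a device-wide estimate.

// Sketch only: placeholder kernel and block size, not taken from cuNumeric.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_reduction_kernel(float* data)
{
  if (data != nullptr) data[threadIdx.x] += 1.0f;
}

int main()
{
  int num_ctas = 0;
  const int threads_per_block = 128;  // assumption: stands in for THREADS_PER_BLOCK
  // Resident CTAs per SM for this kernel, block size, and 0 bytes of dynamic shared memory
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(
      &num_ctas, dummy_reduction_kernel, threads_per_block, 0);
  int sm_count = 0;
  cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, 0);
  std::printf("max resident CTAs: %d per SM, ~%d device-wide\n", num_ctas, num_ctas * sm_count);
  return 0;
}
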
a8ff32bd4e52468a271bf0d8d62697fb59d56814.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <string> #include <stdio.h> #include <stdlib.h> #include <iterator> #include <algorithm> #include <random> #include <math.h> using namespace std; struct Ion { float x, y; }; void poblar(Ion iones[]) { FILE *in = fopen("dataset", "r"); for (int i = 0; i < 5000; i++) { fscanf(in, "%f %f", &iones[i].x, &iones[i].y); // cout << iones[i] << " " << iones[6000+i] << endl; } fclose(in); //sort(iones, iones + 5000, ionCompare); } __host__ __device__ float distanciaEuclidiana(Ion a, int x, int y) { float d = sqrtf(powf(a.x - x, 2) + powf(a.y - y, 2)); return d > 0.0 ? d : 1.0; // evitar división por 0 } __global__ void calcular_carga(Ion ion, float* cargas, int cantidad, int tIdO) { int tId = threadIdx.x + blockIdx.x * blockDim.x; int tIdC = tIdO + tId; if(tId < 200 * 200 && tIdC >= 0 && tIdC < 8192 * 8192) { int signo_x = (tId / (200) < 100) * (-1) + (tId / (200) >= 100) * (1); int signo_y = (tId % (200) < 100) * (1) + (tId % (200) >= 100) * (-1); int a = (tId / 200) * signo_x; int b = (tId % 200) * signo_y; int valido_x = !(ion.x + a < 0 || ion.x + a > 8192) * 1; int valido_y = !(ion.y + b < 0 || ion.y + b > 8192) * 1; float dist = distanciaEuclidiana(ion, a, b); atomicAdd(&cargas[tIdC], valido_x * valido_y * (dist <= 100.0) * (1.0 / dist)); //if(cargas[tIdC] > 0.0) // printf("%f ", cargas[tIdC]); } } __global__ void vertices_cercanos(Ion iones[], float* cargas, int cantidad) { int tId = threadIdx.x + blockIdx.x * blockDim.x; if(tId < cantidad) { int block_size = 256; int grid_size = (int) ceil( (float) 200 * 200 / block_size); hipLaunchKernelGGL(( calcular_carga), dim3(grid_size), dim3(block_size), 0, 0, iones[tId], cargas, cantidad, tId); } } __global__ void posicionar_ion(Ion iones[], float* cargas, int cantidad) { int tId = threadIdx.x + blockIdx.x * blockDim.x; float Q_menor = 100000000000; int a; int b; if(tId < 1) { for (int i = 0; i < 8192*3; i+=3) { if(cargas[i] < Q_menor){ Q_menor = cargas[i]; a = cargas[i+1]; b = cargas[i+2]; } } iones[cantidad].x = a; iones[cantidad].y = b; } } int main(int argc, char const *argv[]) { Ion iones[6000]; poblar(iones); Ion *gpu_iones; float *cargas; hipEvent_t ct1, ct2; float dt; int cantidad; hipMalloc(&gpu_iones, sizeof(Ion) * 6000); hipMalloc(&cargas, sizeof(float*) * 8192 * 8192); hipMemcpy(gpu_iones, iones, sizeof(Ion) * 6000, hipMemcpyHostToDevice); hipEventCreate(&ct1); hipEventCreate(&ct2); hipEventRecord(ct1); for (cantidad = 5000; cantidad < 5009; cantidad++) { int block_size = 256; int grid_size = (int) ceil( (float) cantidad / block_size); cout << "Calculando carga para " << cantidad << endl; hipLaunchKernelGGL(( vertices_cercanos), dim3(grid_size), dim3(block_size), 0, 0, gpu_iones, cargas, cantidad); hipDeviceSynchronize(); grid_size = (int) ceil( (float) 1 / block_size); hipLaunchKernelGGL(( posicionar_ion), dim3(grid_size), dim3(block_size), 0, 0, iones, cargas, cantidad); hipDeviceSynchronize(); hipMemcpy(iones, gpu_iones,sizeof(Ion) * 6000, hipMemcpyDeviceToHost); cout << iones[cantidad].x << " " << iones[cantidad].y << endl; } hipEventRecord(ct2); hipEventSynchronize(ct2); hipEventElapsedTime(&dt, ct1, ct2); cout << "Tiempo: " << dt << "[ms]" << '\n'; hipFree(gpu_iones); return 0; } // Aquí yacen los restos de la grandiosa idea de ordenar los puntos y encontrar los que pertenecen a la circunferencia con búsqueda binaria /* __host__ __device__ int busquedaBinaria(Ion iones[], int x, int y, int l, int r,
float dist) { int m; while(l < r) { m = (l + r) / 2; if(distanciaEuclidiana(iones[m], x, y) < dist) r = m - 1; else if(distanciaEuclidiana(iones[m], x, y) > dist) l = m + 1; else if(distanciaEuclidiana(iones[m], x, y) == dist) break; } return m; } */ /* bool ionCompare(Ion const & a, Ion const & b) { return a.x < b.x || (a.y <= b.y && a.x == b.x); } */ /* __global__ void posicionar_ion(float* cargas) { int tId = threadIdx.x + blockIdx.x * blockDim.x; if(tId < 8192*8192) { int a = tId/8192; int b = tId%8192; minimo_cuda = (cargas[tId] < minimo_cuda) * cargas[tId] + (cargas[tId] >= minimo_cuda) * minimo_cuda; minimo_x_cuda = (cargas[tId] < minimo_cuda) * a + (cargas[tId] >= minimo_cuda) * minimo_x_cuda; minimo_y_cuda = (cargas[tId] < minimo_cuda) * b + (cargas[tId] >= minimo_cuda) * minimo_y_cuda; if(cargas[tId] > 0.0) printf("MIRA MAMA, SIN MANOS: %f %f %d %d\n", cargas[tId], minimo_cuda, minimo_x_cuda, minimo_y_cuda ); } } */
a8ff32bd4e52468a271bf0d8d62697fb59d56814.cu
#include <iostream> #include <fstream> #include <string> #include <stdio.h> #include <stdlib.h> #include <iterator> #include <algorithm> #include <random> #include <math.h> using namespace std; struct Ion { float x, y; }; void poblar(Ion iones[]) { FILE *in = fopen("dataset", "r"); for (int i = 0; i < 5000; i++) { fscanf(in, "%f %f", &iones[i].x, &iones[i].y); // cout << iones[i] << " " << iones[6000+i] << endl; } fclose(in); //sort(iones, iones + 5000, ionCompare); } __host__ __device__ float distanciaEuclidiana(Ion a, int x, int y) { float d = sqrtf(powf(a.x - x, 2) + powf(a.y - y, 2)); return d > 0.0 ? d : 1.0; // evitar división por 0 } __global__ void calcular_carga(Ion ion, float* cargas, int cantidad, int tIdO) { int tId = threadIdx.x + blockIdx.x * blockDim.x; int tIdC = tIdO + tId; if(tId < 200 * 200 && tIdC >= 0 && tIdC < 8192 * 8192) { int signo_x = (tId / (200) < 100) * (-1) + (tId / (200) >= 100) * (1); int signo_y = (tId % (200) < 100) * (1) + (tId % (200) >= 100) * (-1); int a = (tId / 200) * signo_x; int b = (tId % 200) * signo_y; int valido_x = !(ion.x + a < 0 || ion.x + a > 8192) * 1; int valido_y = !(ion.y + b < 0 || ion.y + b > 8192) * 1; float dist = distanciaEuclidiana(ion, a, b); atomicAdd(&cargas[tIdC], valido_x * valido_y * (dist <= 100.0) * (1.0 / dist)); //if(cargas[tIdC] > 0.0) // printf("%f ", cargas[tIdC]); } } __global__ void vertices_cercanos(Ion iones[], float* cargas, int cantidad) { int tId = threadIdx.x + blockIdx.x * blockDim.x; if(tId < cantidad) { int block_size = 256; int grid_size = (int) ceil( (float) 200 * 200 / block_size); calcular_carga<<<grid_size, block_size>>>(iones[tId], cargas, cantidad, tId); } } __global__ void posicionar_ion(Ion iones[], float* cargas, int cantidad) { int tId = threadIdx.x + blockIdx.x * blockDim.x; float Q_menor = 100000000000; int a; int b; if(tId < 1) { for (int i = 0; i < 8192*3; i+=3) { if(cargas[i] < Q_menor){ Q_menor = cargas[i]; a = cargas[i+1]; b = cargas[i+2]; } } iones[cantidad].x = a; iones[cantidad].y = b; } } int main(int argc, char const *argv[]) { Ion iones[6000]; poblar(iones); Ion *gpu_iones; float *cargas; cudaEvent_t ct1, ct2; float dt; int cantidad; cudaMalloc(&gpu_iones, sizeof(Ion) * 6000); cudaMalloc(&cargas, sizeof(float*) * 8192 * 8192); cudaMemcpy(gpu_iones, iones, sizeof(Ion) * 6000, cudaMemcpyHostToDevice); cudaEventCreate(&ct1); cudaEventCreate(&ct2); cudaEventRecord(ct1); for (cantidad = 5000; cantidad < 5009; cantidad++) { int block_size = 256; int grid_size = (int) ceil( (float) cantidad / block_size); cout << "Calculando carga para " << cantidad << endl; vertices_cercanos<<<grid_size, block_size>>>(gpu_iones, cargas, cantidad); cudaDeviceSynchronize(); grid_size = (int) ceil( (float) 1 / block_size); posicionar_ion<<<grid_size, block_size>>>(iones, cargas, cantidad); cudaDeviceSynchronize(); cudaMemcpy(iones, gpu_iones,sizeof(Ion) * 6000, cudaMemcpyDeviceToHost); cout << iones[cantidad].x << " " << iones[cantidad].y << endl; } cudaEventRecord(ct2); cudaEventSynchronize(ct2); cudaEventElapsedTime(&dt, ct1, ct2); cout << "Tiempo: " << dt << "[ms]" << '\n'; cudaFree(gpu_iones); return 0; } // Aquí yacen los restos de la grandiosa idea de ordenar los puntos y encontrar los que pertenecen a la circunferencia con búsqueda binaria /* __host__ __device__ int busquedaBinaria(Ion iones[], int x, int y, int l, int r, float dist) { int m; while(l < r) { m = (l + r) / 2; if(distanciaEuclidiana(iones[m], x, y) < dist) r = m - 1; else if(distanciaEuclidiana(iones[m], x, y) > dist) l = m + 1; else 
if(distanciaEuclidiana(iones[m], x, y) == dist) break; } return m; } */ /* bool ionCompare(Ion const & a, Ion const & b) { return a.x < b.x || (a.y <= b.y && a.x == b.x); } */ /* __global__ void posicionar_ion(float* cargas) { int tId = threadIdx.x + blockIdx.x * blockDim.x; if(tId < 8192*8192) { int a = tId/8192; int b = tId%8192; minimo_cuda = (cargas[tId] < minimo_cuda) * cargas[tId] + (cargas[tId] >= minimo_cuda) * minimo_cuda; minimo_x_cuda = (cargas[tId] < minimo_cuda) * a + (cargas[tId] >= minimo_cuda) * minimo_x_cuda; minimo_y_cuda = (cargas[tId] < minimo_cuda) * b + (cargas[tId] >= minimo_cuda) * minimo_y_cuda; if(cargas[tId] > 0.0) printf("MIRA MAMA, SIN MANOS: %f %f %d %d\n", cargas[tId], minimo_cuda, minimo_x_cuda, minimo_y_cuda ); } } */
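For reference, the charge model that calcular_carga accumulates with atomicAdd can be restated as a plain host loop. The sketch below is mine, not part of the repository; it only re-expresses the 1/d contribution with the 100-unit cutoff and the divide-by-zero clamp from distanciaEuclidiana, which is handy for spot-checking a few lattice vertices on the CPU.

// Host-side reference for the charge at one lattice vertex (sketch).
#include <cmath>
#include <cstdio>

struct IonRef { float x, y; };

// Charge at vertex (vx, vy): sum of 1/d over ions within radius 100,
// with d clamped to 1 when the vertex coincides with the ion.
float carga_en_vertice(const IonRef* iones, int n, int vx, int vy)
{
  float q = 0.0f;
  for (int i = 0; i < n; ++i) {
    float dx = iones[i].x - vx, dy = iones[i].y - vy;
    float d = std::sqrt(dx * dx + dy * dy);
    if (d <= 0.0f) d = 1.0f;
    if (d <= 100.0f) q += 1.0f / d;
  }
  return q;
}

int main()
{
  IonRef iones[2] = {{10.0f, 10.0f}, {50.0f, 50.0f}};  // made-up sample ions
  std::printf("Q(12,14) = %f\n", carga_en_vertice(iones, 2, 12, 14));
  return 0;
}
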
1f705c973e2a5ef7a5e4528cdcf3502e849189e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : backpropagate.cu Author : Christophoros Bekos ([email protected]) Version : Copyright : @ copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #include <stdio.h> #define work_per_block 100 #define threads_per_warp 32 #define threads_per_warp 32 __device__ void sigmoid(float& z) { z = 1.0 / (1.0 + exp(-(z))); } __device__ void hadamard_product_small(float* sh_a, float* sh_b, float* sh_res, int multiplier, int size, int mult) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; int block_id = blockIdx.x; // start the computations int cnt = 0; for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { sh_res[i * mult] = sh_b[i] * sh_a[i] * ((int) (i < size)); cnt++; } // result is stored in sh_b vector\ //done } __device__ void array_sum_small(float* sha, float& result, int size, int start) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; // start the computations for (int i = threads_per_warp; i < work_per_block; i = i * 2) { // switch 1 : even warps add their's neighbors contents switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == even // add the "more next vector" sha[thread_id] = sha[thread_id] + sha[i + thread_id] * ((int) (start + thread_id + i < size)); break; default: // thread_id % i == odd // do nothing break; } __syncthreads(); // switch2 : odd warps clean up their content switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == even // do nothing break; default: // thread_id % i == odd // clean up sha[thread_id] = 0; //__syncthreads(); break; } __syncthreads(); } // loop ended, sha[0:threads_per_warp] got the sum if (thread_id == 0) { for (int i = 0; i < threads_per_warp; i++) { result = result + sha[i]; sha[i] = 0; } } } __device__ void backpropagate_some_cols(float* result, int rows_per_block, int col_length, float* matrix, float* vector, int last_block, int size, float* sigm_der) { // README : // each block uses rows threads // each block modifies rows columns ( cols columns per block) // each thread modifies one column , column's length is col_length // cols : number of columns that this block will modify // one last block has less job to do, this one takes parameter last_block == 1 // and size (after index exceeds size in last block, no computation must be made) int thread_id = threadIdx.y * blockDim.x + threadIdx.x; int block_id = blockIdx.x; extern __shared__ float shared[]; float* temp = shared; float* m = &temp[rows_per_block]; float* v = &m[col_length * rows_per_block]; float* res = &v[col_length * rows_per_block]; // move data in shared memory for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { m[i] = matrix[i]; } v[thread_id] = 0; v[thread_id] = vector[thread_id] * (thread_id < col_length); __syncthreads(); int cnt = 0; for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { m[i] = m[i] * v[cnt]; cnt++; } __syncthreads(); temp[thread_id] = 0; for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { temp[thread_id] += m[i]; } __syncthreads(); result[thread_id] = temp[thread_id] * sigm_der[thread_id]; } __global__ void backpropagate(float* result, int rows_per_block, int col_length, 
float* matrix, float* vector, int last_block, int size, float* sigm_der) { int block_id = blockIdx.y * gridDim.x + blockIdx.x; int thread_id = threadIdx.y * blockDim.x + threadIdx.x; backpropagate_some_cols(&result[block_id * rows_per_block], rows_per_block, col_length, &matrix[block_id * rows_per_block], vector, (block_id == last_block), size, &sigm_der[block_id * rows_per_block]); } void initialize(float *data, unsigned size, float arg) { for (unsigned i = 0; i < size; ++i) { data[i] = arg; } } void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new, float* sigm_der, float* w); int main(void) { int rows = 783; int cols = 30; float *w = new float[rows * cols]; float *d_old = new float[cols]; float *delta = new float[rows]; float *delta_gpu = new float[rows]; float* sigm_der = new float[rows]; float *m, *v, *new_delta, *sigm_der_gpu; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { w[i * cols + j] = 1.2; } } initialize(d_old, cols, 1.5); initialize(sigm_der, rows, 1.6); hipMalloc((void**) &m, sizeof(float) * (rows * cols)); hipMalloc((void**) &v, sizeof(float) * cols); hipMalloc((void**) &new_delta, sizeof(float) * rows); hipMalloc((void**) &sigm_der_gpu, sizeof(float) * rows); hipMemcpy(m, w, sizeof(float) * (rows * cols), hipMemcpyHostToDevice); hipMemcpy(v, d_old, sizeof(float) * cols, hipMemcpyHostToDevice); hipMemcpy(sigm_der_gpu, sigm_der, sizeof(float) * rows, hipMemcpyHostToDevice); int numofthreads = work_per_block; int rows_per_block = numofthreads; int col_length = cols; int last_block = floor(rows / work_per_block); float cache = 11000 * sizeof(float); int num_of_blocks = floor(rows / work_per_block) + 1; int size_for_last_block = rows - floor(rows / work_per_block) * numofthreads; // printf("aaaa %d \n", num_of_blocks); // BACKPROPAGATE FOR 1 ITERATION // IN GPU //printf("sadfa %d ",size_for_last_block); hipLaunchKernelGGL(( backpropagate), dim3(num_of_blocks), dim3(rows_per_block), cache, 0, new_delta, rows_per_block, col_length, m, v, last_block, size_for_last_block, sigm_der_gpu); hipDeviceSynchronize(); hipMemcpy(delta_gpu, new_delta, sizeof(float) * rows, hipMemcpyDeviceToHost); // IN CPU cpu_backpropagate(d_old, rows, cols, &delta, sigm_der, w); // COMPARE RESULTS int success = 1; for (int i = 0; i < rows; i++) { // printf("kappa %f \n", delta[i]); if (delta[i] != delta_gpu[i]) { printf("ERROR in a, cpu = %f, gpu = %f\n", delta[i], delta_gpu[i]); success = 0; } } /* Free memory */ hipFree(new_delta); hipFree(m); hipFree(v); if (success) { printf("SUCCESS \n"); } return 0; } float* hadamard_product(int size, float* a, float* b) { // returns the datamard product for vectors a and b // (return a.*b in matlab) // size = length of arrays a and b float* result = new float[size]; for (int i = 0; i < size; i++) { result[i] = a[i] * b[i]; } return result; } float* mull_backpropagate(int rows, int cols, float* matrix, float* vector) { // TESTED // returns "rows x 1" vector float* temp = NULL; float* res = new float[rows]; for (int j = 0; j < rows; j++) { temp = hadamard_product(cols, &matrix[j * cols], vector); res[j] = 0; for (int i = 0; i < cols; i++) { res[j] += temp[i]; } delete[] temp; } return res; } void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new, float* sigm_der, float* w) { float* w_d; w_d = mull_backpropagate(rows, cols, w, d_L); d_new[0] = hadamard_product(rows, w_d, sigm_der); delete[] w_d; }
1f705c973e2a5ef7a5e4528cdcf3502e849189e4.cu
/* ============================================================================ Name : backpropagate.cu Author : Christophoros Bekos ([email protected]) Version : Copyright : @ copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #include <stdio.h> #define work_per_block 100 #define threads_per_warp 32 #define threads_per_warp 32 __device__ void sigmoid(float& z) { z = 1.0 / (1.0 + exp(-(z))); } __device__ void hadamard_product_small(float* sh_a, float* sh_b, float* sh_res, int multiplier, int size, int mult) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; int block_id = blockIdx.x; // start the computations int cnt = 0; for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { sh_res[i * mult] = sh_b[i] * sh_a[i] * ((int) (i < size)); cnt++; } // result is stored in sh_b vector\ //done } __device__ void array_sum_small(float* sha, float& result, int size, int start) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; // start the computations for (int i = threads_per_warp; i < work_per_block; i = i * 2) { // switch 1 : even warps add their's neighbors contents switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == even // add the "more next vector" sha[thread_id] = sha[thread_id] + sha[i + thread_id] * ((int) (start + thread_id + i < size)); break; default: // thread_id % i == odd // do nothing break; } __syncthreads(); // switch2 : odd warps clean up their content switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == even // do nothing break; default: // thread_id % i == odd // clean up sha[thread_id] = 0; //__syncthreads(); break; } __syncthreads(); } // loop ended, sha[0:threads_per_warp] got the sum if (thread_id == 0) { for (int i = 0; i < threads_per_warp; i++) { result = result + sha[i]; sha[i] = 0; } } } __device__ void backpropagate_some_cols(float* result, int rows_per_block, int col_length, float* matrix, float* vector, int last_block, int size, float* sigm_der) { // README : // each block uses rows threads // each block modifies rows columns ( cols columns per block) // each thread modifies one column , column's length is col_length // cols : number of columns that this block will modify // one last block has less job to do, this one takes parameter last_block == 1 // and size (after index exceeds size in last block, no computation must be made) int thread_id = threadIdx.y * blockDim.x + threadIdx.x; int block_id = blockIdx.x; extern __shared__ float shared[]; float* temp = shared; float* m = &temp[rows_per_block]; float* v = &m[col_length * rows_per_block]; float* res = &v[col_length * rows_per_block]; // move data in shared memory for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { m[i] = matrix[i]; } v[thread_id] = 0; v[thread_id] = vector[thread_id] * (thread_id < col_length); __syncthreads(); int cnt = 0; for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { m[i] = m[i] * v[cnt]; cnt++; } __syncthreads(); temp[thread_id] = 0; for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { temp[thread_id] += m[i]; } __syncthreads(); result[thread_id] = temp[thread_id] * sigm_der[thread_id]; } __global__ void backpropagate(float* result, int rows_per_block, int col_length, float* matrix, float* vector, int last_block, int size, float* sigm_der) { int block_id = 
blockIdx.y * gridDim.x + blockIdx.x; int thread_id = threadIdx.y * blockDim.x + threadIdx.x; backpropagate_some_cols(&result[block_id * rows_per_block], rows_per_block, col_length, &matrix[block_id * rows_per_block], vector, (block_id == last_block), size, &sigm_der[block_id * rows_per_block]); } void initialize(float *data, unsigned size, float arg) { for (unsigned i = 0; i < size; ++i) { data[i] = arg; } } void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new, float* sigm_der, float* w); int main(void) { int rows = 783; int cols = 30; float *w = new float[rows * cols]; float *d_old = new float[cols]; float *delta = new float[rows]; float *delta_gpu = new float[rows]; float* sigm_der = new float[rows]; float *m, *v, *new_delta, *sigm_der_gpu; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { w[i * cols + j] = 1.2; } } initialize(d_old, cols, 1.5); initialize(sigm_der, rows, 1.6); cudaMalloc((void**) &m, sizeof(float) * (rows * cols)); cudaMalloc((void**) &v, sizeof(float) * cols); cudaMalloc((void**) &new_delta, sizeof(float) * rows); cudaMalloc((void**) &sigm_der_gpu, sizeof(float) * rows); cudaMemcpy(m, w, sizeof(float) * (rows * cols), cudaMemcpyHostToDevice); cudaMemcpy(v, d_old, sizeof(float) * cols, cudaMemcpyHostToDevice); cudaMemcpy(sigm_der_gpu, sigm_der, sizeof(float) * rows, cudaMemcpyHostToDevice); int numofthreads = work_per_block; int rows_per_block = numofthreads; int col_length = cols; int last_block = floor(rows / work_per_block); float cache = 11000 * sizeof(float); int num_of_blocks = floor(rows / work_per_block) + 1; int size_for_last_block = rows - floor(rows / work_per_block) * numofthreads; // printf("aaaa %d \n", num_of_blocks); // BACKPROPAGATE FOR 1 ITERATION // IN GPU //printf("sadfa %d ",size_for_last_block); backpropagate<<<num_of_blocks, rows_per_block, cache>>>(new_delta, rows_per_block, col_length, m, v, last_block, size_for_last_block, sigm_der_gpu); cudaDeviceSynchronize(); cudaMemcpy(delta_gpu, new_delta, sizeof(float) * rows, cudaMemcpyDeviceToHost); // IN CPU cpu_backpropagate(d_old, rows, cols, &delta, sigm_der, w); // COMPARE RESULTS int success = 1; for (int i = 0; i < rows; i++) { // printf("kappa %f \n", delta[i]); if (delta[i] != delta_gpu[i]) { printf("ERROR in a, cpu = %f, gpu = %f\n", delta[i], delta_gpu[i]); success = 0; } } /* Free memory */ cudaFree(new_delta); cudaFree(m); cudaFree(v); if (success) { printf("SUCCESS \n"); } return 0; } float* hadamard_product(int size, float* a, float* b) { // returns the datamard product for vectors a and b // (return a.*b in matlab) // size = length of arrays a and b float* result = new float[size]; for (int i = 0; i < size; i++) { result[i] = a[i] * b[i]; } return result; } float* mull_backpropagate(int rows, int cols, float* matrix, float* vector) { // TESTED // returns "rows x 1" vector float* temp = NULL; float* res = new float[rows]; for (int j = 0; j < rows; j++) { temp = hadamard_product(cols, &matrix[j * cols], vector); res[j] = 0; for (int i = 0; i < cols; i++) { res[j] += temp[i]; } delete[] temp; } return res; } void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new, float* sigm_der, float* w) { float* w_d; w_d = mull_backpropagate(rows, cols, w, d_L); d_new[0] = hadamard_product(rows, w_d, sigm_der); delete[] w_d; }
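The launch in main() passes a hard-coded cache of 11000 floats as dynamic shared memory. Based on my reading of the pointer layout inside backpropagate_some_cols (temp, m, and v carved out of one extern __shared__ array), the requirement can be computed instead of guessed; the helper below is a sketch under that assumption, not code from the files.

// Sketch: dynamic shared memory actually touched by backpropagate_some_cols,
// given the layout temp[rows] + m[rows*cols] + v[rows*cols] inside shared[].
#include <cstdio>

size_t shared_bytes(int rows_per_block, int col_length)
{
  size_t floats = static_cast<size_t>(rows_per_block)
                + 2u * static_cast<size_t>(rows_per_block) * static_cast<size_t>(col_length);
  return floats * sizeof(float);
}

int main()
{
  // assumption: same sizes as the test driver above (100 threads per block, 30 columns)
  std::printf("%zu bytes needed vs %zu bytes requested\n",
              shared_bytes(100, 30), static_cast<size_t>(11000) * sizeof(float));
  return 0;
}
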
3e97ef2e4b529f50bbb09f12fcc7f4baf513c2ab.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> class cuStopwatch{ // todo: add your internal data structure, all in private private: hipEvent_t startEvent; hipEvent_t endEvent; bool started; public: cuStopwatch(); ~cuStopwatch(); void start(); float stop(); }; cuStopwatch::cuStopwatch(){ // todo: constructor hipEventCreate(&startEvent); hipEventCreate(&endEvent); started = false; } cuStopwatch::~cuStopwatch(){ // todo: destructor hipEventDestroy(startEvent); hipEventDestroy(endEvent); } void cuStopwatch::start(){ // todo: start the stopwatch, and ignore double start if(!started) { hipEventRecord(startEvent); started = true; } } float cuStopwatch::stop(){ // todo: stop the stopwatch and return elapsed time, ignore invalid stops (e.g. stop when not yet started or double stop) if(!started) { return -1; } hipEventSynchronize(startEvent); hipEventRecord(endEvent); hipEventSynchronize(endEvent); float ms; hipEventElapsedTime(&ms, startEvent, endEvent); started = false; return ms; }
3e97ef2e4b529f50bbb09f12fcc7f4baf513c2ab.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> class cuStopwatch{ // todo: add your internal data structure, all in private private: cudaEvent_t startEvent; cudaEvent_t endEvent; bool started; public: cuStopwatch(); ~cuStopwatch(); void start(); float stop(); }; cuStopwatch::cuStopwatch(){ // todo: constructor cudaEventCreate(&startEvent); cudaEventCreate(&endEvent); started = false; } cuStopwatch::~cuStopwatch(){ // todo: destructor cudaEventDestroy(startEvent); cudaEventDestroy(endEvent); } void cuStopwatch::start(){ // todo: start the stopwatch, and ignore double start if(!started) { cudaEventRecord(startEvent); started = true; } } float cuStopwatch::stop(){ // todo: stop the stopwatch and return elapsed time, ignore invalid stops (e.g. stop when not yet started or double stop) if(!started) { return -1; } cudaEventSynchronize(startEvent); cudaEventRecord(endEvent); cudaEventSynchronize(endEvent); float ms; cudaEventElapsedTime(&ms, startEvent, endEvent); started = false; return ms; }
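A usage sketch for the cuStopwatch class above (assuming its definition is visible in the same translation unit; the kernel is a placeholder of mine): call start() before the work, stop() afterwards, and treat a negative return value as "the watch was never started".

#include <cuda_runtime.h>
// assumes the cuStopwatch definition above is in scope

__global__ void touch(float* p, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] += 1.0f;
}

float time_touch()
{
  const int n = 1 << 20;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));
  cuStopwatch sw;
  sw.start();                            // a second start() before stop() is ignored
  touch<<<(n + 255) / 256, 256>>>(d, n);
  float ms = sw.stop();                  // -1 if start() was never called
  cudaFree(d);
  return ms;
}
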
e77fa87d661a59338ed4bb226eeaa410851ee3a5.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "radix.h" #include "efficient.h" namespace StreamCompaction { namespace Radix { /* * Get INVERTED `d`th digit of odata[k]. */ __global__ void kDigit(int n, int d, int *dv_odata, const int *dv_idata) { int k = threadIdx.x; if (k >= n) { return; } dv_odata[k] = (dv_idata[k] & (1 << d)) > 0 ? 1 : 0; } __global__ void kInvert(int n, int *odata, const int *idata) { int k = threadIdx.x; if (k >= n) { return; } odata[k] = idata[k] == 0 ? 1 : 0; } __global__ void kMapToIndex(int n, int *odata, int *b, int *f_indices, int pivot) { int k = threadIdx.x; if (k >= n) { return; } odata[k] = (b[k] == 1) ? (k - f_indices[k] + pivot) : f_indices[k]; } /* * Implement split on device memory. * Returns totalFalses (eg. the split point). */ __host__ int split(int n, int d, int *dv_odata, int *dv_idata) { printf("---- split %d %d ----\n", n, d); int array_size = n * sizeof(int); int *TMP = (int*)malloc(array_size); int *b; int *e; int *t; int *indices; hipMalloc((void**) &b, array_size); hipMalloc((void**) &e, array_size); hipMalloc((void**) &t, array_size); hipMalloc((void**) &indices, array_size); hipLaunchKernelGGL(( kDigit), dim3(1), dim3(n), 0, 0, n, d, b, dv_idata); // b printf("b: "); hipMemcpy(TMP, b, array_size, hipMemcpyDeviceToHost); for (int i = 0; i < n; i++) { printf("%d\t", TMP[i]); } printf("\n"); hipLaunchKernelGGL(( kInvert), dim3(1), dim3(n), 0, 0, n, e, b); // e printf("e: "); hipMemcpy(TMP, e, array_size, hipMemcpyDeviceToHost); for (int i = 0; i < n; i++) { printf("%d\t", TMP[i]); } printf("\n"); int lastElt; hipMemcpy(&lastElt, e + n-1, sizeof(int), hipMemcpyDeviceToHost); StreamCompaction::Efficient::dv_scan(n, e); // f IN PLACE OF e int totalFalses; hipMemcpy(&totalFalses, e + n-1, sizeof(int), hipMemcpyDeviceToHost); totalFalses += lastElt; printf("totalFalses = %d\n", totalFalses); hipLaunchKernelGGL(( kMapToIndex), dim3(1), dim3(n), 0, 0, n, indices, b, e, totalFalses); printf("indices: "); hipMemcpy(TMP, indices, array_size, hipMemcpyDeviceToHost); for (int i = 0; i < n; i++) { printf("%d\t", TMP[i]); } printf("\n"); hipLaunchKernelGGL(( StreamCompaction::Common::kernScatter), dim3(1), dim3(n), 0, 0, n, dv_odata, indices, dv_idata); // scatter printf("scattered: "); hipMemcpy(TMP, dv_odata, array_size, hipMemcpyDeviceToHost); for (int i = 0; i < n; i++) { printf("%d\t", TMP[i]); } printf("\n"); hipFree(b); return totalFalses; } int testArrayOrder(int n, int *a) { for (int i = 0; i < n-1; i++) { if (a[i] > a[i+1]) { return 1; } } return 0; } /* * odata and idata are device memory points. 
*/ __host__ void sortRecursive(int n, int d, int dmax, int *odata, int *idata) { if (d >= dmax) { return; } int pivot = split(n, d, odata, idata); //sortRecursive(n, d+1, dmax, odata, odata); //if (pivot != 0) { // sortRecursive(pivot, d+1, dmax, odata, odata); //} //if (pivot != n) { // sortRecursive(n-pivot, d+1, dmax, odata+n, odata+n); //} } __host__ void sortRecursive2(int n, int d, int dmax, int *odata, int *idata) { if (d <= 0) { return; } int pivot = split(n, d, odata, idata); if (pivot != 0) { sortRecursive(pivot, d-1, dmax, odata, odata); } if (pivot != n) { sortRecursive(n-pivot, d-1, dmax, odata+n, odata+n); } } __host__ void sort(int n, int *odata, const int *idata) { int max = idata[0]; for (int i = 0; i < n; i++) { if (idata[i] > max) { max = idata[i]; } } int maxDigits = ilog2ceil(max); int *dv_odata; int *dv_idata; int array_size = n * sizeof(int); hipMalloc((void**) &dv_odata, array_size); hipMalloc((void**) &dv_idata, array_size); hipMemcpy(dv_idata, idata, array_size, hipMemcpyHostToDevice); //sortRecursive(n, 0, maxDigits, dv_odata, dv_idata); sortRecursive2(n, 0, maxDigits, dv_odata, dv_idata); hipMemcpy(odata, dv_odata, array_size, hipMemcpyDeviceToHost); //for (int i = 0; i < n; i++) { printf("%d\t%d\n", idata[i], odata[i]); } hipFree(dv_odata); hipFree(dv_idata); } } }
e77fa87d661a59338ed4bb226eeaa410851ee3a5.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "radix.h" #include "efficient.h" namespace StreamCompaction { namespace Radix { /* * Get INVERTED `d`th digit of odata[k]. */ __global__ void kDigit(int n, int d, int *dv_odata, const int *dv_idata) { int k = threadIdx.x; if (k >= n) { return; } dv_odata[k] = (dv_idata[k] & (1 << d)) > 0 ? 1 : 0; } __global__ void kInvert(int n, int *odata, const int *idata) { int k = threadIdx.x; if (k >= n) { return; } odata[k] = idata[k] == 0 ? 1 : 0; } __global__ void kMapToIndex(int n, int *odata, int *b, int *f_indices, int pivot) { int k = threadIdx.x; if (k >= n) { return; } odata[k] = (b[k] == 1) ? (k - f_indices[k] + pivot) : f_indices[k]; } /* * Implement split on device memory. * Returns totalFalses (eg. the split point). */ __host__ int split(int n, int d, int *dv_odata, int *dv_idata) { printf("---- split %d %d ----\n", n, d); int array_size = n * sizeof(int); int *TMP = (int*)malloc(array_size); int *b; int *e; int *t; int *indices; cudaMalloc((void**) &b, array_size); cudaMalloc((void**) &e, array_size); cudaMalloc((void**) &t, array_size); cudaMalloc((void**) &indices, array_size); kDigit<<<1, n>>>(n, d, b, dv_idata); // b printf("b: "); cudaMemcpy(TMP, b, array_size, cudaMemcpyDeviceToHost); for (int i = 0; i < n; i++) { printf("%d\t", TMP[i]); } printf("\n"); kInvert<<<1, n>>>(n, e, b); // e printf("e: "); cudaMemcpy(TMP, e, array_size, cudaMemcpyDeviceToHost); for (int i = 0; i < n; i++) { printf("%d\t", TMP[i]); } printf("\n"); int lastElt; cudaMemcpy(&lastElt, e + n-1, sizeof(int), cudaMemcpyDeviceToHost); StreamCompaction::Efficient::dv_scan(n, e); // f IN PLACE OF e int totalFalses; cudaMemcpy(&totalFalses, e + n-1, sizeof(int), cudaMemcpyDeviceToHost); totalFalses += lastElt; printf("totalFalses = %d\n", totalFalses); kMapToIndex<<<1, n>>>(n, indices, b, e, totalFalses); printf("indices: "); cudaMemcpy(TMP, indices, array_size, cudaMemcpyDeviceToHost); for (int i = 0; i < n; i++) { printf("%d\t", TMP[i]); } printf("\n"); StreamCompaction::Common::kernScatter<<<1, n>>>(n, dv_odata, indices, dv_idata); // scatter printf("scattered: "); cudaMemcpy(TMP, dv_odata, array_size, cudaMemcpyDeviceToHost); for (int i = 0; i < n; i++) { printf("%d\t", TMP[i]); } printf("\n"); cudaFree(b); return totalFalses; } int testArrayOrder(int n, int *a) { for (int i = 0; i < n-1; i++) { if (a[i] > a[i+1]) { return 1; } } return 0; } /* * odata and idata are device memory points. 
*/ __host__ void sortRecursive(int n, int d, int dmax, int *odata, int *idata) { if (d >= dmax) { return; } int pivot = split(n, d, odata, idata); //sortRecursive(n, d+1, dmax, odata, odata); //if (pivot != 0) { // sortRecursive(pivot, d+1, dmax, odata, odata); //} //if (pivot != n) { // sortRecursive(n-pivot, d+1, dmax, odata+n, odata+n); //} } __host__ void sortRecursive2(int n, int d, int dmax, int *odata, int *idata) { if (d <= 0) { return; } int pivot = split(n, d, odata, idata); if (pivot != 0) { sortRecursive(pivot, d-1, dmax, odata, odata); } if (pivot != n) { sortRecursive(n-pivot, d-1, dmax, odata+n, odata+n); } } __host__ void sort(int n, int *odata, const int *idata) { int max = idata[0]; for (int i = 0; i < n; i++) { if (idata[i] > max) { max = idata[i]; } } int maxDigits = ilog2ceil(max); int *dv_odata; int *dv_idata; int array_size = n * sizeof(int); cudaMalloc((void**) &dv_odata, array_size); cudaMalloc((void**) &dv_idata, array_size); cudaMemcpy(dv_idata, idata, array_size, cudaMemcpyHostToDevice); //sortRecursive(n, 0, maxDigits, dv_odata, dv_idata); sortRecursive2(n, 0, maxDigits, dv_odata, dv_idata); cudaMemcpy(odata, dv_odata, array_size, cudaMemcpyDeviceToHost); //for (int i = 0; i < n; i++) { printf("%d\t%d\n", idata[i], odata[i]); } cudaFree(dv_odata); cudaFree(dv_idata); } } }
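The split() routine above is the core of each radix pass: b holds the d-th bit, e its negation, and the scanned e gives every zero-bit element its destination, while one-bit elements go to k - f[k] + totalFalses (the code saving e[n-1] before the scan and adding it back suggests Efficient::dv_scan is exclusive). The host-side reference below is my own sketch of that same mapping, useful for checking the GPU scatter on small inputs.

// Host reference for split(): stable partition on bit d (zero-bit elements first).
#include <cstdio>
#include <vector>

int split_reference(const std::vector<int>& in, int d, std::vector<int>& out)
{
  int n = static_cast<int>(in.size());
  std::vector<int> e(n), f(n);
  for (int k = 0; k < n; ++k) e[k] = ((in[k] >> d) & 1) ? 0 : 1;   // e = !b
  int running = 0;
  for (int k = 0; k < n; ++k) { f[k] = running; running += e[k]; } // exclusive scan of e
  int totalFalses = f[n - 1] + e[n - 1];
  out.assign(n, 0);
  for (int k = 0; k < n; ++k) {
    int dst = e[k] ? f[k] : (k - f[k] + totalFalses);
    out[dst] = in[k];
  }
  return totalFalses;  // elements whose bit d is 0 end up in [0, totalFalses)
}

int main()
{
  std::vector<int> in = {5, 2, 7, 0, 3, 6}, out;
  int pivot = split_reference(in, 0, out);
  std::printf("pivot = %d\n", pivot);
  for (int v : out) std::printf("%d ", v);
  std::printf("\n");  // prints: 2 0 6 5 7 3
  return 0;
}
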
773d154b2b98f541d93eb1cf57919d4326aef113.hip
// !!! This is a file automatically generated by hipify!!! // Copyright Naoki Shibata and contributors 2010 - 2021. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <float.h> #include <inttypes.h> #include <stdarg.h> #include <ctype.h> #include <assert.h> #include <hip/hip_runtime.h> #include "sleefquadinline_cuda.h" #include "sleefquadinline_purec_scalar.h" #define STDIN_FILENO 0 // static int startsWith(const char *str, const char *prefix) { while(*prefix != '\0') if (*str++ != *prefix++) return 0; return *prefix == '\0'; } static double u2d(uint64_t u) { union { double f; uint64_t i; } tmp; tmp.i = u; return tmp.f; } static uint64_t d2u(double d) { union { double f; uint64_t i; } tmp; tmp.f = d; return tmp.i; } // __global__ void xaddq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_addq1_u05cuda(*a0, *a1); } __global__ void xsubq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_subq1_u05cuda(*a0, *a1); } __global__ void xmulq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_mulq1_u05cuda(*a0, *a1); } __global__ void xdivq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_divq1_u05cuda(*a0, *a1); } __global__ void xnegq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_negq1_cuda(*a0); } __global__ void xicmpltq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpltq1_cuda(*a0, *a1); } __global__ void xicmpgtq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpgtq1_cuda(*a0, *a1); } __global__ void xicmpleq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpleq1_cuda(*a0, *a1); } __global__ void xicmpgeq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpgeq1_cuda(*a0, *a1); } __global__ void xicmpeqq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpeqq1_cuda(*a0, *a1); } __global__ void xicmpneq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpneq1_cuda(*a0, *a1); } __global__ void xicmpq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpq1_cuda(*a0, *a1); } __global__ void xiunordq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_iunordq1_cuda(*a0, *a1); } __global__ void xcast_from_doubleq(Sleef_quadx1 *r0, double *d0) { *r0 = Sleef_cast_from_doubleq1_cuda(*d0); } __global__ void xcast_to_doubleq(double *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_doubleq1_cuda(*a0); } __global__ void xcast_from_int64q(Sleef_quadx1 *r0, int64_t *i0) { *r0 = Sleef_cast_from_int64q1_cuda(*i0); } __global__ void xcast_to_int64q(int64_t *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_int64q1_cuda(*a0); } __global__ void xcast_from_uint64q(Sleef_quadx1 *r0, uint64_t *u0) { *r0 = Sleef_cast_from_uint64q1_cuda(*u0); } __global__ void xcast_to_uint64q(uint64_t *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_uint64q1_cuda(*a0); } __global__ void xsqrtq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sqrtq1_u05cuda(*a0); } __global__ void xcbrtq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_cbrtq1_u10cuda(*a0); } __global__ void xsinq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sinq1_u10cuda(*a0); } __global__ void xcosq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_cosq1_u10cuda(*a0); } __global__ void xtanq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_tanq1_u10cuda(*a0); } __global__ void xasinq_u10(Sleef_quadx1 *r, 
Sleef_quadx1 *a0) { *r = Sleef_asinq1_u10cuda(*a0); } __global__ void xacosq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_acosq1_u10cuda(*a0); } __global__ void xatanq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_atanq1_u10cuda(*a0); } __global__ void xatan2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_atan2q1_u10cuda(*a0, *a1); } __global__ void xexpq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_expq1_u10cuda(*a0); } __global__ void xexp2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_exp2q1_u10cuda(*a0); } __global__ void xexp10q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_exp10q1_u10cuda(*a0); } __global__ void xexpm1q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_expm1q1_u10cuda(*a0); } __global__ void xlogq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_logq1_u10cuda(*a0); } __global__ void xlog2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log2q1_u10cuda(*a0); } __global__ void xlog10q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log10q1_u10cuda(*a0); } __global__ void xlog1pq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log1pq1_u10cuda(*a0); } __global__ void xpowq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_powq1_u10cuda(*a0, *a1); } __global__ void xsinhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sinhq1_u10cuda(*a0); } __global__ void xcoshq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_coshq1_u10cuda(*a0); } __global__ void xtanhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_tanhq1_u10cuda(*a0); } __global__ void xasinhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_asinhq1_u10cuda(*a0); } __global__ void xacoshq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_acoshq1_u10cuda(*a0); } __global__ void xatanhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_atanhq1_u10cuda(*a0); } __global__ void xfabsq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_fabsq1_cuda(*a0); } __global__ void xcopysignq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_copysignq1_cuda(*a0, *a1); } __global__ void xfmaxq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fmaxq1_cuda(*a0, *a1); } __global__ void xfminq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fminq1_cuda(*a0, *a1); } __global__ void xfdimq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fdimq1_u05cuda(*a0, *a1); } __global__ void xfmodq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fmodq1_cuda(*a0, *a1); } __global__ void xremainderq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_remainderq1_cuda(*a0, *a1); } __global__ void xfrexpq(Sleef_quadx1 *r, Sleef_quadx1 *a0, int *i0) { *r = Sleef_frexpq1_cuda(*a0, i0); } __global__ void xmodfq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_modfq1_cuda(*a0, a1); } __global__ void xfmaq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1, Sleef_quadx1 *a2) { *r = Sleef_fmaq1_u05cuda(*a0, *a1, *a2); } __global__ void xhypotq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_hypotq1_u05cuda(*a0, *a1); } __global__ void xilogbq(int *r, Sleef_quadx1 *a0) { *r = Sleef_ilogbq1_cuda(*a0); } __global__ void xldexpq(Sleef_quadx1 *r, Sleef_quadx1 *a0, int *i0) { *r = Sleef_ldexpq1_cuda(*a0, *i0); } __global__ void xtruncq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_truncq1_cuda(*a0); } __global__ void xfloorq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_floorq1_cuda(*a0); } __global__ void 
xceilq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_ceilq1_cuda(*a0); } __global__ void xroundq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_roundq1_cuda(*a0); } __global__ void xrintq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_rintq1_cuda(*a0); } // typedef union { Sleef_quad q; struct { uint64_t l, h; }; } cnv128; #define BUFSIZE 1024 #define func_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, a0); \ hipDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l, &c1.h, &c1.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, a0, a1); \ hipDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1, c2; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, \ &c0.h, &c0.l, &c1.h, &c1.l, &c2.h, &c2.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \ *a2 = Sleef_setq1_cuda(*a2, 0, c2.q); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, a0, a1, a2); \ hipDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_i_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, i0, a0); \ hipDeviceSynchronize(); \ printf("%d\n", *i0); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_i_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l, &c1.h, &c1.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, i0, a0, a1); \ hipDeviceSynchronize(); \ printf("%d\n", *i0); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_i(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ int k; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %d", &c0.h, &c0.l, &k); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *i0 = k; \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, a0, i0); \ hipDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_d_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" 
PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, d0, a0); \ hipDeviceSynchronize(); \ printf("%" PRIx64 "\n", d2u(*d0)); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_d(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ uint64_t u; \ sscanf(buf, funcStr " %" PRIx64, &u); \ *d0 = u2d(u); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, d0); \ hipDeviceSynchronize(); \ cnv128 c0; \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_i64_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, i64, a0); \ hipDeviceSynchronize(); \ printf("%" PRIx64 "\n", *i64); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_i64(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ sscanf(buf, funcStr " %" PRIx64, i64); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, i64); \ hipDeviceSynchronize(); \ cnv128 c0; \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_u64_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, u64, a0); \ hipDeviceSynchronize(); \ printf("%" PRIx64 "\n", *u64); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_u64(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ sscanf(buf, funcStr " %" PRIx64, u64); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, u64); \ hipDeviceSynchronize(); \ cnv128 c0; \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_pi(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, a0, i0); \ hipDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 " %d\n", c0.h, c0.l, *i0); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_pq(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, a0, a1); \ hipDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ c1.q = Sleef_getq1_cuda(*a1, 0); \ printf("%" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l, c1.h, c1.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } int main(int argc, char **argv) { #if 0 hipInit(0); int ndevice; hipGetDeviceCount(&ndevice); if (ndevice == 0) { fprintf(stderr, "No cuda device available\n"); exit(0); } hipDevice_t 
device; char deviceName[1024]; hipDeviceGet(&device, 0); hipDeviceGetName(deviceName, 1000, device); fprintf(stderr, "Device : %s\n", deviceName); #endif hipSetDeviceFlags(hipDeviceScheduleSpin); Sleef_quadx1 *r, *a0, *a1, *a2; double *d0; int *i0; int64_t *i64; uint64_t *u64; hipMallocManaged(&r , 1*sizeof(Sleef_quadx1)); hipMallocManaged(&a0, 1*sizeof(Sleef_quadx1)); hipMallocManaged(&a1, 1*sizeof(Sleef_quadx1)); hipMallocManaged(&a2, 1*sizeof(Sleef_quadx1)); hipMallocManaged(&d0, 1*sizeof(double)); hipMallocManaged(&i0, 1*sizeof(int)); hipMallocManaged(&i64, 1*sizeof(int64_t)); hipMallocManaged(&u64, 1*sizeof(uint64_t)); // printf("1\n"); fflush(stdout); // { *a0 = Sleef_setq1_cuda(*a0, 0, SLEEF_M_PIq); *a1 = Sleef_setq1_cuda(*a1, 0, Sleef_strtoq("2.718281828459045235360287471352662498", NULL)); hipLaunchKernelGGL(( xmulq_u05), dim3(1), dim3(1), 0, 0, r, a0, a1); hipDeviceSynchronize(); Sleef_quad v0 = Sleef_getq1_cuda(*r, 0); if (Sleef_icmpneq1_purec(v0, sleef_q(+0x1114580b45d47LL, 0x49e6108579a2d0caULL, 3))) { fprintf(stderr, "Testing with Sleef_mulq1_u05cuda failed\n"); exit(-1); } } // char buf[BUFSIZE]; if (fgets(buf, BUFSIZE-1, stdin)) {} int sentinel = 0; while(!feof(stdin) && sentinel < 2) { func_q_q_q("addq_u05", xaddq_u05); func_q_q_q("subq_u05", xsubq_u05); func_q_q_q("mulq_u05", xmulq_u05); func_q_q_q("divq_u05", xdivq_u05); func_q_q("sqrtq_u05", xsqrtq_u05); func_q_q("cbrtq_u10", xcbrtq_u10); func_q_q("sinq_u10", xsinq_u10); func_q_q("cosq_u10", xcosq_u10); func_q_q("tanq_u10", xtanq_u10); func_q_q("asinq_u10", xasinq_u10); func_q_q("acosq_u10", xacosq_u10); func_q_q("atanq_u10", xatanq_u10); func_q_q_q("atan2q_u10", xatan2q_u10); func_q_q("expq_u10", xexpq_u10); func_q_q("exp2q_u10", xexp2q_u10); func_q_q("exp10q_u10", xexp10q_u10); func_q_q("expm1q_u10", xexpm1q_u10); func_q_q("logq_u10", xlogq_u10); func_q_q("log2q_u10", xlog2q_u10); func_q_q("log10q_u10", xlog10q_u10); func_q_q("log1pq_u10", xlog1pq_u10); func_q_q_q("powq_u10", xpowq_u10); func_q_q("sinhq_u10", xsinhq_u10); func_q_q("coshq_u10", xcoshq_u10); func_q_q("tanhq_u10", xtanhq_u10); func_q_q("asinhq_u10", xasinhq_u10); func_q_q("acoshq_u10", xacoshq_u10); func_q_q("atanhq_u10", xatanhq_u10); func_q_q("negq", xnegq); func_q_q("fabsq", xfabsq); func_q_q_q("copysignq", xcopysignq); func_q_q_q("fmaxq", xfmaxq); func_q_q_q("fminq", xfminq); func_q_q_q("fdimq_u05", xfdimq_u05); func_q_q_q("fmodq", xfmodq); func_q_q_q("remainderq", xremainderq); func_q_q_pi("frexpq", xfrexpq); func_q_q_pq("modfq", xmodfq); func_i_q("ilogbq", xilogbq); func_q_q_i("ldexpq", xldexpq); func_q_q_q_q("fmaq_u05", xfmaq_u05); func_q_q_q("hypotq_u05", xhypotq_u05); func_q_q("truncq", xtruncq); func_q_q("floorq", xfloorq); func_q_q("ceilq", xceilq); func_q_q("roundq", xroundq); func_q_q("rintq", xrintq); func_q_d("cast_from_doubleq", xcast_from_doubleq); func_d_q("cast_to_doubleq", xcast_to_doubleq); func_q_i64("cast_from_int64q", xcast_from_int64q); func_i64_q("cast_to_int64q", xcast_to_int64q); func_q_u64("cast_from_uint64q", xcast_from_uint64q); func_u64_q("cast_to_uint64q", xcast_to_uint64q); func_i_q_q("icmpltq", xicmpltq); func_i_q_q("icmpgtq", xicmpgtq); func_i_q_q("icmpleq", xicmpleq); func_i_q_q("icmpgeq", xicmpgeq); func_i_q_q("icmpeqq", xicmpeqq); func_i_q_q("icmpneq", xicmpneq); func_i_q_q("icmpq", xicmpq); func_i_q_q("iunordq", xiunordq); sentinel++; } // return 0; }
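The tester above exchanges quad values as two 64-bit hex halves joined by a colon, via the cnv128 union. The snippet below is a minimal, GPU-free illustration of that convention only; the 16-byte stand-in member and the example bit pattern are assumptions of mine, not values taken from the test suite.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

typedef union {
  unsigned char storage[16];     /* stand-in for Sleef_quad (assumption) */
  struct { uint64_t l, h; };
} cnv128_demo;

int main(void)
{
  cnv128_demo c;
  memset(&c, 0, sizeof c);
  c.h = 0x4000921fb54442d1ULL;   /* arbitrary example bit pattern */
  c.l = 0x8469898cc51701b8ULL;
  printf("%" PRIx64 ":%" PRIx64 "\n", c.h, c.l);  /* same "h:l" form the macros scan and print */
  return 0;
}
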
773d154b2b98f541d93eb1cf57919d4326aef113.cu
// Copyright Naoki Shibata and contributors 2010 - 2021. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <float.h> #include <inttypes.h> #include <stdarg.h> #include <ctype.h> #include <assert.h> #include <cuda.h> #include "sleefquadinline_cuda.h" #include "sleefquadinline_purec_scalar.h" #define STDIN_FILENO 0 // static int startsWith(const char *str, const char *prefix) { while(*prefix != '\0') if (*str++ != *prefix++) return 0; return *prefix == '\0'; } static double u2d(uint64_t u) { union { double f; uint64_t i; } tmp; tmp.i = u; return tmp.f; } static uint64_t d2u(double d) { union { double f; uint64_t i; } tmp; tmp.f = d; return tmp.i; } // __global__ void xaddq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_addq1_u05cuda(*a0, *a1); } __global__ void xsubq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_subq1_u05cuda(*a0, *a1); } __global__ void xmulq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_mulq1_u05cuda(*a0, *a1); } __global__ void xdivq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_divq1_u05cuda(*a0, *a1); } __global__ void xnegq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_negq1_cuda(*a0); } __global__ void xicmpltq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpltq1_cuda(*a0, *a1); } __global__ void xicmpgtq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpgtq1_cuda(*a0, *a1); } __global__ void xicmpleq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpleq1_cuda(*a0, *a1); } __global__ void xicmpgeq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpgeq1_cuda(*a0, *a1); } __global__ void xicmpeqq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpeqq1_cuda(*a0, *a1); } __global__ void xicmpneq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpneq1_cuda(*a0, *a1); } __global__ void xicmpq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpq1_cuda(*a0, *a1); } __global__ void xiunordq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_iunordq1_cuda(*a0, *a1); } __global__ void xcast_from_doubleq(Sleef_quadx1 *r0, double *d0) { *r0 = Sleef_cast_from_doubleq1_cuda(*d0); } __global__ void xcast_to_doubleq(double *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_doubleq1_cuda(*a0); } __global__ void xcast_from_int64q(Sleef_quadx1 *r0, int64_t *i0) { *r0 = Sleef_cast_from_int64q1_cuda(*i0); } __global__ void xcast_to_int64q(int64_t *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_int64q1_cuda(*a0); } __global__ void xcast_from_uint64q(Sleef_quadx1 *r0, uint64_t *u0) { *r0 = Sleef_cast_from_uint64q1_cuda(*u0); } __global__ void xcast_to_uint64q(uint64_t *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_uint64q1_cuda(*a0); } __global__ void xsqrtq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sqrtq1_u05cuda(*a0); } __global__ void xcbrtq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_cbrtq1_u10cuda(*a0); } __global__ void xsinq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sinq1_u10cuda(*a0); } __global__ void xcosq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_cosq1_u10cuda(*a0); } __global__ void xtanq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_tanq1_u10cuda(*a0); } __global__ void xasinq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_asinq1_u10cuda(*a0); } __global__ void 
xacosq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_acosq1_u10cuda(*a0); } __global__ void xatanq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_atanq1_u10cuda(*a0); } __global__ void xatan2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_atan2q1_u10cuda(*a0, *a1); } __global__ void xexpq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_expq1_u10cuda(*a0); } __global__ void xexp2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_exp2q1_u10cuda(*a0); } __global__ void xexp10q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_exp10q1_u10cuda(*a0); } __global__ void xexpm1q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_expm1q1_u10cuda(*a0); } __global__ void xlogq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_logq1_u10cuda(*a0); } __global__ void xlog2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log2q1_u10cuda(*a0); } __global__ void xlog10q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log10q1_u10cuda(*a0); } __global__ void xlog1pq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log1pq1_u10cuda(*a0); } __global__ void xpowq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_powq1_u10cuda(*a0, *a1); } __global__ void xsinhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sinhq1_u10cuda(*a0); } __global__ void xcoshq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_coshq1_u10cuda(*a0); } __global__ void xtanhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_tanhq1_u10cuda(*a0); } __global__ void xasinhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_asinhq1_u10cuda(*a0); } __global__ void xacoshq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_acoshq1_u10cuda(*a0); } __global__ void xatanhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_atanhq1_u10cuda(*a0); } __global__ void xfabsq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_fabsq1_cuda(*a0); } __global__ void xcopysignq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_copysignq1_cuda(*a0, *a1); } __global__ void xfmaxq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fmaxq1_cuda(*a0, *a1); } __global__ void xfminq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fminq1_cuda(*a0, *a1); } __global__ void xfdimq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fdimq1_u05cuda(*a0, *a1); } __global__ void xfmodq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fmodq1_cuda(*a0, *a1); } __global__ void xremainderq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_remainderq1_cuda(*a0, *a1); } __global__ void xfrexpq(Sleef_quadx1 *r, Sleef_quadx1 *a0, int *i0) { *r = Sleef_frexpq1_cuda(*a0, i0); } __global__ void xmodfq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_modfq1_cuda(*a0, a1); } __global__ void xfmaq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1, Sleef_quadx1 *a2) { *r = Sleef_fmaq1_u05cuda(*a0, *a1, *a2); } __global__ void xhypotq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_hypotq1_u05cuda(*a0, *a1); } __global__ void xilogbq(int *r, Sleef_quadx1 *a0) { *r = Sleef_ilogbq1_cuda(*a0); } __global__ void xldexpq(Sleef_quadx1 *r, Sleef_quadx1 *a0, int *i0) { *r = Sleef_ldexpq1_cuda(*a0, *i0); } __global__ void xtruncq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_truncq1_cuda(*a0); } __global__ void xfloorq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_floorq1_cuda(*a0); } __global__ void xceilq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_ceilq1_cuda(*a0); 
} __global__ void xroundq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_roundq1_cuda(*a0); } __global__ void xrintq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_rintq1_cuda(*a0); } // typedef union { Sleef_quad q; struct { uint64_t l, h; }; } cnv128; #define BUFSIZE 1024 #define func_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(r, a0); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l, &c1.h, &c1.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \ funcName<<<1, 1>>>(r, a0, a1); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1, c2; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, \ &c0.h, &c0.l, &c1.h, &c1.l, &c2.h, &c2.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \ *a2 = Sleef_setq1_cuda(*a2, 0, c2.q); \ funcName<<<1, 1>>>(r, a0, a1, a2); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_i_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(i0, a0); \ cudaDeviceSynchronize(); \ printf("%d\n", *i0); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_i_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l, &c1.h, &c1.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \ funcName<<<1, 1>>>(i0, a0, a1); \ cudaDeviceSynchronize(); \ printf("%d\n", *i0); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_i(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ int k; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %d", &c0.h, &c0.l, &k); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *i0 = k; \ funcName<<<1, 1>>>(r, a0, i0); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_d_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(d0, a0); \ cudaDeviceSynchronize(); \ printf("%" PRIx64 "\n", d2u(*d0)); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_d(funcStr, funcName) { \ while 
(startsWith(buf, funcStr " ")) { \ sentinel = 0; \ uint64_t u; \ sscanf(buf, funcStr " %" PRIx64, &u); \ *d0 = u2d(u); \ funcName<<<1, 1>>>(r, d0); \ cudaDeviceSynchronize(); \ cnv128 c0; \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_i64_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(i64, a0); \ cudaDeviceSynchronize(); \ printf("%" PRIx64 "\n", *i64); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_i64(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ sscanf(buf, funcStr " %" PRIx64, i64); \ funcName<<<1, 1>>>(r, i64); \ cudaDeviceSynchronize(); \ cnv128 c0; \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_u64_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(u64, a0); \ cudaDeviceSynchronize(); \ printf("%" PRIx64 "\n", *u64); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_u64(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ sscanf(buf, funcStr " %" PRIx64, u64); \ funcName<<<1, 1>>>(r, u64); \ cudaDeviceSynchronize(); \ cnv128 c0; \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_pi(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(r, a0, i0); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 " %d\n", c0.h, c0.l, *i0); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_pq(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(r, a0, a1); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ c1.q = Sleef_getq1_cuda(*a1, 0); \ printf("%" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l, c1.h, c1.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } int main(int argc, char **argv) { #if 0 cuInit(0); int ndevice; cuDeviceGetCount(&ndevice); if (ndevice == 0) { fprintf(stderr, "No cuda device available\n"); exit(0); } CUdevice device; char deviceName[1024]; cuDeviceGet(&device, 0); cuDeviceGetName(deviceName, 1000, device); fprintf(stderr, "Device : %s\n", deviceName); #endif cudaSetDeviceFlags(cudaDeviceScheduleSpin); Sleef_quadx1 *r, *a0, *a1, *a2; double *d0; int *i0; int64_t *i64; uint64_t *u64; cudaMallocManaged(&r , 1*sizeof(Sleef_quadx1)); cudaMallocManaged(&a0, 1*sizeof(Sleef_quadx1)); cudaMallocManaged(&a1, 1*sizeof(Sleef_quadx1)); cudaMallocManaged(&a2, 1*sizeof(Sleef_quadx1)); cudaMallocManaged(&d0, 1*sizeof(double)); cudaMallocManaged(&i0, 1*sizeof(int)); 
cudaMallocManaged(&i64, 1*sizeof(int64_t)); cudaMallocManaged(&u64, 1*sizeof(uint64_t)); // printf("1\n"); fflush(stdout); // { *a0 = Sleef_setq1_cuda(*a0, 0, SLEEF_M_PIq); *a1 = Sleef_setq1_cuda(*a1, 0, Sleef_strtoq("2.718281828459045235360287471352662498", NULL)); xmulq_u05<<<1, 1>>>(r, a0, a1); cudaDeviceSynchronize(); Sleef_quad v0 = Sleef_getq1_cuda(*r, 0); if (Sleef_icmpneq1_purec(v0, sleef_q(+0x1114580b45d47LL, 0x49e6108579a2d0caULL, 3))) { fprintf(stderr, "Testing with Sleef_mulq1_u05cuda failed\n"); exit(-1); } } // char buf[BUFSIZE]; if (fgets(buf, BUFSIZE-1, stdin)) {} int sentinel = 0; while(!feof(stdin) && sentinel < 2) { func_q_q_q("addq_u05", xaddq_u05); func_q_q_q("subq_u05", xsubq_u05); func_q_q_q("mulq_u05", xmulq_u05); func_q_q_q("divq_u05", xdivq_u05); func_q_q("sqrtq_u05", xsqrtq_u05); func_q_q("cbrtq_u10", xcbrtq_u10); func_q_q("sinq_u10", xsinq_u10); func_q_q("cosq_u10", xcosq_u10); func_q_q("tanq_u10", xtanq_u10); func_q_q("asinq_u10", xasinq_u10); func_q_q("acosq_u10", xacosq_u10); func_q_q("atanq_u10", xatanq_u10); func_q_q_q("atan2q_u10", xatan2q_u10); func_q_q("expq_u10", xexpq_u10); func_q_q("exp2q_u10", xexp2q_u10); func_q_q("exp10q_u10", xexp10q_u10); func_q_q("expm1q_u10", xexpm1q_u10); func_q_q("logq_u10", xlogq_u10); func_q_q("log2q_u10", xlog2q_u10); func_q_q("log10q_u10", xlog10q_u10); func_q_q("log1pq_u10", xlog1pq_u10); func_q_q_q("powq_u10", xpowq_u10); func_q_q("sinhq_u10", xsinhq_u10); func_q_q("coshq_u10", xcoshq_u10); func_q_q("tanhq_u10", xtanhq_u10); func_q_q("asinhq_u10", xasinhq_u10); func_q_q("acoshq_u10", xacoshq_u10); func_q_q("atanhq_u10", xatanhq_u10); func_q_q("negq", xnegq); func_q_q("fabsq", xfabsq); func_q_q_q("copysignq", xcopysignq); func_q_q_q("fmaxq", xfmaxq); func_q_q_q("fminq", xfminq); func_q_q_q("fdimq_u05", xfdimq_u05); func_q_q_q("fmodq", xfmodq); func_q_q_q("remainderq", xremainderq); func_q_q_pi("frexpq", xfrexpq); func_q_q_pq("modfq", xmodfq); func_i_q("ilogbq", xilogbq); func_q_q_i("ldexpq", xldexpq); func_q_q_q_q("fmaq_u05", xfmaq_u05); func_q_q_q("hypotq_u05", xhypotq_u05); func_q_q("truncq", xtruncq); func_q_q("floorq", xfloorq); func_q_q("ceilq", xceilq); func_q_q("roundq", xroundq); func_q_q("rintq", xrintq); func_q_d("cast_from_doubleq", xcast_from_doubleq); func_d_q("cast_to_doubleq", xcast_to_doubleq); func_q_i64("cast_from_int64q", xcast_from_int64q); func_i64_q("cast_to_int64q", xcast_to_int64q); func_q_u64("cast_from_uint64q", xcast_from_uint64q); func_u64_q("cast_to_uint64q", xcast_to_uint64q); func_i_q_q("icmpltq", xicmpltq); func_i_q_q("icmpgtq", xicmpgtq); func_i_q_q("icmpleq", xicmpleq); func_i_q_q("icmpgeq", xicmpgeq); func_i_q_q("icmpeqq", xicmpeqq); func_i_q_q("icmpneq", xicmpneq); func_i_q_q("icmpq", xicmpq); func_i_q_q("iunordq", xiunordq); sentinel++; } // return 0; }
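The tester above exchanges every 128-bit operand and result over stdin/stdout as two 64-bit hex halves in h:l form, split and reassembled through the cnv128 union. Below is a minimal host-only sketch of that round trip; __float128 is used as a 16-byte stand-in for Sleef_quad (an assumption for illustration only; the real harness builds values with Sleef_strtoq / Sleef_setq1_cuda and runs the CUDA kernels above).

// Sketch only: __float128 stands in for the 128-bit Sleef_quad type.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

typedef union {
  __float128 q;                 // GCC/Clang extension type, 16 bytes
  struct { uint64_t l, h; };    // same member layout as the tester's cnv128
} cnv128_demo;

int main() {
  cnv128_demo c;
  c.q = (__float128)3.141592653589793;   // any quad value

  // Encode: this is how the tester prints a result line ("%PRIx64:%PRIx64").
  char line[64];
  snprintf(line, sizeof line, "%" PRIx64 ":%" PRIx64, c.h, c.l);
  printf("encoded: %s\n", line);

  // Decode: the tester's macros spell this with PRIx64; SCNx64 is the
  // scanf-side equivalent of the same conversion.
  cnv128_demo d;
  sscanf(line, "%" SCNx64 ":%" SCNx64, &d.h, &d.l);
  printf("round trip ok: %d\n", (int)(d.h == c.h && d.l == c.l));
  return 0;
}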
721658832fe6eef4ad94bb00e84c70a910678fc6.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // CUDA sample demonstrating a integer GEMM computation using the Warp Matrix // Multiply and Accumulate API. // In this program, the compute_gemm kernel computes the result of a matrix // multiplication and addition: D = alpha * A * B + beta * C. The dimensions of // both C and D matrices are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x // K_GLOBAL (row-major), the B matrix is K_GLOBAL x N_GLOBAL (column-major). In // that kernel, each CTA computes one 128 x 128 tile of the resulting matrix per // iteration. When the tile is computed, the CTA stores it to the global memory // and begins a new iteration, selecting a new 128 x 128 tile to compute. // Each CTA consists of eight warps. For the 128 x 128 tile, each warp computes // eight 16 x 16 subtiles, organized in a 2 x 4 two-dimensional array. Warps // compute the 16 x 16 subtiles using nvcuda::wmma::mma_sync operations by // moving through the K_GLOBAL dimension of the A and B matrices and // accumulating the intermediate result in the local thread state. // There are a number of simple optimizations used in the algorithm: // - The CTA copies the 128 x 128 tile of the C matrix from the global memory to // shared memory. After that is done, each warp loads the C matrix fragments // from shared memory, thus avoiding a random global memory access. // - On each internal iteration, the CTA copies a portion of the A and B // matrices from // global memory to shared memory. After that, all warps in the CTA reuse the // A and B data from shared memory, thus reducing the number of data copies // from global memory. // - The portions of the A and B matrices are stored in shared memory with an // additional // padding (skew) to reduce the number of shared memory access bank conflicts. // (See a detailed explanation near the SKEW_HALF macro definition.) // - When the CTA finishes computing the tiles of the resulting matrix, each // warp stores // its subtiles to shared memory. 
The CTA then copies the shared memory // contents to global memory, again avoiding redundant random global memory // accesses. // - Note that the CTA tile size is chosen to maximize the GPU register // utilization, // but carefully enough to avoid local memory use. #include <assert.h> #include <hip/hip_runtime.h> #include <mma.h> #include <stdio.h> // helper functions and utilities to work with CUDA #include <helper_cuda.h> #include <helper_functions.h> // Externally configurable parameters. #ifndef SHARED_MEMORY_LIMIT_64K // Set this to 0 to use more than 64 Kb of shared memory to cache data, to // improve the performance of the computations on GPU. // Note that you need a GPU that can have more than 64 Kb of shared memory // per multiprocessor. #define SHARED_MEMORY_LIMIT_64K 1 #endif // GPU configuration. #define WARP_SIZE 32 // MMA matrix tile dimensions. #define M 8 #define N 8 #define K 128 // GEMM configuration. #define M_TILES 1024 #define N_TILES 1024 #define K_TILES 512 #define M_GLOBAL (M * M_TILES) #define N_GLOBAL (N * N_TILES) #define K_GLOBAL (K * K_TILES) #define C_LAYOUT wmma::mem_row_major // Implementation constants. #define WARPS_PER_BLOCK 8 #define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK) #if SHARED_MEMORY_LIMIT_64K // With only 64 Kb shared memory available, we can fit two 8-tile chunks of // the A and B matrix data, that are 16 * 16 * 8 * 8 * 2 = 32 Kb each // (i.e. two 8x8 arrays of tiles of 16x16 uint8_t-typed elements per CTA). // But we cannot account the 8 Kb total skew overhead, without which the // performance would be severely impacted. So we choose to reduce the chunk size // in half, i.e. the amount of A and B matrix data we cache in shared memory. // Accordingly, this doubles the number of outer iterations across the global K // dimension, which only slightly impacts the performance. #define CHUNK_K 8 #else #define CHUNK_K 16 #endif #define CHUNK_LINE_BYTES (CHUNK_K * sizeof(int4)) #define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4)) #define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES) #define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP) #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS) #define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS) #define GLOBAL_MEM_STRIDE N_GLOBAL #define SHMEM_STRIDE (N * BLOCK_ROW_TILES) #define SHMEM_OFFSET (N * WARP_ROW_TILES) // The macro below is used to shift rows of the A matrix and columns of the B // matrix in shared memory to minimize possible bank conflicts. Before // performing the nvcuda::wmma::mma_sync operation, the warp must load the // matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the // memory access pattern is not specified for that function, each lane in the // warp can read one or multiple matrix elements from different matrix rows or // columns. For shared memory, such access can result in bank conflicts if // different rows / columns of the matrix map to the same bank. By shifting each // row and column by a few bytes, we make sure that they map to different banks, // thus reducing the number of possible bank conflicts. The number of 32 // one-byte "uint8_t" elements is chosen as the minimum possible shift because // we must keep each row and column 256-bit aligned, as required by // nvcuda::wmma::load_matrix_sync. 
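// Worked example: with the default SHARED_MEMORY_LIMIT_64K configuration above,
// the copy-geometry macros resolve to the concrete values below. The
// static_asserts only document this arithmetic; they do not alter the kernel.
//   CHUNK_LINE_BYTES          = CHUNK_K * sizeof(int4)   = 8 * 16  = 128 bytes
//   WARP_COPY_BYTES           = WARP_SIZE * sizeof(int4) = 32 * 16 = 512 bytes
//   CHUNK_COPY_LINES_PER_WARP = 512 / 128 = 4 cached lines per warp per step
//   CHUNK_COPY_LINE_LANES     = 32 / 4    = 8 lanes cooperating on one line
//   BLOCK_ROW_TILES = 4 * 2 = 8 and BLOCK_COL_TILES = 2 * 4 = 8 tiles per CTA side
//   SHMEM_STRIDE = 8 * 8 = 64 and SHMEM_OFFSET = 8 * 4 = 32 (in elements)
#if SHARED_MEMORY_LIMIT_64K
static_assert(CHUNK_LINE_BYTES == 128, "eight int4 words per cached line");
static_assert(CHUNK_COPY_LINES_PER_WARP == 4, "each warp copies four lines per step");
static_assert(CHUNK_COPY_LINE_LANES == 8, "eight lanes cooperate on one line");
#endif
static_assert(WARP_COPY_BYTES == 512, "one 16-byte int4 per lane per copy");
static_assert(BLOCK_ROW_TILES == 8 && BLOCK_COL_TILES == 8, "an 8 x 8 grid of WMMA tiles per CTA");
static_assert(THREADS_PER_BLOCK == 256, "eight warps of 32 threads per CTA");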
#define SKEW 2 // Updated for int4 #define checkKernelErrors(expr) \ do { \ expr; \ \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ hipGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; __global__ void compute_gemm_imma(const int4 *A, const int4 *B, int *D) { extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // Each CTA slides along the 128 x 128 tiles from the top left corner of the // matrix to the right and down, and selects the next tile to compute. Once // there's no such tile, all warps in this CTA exit. for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = (block_pos * BLOCK_ROW_TILES * BLOCK_COL_TILES) / N_TILES; const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_TILES) { break; } // These fragments will accumulate the result of A and B matrix fragment // multiplications along the K_GLOBAL dimension. wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j = 0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); // warpId = 4; // printf("warpId == 4: %d", (warpId == 4)); // if ((warpId == (unsigned int)4) && (laneId > 0)) { // printf("warpId: %d, laneId: %d. ckpt1\n", warpId, laneId); // } // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const int4 *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * (K_GLOBAL/128)] + M * (K_GLOBAL/128) * (warpId % 4) * 2) : (&B[block_tile_j * N * (K_GLOBAL/128)] + N * (K_GLOBAL/128) * (warpId % 4) * 2); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop. // printf("shmem_idx_b_off: %d\n", shmem_idx_b_off); // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy // the B matrix. size_t shmem_idx = warpId < (WARPS_PER_BLOCK / 2) ? (M * (warpId % (WARPS_PER_BLOCK / 2)) * 2) : (N * (warpId % (WARPS_PER_BLOCK / 2)) * 2 + shmem_idx_b_off); // if(shmem_idx > 100) { // printf("Line 237: shmem_idx: %d\n", shmem_idx); // } // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) + (laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) + (laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit. // Shift the second half of the warp to the next row / column in the // shared memory. 
      shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
      // if(shmem_idx > 100) {
      //   printf("Line 251: shmem_idx: %d\n", shmem_idx);
      // }

      #pragma unroll
      for (int i = 0; i < ((WARP_SIZE / 2) / CHUNK_COPY_LINES_PER_WARP); i++) {
        unsigned int tmp = CHUNK_COPY_LINE_LANES;
        // unsigned int tmp1 = laneId % tmp;
        // printf("laneId: %u, Zero: %d\n", laneId, 5+6);
        // if(shmem_idx > 100 and laneId == 0) {
        //   printf("laneId: %u, (laneId mod CHUNK_COPY_LINE_LANES): %d, shmem_idx: %d\n", laneId, laneId % tmp, shmem_idx);
        // }

        // Copy 16 bytes at once in each lane.
        *((int4 *)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) = *lane_ptr;

        // Advance the global memory pointer and the shared memory index.
        lane_ptr = (int4 *)(lane_ptr + (K_GLOBAL/128) * CHUNK_COPY_LINES_PER_WARP);
        shmem_idx += CHUNK_COPY_LINES_PER_WARP;
      }

      __syncthreads();

      // if (warpId == 0 && laneId == 0) {
      //   printf("ckpt2\n");
      // }

      // Compute a grid of C matrix tiles in each warp.
      #pragma unroll
      for (int k_step = 0; k_step < CHUNK_K; k_step++) {
        wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
        wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];

        #pragma unroll
        for (int i = 0; i < WARP_COL_TILES; i++) {
          size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
          const int4 *tile_ptr = &shmem[shmem_idx_a][k_step * (K/128)];

          wmma::load_matrix_sync(a[i], tile_ptr, CHUNK_K + SKEW);
          // printf("ckpt3\n");

          #pragma unroll
          for (int j = 0; j < WARP_ROW_TILES; j++) {
            if (i == 0) {
              // Load the B matrix fragment once, because it is going to be
              // reused against the other A matrix fragments.
              size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N);
              const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];

              wmma::load_matrix_sync(b[j], tile_ptr, CHUNK_K + SKEW);
            }
            // printf("ckpt4\n");
            wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j]);
          }
        }
      }

      __syncthreads();
    }

    // TODO: the write-back below still needs a careful rework; as written, the
    // write to global memory spends twice as many memory accesses as necessary.
    /*
    // This pointer is used to access the C and D matrix tiles this warp computes.
    int4 *shmem_warp_tile_ptr = &shmem[0][0] +
                                (warpId / 2) * SHMEM_STRIDE * (K/128) * 2 +
                                (warpId % 2) * SHMEM_OFFSET; // Will be used only when writing back D. May be moved outside the for loop. TODO.

    // Store the D fragments to shared memory.
    #pragma unroll
    for (int i = 0; i < WARP_COL_TILES; i++) {
      #pragma unroll
      for (int j = 0; j < WARP_ROW_TILES; j++) {
        int *tile_ptr = (int*)shmem_warp_tile_ptr + i * SHMEM_STRIDE * (K/64) + j * N; // Problem here.
        // int4 *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * (K/128) + j * N;
        wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
      }
    }

    __syncthreads();

    // This pointer is used to stream the C and D matrices block-wide tile to and
    // from shared memory.
    int4 *shmem_warp_stream_ptr = &shmem[0][0] + warpId * SHMEM_STRIDE * (K/128); // Will be used only when writing back D. Maybe moved outside the for loop. TODO.

    // This warp's pointer to the C matrix data to copy memory from to shared memory.
    // TODO: May be moved outside the for loop.
    const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;

    // Now that shared memory contains all the D tiles, stream them to global
    // memory.
int *dst_gmem_warp_stream_ptr = (int *)&D[gmem_idx]; #pragma unroll for (int i = 0; i < K; i++) { *((int4 *)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) = *((int4 *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId); } __syncthreads(); */ } } int main(int argc, char **argv) { printf("Initializing...\n"); int dev = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES); printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES); printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES); int4 *A_h = NULL; int4 *B_h = NULL; int *C_h = NULL; A_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)); B_h = (int4 *)malloc(sizeof(int4) * (K_GLOBAL/128) * N_GLOBAL); C_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); int4 *A = NULL; int4 *B = NULL; int *C = NULL; checkCudaErrors( hipMalloc(reinterpret_cast<void **>(&A), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128))); checkCudaErrors( hipMalloc(reinterpret_cast<void **>(&B), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128))); checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&C), sizeof(int) * M_GLOBAL * N_GLOBAL)); assert(((unsigned long long)A) % 128 == 0); assert(((unsigned long long)B) % 128 == 0); assert(((unsigned long long)C) % 128 == 0); enum { // Compute the right amount of shared memory to request. // We need shared memory to hold per-CTA C and D matrix tiles, and to cache // per-CTA chunks // of the A and B matrices. Therefore, the right amount to request is the // maximum of those // two numbers. SHMEM_SZ = MAX(sizeof(int4) * (BLOCK_COL_TILES * M) * (CHUNK_K * (K/128) + SKEW) * 2, M * (BLOCK_ROW_WARPS * WARP_ROW_TILES) * N * (BLOCK_COL_WARPS * WARP_COL_TILES) * sizeof(int)) }; printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL); checkCudaErrors(hipMemcpy(A, A_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(B, B_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(C, C_h, sizeof(int) * M_GLOBAL * N_GLOBAL, hipMemcpyHostToDevice)); printf("Preparing data for GPU...\n"); assert(((unsigned long long)A) % 128 == 0); assert(((unsigned long long)B) % 128 == 0); assert(((unsigned long long)C) % 128 == 0); hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); checkCudaErrors(hipEventRecord(start)); // If enough shared memory available on the GPU use high performant kernel printf("Computing... using high performance kernel compute_gemm_imma \n"); checkCudaErrors(hipFuncSetAttribute( compute_gemm_imma, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); checkKernelErrors( hipLaunchKernelGGL(( (compute_gemm_imma), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, A, B, C))); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); float milliseconds = 0; checkCudaErrors(hipEventElapsedTime(&milliseconds, start, stop)); printf("Time: %f ms\n", milliseconds); printf("TOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2)/(milliseconds/1000.)) / 1e12); free(A_h); free(B_h); free(C_h); checkCudaErrors(hipFree(reinterpret_cast<void *>(A))); checkCudaErrors(hipFree(reinterpret_cast<void *>(B))); checkCudaErrors(hipFree(reinterpret_cast<void *>(C))); return EXIT_SUCCESS; }
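compute_gemm_imma above accumulates its output with wmma::bmma_sync on precision::b1 fragments, and the write-back of D is left commented out, so the file never spells out what a single output element is. The sketch below is a host-side model of one element under the assumption that the accumulation is XOR followed by population count (the usual default for the experimental 1-bit bmma path); if an AND-based bit operation were selected instead, the xor below would become an and.

// Host-side model of one 1-bit GEMM output element, assuming XOR + POPC.
// k_bits packed bits per row/column are stored in k_bits/64 uint64_t words.
#include <cstdint>
#include <cstdio>
#include <vector>

static int popcount64(uint64_t x) {
  int n = 0;
  while (x) { x &= x - 1; ++n; }   // clear the lowest set bit each step
  return n;
}

static int bmma_reference(const uint64_t *a_row, const uint64_t *b_col, int k_bits) {
  int acc = 0;
  for (int w = 0; w < k_bits / 64; ++w)
    acc += popcount64(a_row[w] ^ b_col[w]);   // assumed XOR + POPC semantics
  return acc;
}

int main() {
  const int k_bits = 256;   // small stand-in for K_GLOBAL = 128 * 512 bits
  std::vector<uint64_t> a(k_bits / 64, 0xF0F0F0F0F0F0F0F0ull);
  std::vector<uint64_t> b(k_bits / 64, 0x0F0F0F0F0F0F0F0Full);
  // Every bit differs, so the XOR+POPC result equals k_bits.
  printf("D[0][0] = %d (expected %d)\n", bmma_reference(a.data(), b.data(), k_bits), k_bits);
  return 0;
}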
721658832fe6eef4ad94bb00e84c70a910678fc6.cu
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // CUDA sample demonstrating a integer GEMM computation using the Warp Matrix // Multiply and Accumulate API. // In this program, the compute_gemm kernel computes the result of a matrix // multiplication and addition: D = alpha * A * B + beta * C. The dimensions of // both C and D matrices are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x // K_GLOBAL (row-major), the B matrix is K_GLOBAL x N_GLOBAL (column-major). In // that kernel, each CTA computes one 128 x 128 tile of the resulting matrix per // iteration. When the tile is computed, the CTA stores it to the global memory // and begins a new iteration, selecting a new 128 x 128 tile to compute. // Each CTA consists of eight warps. For the 128 x 128 tile, each warp computes // eight 16 x 16 subtiles, organized in a 2 x 4 two-dimensional array. Warps // compute the 16 x 16 subtiles using nvcuda::wmma::mma_sync operations by // moving through the K_GLOBAL dimension of the A and B matrices and // accumulating the intermediate result in the local thread state. // There are a number of simple optimizations used in the algorithm: // - The CTA copies the 128 x 128 tile of the C matrix from the global memory to // shared memory. After that is done, each warp loads the C matrix fragments // from shared memory, thus avoiding a random global memory access. // - On each internal iteration, the CTA copies a portion of the A and B // matrices from // global memory to shared memory. After that, all warps in the CTA reuse the // A and B data from shared memory, thus reducing the number of data copies // from global memory. // - The portions of the A and B matrices are stored in shared memory with an // additional // padding (skew) to reduce the number of shared memory access bank conflicts. // (See a detailed explanation near the SKEW_HALF macro definition.) // - When the CTA finishes computing the tiles of the resulting matrix, each // warp stores // its subtiles to shared memory. 
The CTA then copies the shared memory // contents to global memory, again avoiding redundant random global memory // accesses. // - Note that the CTA tile size is chosen to maximize the GPU register // utilization, // but carefully enough to avoid local memory use. #include <assert.h> #include <cuda.h> #include <mma.h> #include <stdio.h> // helper functions and utilities to work with CUDA #include <helper_cuda.h> #include <helper_functions.h> // Externally configurable parameters. #ifndef SHARED_MEMORY_LIMIT_64K // Set this to 0 to use more than 64 Kb of shared memory to cache data, to // improve the performance of the computations on GPU. // Note that you need a GPU that can have more than 64 Kb of shared memory // per multiprocessor. #define SHARED_MEMORY_LIMIT_64K 1 #endif // GPU configuration. #define WARP_SIZE 32 // MMA matrix tile dimensions. #define M 8 #define N 8 #define K 128 // GEMM configuration. #define M_TILES 1024 #define N_TILES 1024 #define K_TILES 512 #define M_GLOBAL (M * M_TILES) #define N_GLOBAL (N * N_TILES) #define K_GLOBAL (K * K_TILES) #define C_LAYOUT wmma::mem_row_major // Implementation constants. #define WARPS_PER_BLOCK 8 #define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK) #if SHARED_MEMORY_LIMIT_64K // With only 64 Kb shared memory available, we can fit two 8-tile chunks of // the A and B matrix data, that are 16 * 16 * 8 * 8 * 2 = 32 Kb each // (i.e. two 8x8 arrays of tiles of 16x16 uint8_t-typed elements per CTA). // But we cannot account the 8 Kb total skew overhead, without which the // performance would be severely impacted. So we choose to reduce the chunk size // in half, i.e. the amount of A and B matrix data we cache in shared memory. // Accordingly, this doubles the number of outer iterations across the global K // dimension, which only slightly impacts the performance. #define CHUNK_K 8 #else #define CHUNK_K 16 #endif #define CHUNK_LINE_BYTES (CHUNK_K * sizeof(int4)) #define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4)) #define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES) #define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP) #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS) #define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS) #define GLOBAL_MEM_STRIDE N_GLOBAL #define SHMEM_STRIDE (N * BLOCK_ROW_TILES) #define SHMEM_OFFSET (N * WARP_ROW_TILES) // The macro below is used to shift rows of the A matrix and columns of the B // matrix in shared memory to minimize possible bank conflicts. Before // performing the nvcuda::wmma::mma_sync operation, the warp must load the // matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the // memory access pattern is not specified for that function, each lane in the // warp can read one or multiple matrix elements from different matrix rows or // columns. For shared memory, such access can result in bank conflicts if // different rows / columns of the matrix map to the same bank. By shifting each // row and column by a few bytes, we make sure that they map to different banks, // thus reducing the number of possible bank conflicts. The number of 32 // one-byte "uint8_t" elements is chosen as the minimum possible shift because // we must keep each row and column 256-bit aligned, as required by // nvcuda::wmma::load_matrix_sync. 
#define SKEW 2 // Updated for int4 #define checkKernelErrors(expr) \ do { \ expr; \ \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ cudaGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; __global__ void compute_gemm_imma(const int4 *A, const int4 *B, int *D) { extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // Each CTA slides along the 128 x 128 tiles from the top left corner of the // matrix to the right and down, and selects the next tile to compute. Once // there's no such tile, all warps in this CTA exit. for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = (block_pos * BLOCK_ROW_TILES * BLOCK_COL_TILES) / N_TILES; const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_TILES) { break; } // These fragments will accumulate the result of A and B matrix fragment // multiplications along the K_GLOBAL dimension. wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j = 0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); // warpId = 4; // printf("warpId == 4: %d", (warpId == 4)); // if ((warpId == (unsigned int)4) && (laneId > 0)) { // printf("warpId: %d, laneId: %d. ckpt1\n", warpId, laneId); // } // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const int4 *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * (K_GLOBAL/128)] + M * (K_GLOBAL/128) * (warpId % 4) * 2) : (&B[block_tile_j * N * (K_GLOBAL/128)] + N * (K_GLOBAL/128) * (warpId % 4) * 2); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop. // printf("shmem_idx_b_off: %d\n", shmem_idx_b_off); // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy // the B matrix. size_t shmem_idx = warpId < (WARPS_PER_BLOCK / 2) ? (M * (warpId % (WARPS_PER_BLOCK / 2)) * 2) : (N * (warpId % (WARPS_PER_BLOCK / 2)) * 2 + shmem_idx_b_off); // if(shmem_idx > 100) { // printf("Line 237: shmem_idx: %d\n", shmem_idx); // } // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) + (laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) + (laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit. // Shift the second half of the warp to the next row / column in the // shared memory. 
      shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
      // if(shmem_idx > 100) {
      //   printf("Line 251: shmem_idx: %d\n", shmem_idx);
      // }

      #pragma unroll
      for (int i = 0; i < ((WARP_SIZE / 2) / CHUNK_COPY_LINES_PER_WARP); i++) {
        unsigned int tmp = CHUNK_COPY_LINE_LANES;
        // unsigned int tmp1 = laneId % tmp;
        // printf("laneId: %u, Zero: %d\n", laneId, 5+6);
        // if(shmem_idx > 100 and laneId == 0) {
        //   printf("laneId: %u, (laneId mod CHUNK_COPY_LINE_LANES): %d, shmem_idx: %d\n", laneId, laneId % tmp, shmem_idx);
        // }

        // Copy 16 bytes at once in each lane.
        *((int4 *)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) = *lane_ptr;

        // Advance the global memory pointer and the shared memory index.
        lane_ptr = (int4 *)(lane_ptr + (K_GLOBAL/128) * CHUNK_COPY_LINES_PER_WARP);
        shmem_idx += CHUNK_COPY_LINES_PER_WARP;
      }

      __syncthreads();

      // if (warpId == 0 && laneId == 0) {
      //   printf("ckpt2\n");
      // }

      // Compute a grid of C matrix tiles in each warp.
      #pragma unroll
      for (int k_step = 0; k_step < CHUNK_K; k_step++) {
        wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
        wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];

        #pragma unroll
        for (int i = 0; i < WARP_COL_TILES; i++) {
          size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
          const int4 *tile_ptr = &shmem[shmem_idx_a][k_step * (K/128)];

          wmma::load_matrix_sync(a[i], tile_ptr, CHUNK_K + SKEW);
          // printf("ckpt3\n");

          #pragma unroll
          for (int j = 0; j < WARP_ROW_TILES; j++) {
            if (i == 0) {
              // Load the B matrix fragment once, because it is going to be
              // reused against the other A matrix fragments.
              size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N);
              const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];

              wmma::load_matrix_sync(b[j], tile_ptr, CHUNK_K + SKEW);
            }
            // printf("ckpt4\n");
            wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j]);
          }
        }
      }

      __syncthreads();
    }

    // TODO: the write-back below still needs a careful rework; as written, the
    // write to global memory spends twice as many memory accesses as necessary.
    /*
    // This pointer is used to access the C and D matrix tiles this warp computes.
    int4 *shmem_warp_tile_ptr = &shmem[0][0] +
                                (warpId / 2) * SHMEM_STRIDE * (K/128) * 2 +
                                (warpId % 2) * SHMEM_OFFSET; // Will be used only when writing back D. May be moved outside the for loop. TODO.

    // Store the D fragments to shared memory.
    #pragma unroll
    for (int i = 0; i < WARP_COL_TILES; i++) {
      #pragma unroll
      for (int j = 0; j < WARP_ROW_TILES; j++) {
        int *tile_ptr = (int*)shmem_warp_tile_ptr + i * SHMEM_STRIDE * (K/64) + j * N; // Problem here.
        // int4 *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * (K/128) + j * N;
        wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
      }
    }

    __syncthreads();

    // This pointer is used to stream the C and D matrices block-wide tile to and
    // from shared memory.
    int4 *shmem_warp_stream_ptr = &shmem[0][0] + warpId * SHMEM_STRIDE * (K/128); // Will be used only when writing back D. Maybe moved outside the for loop. TODO.

    // This warp's pointer to the C matrix data to copy memory from to shared memory.
    // TODO: May be moved outside the for loop.
    const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;

    // Now that shared memory contains all the D tiles, stream them to global
    // memory.
int *dst_gmem_warp_stream_ptr = (int *)&D[gmem_idx]; #pragma unroll for (int i = 0; i < K; i++) { *((int4 *)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) = *((int4 *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId); } __syncthreads(); */ } } int main(int argc, char **argv) { printf("Initializing...\n"); int dev = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES); printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES); printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES); int4 *A_h = NULL; int4 *B_h = NULL; int *C_h = NULL; A_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)); B_h = (int4 *)malloc(sizeof(int4) * (K_GLOBAL/128) * N_GLOBAL); C_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); int4 *A = NULL; int4 *B = NULL; int *C = NULL; checkCudaErrors( cudaMalloc(reinterpret_cast<void **>(&A), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128))); checkCudaErrors( cudaMalloc(reinterpret_cast<void **>(&B), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128))); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&C), sizeof(int) * M_GLOBAL * N_GLOBAL)); assert(((unsigned long long)A) % 128 == 0); assert(((unsigned long long)B) % 128 == 0); assert(((unsigned long long)C) % 128 == 0); enum { // Compute the right amount of shared memory to request. // We need shared memory to hold per-CTA C and D matrix tiles, and to cache // per-CTA chunks // of the A and B matrices. Therefore, the right amount to request is the // maximum of those // two numbers. SHMEM_SZ = MAX(sizeof(int4) * (BLOCK_COL_TILES * M) * (CHUNK_K * (K/128) + SKEW) * 2, M * (BLOCK_ROW_WARPS * WARP_ROW_TILES) * N * (BLOCK_COL_WARPS * WARP_COL_TILES) * sizeof(int)) }; printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL); checkCudaErrors(cudaMemcpy(A, A_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(B, B_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(C, C_h, sizeof(int) * M_GLOBAL * N_GLOBAL, cudaMemcpyHostToDevice)); printf("Preparing data for GPU...\n"); assert(((unsigned long long)A) % 128 == 0); assert(((unsigned long long)B) % 128 == 0); assert(((unsigned long long)C) % 128 == 0); cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaEventRecord(start)); // If enough shared memory available on the GPU use high performant kernel printf("Computing... using high performance kernel compute_gemm_imma \n"); checkCudaErrors(cudaFuncSetAttribute( compute_gemm_imma, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); checkKernelErrors( (compute_gemm_imma<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK, SHMEM_SZ>>>(A, B, C))); checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); float milliseconds = 0; checkCudaErrors(cudaEventElapsedTime(&milliseconds, start, stop)); printf("Time: %f ms\n", milliseconds); printf("TOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2)/(milliseconds/1000.)) / 1e12); free(A_h); free(B_h); free(C_h); checkCudaErrors(cudaFree(reinterpret_cast<void *>(A))); checkCudaErrors(cudaFree(reinterpret_cast<void *>(B))); checkCudaErrors(cudaFree(reinterpret_cast<void *>(C))); return EXIT_SUCCESS; }
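Both copies of 721658832fe6eef4ad94bb00e84c70a910678fc6 report throughput at the end of main() as (2 * M_GLOBAL * N_GLOBAL * K_GLOBAL / seconds) / 1e12. The snippet below simply reruns that arithmetic with the compile-time dimensions (M_GLOBAL = N_GLOBAL = 8 * 1024 = 8192, K_GLOBAL = 128 * 512 = 65536) and a hypothetical 1 ms kernel time, chosen only to show the scale of the printed TOPS figure.

// Restates the TOPS arithmetic from main(); the 1 ms timing is hypothetical.
#include <cstdio>

int main() {
  const double M = 8.0 * 1024;       // M_GLOBAL
  const double N = 8.0 * 1024;       // N_GLOBAL
  const double K = 128.0 * 512;      // K_GLOBAL
  const double ops = 2.0 * M * N * K;             // 2^43, about 8.80e12 bit mul-adds
  const double milliseconds = 1.0;                // hypothetical kernel time
  const double tops = (ops / (milliseconds / 1000.0)) / 1e12;
  printf("ops per GEMM = %.3e, TOPS at %.1f ms = %.2f\n", ops, milliseconds, tops);
  return 0;
}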
14a47ea3a8e72bbf487ef1af1bb3ba6d6c14a6e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holdvr nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief CUTLASS Attention Example. This workload computes a fused multi head attention. Because it keeps the attention matrix in shared memory, it's both faster and uses less global memory. This is based on `"Self-Attention Does Not Need O(n^2) Memory" <http://arxiv.org/abs/2112.05682>`_, and very similar to `"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness" <https://arxiv.org/abs/2205.14135>`_. Algorithm: In short, we can compute the output incrementally in blocks of size B, we just need to divide the final result by the sum of all coefficients in the softmax (which we compute incrementally) with the following pseudo-code: ``` s_prime = torch.zeros([num_queries, B]) O = torch.zeros([num_queries, head_size_v]) for i in range(0, K.shape[0], B): si = exp((Q . K[i * B:(i+1) * B].t) * scale) sum_coefs += attn_unscaled.sum(-1) O += si . V[i * B:(i+1) * B] O = O / s_prime ``` In practice, and for numerical stability reasons, we also substract the maximum so far (`mi`) before doing the exponential. When we encounter new keys, the maximum used to compute O so far (`m_prime`) can differ from the current maximum, so we update O before accumulating with ``` O = O * exp(m_prime - mi) m_prime = mi ``` Implementation details: - `si` is stored in shared memory between the 2 back to back gemms - we keep and accumulate the output directly in registers if we can (`head_size_v <= 128`). 
Otherwise, we store it & accumulate in global memory (slower) - blocks are parallelized across the batch dimension, the number of heads, and the query sequence size Examples: # Run an attention example with default setup $ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen # Run an attention example with custom setup $ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen --head_number=2 --batch_size=3 --head_size=32 --head_size_v=64 --seq_length=512 --seq_length_kv=1024 --causal=true Acknowledgement: Fixed-sequence-length FMHA code was upstreamed by Meta xFormers (https://github.com/facebookresearch/xformers). */ ///////////////////////////////////////////////////////////////////////////////////////////////// #include <vector> #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/gemm_grouped.h" #include "cutlass/gemm/kernel/default_gemm_grouped.h" #include "cutlass/gemm/device/gemm_grouped.h" #include "cutlass/gemm/device/gemm_universal.h" #include "cutlass/util/command_line.h" #include "cutlass/util/distribution.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm_complex.h" #include "cutlass/util/reference/device/gemm_complex.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/kernel/gemm_grouped.h" #include "cutlass/gemm/kernel/gemm_transpose_operands.h" #include "cutlass/gemm/kernel/default_gemm.h" #include "cutlass/gemm/kernel/default_gemm_complex.h" #include "cutlass/gemm/device/default_gemm_configuration.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/threadblock/epilogue_with_visitor.h" #include "cutlass/fast_math.h" #include "kernel_forward.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double gflops; cutlass::Status status; hipError_t error; bool passed; // // Methods // Result( double runtime_ms = 0, double gflops = 0, cutlass::Status status = cutlass::Status::kSuccess, hipError_t error = hipSuccess ): runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; bool error; bool reference_check; bool use_mask; bool causal; std::vector<cutlass::gemm::GemmCoord> problem_sizes0; std::vector<cutlass::gemm::GemmCoord> problem_sizes1; std::vector<cutlass::gemm::GemmCoord> problem_sizes0_real; std::vector<cutlass::gemm::GemmCoord> problem_sizes1_real; int alignment; int head_number; int batch_size; int head_size; int head_size_v; int seq_length; int seq_length_kv; int iterations; // alpha0, alpha1 and beta are fixed // in this multi-head attention example float alpha0; float alpha1; float beta; // // Methods // Options(): help(false), error(false), alignment(1), reference_check(true), head_number(12), batch_size(16), head_size(64), head_size_v(64), seq_length(1024), seq_length_kv(1024), use_mask(false), iterations(20), causal(false) { } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if 
(cmd.check_cmd_line_flag("help")) { help = true; return; } cmd.get_cmd_line_argument("alignment", alignment, 1); cmd.get_cmd_line_argument("head_number", head_number, 12); cmd.get_cmd_line_argument("batch_size", batch_size, 16); cmd.get_cmd_line_argument("head_size", head_size, 64); cmd.get_cmd_line_argument("head_size_v", head_size_v, head_size); cmd.get_cmd_line_argument("seq_length", seq_length, 1024); cmd.get_cmd_line_argument("seq_length_kv", seq_length_kv, seq_length); cmd.get_cmd_line_argument("use_mask", use_mask, false); cmd.get_cmd_line_argument("iterations", iterations, 20); cmd.get_cmd_line_argument("reference-check", reference_check, true); cmd.get_cmd_line_argument("causal", causal, true); randomize_problems(); } void randomize_problems() { int problem_count = head_number * batch_size; problem_sizes0.reserve(problem_count); problem_sizes1.reserve(problem_count); // When using mask, the original inputs are not padded // and we need to save these info. if (use_mask) { problem_sizes0_real.reserve(problem_count); problem_sizes1_real.reserve(problem_count); } for (int i = 0; i < batch_size; ++i) { // problems belonging to the same batch share the same seq len int m_real = seq_length; int mkv_real = seq_length_kv; int m = (m_real + alignment - 1) / alignment * alignment; int mkv = (mkv_real + alignment - 1) / alignment * alignment; int k0 = head_size; int k1 = head_size_v; for (int j = 0; j < head_number; ++j) { cutlass::gemm::GemmCoord problem0(m, mkv, k0); cutlass::gemm::GemmCoord problem1(m, k1, mkv); problem_sizes0.push_back(problem0); problem_sizes1.push_back(problem1); if (use_mask) { cutlass::gemm::GemmCoord problem0_real(m_real, mkv_real, k0); cutlass::gemm::GemmCoord problem1_real(m_real, k1, mkv_real); problem_sizes0_real.push_back(problem0_real); problem_sizes1_real.push_back(problem1_real); } } } } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "41_fused_multi_head_attention_fixed_seqlen\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --head_number=<int> Head number in multi-head attention (default: --head_number=12)\n" << " --batch_size=<int> Batch size in multi-head attention (default: --batch_size=16)\n" << " --head_size=<int> Head size in multi-head attention (default: --head_size=64)\n" << " --head_size_v=<int> Head size in multi-head attention for V (default: --head_size_v=head_size)\n" << " --seq_length=<int> Sequence length in multi-head attention for Q (default: --seq_length=1024)\n" << " --seq_length_kv=<int> Sequence length in multi-head attention for K/V (default: --seq_length_kv=seq_length)\n" << " --use_mask=<bool> If true, performs padding-like masking in softmax.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --reference-check=<bool> If true, performs reference check.\n" << " --causal=<bool> If true, uses causal masking.\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fops = int64_t(); for (int i = 0; i < problem_sizes0.size(); ++i) { auto const& problem0 = problem_sizes0[i]; auto const& problem1 = problem_sizes1[i]; for (int row = 0; row < problem0.m(); ++row) { int num_cols0 = problem0.n(); if (causal) { num_cols0 = ::min(row + 1, num_cols0); } // P <- Q . K_t fops += 2 * num_cols0 * problem0.k(); // P <- exp(P - max(P)) fops += 2 * num_cols0; // S <- sum(P) fops += num_cols0 - 1; // O <- P . 
V fops += 2 * num_cols0 * problem1.n(); // O <- O / S fops += num_cols0 * problem1.n(); } } return double(fops) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Attention> class TestbedAttention { public: // // Type definitions // using ElementQ = typename Attention::scalar_t; using ElementK = typename Attention::scalar_t; using ElementP = typename Attention::accum_t; using ElementAccumulator = typename Attention::accum_t; using ElementV = typename Attention::scalar_t; using ElementO = typename Attention::output_t; using ElementCompute = typename Attention::accum_t; using ElementNorm = typename Attention::accum_t; using ElementSum = typename Attention::accum_t; using ElementSoftmaxCompute = typename Attention::accum_t; using LayoutQ = cutlass::layout::RowMajor; using LayoutK = cutlass::layout::ColumnMajor; using LayoutP = cutlass::layout::RowMajor; using LayoutV = cutlass::layout::RowMajor; using LayoutO = cutlass::layout::RowMajor; using MatrixCoord = typename LayoutP::TensorCoord; private: // // Data members // Options & options; /// Initialization cutlass::Distribution::Kind init_Q; cutlass::Distribution::Kind init_K; cutlass::Distribution::Kind init_P; cutlass::Distribution::Kind init_V; cutlass::Distribution::Kind init_O; uint32_t seed; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device1; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0_real; std::vector<int64_t> offset_Q; std::vector<int64_t> offset_K; std::vector<int64_t> offset_P; std::vector<int64_t> offset_V; std::vector<int64_t> offset_O; std::vector<int64_t> ldq_host; std::vector<int64_t> ldk_host; std::vector<int64_t> ldp_host; std::vector<int64_t> ldv_host; std::vector<int64_t> ldo_host; std::vector<int64_t> seqlen_host; cutlass::DeviceAllocation<int64_t> ldq; cutlass::DeviceAllocation<int64_t> ldk; cutlass::DeviceAllocation<int64_t> ldp; cutlass::DeviceAllocation<int64_t> ldv; cutlass::DeviceAllocation<int64_t> ldo; cutlass::DeviceAllocation<int64_t> seqlen; cutlass::DeviceAllocation<ElementQ> block_Q; cutlass::DeviceAllocation<ElementK> block_K; cutlass::DeviceAllocation<ElementP> block_P; cutlass::DeviceAllocation<ElementV> block_V; cutlass::DeviceAllocation<ElementO> block_O; cutlass::DeviceAllocation<ElementNorm> block_Norm; cutlass::DeviceAllocation<ElementSum> block_Sum; cutlass::DeviceAllocation<int64_t> offset_P_Device; cutlass::DeviceAllocation<ElementQ *> ptr_Q; cutlass::DeviceAllocation<ElementK *> ptr_K; cutlass::DeviceAllocation<ElementP *> ptr_P; cutlass::DeviceAllocation<ElementV *> ptr_V; cutlass::DeviceAllocation<ElementO *> ptr_O; public: // // Methods // TestbedAttention( Options &options_, cutlass::Distribution::Kind init_Q_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_K_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_P_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_V_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_O_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ): options(options_), init_Q(init_Q_), init_K(init_K_), init_P(init_P_), init_V(init_V_), init_O(init_O_), seed(seed_) { } int problem_count() const { return (options.head_number * options.batch_size); } private: /// Helper to initialize a tensor view template <typename Element> void initialize_tensor_( Element *ptr, size_t capacity, 
cutlass::Distribution::Kind dist_kind, uint32_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { Element scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<ElementP>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 8; scope_min = -8; } else { scope_max = 8; scope_min = -8; } cutlass::reference::device::BlockFillRandomUniform( ptr, capacity, seed, scope_max, scope_min, 0); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::device::BlockFillRandomGaussian( ptr, capacity, seed, Element(), Element(0.5f)); } else if (dist_kind == cutlass::Distribution::Sequential) { // Fill with increasing elements cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(1), Element()); } else { // Fill with all 1s cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(), Element(1)); } } /// Initializes data structures void initialize_() { // // Set scalors for the mha example // options.alpha0 = 1.0f / sqrt(float(options.head_size)); options.alpha1 = 1.0f; options.beta = 0; // // Choose random problem sizes // // construct a few problems of random sizes srand(seed); int64_t total_elements_Q = 0; int64_t total_elements_K = 0; int64_t total_elements_P = 0; int64_t total_elements_V = 0; int64_t total_elements_O = 0; ldq_host.resize(problem_count()); ldk_host.resize(problem_count()); ldp_host.resize(problem_count()); ldv_host.resize(problem_count()); ldo_host.resize(problem_count()); seqlen_host.resize(problem_count()); // Create tensors in BMHK format, where // B = batch_size // M = sequence length // H = num_heads // K = embedding size per head int64_t batch_offset_Q, batch_offset_K, batch_offset_V, batch_offset_O; for (int32_t b = 0; b < options.batch_size; ++b) { batch_offset_Q = total_elements_Q; batch_offset_K = total_elements_K; batch_offset_V = total_elements_V; batch_offset_O = total_elements_O; for (int32_t h = 0; h < options.head_number; ++h) { int32_t i = h + b * options.head_number; auto problem0 = options.problem_sizes0.at(i); auto problem1 = options.problem_sizes1.at(i); ldq_host.at(i) = LayoutQ::packed({problem0.m(), options.head_number * problem0.k()}).stride(0); ldk_host.at(i) = LayoutK::packed({options.head_number * problem0.k(), problem0.n()}).stride(0); ldp_host.at(i) = LayoutP::packed({problem0.m(), problem0.n()}).stride(0); ldv_host.at(i) = LayoutV::packed({problem1.k(), options.head_number * problem1.n()}).stride(0); ldo_host.at(i) = LayoutO::packed({problem1.m(), options.head_number * problem1.n()}).stride(0); // m = n for attention problems. 
seqlen_host.at(i) = problem0.m(); offset_Q.push_back(batch_offset_Q + h * problem0.k()); offset_K.push_back(batch_offset_K + h * problem0.k()); offset_P.push_back(total_elements_P); offset_V.push_back(batch_offset_V + h * problem0.k()); offset_O.push_back(batch_offset_O + h * problem1.n()); int64_t elements_Q = problem0.m() * problem0.k(); int64_t elements_K = problem0.k() * problem0.n(); int64_t elements_P = problem0.m() * problem0.n(); int64_t elements_V = problem1.k() * problem1.n(); int64_t elements_O = problem1.m() * problem1.n(); total_elements_Q += elements_Q; total_elements_K += elements_K; total_elements_P += elements_P; total_elements_V += elements_V; total_elements_O += elements_O; } } problem_sizes_device0.reset(problem_count()); problem_sizes_device1.reset(problem_count()); problem_sizes_device0.copy_from_host(options.problem_sizes0.data()); problem_sizes_device1.copy_from_host(options.problem_sizes1.data()); if (options.use_mask) { problem_sizes_device0_real.reset(problem_count()); problem_sizes_device0_real.copy_from_host(options.problem_sizes0_real.data()); } ldq.reset(problem_count()); ldk.reset(problem_count()); ldp.reset(problem_count()); ldv.reset(problem_count()); ldo.reset(problem_count()); seqlen.reset(problem_count()); ldq.copy_from_host(ldq_host.data()); ldk.copy_from_host(ldk_host.data()); ldp.copy_from_host(ldp_host.data()); ldv.copy_from_host(ldv_host.data()); ldo.copy_from_host(ldo_host.data()); seqlen.copy_from_host(seqlen_host.data()); // // Assign pointers // block_Q.reset(total_elements_Q); block_K.reset(total_elements_K); block_P.reset(total_elements_P); block_V.reset(total_elements_V); block_O.reset(total_elements_O); offset_P_Device.reset(problem_count()); // sync offset with device cutlass::device_memory::copy_to_device(offset_P_Device.get(), offset_P.data(), offset_P.size()); std::vector<ElementQ *> ptr_Q_host(problem_count()); std::vector<ElementK *> ptr_K_host(problem_count()); std::vector<ElementP *> ptr_P_host(problem_count()); std::vector<ElementV *> ptr_V_host(problem_count()); std::vector<ElementO *> ptr_O_host(problem_count()); std::vector<ElementNorm *> ptr_norm_host(problem_count()); std::vector<ElementSum *> ptr_sum_host(problem_count()); for (int32_t i = 0; i < problem_count(); ++i) { ptr_Q_host.at(i) = block_Q.get() + offset_Q.at(i); ptr_K_host.at(i) = block_K.get() + offset_K.at(i); ptr_P_host.at(i) = block_P.get() + offset_P.at(i); ptr_V_host.at(i) = block_V.get() + offset_V.at(i); ptr_O_host.at(i) = block_O.get() + offset_O.at(i); } ptr_Q.reset(problem_count()); ptr_Q.copy_from_host(ptr_Q_host.data()); ptr_K.reset(problem_count()); ptr_K.copy_from_host(ptr_K_host.data()); ptr_P.reset(problem_count()); ptr_P.copy_from_host(ptr_P_host.data()); ptr_V.reset(problem_count()); ptr_V.copy_from_host(ptr_V_host.data()); ptr_O.reset(problem_count()); ptr_O.copy_from_host(ptr_O_host.data()); // // Initialize the problems of the workspace // initialize_tensor_(block_Q.get(), total_elements_Q, init_Q, seed + 1); initialize_tensor_(block_K.get(), total_elements_K, init_K, seed + 2); initialize_tensor_(block_V.get(), total_elements_V, init_V, seed + 3); } template<typename Element> bool verify_tensor_(std::vector<Element> vector_Input, \ std::vector<Element> vector_Input_Ref, int64_t verify_length = -1) { int64_t size = (vector_Input.size() < vector_Input_Ref.size()) ? vector_Input.size() : vector_Input_Ref.size(); size = (verify_length == -1) ? 
size : verify_length; // 0.05 for absolute error float abs_tol = 5e-2f; // 10% for relative error float rel_tol = 1e-1f; for (int64_t i = 0; i < size; ++i) { float diff = (float)(vector_Input.at(i) - vector_Input_Ref.at(i)); float abs_diff = fabs(diff); float abs_ref = fabs((float)vector_Input_Ref.at(i) + 1e-5f); float relative_diff = abs_diff / abs_ref; if ( (isnan(vector_Input_Ref.at(i)) || isnan(abs_diff) || isinf(abs_diff)) || (abs_diff > abs_tol && relative_diff > rel_tol)) { printf("[%d/%d] diff = %f, rel_diff = %f, {computed=%f, ref=%f}.\n", int(i), int(size), abs_diff, relative_diff, (float)(vector_Input.at(i)), (float)(vector_Input_Ref.at(i))); return false; } } return true; } /// Verifies the result is a GEMM bool verify_() { bool passed = true; for (int32_t b = 0; b < options.batch_size; ++b) { int32_t i = b * options.head_number; // Problem size is the same for all heads cutlass::gemm::GemmCoord problem0 = options.problem_sizes0.at(b * options.head_number); cutlass::gemm::GemmCoord problem1 = options.problem_sizes1.at(b * options.head_number); MatrixCoord extent_Q{problem0.m(), problem0.k()}; MatrixCoord extent_K{problem0.k(), problem0.n()}; MatrixCoord extent_P{problem0.m(), problem0.n()}; MatrixCoord extent_V{problem1.k(), problem1.n()}; MatrixCoord extent_O{problem1.m(), problem1.n()}; LayoutO layout_O(ldo_host.at(i)); std::vector<ElementO> matrix_O(layout_O.capacity(extent_O)); cutlass::device_memory::copy_to_host(matrix_O.data(), block_O.get() + offset_O.at(i), matrix_O.size()); cutlass::DeviceAllocation<ElementO> block_Ref_O(layout_O.capacity(extent_O)); for (int32_t h = 0; h < options.head_number; ++h) { i = h + b * options.head_number; LayoutQ layout_Q(ldq_host.at(i)); LayoutK layout_K(ldk_host.at(i)); LayoutP layout_P(ldp_host.at(i)); LayoutV layout_V(ldv_host.at(i)); cutlass::TensorView<ElementQ, LayoutQ> view_Q(block_Q.get() + offset_Q.at(i), layout_Q, extent_Q); cutlass::TensorView<ElementK, LayoutK> view_K(block_K.get() + offset_K.at(i), layout_K, extent_K); cutlass::TensorView<ElementV, LayoutV> view_V(block_V.get() + offset_V.at(i), layout_V, extent_V); cutlass::TensorView<ElementO, LayoutO> view_Ref_O_device(block_Ref_O.get() + offset_O.at(i) - offset_O.at(b * options.head_number), layout_O, extent_O); cutlass::DeviceAllocation<ElementP> block_Ref_P(layout_P.capacity(extent_P)); cutlass::TensorView<ElementP, LayoutP> view_Ref_P_device(block_Ref_P.get(), layout_P, extent_P); // Reference GEMM cutlass::reference::device::GemmComplex< ElementQ, LayoutQ, ElementK, LayoutK, ElementP, LayoutP, ElementCompute, ElementAccumulator >( problem0, ElementAccumulator(options.alpha0), view_Q, Attention::MM0::Mma::kTransformA, view_K, Attention::MM0::Mma::kTransformB, ElementAccumulator(options.beta), view_Ref_P_device, view_Ref_P_device, ElementAccumulator(0) ); // Compute softmax for P. We need to explicitly compute softmax // over P because softmax is fused to the second GEMM in the // profiled implementation. std::vector<ElementP> matrix_Ref(layout_P.capacity(extent_P)); cutlass::device_memory::copy_to_host(matrix_Ref.data(), block_Ref_P.get(), matrix_Ref.size()); cutlass::TensorView<ElementP, LayoutP> view_Ref_host(matrix_Ref.data(), layout_P, extent_P); std::vector<ElementNorm> vector_Norm_Ref(problem0.m()); std::vector<ElementSum> vector_Sum_Ref(problem0.m()); int n_dim = options.use_mask ? 
options.problem_sizes0_real.at(i).n() : problem0.n(); // Compute softmax for reference matrix for (int m = 0; m < problem0.m(); m++) { int n_dim_row = n_dim; if (options.causal) { n_dim_row = ::min(m + 1, n_dim); } ElementSoftmaxCompute max = ElementSoftmaxCompute(view_Ref_host.ref().at({m, 0})); for (int n = 1; n < n_dim_row; n++) { max = ::max(max, ElementSoftmaxCompute(view_Ref_host.ref().at({m, n}))); } vector_Norm_Ref.at(m) = ElementNorm(max); ElementSoftmaxCompute sum = ElementSoftmaxCompute(); for (int n = 0; n < n_dim_row; n++) { sum += ::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max ); } ElementSoftmaxCompute inv_sum = ElementSoftmaxCompute(1.0f / sum); vector_Sum_Ref.at(m) = ElementSum(inv_sum); for (int n = 0; n < n_dim_row; n++) { view_Ref_host.ref().at({m, n}) = ElementP( ::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max ) * inv_sum ); } // Mask out the rest of the attention matrix for (int n = n_dim_row; n < n_dim; ++n) { view_Ref_host.ref().at({m, n}) = ElementP(0); } } // when not using mask, problem_real and problem share the same sizes if (options.use_mask) { for (int m = 0; m < problem0.m(); m++) { for (int n = n_dim; n < problem0.n(); n++) { view_Ref_host.ref().at({m, n}) = ElementP(0); } } } cutlass::device_memory::copy_to_device(block_Ref_P.get(), matrix_Ref.data(), matrix_Ref.size()); // Reference GEMM cutlass::reference::device::GemmComplex< ElementP, LayoutP, ElementV, LayoutV, ElementO, LayoutO, ElementCompute, ElementAccumulator >( problem1, ElementAccumulator(options.alpha1), view_Ref_P_device, Attention::MM0::Mma::kTransformA, view_V, Attention::MM0::Mma::kTransformB, ElementAccumulator(options.beta), view_Ref_O_device, view_Ref_O_device, ElementAccumulator(0) ); } // Copy to host memory std::vector<ElementO> matrix_Ref_O(layout_O.capacity(extent_O)); cutlass::device_memory::copy_to_host(matrix_Ref_O.data(), block_Ref_O.get(), matrix_Ref_O.size()); // printf("Pb %d: \n Q=(offset=%d, ldq=%d)\n K=(offset=%d, ldk=%d)\n O=(offset=%d, ldo=%d)\n", // int(i), int(offset_Q[i]), int(ldq_host[i]), int(offset_K[i]), int(ldk_host[i]), int(offset_O[i]), int(ldo_host[i])); bool verified_O = false; if (!verified_O) { verified_O = verify_tensor_<ElementO>(matrix_O, matrix_Ref_O); } passed = passed && verified_O; if (!passed) { std::cerr << "\n***\nError - problem " << i << " (batch " << b << ") failed the QA check\n***\n" << std::endl; if (!verified_O) { std::cout << "Final matrix output is incorrect" << std::endl; } return passed; } } return passed; } public: /// Executes a CUTLASS Attention kernel and measures runtime. 
Result profile() { Result result; result.passed = false; // Initialize the problem initialize_(); typename Attention::Params p; { // set parameters p.query_ptr = block_Q.get(); p.key_ptr = block_K.get(); p.value_ptr = block_V.get(); p.logsumexp_ptr = nullptr; // Only needed for bw p.output_accum_ptr = nullptr; if (Attention::kNeedsOutputAccumulatorBuffer) { hipMalloc(&p.output_accum_ptr, block_O.size() * sizeof(typename Attention::output_accum_t)); } p.output_ptr = block_O.get(); // TODO: support arbitrary seq lengths // if (cu_seqlens_q.has_value()) { // p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr(); // p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr(); // } p.scale = options.alpha0; p.num_heads = options.head_number; p.num_batches = options.batch_size; p.head_dim = options.head_size; p.head_dim_value = options.head_size_v; p.num_queries = options.seq_length; p.num_keys = options.seq_length_kv; if (options.causal) { p.custom_mask_type = Attention::CausalFromTopLeft; } // All tensors are in BMHK shapes p.q_strideH = options.head_size; p.k_strideH = options.head_size; p.v_strideH = options.head_size_v; p.q_strideM = int32_t(ldq_host[0]); p.k_strideM = int32_t(ldk_host[0]); p.v_strideM = int32_t(ldv_host[0]); p.q_strideB = p.q_strideM * options.seq_length; p.k_strideB = p.k_strideM * options.seq_length_kv; p.v_strideB = p.v_strideM * options.seq_length_kv; p.o_strideM = p.head_dim_value * p.num_heads; } // launch kernel :) constexpr auto kernel_fn = attention_kernel_batched_impl<Attention>; int smem_bytes = sizeof(typename Attention::SharedStorage); if (smem_bytes > 0xc000) { hipFuncSetAttribute(kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes); } if (!Attention::check_supported(p)) { std::cerr << "Kernel does not support these inputs" << std::endl; return result; } hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, 0, p); // Wait for completion result.error = hipDeviceSynchronize(); if (result.error != hipSuccess) { std::cerr << "Kernel execution error: " << hipGetErrorString(result.error); return result; } // // Verify correctness // result.passed = true; if (options.reference_check) { result.passed = verify_(); } // // Warm-up run // hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, 0, p); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to run CUTLASS Attention kernel." << std::endl; return result; } // // Construct events // hipEvent_t events[2]; for (auto & event : events) { result.error = hipEventCreate(&event); if (result.error != hipSuccess) { std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl; return -1; } } // Record an event at the start of a series of GEMM operations result.error = hipEventRecord(events[0]); if (result.error != hipSuccess) { std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // // Run profiling loop // for (int iter = 0; iter < options.iterations; ++iter) { hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, 0, p); } // // Stop profiling loop // // Record an event when the GEMM operations have been launched. result.error = hipEventRecord(events[1]); if (result.error != hipSuccess) { std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. 
result.error = hipEventSynchronize(events[1]); if (result.error != hipSuccess) { std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != hipSuccess) { std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // Compute average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // // Cleanup // for (auto event : events) { (void)hipEventDestroy(event); } std::cout << std::endl; std::cout << "CUTLASS Attention:\n" << "====================================================" << std::endl; std::cout << " " << " {seq length Q, seq length KV, head size, head size V, head number, batch size} = {" << options.seq_length \ << ", " << options.seq_length_kv << ", " << options.head_size << ", " << options.head_size_v << ", " << options.head_number\ << ", " << options.batch_size << "}." << std::endl; std::cout << std::endl; std::cout << " " << "Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout << " " << "GFLOPs: " << result.gflops << std::endl; return result; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template < int kQueriesPerBlock, int kKeysPerBlock, int kMaxK > int run_attention(Options& options) { using Attention = AttentionKernel< cutlass::half_t, // scalar_t cutlass::arch::Sm80, // ArchTag true, // Memory is aligned kQueriesPerBlock, kKeysPerBlock, kMaxK, false, // Supports dropout false // Supports bias >; // // Test and profile // TestbedAttention<Attention> testbed(options); Result result = testbed.profile(); if (!result.passed) { std::cout << "Profiling CUTLASS attention has failed.\n"; std::cout << "\nFailed\n"; return -1; } std::cout << "\nPassed\n"; return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { // // This example uses mma.sync to directly access Tensor Cores to achieve peak performance. // hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) { // // This example requires an NVIDIA Ampere-architecture GPU. // std::cout << "CUTLASS's CUTLASS Attention example requires a GPU of NVIDIA's Ampere Architecture or " << "later (compute capability 80 or greater).\n"; return 0; } // // Parse options // Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.error) { std::cerr << "Aborting execution." << std::endl; return -1; } if (options.use_mask) { std::cerr << "--use_mask is not supported at the moment\n"; return -2; } if (options.alignment != 1) { std::cerr << "--alignment=1 is the only supported value\n"; return -2; } // Determine kernel configuration based on head size. // If head size is less than or equal to 64, each block operates over 64 queries and // 64 keys, and partial results can be stored in the register file. // If head size is greater than 64, each block operates over 32 queries and 128 keys, // and partial results are stored in shared memory. 
  if (options.head_size_v > 64) {
    static int const kQueriesPerBlock = 32;
    static int const kKeysPerBlock = 128;
    if (options.head_size_v <= 128) {
      return run_attention<kQueriesPerBlock, kKeysPerBlock, 128>(options);
    } else {
      return run_attention<kQueriesPerBlock, kKeysPerBlock, 65536>(options);
    }
  } else {
    static constexpr int kMaxK = 64; // <- Decrease to 32/16 if your problem is smaller
    static int const kQueriesPerBlock = 64;
    static int const kKeysPerBlock = 64;
    return run_attention<kQueriesPerBlock, kKeysPerBlock, kMaxK>(options);
  }
}

/////////////////////////////////////////////////////////////////////////////////////////////////
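The header comment of the attention example above walks through the incremental ("streaming") softmax trick: keep a running maximum `m_prime` and a running coefficient sum `s_prime`, rescale the partial output whenever the maximum grows, and divide by `s_prime` only at the end. Below is a minimal host-side C++ sketch of that bookkeeping for a single query row. It is purely illustrative: the function name `streaming_attention_row`, the plain-vector data layout, and the block handling are my own choices and do not mirror how the CUDA kernel in `kernel_forward.h` actually organizes the computation.

```
// streaming_softmax_sketch.cpp -- illustrative only, not part of the CUTLASS example.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Computes softmax(q.K^T * scale) . V for a single query, visiting the keys in
// blocks of size B and keeping only a running maximum (m_prime), a running
// coefficient sum (s_prime), and a partially accumulated output row.
std::vector<float> streaming_attention_row(const std::vector<float>& q,                 // [head_dim]
                                           const std::vector<std::vector<float>>& K,    // [num_keys][head_dim]
                                           const std::vector<std::vector<float>>& V,    // [num_keys][head_dim_v]
                                           float scale, int B) {
  const int num_keys   = static_cast<int>(K.size());
  const int head_dim_v = static_cast<int>(V[0].size());
  std::vector<float> O(head_dim_v, 0.0f);
  float m_prime = -INFINITY;  // max logit seen so far
  float s_prime = 0.0f;       // sum of exp(logit - m_prime) seen so far

  for (int start = 0; start < num_keys; start += B) {
    int end = std::min(start + B, num_keys);

    // Logits for this block of keys, and the new running maximum.
    float mi = m_prime;
    std::vector<float> logits(end - start);
    for (int j = start; j < end; ++j) {
      float dot = 0.0f;
      for (size_t d = 0; d < q.size(); ++d) dot += q[d] * K[j][d];
      logits[j - start] = dot * scale;
      mi = std::max(mi, logits[j - start]);
    }

    // Rescale everything accumulated so far to the new maximum.
    float correction = std::exp(m_prime - mi);
    s_prime *= correction;
    for (float& o : O) o *= correction;
    m_prime = mi;

    // Accumulate this block.
    for (int j = start; j < end; ++j) {
      float p = std::exp(logits[j - start] - m_prime);
      s_prime += p;
      for (int d = 0; d < head_dim_v; ++d) O[d] += p * V[j][d];
    }
  }

  for (float& o : O) o /= s_prime;  // final normalization
  return O;
}

int main() {
  std::vector<float> q = {0.1f, 0.2f};
  std::vector<std::vector<float>> K = {{1.f, 0.f}, {0.f, 1.f}, {1.f, 1.f}};
  std::vector<std::vector<float>> V = {{1.f, 0.f}, {0.f, 1.f}, {0.5f, 0.5f}};
  auto o = streaming_attention_row(q, K, V, /*scale=*/1.0f, /*B=*/2);
  std::printf("%f %f\n", o[0], o[1]);
  return 0;
}
```

Rescaling by `exp(m_prime - mi)` before accumulating each new block is what allows the kernel to keep only O, `m_prime`, and `s_prime` live instead of materializing the full attention matrix.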
14a47ea3a8e72bbf487ef1af1bb3ba6d6c14a6e9.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief CUTLASS Attention Example. This workload computes fused multi-head attention. Because it keeps the attention matrix in shared memory, it is faster and uses less global memory. This is based on `"Self-Attention Does Not Need O(n^2) Memory" <http://arxiv.org/abs/2112.05682>`_, and very similar to `"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness" <https://arxiv.org/abs/2205.14135>`_. Algorithm: In short, we can compute the output incrementally in blocks of size B, we just need to divide the final result by the sum of all coefficients in the softmax (which we compute incrementally) with the following pseudo-code: ``` s_prime = torch.zeros([num_queries]) O = torch.zeros([num_queries, head_size_v]) for i in range(0, K.shape[0], B): si = exp((Q . K[i * B:(i+1) * B].t) * scale) s_prime += si.sum(-1) O += si . V[i * B:(i+1) * B] O = O / s_prime ``` In practice, and for numerical stability reasons, we also subtract the maximum so far (`mi`) before doing the exponential. When we encounter new keys, the maximum used to compute O so far (`m_prime`) can differ from the current maximum, so we update O before accumulating with ``` O = O * exp(m_prime - mi) m_prime = mi ``` Implementation details: - `si` is stored in shared memory between the two back-to-back GEMMs - we keep and accumulate the output directly in registers if we can (`head_size_v <= 128`).
Otherwise, we store it & accumulate in global memory (slower) - blocks are parallelized across the batch dimension, the number of heads, and the query sequence size Examples: # Run an attention example with default setup $ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen # Run an attention example with custom setup $ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen --head_number=2 --batch_size=3 --head_size=32 --head_size_v=64 --seq_length=512 --seq_length_kv=1024 --causal=true Acknowledgement: Fixed-sequence-length FMHA code was upstreamed by Meta xFormers (https://github.com/facebookresearch/xformers). */ ///////////////////////////////////////////////////////////////////////////////////////////////// #include <vector> #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/gemm_grouped.h" #include "cutlass/gemm/kernel/default_gemm_grouped.h" #include "cutlass/gemm/device/gemm_grouped.h" #include "cutlass/gemm/device/gemm_universal.h" #include "cutlass/util/command_line.h" #include "cutlass/util/distribution.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm_complex.h" #include "cutlass/util/reference/device/gemm_complex.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/kernel/gemm_grouped.h" #include "cutlass/gemm/kernel/gemm_transpose_operands.h" #include "cutlass/gemm/kernel/default_gemm.h" #include "cutlass/gemm/kernel/default_gemm_complex.h" #include "cutlass/gemm/device/default_gemm_configuration.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/threadblock/epilogue_with_visitor.h" #include "cutlass/fast_math.h" #include "kernel_forward.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double gflops; cutlass::Status status; cudaError_t error; bool passed; // // Methods // Result( double runtime_ms = 0, double gflops = 0, cutlass::Status status = cutlass::Status::kSuccess, cudaError_t error = cudaSuccess ): runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; bool error; bool reference_check; bool use_mask; bool causal; std::vector<cutlass::gemm::GemmCoord> problem_sizes0; std::vector<cutlass::gemm::GemmCoord> problem_sizes1; std::vector<cutlass::gemm::GemmCoord> problem_sizes0_real; std::vector<cutlass::gemm::GemmCoord> problem_sizes1_real; int alignment; int head_number; int batch_size; int head_size; int head_size_v; int seq_length; int seq_length_kv; int iterations; // alpha0, alpha1 and beta are fixed // in this multi-head attention example float alpha0; float alpha1; float beta; // // Methods // Options(): help(false), error(false), alignment(1), reference_check(true), head_number(12), batch_size(16), head_size(64), head_size_v(64), seq_length(1024), seq_length_kv(1024), use_mask(false), iterations(20), causal(false) { } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if 
(cmd.check_cmd_line_flag("help")) { help = true; return; } cmd.get_cmd_line_argument("alignment", alignment, 1); cmd.get_cmd_line_argument("head_number", head_number, 12); cmd.get_cmd_line_argument("batch_size", batch_size, 16); cmd.get_cmd_line_argument("head_size", head_size, 64); cmd.get_cmd_line_argument("head_size_v", head_size_v, head_size); cmd.get_cmd_line_argument("seq_length", seq_length, 1024); cmd.get_cmd_line_argument("seq_length_kv", seq_length_kv, seq_length); cmd.get_cmd_line_argument("use_mask", use_mask, false); cmd.get_cmd_line_argument("iterations", iterations, 20); cmd.get_cmd_line_argument("reference-check", reference_check, true); cmd.get_cmd_line_argument("causal", causal, true); randomize_problems(); } void randomize_problems() { int problem_count = head_number * batch_size; problem_sizes0.reserve(problem_count); problem_sizes1.reserve(problem_count); // When using mask, the original inputs are not padded // and we need to save these info. if (use_mask) { problem_sizes0_real.reserve(problem_count); problem_sizes1_real.reserve(problem_count); } for (int i = 0; i < batch_size; ++i) { // problems belonging to the same batch share the same seq len int m_real = seq_length; int mkv_real = seq_length_kv; int m = (m_real + alignment - 1) / alignment * alignment; int mkv = (mkv_real + alignment - 1) / alignment * alignment; int k0 = head_size; int k1 = head_size_v; for (int j = 0; j < head_number; ++j) { cutlass::gemm::GemmCoord problem0(m, mkv, k0); cutlass::gemm::GemmCoord problem1(m, k1, mkv); problem_sizes0.push_back(problem0); problem_sizes1.push_back(problem1); if (use_mask) { cutlass::gemm::GemmCoord problem0_real(m_real, mkv_real, k0); cutlass::gemm::GemmCoord problem1_real(m_real, k1, mkv_real); problem_sizes0_real.push_back(problem0_real); problem_sizes1_real.push_back(problem1_real); } } } } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "41_fused_multi_head_attention_fixed_seqlen\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --head_number=<int> Head number in multi-head attention (default: --head_number=12)\n" << " --batch_size=<int> Batch size in multi-head attention (default: --batch_size=16)\n" << " --head_size=<int> Head size in multi-head attention (default: --head_size=64)\n" << " --head_size_v=<int> Head size in multi-head attention for V (default: --head_size_v=head_size)\n" << " --seq_length=<int> Sequence length in multi-head attention for Q (default: --seq_length=1024)\n" << " --seq_length_kv=<int> Sequence length in multi-head attention for K/V (default: --seq_length_kv=seq_length)\n" << " --use_mask=<bool> If true, performs padding-like masking in softmax.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --reference-check=<bool> If true, performs reference check.\n" << " --causal=<bool> If true, uses causal masking.\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fops = int64_t(); for (int i = 0; i < problem_sizes0.size(); ++i) { auto const& problem0 = problem_sizes0[i]; auto const& problem1 = problem_sizes1[i]; for (int row = 0; row < problem0.m(); ++row) { int num_cols0 = problem0.n(); if (causal) { num_cols0 = std::min(row + 1, num_cols0); } // P <- Q . K_t fops += 2 * num_cols0 * problem0.k(); // P <- exp(P - max(P)) fops += 2 * num_cols0; // S <- sum(P) fops += num_cols0 - 1; // O <- P . 
V fops += 2 * num_cols0 * problem1.n(); // O <- O / S fops += num_cols0 * problem1.n(); } } return double(fops) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Attention> class TestbedAttention { public: // // Type definitions // using ElementQ = typename Attention::scalar_t; using ElementK = typename Attention::scalar_t; using ElementP = typename Attention::accum_t; using ElementAccumulator = typename Attention::accum_t; using ElementV = typename Attention::scalar_t; using ElementO = typename Attention::output_t; using ElementCompute = typename Attention::accum_t; using ElementNorm = typename Attention::accum_t; using ElementSum = typename Attention::accum_t; using ElementSoftmaxCompute = typename Attention::accum_t; using LayoutQ = cutlass::layout::RowMajor; using LayoutK = cutlass::layout::ColumnMajor; using LayoutP = cutlass::layout::RowMajor; using LayoutV = cutlass::layout::RowMajor; using LayoutO = cutlass::layout::RowMajor; using MatrixCoord = typename LayoutP::TensorCoord; private: // // Data members // Options & options; /// Initialization cutlass::Distribution::Kind init_Q; cutlass::Distribution::Kind init_K; cutlass::Distribution::Kind init_P; cutlass::Distribution::Kind init_V; cutlass::Distribution::Kind init_O; uint32_t seed; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device1; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0_real; std::vector<int64_t> offset_Q; std::vector<int64_t> offset_K; std::vector<int64_t> offset_P; std::vector<int64_t> offset_V; std::vector<int64_t> offset_O; std::vector<int64_t> ldq_host; std::vector<int64_t> ldk_host; std::vector<int64_t> ldp_host; std::vector<int64_t> ldv_host; std::vector<int64_t> ldo_host; std::vector<int64_t> seqlen_host; cutlass::DeviceAllocation<int64_t> ldq; cutlass::DeviceAllocation<int64_t> ldk; cutlass::DeviceAllocation<int64_t> ldp; cutlass::DeviceAllocation<int64_t> ldv; cutlass::DeviceAllocation<int64_t> ldo; cutlass::DeviceAllocation<int64_t> seqlen; cutlass::DeviceAllocation<ElementQ> block_Q; cutlass::DeviceAllocation<ElementK> block_K; cutlass::DeviceAllocation<ElementP> block_P; cutlass::DeviceAllocation<ElementV> block_V; cutlass::DeviceAllocation<ElementO> block_O; cutlass::DeviceAllocation<ElementNorm> block_Norm; cutlass::DeviceAllocation<ElementSum> block_Sum; cutlass::DeviceAllocation<int64_t> offset_P_Device; cutlass::DeviceAllocation<ElementQ *> ptr_Q; cutlass::DeviceAllocation<ElementK *> ptr_K; cutlass::DeviceAllocation<ElementP *> ptr_P; cutlass::DeviceAllocation<ElementV *> ptr_V; cutlass::DeviceAllocation<ElementO *> ptr_O; public: // // Methods // TestbedAttention( Options &options_, cutlass::Distribution::Kind init_Q_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_K_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_P_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_V_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_O_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ): options(options_), init_Q(init_Q_), init_K(init_K_), init_P(init_P_), init_V(init_V_), init_O(init_O_), seed(seed_) { } int problem_count() const { return (options.head_number * options.batch_size); } private: /// Helper to initialize a tensor view template <typename Element> void initialize_tensor_( Element *ptr, size_t capacity, 
cutlass::Distribution::Kind dist_kind, uint32_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { Element scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<ElementP>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 8; scope_min = -8; } else { scope_max = 8; scope_min = -8; } cutlass::reference::device::BlockFillRandomUniform( ptr, capacity, seed, scope_max, scope_min, 0); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::device::BlockFillRandomGaussian( ptr, capacity, seed, Element(), Element(0.5f)); } else if (dist_kind == cutlass::Distribution::Sequential) { // Fill with increasing elements cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(1), Element()); } else { // Fill with all 1s cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(), Element(1)); } } /// Initializes data structures void initialize_() { // // Set scalors for the mha example // options.alpha0 = 1.0f / sqrt(float(options.head_size)); options.alpha1 = 1.0f; options.beta = 0; // // Choose random problem sizes // // construct a few problems of random sizes srand(seed); int64_t total_elements_Q = 0; int64_t total_elements_K = 0; int64_t total_elements_P = 0; int64_t total_elements_V = 0; int64_t total_elements_O = 0; ldq_host.resize(problem_count()); ldk_host.resize(problem_count()); ldp_host.resize(problem_count()); ldv_host.resize(problem_count()); ldo_host.resize(problem_count()); seqlen_host.resize(problem_count()); // Create tensors in BMHK format, where // B = batch_size // M = sequence length // H = num_heads // K = embedding size per head int64_t batch_offset_Q, batch_offset_K, batch_offset_V, batch_offset_O; for (int32_t b = 0; b < options.batch_size; ++b) { batch_offset_Q = total_elements_Q; batch_offset_K = total_elements_K; batch_offset_V = total_elements_V; batch_offset_O = total_elements_O; for (int32_t h = 0; h < options.head_number; ++h) { int32_t i = h + b * options.head_number; auto problem0 = options.problem_sizes0.at(i); auto problem1 = options.problem_sizes1.at(i); ldq_host.at(i) = LayoutQ::packed({problem0.m(), options.head_number * problem0.k()}).stride(0); ldk_host.at(i) = LayoutK::packed({options.head_number * problem0.k(), problem0.n()}).stride(0); ldp_host.at(i) = LayoutP::packed({problem0.m(), problem0.n()}).stride(0); ldv_host.at(i) = LayoutV::packed({problem1.k(), options.head_number * problem1.n()}).stride(0); ldo_host.at(i) = LayoutO::packed({problem1.m(), options.head_number * problem1.n()}).stride(0); // m = n for attention problems. 
seqlen_host.at(i) = problem0.m(); offset_Q.push_back(batch_offset_Q + h * problem0.k()); offset_K.push_back(batch_offset_K + h * problem0.k()); offset_P.push_back(total_elements_P); offset_V.push_back(batch_offset_V + h * problem0.k()); offset_O.push_back(batch_offset_O + h * problem1.n()); int64_t elements_Q = problem0.m() * problem0.k(); int64_t elements_K = problem0.k() * problem0.n(); int64_t elements_P = problem0.m() * problem0.n(); int64_t elements_V = problem1.k() * problem1.n(); int64_t elements_O = problem1.m() * problem1.n(); total_elements_Q += elements_Q; total_elements_K += elements_K; total_elements_P += elements_P; total_elements_V += elements_V; total_elements_O += elements_O; } } problem_sizes_device0.reset(problem_count()); problem_sizes_device1.reset(problem_count()); problem_sizes_device0.copy_from_host(options.problem_sizes0.data()); problem_sizes_device1.copy_from_host(options.problem_sizes1.data()); if (options.use_mask) { problem_sizes_device0_real.reset(problem_count()); problem_sizes_device0_real.copy_from_host(options.problem_sizes0_real.data()); } ldq.reset(problem_count()); ldk.reset(problem_count()); ldp.reset(problem_count()); ldv.reset(problem_count()); ldo.reset(problem_count()); seqlen.reset(problem_count()); ldq.copy_from_host(ldq_host.data()); ldk.copy_from_host(ldk_host.data()); ldp.copy_from_host(ldp_host.data()); ldv.copy_from_host(ldv_host.data()); ldo.copy_from_host(ldo_host.data()); seqlen.copy_from_host(seqlen_host.data()); // // Assign pointers // block_Q.reset(total_elements_Q); block_K.reset(total_elements_K); block_P.reset(total_elements_P); block_V.reset(total_elements_V); block_O.reset(total_elements_O); offset_P_Device.reset(problem_count()); // sync offset with device cutlass::device_memory::copy_to_device(offset_P_Device.get(), offset_P.data(), offset_P.size()); std::vector<ElementQ *> ptr_Q_host(problem_count()); std::vector<ElementK *> ptr_K_host(problem_count()); std::vector<ElementP *> ptr_P_host(problem_count()); std::vector<ElementV *> ptr_V_host(problem_count()); std::vector<ElementO *> ptr_O_host(problem_count()); std::vector<ElementNorm *> ptr_norm_host(problem_count()); std::vector<ElementSum *> ptr_sum_host(problem_count()); for (int32_t i = 0; i < problem_count(); ++i) { ptr_Q_host.at(i) = block_Q.get() + offset_Q.at(i); ptr_K_host.at(i) = block_K.get() + offset_K.at(i); ptr_P_host.at(i) = block_P.get() + offset_P.at(i); ptr_V_host.at(i) = block_V.get() + offset_V.at(i); ptr_O_host.at(i) = block_O.get() + offset_O.at(i); } ptr_Q.reset(problem_count()); ptr_Q.copy_from_host(ptr_Q_host.data()); ptr_K.reset(problem_count()); ptr_K.copy_from_host(ptr_K_host.data()); ptr_P.reset(problem_count()); ptr_P.copy_from_host(ptr_P_host.data()); ptr_V.reset(problem_count()); ptr_V.copy_from_host(ptr_V_host.data()); ptr_O.reset(problem_count()); ptr_O.copy_from_host(ptr_O_host.data()); // // Initialize the problems of the workspace // initialize_tensor_(block_Q.get(), total_elements_Q, init_Q, seed + 1); initialize_tensor_(block_K.get(), total_elements_K, init_K, seed + 2); initialize_tensor_(block_V.get(), total_elements_V, init_V, seed + 3); } template<typename Element> bool verify_tensor_(std::vector<Element> vector_Input, \ std::vector<Element> vector_Input_Ref, int64_t verify_length = -1) { int64_t size = (vector_Input.size() < vector_Input_Ref.size()) ? vector_Input.size() : vector_Input_Ref.size(); size = (verify_length == -1) ? 
size : verify_length; // 0.05 for absolute error float abs_tol = 5e-2f; // 10% for relative error float rel_tol = 1e-1f; for (int64_t i = 0; i < size; ++i) { float diff = (float)(vector_Input.at(i) - vector_Input_Ref.at(i)); float abs_diff = fabs(diff); float abs_ref = fabs((float)vector_Input_Ref.at(i) + 1e-5f); float relative_diff = abs_diff / abs_ref; if ( (isnan(vector_Input_Ref.at(i)) || isnan(abs_diff) || isinf(abs_diff)) || (abs_diff > abs_tol && relative_diff > rel_tol)) { printf("[%d/%d] diff = %f, rel_diff = %f, {computed=%f, ref=%f}.\n", int(i), int(size), abs_diff, relative_diff, (float)(vector_Input.at(i)), (float)(vector_Input_Ref.at(i))); return false; } } return true; } /// Verifies the result is a GEMM bool verify_() { bool passed = true; for (int32_t b = 0; b < options.batch_size; ++b) { int32_t i = b * options.head_number; // Problem size is the same for all heads cutlass::gemm::GemmCoord problem0 = options.problem_sizes0.at(b * options.head_number); cutlass::gemm::GemmCoord problem1 = options.problem_sizes1.at(b * options.head_number); MatrixCoord extent_Q{problem0.m(), problem0.k()}; MatrixCoord extent_K{problem0.k(), problem0.n()}; MatrixCoord extent_P{problem0.m(), problem0.n()}; MatrixCoord extent_V{problem1.k(), problem1.n()}; MatrixCoord extent_O{problem1.m(), problem1.n()}; LayoutO layout_O(ldo_host.at(i)); std::vector<ElementO> matrix_O(layout_O.capacity(extent_O)); cutlass::device_memory::copy_to_host(matrix_O.data(), block_O.get() + offset_O.at(i), matrix_O.size()); cutlass::DeviceAllocation<ElementO> block_Ref_O(layout_O.capacity(extent_O)); for (int32_t h = 0; h < options.head_number; ++h) { i = h + b * options.head_number; LayoutQ layout_Q(ldq_host.at(i)); LayoutK layout_K(ldk_host.at(i)); LayoutP layout_P(ldp_host.at(i)); LayoutV layout_V(ldv_host.at(i)); cutlass::TensorView<ElementQ, LayoutQ> view_Q(block_Q.get() + offset_Q.at(i), layout_Q, extent_Q); cutlass::TensorView<ElementK, LayoutK> view_K(block_K.get() + offset_K.at(i), layout_K, extent_K); cutlass::TensorView<ElementV, LayoutV> view_V(block_V.get() + offset_V.at(i), layout_V, extent_V); cutlass::TensorView<ElementO, LayoutO> view_Ref_O_device(block_Ref_O.get() + offset_O.at(i) - offset_O.at(b * options.head_number), layout_O, extent_O); cutlass::DeviceAllocation<ElementP> block_Ref_P(layout_P.capacity(extent_P)); cutlass::TensorView<ElementP, LayoutP> view_Ref_P_device(block_Ref_P.get(), layout_P, extent_P); // Reference GEMM cutlass::reference::device::GemmComplex< ElementQ, LayoutQ, ElementK, LayoutK, ElementP, LayoutP, ElementCompute, ElementAccumulator >( problem0, ElementAccumulator(options.alpha0), view_Q, Attention::MM0::Mma::kTransformA, view_K, Attention::MM0::Mma::kTransformB, ElementAccumulator(options.beta), view_Ref_P_device, view_Ref_P_device, ElementAccumulator(0) ); // Compute softmax for P. We need to explicitly compute softmax // over P because softmax is fused to the second GEMM in the // profiled implementation. std::vector<ElementP> matrix_Ref(layout_P.capacity(extent_P)); cutlass::device_memory::copy_to_host(matrix_Ref.data(), block_Ref_P.get(), matrix_Ref.size()); cutlass::TensorView<ElementP, LayoutP> view_Ref_host(matrix_Ref.data(), layout_P, extent_P); std::vector<ElementNorm> vector_Norm_Ref(problem0.m()); std::vector<ElementSum> vector_Sum_Ref(problem0.m()); int n_dim = options.use_mask ? 
options.problem_sizes0_real.at(i).n() : problem0.n(); // Compute softmax for reference matrix for (int m = 0; m < problem0.m(); m++) { int n_dim_row = n_dim; if (options.causal) { n_dim_row = std::min(m + 1, n_dim); } ElementSoftmaxCompute max = ElementSoftmaxCompute(view_Ref_host.ref().at({m, 0})); for (int n = 1; n < n_dim_row; n++) { max = std::max(max, ElementSoftmaxCompute(view_Ref_host.ref().at({m, n}))); } vector_Norm_Ref.at(m) = ElementNorm(max); ElementSoftmaxCompute sum = ElementSoftmaxCompute(); for (int n = 0; n < n_dim_row; n++) { sum += std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max ); } ElementSoftmaxCompute inv_sum = ElementSoftmaxCompute(1.0f / sum); vector_Sum_Ref.at(m) = ElementSum(inv_sum); for (int n = 0; n < n_dim_row; n++) { view_Ref_host.ref().at({m, n}) = ElementP( std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max ) * inv_sum ); } // Mask out the rest of the attention matrix for (int n = n_dim_row; n < n_dim; ++n) { view_Ref_host.ref().at({m, n}) = ElementP(0); } } // when not using mask, problem_real and problem share the same sizes if (options.use_mask) { for (int m = 0; m < problem0.m(); m++) { for (int n = n_dim; n < problem0.n(); n++) { view_Ref_host.ref().at({m, n}) = ElementP(0); } } } cutlass::device_memory::copy_to_device(block_Ref_P.get(), matrix_Ref.data(), matrix_Ref.size()); // Reference GEMM cutlass::reference::device::GemmComplex< ElementP, LayoutP, ElementV, LayoutV, ElementO, LayoutO, ElementCompute, ElementAccumulator >( problem1, ElementAccumulator(options.alpha1), view_Ref_P_device, Attention::MM0::Mma::kTransformA, view_V, Attention::MM0::Mma::kTransformB, ElementAccumulator(options.beta), view_Ref_O_device, view_Ref_O_device, ElementAccumulator(0) ); } // Copy to host memory std::vector<ElementO> matrix_Ref_O(layout_O.capacity(extent_O)); cutlass::device_memory::copy_to_host(matrix_Ref_O.data(), block_Ref_O.get(), matrix_Ref_O.size()); // printf("Pb %d: \n Q=(offset=%d, ldq=%d)\n K=(offset=%d, ldk=%d)\n O=(offset=%d, ldo=%d)\n", // int(i), int(offset_Q[i]), int(ldq_host[i]), int(offset_K[i]), int(ldk_host[i]), int(offset_O[i]), int(ldo_host[i])); bool verified_O = false; if (!verified_O) { verified_O = verify_tensor_<ElementO>(matrix_O, matrix_Ref_O); } passed = passed && verified_O; if (!passed) { std::cerr << "\n***\nError - problem " << i << " (batch " << b << ") failed the QA check\n***\n" << std::endl; if (!verified_O) { std::cout << "Final matrix output is incorrect" << std::endl; } return passed; } } return passed; } public: /// Executes a CUTLASS Attention kernel and measures runtime. 
Result profile() { Result result; result.passed = false; // Initialize the problem initialize_(); typename Attention::Params p; { // set parameters p.query_ptr = block_Q.get(); p.key_ptr = block_K.get(); p.value_ptr = block_V.get(); p.logsumexp_ptr = nullptr; // Only needed for bw p.output_accum_ptr = nullptr; if (Attention::kNeedsOutputAccumulatorBuffer) { cudaMalloc(&p.output_accum_ptr, block_O.size() * sizeof(typename Attention::output_accum_t)); } p.output_ptr = block_O.get(); // TODO: support arbitrary seq lengths // if (cu_seqlens_q.has_value()) { // p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr(); // p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr(); // } p.scale = options.alpha0; p.num_heads = options.head_number; p.num_batches = options.batch_size; p.head_dim = options.head_size; p.head_dim_value = options.head_size_v; p.num_queries = options.seq_length; p.num_keys = options.seq_length_kv; if (options.causal) { p.custom_mask_type = Attention::CausalFromTopLeft; } // All tensors are in BMHK shapes p.q_strideH = options.head_size; p.k_strideH = options.head_size; p.v_strideH = options.head_size_v; p.q_strideM = int32_t(ldq_host[0]); p.k_strideM = int32_t(ldk_host[0]); p.v_strideM = int32_t(ldv_host[0]); p.q_strideB = p.q_strideM * options.seq_length; p.k_strideB = p.k_strideM * options.seq_length_kv; p.v_strideB = p.v_strideM * options.seq_length_kv; p.o_strideM = p.head_dim_value * p.num_heads; } // launch kernel :) constexpr auto kernel_fn = attention_kernel_batched_impl<Attention>; int smem_bytes = sizeof(typename Attention::SharedStorage); if (smem_bytes > 0xc000) { cudaFuncSetAttribute(kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes); } if (!Attention::check_supported(p)) { std::cerr << "Kernel does not support these inputs" << std::endl; return result; } kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p); // Wait for completion result.error = cudaDeviceSynchronize(); if (result.error != cudaSuccess) { std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error); return result; } // // Verify correctness // result.passed = true; if (options.reference_check) { result.passed = verify_(); } // // Warm-up run // kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to run CUTLASS Attention kernel." << std::endl; return result; } // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return -1; } } // Record an event at the start of a series of GEMM operations result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // // Run profiling loop // for (int iter = 0; iter < options.iterations; ++iter) { kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p); } // // Stop profiling loop // // Record an event when the GEMM operations have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. 
result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Compute average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // // Cleanup // for (auto event : events) { (void)cudaEventDestroy(event); } std::cout << std::endl; std::cout << "CUTLASS Attention:\n" << "====================================================" << std::endl; std::cout << " " << " {seq length Q, seq length KV, head size, head size V, head number, batch size} = {" << options.seq_length \ << ", " << options.seq_length_kv << ", " << options.head_size << ", " << options.head_size_v << ", " << options.head_number\ << ", " << options.batch_size << "}." << std::endl; std::cout << std::endl; std::cout << " " << "Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout << " " << "GFLOPs: " << result.gflops << std::endl; return result; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template < int kQueriesPerBlock, int kKeysPerBlock, int kMaxK > int run_attention(Options& options) { using Attention = AttentionKernel< cutlass::half_t, // scalar_t cutlass::arch::Sm80, // ArchTag true, // Memory is aligned kQueriesPerBlock, kKeysPerBlock, kMaxK, false, // Supports dropout false // Supports bias >; // // Test and profile // TestbedAttention<Attention> testbed(options); Result result = testbed.profile(); if (!result.passed) { std::cout << "Profiling CUTLASS attention has failed.\n"; std::cout << "\nFailed\n"; return -1; } std::cout << "\nPassed\n"; return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { // // This example uses mma.sync to directly access Tensor Cores to achieve peak performance. // cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) { // // This example requires an NVIDIA Ampere-architecture GPU. // std::cout << "CUTLASS's CUTLASS Attention example requires a GPU of NVIDIA's Ampere Architecture or " << "later (compute capability 80 or greater).\n"; return 0; } // // Parse options // Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.error) { std::cerr << "Aborting execution." << std::endl; return -1; } if (options.use_mask) { std::cerr << "--use_mask is not supported at the moment\n"; return -2; } if (options.alignment != 1) { std::cerr << "--alignment=1 is the only supported value\n"; return -2; } // Determine kernel configuration based on head size. // If head size is less than or equal to 64, each block operates over 64 queries and // 64 keys, and partial results can be stored in the register file. // If head size is greater than 64, each block operates over 32 queries and 128 keys, // and partial results are stored in shared memory. 
  if (options.head_size_v > 64) {
    static int const kQueriesPerBlock = 32;
    static int const kKeysPerBlock = 128;
    if (options.head_size_v <= 128) {
      return run_attention<kQueriesPerBlock, kKeysPerBlock, 128>(options);
    } else {
      return run_attention<kQueriesPerBlock, kKeysPerBlock, 65536>(options);
    }
  } else {
    static constexpr int kMaxK = 64; // <- Decrease to 32/16 if your problem is smaller
    static int const kQueriesPerBlock = 64;
    static int const kKeysPerBlock = 64;
    return run_attention<kQueriesPerBlock, kKeysPerBlock, kMaxK>(options);
  }
}

/////////////////////////////////////////////////////////////////////////////////////////////////
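For a sense of the numbers this example reports, the formula in `Options::gflops()` above can be evaluated by hand. Assuming the defaults (head_number=12, batch_size=16, seq_length=seq_length_kv=1024, head_size=head_size_v=64, no causal mask), the per-row term and the totals work out roughly as follows; these figures are just the source formula evaluated manually, not measured output:

```
per row:    2*1024*64 (Q.K^T) + 2*1024 (exp) + 1023 (row sum)
          + 2*1024*64 (P.V)   + 1024*64 (divide by S)        =    330,751 flops
per head:   330,751 * 1024 rows                              ~=  3.39e8 flops
total:      3.39e8 * 12 heads * 16 batches                   ~=  6.50e10 flops (~65 GFLOP)
```

A measured average runtime of 1 ms, for instance, would therefore be reported as roughly 65,000 GFLOP/s.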
08573b291346052a8970fa628d76308d7aa996bf.hip
// !!! This is a file automatically generated by hipify!!! #include <wb.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "device_launch_parameters.h" #define MASK_WIDTH 5 #define Mask_radius (MASK_WIDTH/2) #define O_TILE_WIDTH 16 #define BLOCK_WIDTH (O_TILE_WIDTH + MASK_WIDTH - 1) #define clamp(x) (min(max((x), 0.0), 1.0)) //@@ INSERT CODE HERE //implement the tiled 2D convolution kernel with adjustments for channels //use shared memory to reduce the number of global accesses, handle the boundary conditions in when loading input list elements into the shared memory __global__ void convolution2D(float* I, const float* __restrict__ M, float* P, int channels, int width, int height) { __shared__ float N_ds[BLOCK_WIDTH][BLOCK_WIDTH]; int outputRow = blockIdx.y * O_TILE_WIDTH + threadIdx.y; int outputCol = blockIdx.x * O_TILE_WIDTH + threadIdx.x; int inputRow = outputRow - (MASK_WIDTH / 2); int inputCol = outputCol - (MASK_WIDTH / 2); for (int i = 0; i < channels; i++) { if ((inputRow >= 0) && (inputRow < height) && (inputCol >= 0) && (inputCol < width)) { N_ds[threadIdx.y][threadIdx.x] = I[(inputRow * width + inputCol) * channels + i]; } else { N_ds[threadIdx.y][threadIdx.x] = 0.0f; } __syncthreads(); float output = 0.0f; if (threadIdx.x < O_TILE_WIDTH && threadIdx.y < O_TILE_WIDTH) { for (int k = 0; k < MASK_WIDTH; k++) { for (int j = 0; j < MASK_WIDTH; j++) { output += M[k * MASK_WIDTH + j] * N_ds[threadIdx.y + k][threadIdx.x + j]; } } } if (threadIdx.x < O_TILE_WIDTH && threadIdx.y < O_TILE_WIDTH) { if (outputRow < height && outputCol < width) { P[(outputRow * width + outputCol) * channels + i] = clamp(output); assert(P[(outputRow * width + outputCol) * channels + i] >= 0 && P[(outputRow * width + outputCol) * channels + i] <= 1); } } __syncthreads(); } } int main(int argc, char *argv[]) { wbArg_t arg; int maskRows; int maskColumns; int imageChannels; int imageWidth; int imageHeight; char *inputImageFile; char *inputMaskFile; wbImage_t inputImage; wbImage_t outputImage; float *hostInputImageData; float *hostOutputImageData; float *hostMaskData; float *deviceInputImageData; float *deviceOutputImageData; float *deviceMaskData; arg = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(arg, 0); inputMaskFile = wbArg_getInputFile(arg, 1); inputImage = wbImport(inputImageFile); hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns); assert(maskRows == MASK_WIDTH); /* mask height is fixed to 5 */ assert(maskColumns == MASK_WIDTH); /* mask width is fixed to 5 */ imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); //@@ INSERT CODE HERE //allocate device memory hipMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); hipMalloc((void **)&deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); hipMalloc((void **)&deviceMaskData, maskRows * maskColumns * sizeof(float)); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); //@@ INSERT CODE HERE //copy host memory to device hipMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * 
imageChannels * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(deviceMaskData, hostMaskData, maskRows * maskColumns * sizeof(float), hipMemcpyHostToDevice); wbTime_stop(Copy, "Copying data to the GPU"); wbTime_start(Compute, "Doing the computation on the GPU"); //@@ INSERT CODE HERE //initialize thread block and kernel grid dimensions //invoke CUDA kernel dim3 blockDim(BLOCK_WIDTH, BLOCK_WIDTH); wbLog(TRACE, "blockDim is ", BLOCK_WIDTH, ",", BLOCK_WIDTH); wbLog(TRACE, "Image is ", imageWidth, " x ", imageHeight); int gridWidth = (imageWidth - 1) / O_TILE_WIDTH + 1; // Ceiling int gridHeight = (imageHeight - 1) / O_TILE_WIDTH + 1; // Ceiling dim3 gridDim(gridWidth, gridHeight); wbLog(TRACE, "gridDim is ", gridWidth, ",", gridHeight); convolution2D << <gridDim, blockDim >> >(deviceInputImageData, deviceMaskData, deviceOutputImageData, imageChannels, imageWidth, imageHeight); wbTime_stop(Compute, "Doing the computation on the GPU"); wbTime_start(Copy, "Copying data from the GPU"); //@@ INSERT CODE HERE //copy results from device to host hipMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); wbSolution(arg, outputImage); //@@ INSERT CODE HERE //deallocate device memory hipFree(deviceInputImageData); hipFree(deviceOutputImageData); hipFree(deviceMaskData); free(hostMaskData); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
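// -----------------------------------------------------------------------------------------------
// Tile geometry recap for the kernel above; the numbers follow directly from the #defines in this
// file. Each block produces a 16x16 output tile but stages a (16 + 5 - 1) = 20x20 input patch in
// shared memory, so the 2-pixel halo needed by the 5x5 mask is read once instead of per output
// pixel. The same arithmetic, spelled out as a small host-side sketch (illustrative only):
struct TileGeometry { int blockWidth; int gridX; int gridY; int halo; };
static inline TileGeometry tileGeometryFor(int imageWidth, int imageHeight) {
  TileGeometry g;
  g.blockWidth = BLOCK_WIDTH;                      // 16 + 5 - 1 = 20 threads per block side
  g.gridX = (imageWidth  - 1) / O_TILE_WIDTH + 1;  // ceiling division over 16-wide output tiles
  g.gridY = (imageHeight - 1) / O_TILE_WIDTH + 1;
  g.halo  = MASK_WIDTH / 2;                        // 2-pixel apron on every side of the tile
  return g;
}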
08573b291346052a8970fa628d76308d7aa996bf.cu
#include <wb.h> #include <cuda_runtime.h> #include <cuda.h> #include "device_launch_parameters.h" #define MASK_WIDTH 5 #define Mask_radius (MASK_WIDTH/2) #define O_TILE_WIDTH 16 #define BLOCK_WIDTH (O_TILE_WIDTH + MASK_WIDTH - 1) #define clamp(x) (min(max((x), 0.0), 1.0)) //@@ INSERT CODE HERE //implement the tiled 2D convolution kernel with adjustments for channels //use shared memory to reduce the number of global accesses, handle the boundary conditions in when loading input list elements into the shared memory __global__ void convolution2D(float* I, const float* __restrict__ M, float* P, int channels, int width, int height) { __shared__ float N_ds[BLOCK_WIDTH][BLOCK_WIDTH]; int outputRow = blockIdx.y * O_TILE_WIDTH + threadIdx.y; int outputCol = blockIdx.x * O_TILE_WIDTH + threadIdx.x; int inputRow = outputRow - (MASK_WIDTH / 2); int inputCol = outputCol - (MASK_WIDTH / 2); for (int i = 0; i < channels; i++) { if ((inputRow >= 0) && (inputRow < height) && (inputCol >= 0) && (inputCol < width)) { N_ds[threadIdx.y][threadIdx.x] = I[(inputRow * width + inputCol) * channels + i]; } else { N_ds[threadIdx.y][threadIdx.x] = 0.0f; } __syncthreads(); float output = 0.0f; if (threadIdx.x < O_TILE_WIDTH && threadIdx.y < O_TILE_WIDTH) { for (int k = 0; k < MASK_WIDTH; k++) { for (int j = 0; j < MASK_WIDTH; j++) { output += M[k * MASK_WIDTH + j] * N_ds[threadIdx.y + k][threadIdx.x + j]; } } } if (threadIdx.x < O_TILE_WIDTH && threadIdx.y < O_TILE_WIDTH) { if (outputRow < height && outputCol < width) { P[(outputRow * width + outputCol) * channels + i] = clamp(output); assert(P[(outputRow * width + outputCol) * channels + i] >= 0 && P[(outputRow * width + outputCol) * channels + i] <= 1); } } __syncthreads(); } } int main(int argc, char *argv[]) { wbArg_t arg; int maskRows; int maskColumns; int imageChannels; int imageWidth; int imageHeight; char *inputImageFile; char *inputMaskFile; wbImage_t inputImage; wbImage_t outputImage; float *hostInputImageData; float *hostOutputImageData; float *hostMaskData; float *deviceInputImageData; float *deviceOutputImageData; float *deviceMaskData; arg = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(arg, 0); inputMaskFile = wbArg_getInputFile(arg, 1); inputImage = wbImport(inputImageFile); hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns); assert(maskRows == MASK_WIDTH); /* mask height is fixed to 5 */ assert(maskColumns == MASK_WIDTH); /* mask width is fixed to 5 */ imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); //@@ INSERT CODE HERE //allocate device memory cudaMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); cudaMalloc((void **)&deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); cudaMalloc((void **)&deviceMaskData, maskRows * maskColumns * sizeof(float)); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); //@@ INSERT CODE HERE //copy host memory to device cudaMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyHostToDevice); 
cudaMemcpy(deviceMaskData, hostMaskData, maskRows * maskColumns * sizeof(float), cudaMemcpyHostToDevice); wbTime_stop(Copy, "Copying data to the GPU"); wbTime_start(Compute, "Doing the computation on the GPU"); //@@ INSERT CODE HERE //initialize thread block and kernel grid dimensions //invoke CUDA kernel dim3 blockDim(BLOCK_WIDTH, BLOCK_WIDTH); wbLog(TRACE, "blockDim is ", BLOCK_WIDTH, ",", BLOCK_WIDTH); wbLog(TRACE, "Image is ", imageWidth, " x ", imageHeight); int gridWidth = (imageWidth - 1) / O_TILE_WIDTH + 1; // Ceiling int gridHeight = (imageHeight - 1) / O_TILE_WIDTH + 1; // Ceiling dim3 gridDim(gridWidth, gridHeight); wbLog(TRACE, "gridDim is ", gridWidth, ",", gridHeight); convolution2D << <gridDim, blockDim >> >(deviceInputImageData, deviceMaskData, deviceOutputImageData, imageChannels, imageWidth, imageHeight); wbTime_stop(Compute, "Doing the computation on the GPU"); wbTime_start(Copy, "Copying data from the GPU"); //@@ INSERT CODE HERE //copy results from device to host cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); wbSolution(arg, outputImage); //@@ INSERT CODE HERE //deallocate device memory cudaFree(deviceInputImageData); cudaFree(deviceOutputImageData); cudaFree(deviceMaskData); free(hostMaskData); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
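// -----------------------------------------------------------------------------------------------
// For checking the GPU output by hand, a straightforward CPU reference of the same zero-padded,
// clamped 5x5 convolution (a validation sketch only; the wb harness already checks the result in
// wbSolution):
static void referenceConvolution2D(const float *in, const float *mask, float *out,
                                   int channels, int width, int height) {
  const int maskWidth = 5, radius = 2;
  for (int row = 0; row < height; ++row) {
    for (int col = 0; col < width; ++col) {
      for (int c = 0; c < channels; ++c) {
        float acc = 0.0f;
        for (int i = 0; i < maskWidth; ++i) {
          for (int j = 0; j < maskWidth; ++j) {
            int r = row + i - radius;
            int q = col + j - radius;
            if (r >= 0 && r < height && q >= 0 && q < width) {
              acc += mask[i * maskWidth + j] * in[(r * width + q) * channels + c];
            }
          }
        }
        if (acc < 0.0f) acc = 0.0f;   // same clamp(x) behavior as the kernel
        if (acc > 1.0f) acc = 1.0f;
        out[(row * width + col) * channels + c] = acc;
      }
    }
  }
}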
85f17fcd04fbec12eea432acb84d596d56e11638.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <glm/gtc/matrix_inverse.hpp> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" using namespace std; #define ERRORCHECK 1 #define CACHE_FIRST_BOUNCE 0 #define SORT_MATERIAL 0 #define ANTIALIASING 1 #define DOF 0 #define MSI 0 #define DIRECT_LIGHTING 0 #define STREAM_COMPACTION 0 #define STRATIFIED 0 #define TIMER 0 #define MOTION_BLUR 0 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Geom * dev_lights = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc // ... 
static ShadeableIntersection * dev_first_intersections = NULL; void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); hipMalloc(&dev_lights, scene->lights.size() * sizeof(Geom)); hipMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), hipMemcpyHostToDevice); hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need hipMalloc(&dev_first_intersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_first_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null hipFree(dev_paths); hipFree(dev_geoms); hipFree(dev_lights); hipFree(dev_materials); hipFree(dev_intersections); // TODO: clean up any extra device memory you created hipFree(dev_first_intersections); checkCUDAError("pathtraceFree"); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. * * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); thrust::default_random_engine rng = makeSeededRandomEngine(iter, x, y); #if ANTIALIASING // TODO: implement antialiasing by jittering the ray thrust::uniform_real_distribution<float> uAA(-0.5, 0.5); segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f + uAA(rng)) - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f + uAA(rng)) ); #else segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) ); #endif #if DOF thrust::uniform_real_distribution<float> u01(0, 1); glm::vec2 sample = glm::vec2(u01(rng), u01(rng)); glm::vec2 pLens = cam.lensRadius * squareToDiskConcentric(sample); float ft = glm::abs(cam.focalDistance / segment.ray.direction.z); glm::vec3 pFocus = segment.ray.origin + segment.ray.direction * ft; segment.ray.origin += pLens.x * cam.right + pLens.y * cam.up; segment.ray.direction = glm::normalize(pFocus - segment.ray.origin); #else #endif segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } __device__ void rayIntersections( Ray * ray , Geom * geoms , int 
geoms_size , float & t_min , int & hit_geom_index ) { float t; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, *ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, *ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { hit_geom_index = i; t_min = t; } } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. __global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , int geoms_size , ShadeableIntersection * intersections ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; intersections[path_index].point = intersect_point; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial( int iter , int num_paths , int num_lights , int num_geoms , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials , Geom * lights , Geom * geoms ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths && pathSegments[idx].remainingBounces) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. 
Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces); #if STRATIFIED thrust::default_random_engine rng2 = makeSeededRandomEngine(iter, idx, -pathSegments[idx].remainingBounces); thrust::uniform_real_distribution<float> u01(0, 1); const int sqrtVal = 64; //const int sqrtVal = (int)(std::sqrt((float)hst_scene->state.iterations) + 0.5); int y = iter / sqrtVal; int x = iter % sqrtVal; glm::vec2 sample = glm::vec2(u01(rng), u01(rng2)); //glm::vec2 sample = glm::vec2((x+ u01(rng)) / (sqrtVal*1.f), (y+ u01(rng2)) / (sqrtVal*1.f)); //glm::vec2 sample = glm::vec2((x+0.5 ) / (sqrtVal*1.f), (y+0.5 ) / (sqrtVal*1.f)); //std::cout << "itr" << iter << "sample x" << sample.x << "sample y" << sample.y << std::endl; scatterRayStratified(pathSegments[idx], intersection.point, intersection.surfaceNormal, material, sample); #else scatterRay(pathSegments[idx], intersection.point, intersection.surfaceNormal, material, rng); pathSegments[idx].remainingBounces--; if (pathSegments[idx].remainingBounces == 0) { pathSegments[idx].color = glm::vec3(0.0f); } #if DIRECT_LIGHTING //if (pathSegments[idx].remainingBounces == 0) //{ //pathSegments[idx].color = glm::vec3(0.f, 0.f, 0.f); thrust::uniform_int_distribution<float> u0l(0, num_lights); int lightidx = u0l(rng); Geom light = lights[lightidx]; Material materiallight = materials[light.materialid]; Ray ray; ray.origin = intersection.point + intersection.surfaceNormal * EPSILON; glm::vec3 pointonlight = sampleonlight(light, rng); ray.direction = glm::normalize(pointonlight - ray.origin); float t_min = FLT_MAX; int hit_geom_index = -1; rayIntersections(&ray, geoms, num_geoms, t_min, hit_geom_index); /*if (hit_geom_index != -1) { if (geoms[hit_geom_index].materialid == light.materialid) { pathSegments[idx].color *= (materiallight.color * materiallight.emittance); } }*/ if (abs(t_min - glm::length(pointonlight - ray.origin)) < 1e-3f) { pathSegments[idx].color *= (material.color * materiallight.color * materiallight.emittance); //pathSegments[idx].color = glm::vec3(0.f,0.f,1.f); } else { //pathSegments[idx].color = glm::vec3(0.f, 1.f, 0.f); //pathSegments[idx].color == glm::vec3(0.0f); } pathSegments[idx].remainingBounces--; //} #endif #endif #if MSI float pdf_direct = 0.f, pdf_scattering = 0.f, weight; glm::vec3 wo, wi; wo = -pathSegments[idx].ray.direction; if (material.hasReflective || material.hasRefractive) { pathSegments[idx].specular = true; } //direct lighting //choose a random light thrust::uniform_int_distribution<float> u0l(0, num_lights); int lightidx = u0l(rng); Geom light = lights[lightidx]; Material materiallight = materials[light.materialid]; Ray ray; ray.origin = intersection.point + intersection.surfaceNormal * EPSILON; #endif } } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { 
int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; if (!iterationPath.remainingBounces)//gather those whose remainingBounces == 0 { image[iterationPath.pixelIndex] += iterationPath.color; } } } struct is_dead { __host__ __device__ bool operator()(const PathSegment& pathsegment) { return (pathsegment.remainingBounces == 0); } }; struct compare_material { __host__ __device__ bool operator()(const ShadeableIntersection& a, const ShadeableIntersection& b) { return (b.materialId > a.materialId); } }; /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const int num_lights = hst_scene->lights.size(); // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
// TODO: perform one iteration of path tracing #if MOTION_BLUR float step = 1 / (hst_scene->state.iterations*1.f); for (int i = 0; i < hst_scene->geoms.size(); i++) { if (hst_scene->geoms[i].motion) { hst_scene->geoms[i].translation += hst_scene->geoms[i].translate * step; hst_scene->geoms[i].rotation += hst_scene->geoms[i].rotate * step; hst_scene->geoms[i].transform = utilityCore::buildTransformationMatrix(hst_scene->geoms[i].translation, hst_scene->geoms[i].rotation, hst_scene->geoms[i].scale); hst_scene->geoms[i].inverseTransform = glm::inverse(hst_scene->geoms[i].transform); hst_scene->geoms[i].invTranspose = glm::inverseTranspose(hst_scene->geoms[i].transform); } } hipMemcpy(dev_geoms, &(hst_scene->geoms)[0], hst_scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); #endif hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; bool iterationComplete = false; while (!iterationComplete) { hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; #if CACHE_FIRST_BOUNCE if (depth == 0 && iter == 1) { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections ); hipMemcpy(dev_first_intersections, dev_intersections, num_paths * sizeof(dev_first_intersections[0]), hipMemcpyDeviceToDevice); } else if (depth == 0 && iter != 1) { hipMemcpy(dev_intersections, dev_first_intersections, num_paths * sizeof(dev_first_intersections[0]), hipMemcpyDeviceToDevice); } else { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections ); } #else computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections ); #endif checkCUDAError("trace one bounce"); hipDeviceSynchronize(); depth++; // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
#if SORT_MATERIAL thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_material()); #endif #if TIMER hipEvent_t start, stop; if (iter == 1) { hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); } #endif hipLaunchKernelGGL(( shadeFakeMaterial), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, iter, num_paths, num_lights, hst_scene->geoms.size(), dev_intersections, dev_paths, dev_materials, dev_lights, dev_geoms ); #if TIMER if (iter == 1) { hipEventRecord(stop); hipEventSynchronize(stop); float prev_elapsed_time_cpu_milliseconds = 0; hipEventElapsedTime(&prev_elapsed_time_cpu_milliseconds, start, stop); //std::cout << "Elapsed time: " << prev_elapsed_time_cpu_milliseconds << "ms per iteration when depth = " << depth << std::endl; std::cout << prev_elapsed_time_cpu_milliseconds << std::endl; } #endif #if STREAM_COMPACTION dim3 numBlocksPixels = (num_paths + blockSize1d - 1) / blockSize1d; finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths); PathSegment* dev_path_end2 = thrust::remove_if(thrust::device, dev_paths, dev_paths + num_paths, is_dead()); num_paths = dev_path_end2 - dev_paths; iterationComplete = (!num_paths|| depth>traceDepth); // TODO: should be based off stream compaction results. #else iterationComplete = (depth > traceDepth); // TODO: should be based off stream compaction results. #endif } // Assemble this iteration and apply it to the image #if !STREAM_COMPACTION dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, num_paths, dev_image, dev_paths); #endif /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
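// -----------------------------------------------------------------------------------------------
// Note on the (currently disabled) DIRECT_LIGHTING / MSI branches in shadeFakeMaterial: they
// instantiate thrust::uniform_int_distribution<float>, but that distribution requires an integer
// result type, and its bounds are inclusive, so (0, num_lights) can index one past the last light.
// A corrected light-selection sketch (pickLightIndex is a hypothetical helper; thrust/random.h is
// already included above):
__device__ inline int pickLightIndex(thrust::default_random_engine &rng, int num_lights) {
  thrust::uniform_int_distribution<int> u0l(0, num_lights - 1);  // inclusive range [0, num_lights-1]
  return u0l(rng);
}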
85f17fcd04fbec12eea432acb84d596d56e11638.cu
#include <cstdio> #include <cuda.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <glm/gtc/matrix_inverse.hpp> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" using namespace std; #define ERRORCHECK 1 #define CACHE_FIRST_BOUNCE 0 #define SORT_MATERIAL 0 #define ANTIALIASING 1 #define DOF 0 #define MSI 0 #define DIRECT_LIGHTING 0 #define STREAM_COMPACTION 0 #define STRATIFIED 0 #define TIMER 0 #define MOTION_BLUR 0 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Geom * dev_lights = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc // ... 
static ShadeableIntersection * dev_first_intersections = NULL; void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); cudaMalloc(&dev_lights, scene->lights.size() * sizeof(Geom)); cudaMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), cudaMemcpyHostToDevice); cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need cudaMalloc(&dev_first_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_first_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_paths); cudaFree(dev_geoms); cudaFree(dev_lights); cudaFree(dev_materials); cudaFree(dev_intersections); // TODO: clean up any extra device memory you created cudaFree(dev_first_intersections); checkCUDAError("pathtraceFree"); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. * * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); thrust::default_random_engine rng = makeSeededRandomEngine(iter, x, y); #if ANTIALIASING // TODO: implement antialiasing by jittering the ray thrust::uniform_real_distribution<float> uAA(-0.5, 0.5); segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f + uAA(rng)) - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f + uAA(rng)) ); #else segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) ); #endif #if DOF thrust::uniform_real_distribution<float> u01(0, 1); glm::vec2 sample = glm::vec2(u01(rng), u01(rng)); glm::vec2 pLens = cam.lensRadius * squareToDiskConcentric(sample); float ft = glm::abs(cam.focalDistance / segment.ray.direction.z); glm::vec3 pFocus = segment.ray.origin + segment.ray.direction * ft; segment.ray.origin += pLens.x * cam.right + pLens.y * cam.up; segment.ray.direction = glm::normalize(pFocus - segment.ray.origin); #else #endif segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } __device__ void rayIntersections( Ray * ray 
, Geom * geoms , int geoms_size , float & t_min , int & hit_geom_index ) { float t; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, *ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, *ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { hit_geom_index = i; t_min = t; } } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. __global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , int geoms_size , ShadeableIntersection * intersections ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; intersections[path_index].point = intersect_point; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial( int iter , int num_paths , int num_lights , int num_geoms , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials , Geom * lights , Geom * geoms ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths && pathSegments[idx].remainingBounces) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. 
Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces); #if STRATIFIED thrust::default_random_engine rng2 = makeSeededRandomEngine(iter, idx, -pathSegments[idx].remainingBounces); thrust::uniform_real_distribution<float> u01(0, 1); const int sqrtVal = 64; //const int sqrtVal = (int)(std::sqrt((float)hst_scene->state.iterations) + 0.5); int y = iter / sqrtVal; int x = iter % sqrtVal; glm::vec2 sample = glm::vec2(u01(rng), u01(rng2)); //glm::vec2 sample = glm::vec2((x+ u01(rng)) / (sqrtVal*1.f), (y+ u01(rng2)) / (sqrtVal*1.f)); //glm::vec2 sample = glm::vec2((x+0.5 ) / (sqrtVal*1.f), (y+0.5 ) / (sqrtVal*1.f)); //std::cout << "itr" << iter << "sample x" << sample.x << "sample y" << sample.y << std::endl; scatterRayStratified(pathSegments[idx], intersection.point, intersection.surfaceNormal, material, sample); #else scatterRay(pathSegments[idx], intersection.point, intersection.surfaceNormal, material, rng); pathSegments[idx].remainingBounces--; if (pathSegments[idx].remainingBounces == 0) { pathSegments[idx].color = glm::vec3(0.0f); } #if DIRECT_LIGHTING //if (pathSegments[idx].remainingBounces == 0) //{ //pathSegments[idx].color = glm::vec3(0.f, 0.f, 0.f); thrust::uniform_int_distribution<float> u0l(0, num_lights); int lightidx = u0l(rng); Geom light = lights[lightidx]; Material materiallight = materials[light.materialid]; Ray ray; ray.origin = intersection.point + intersection.surfaceNormal * EPSILON; glm::vec3 pointonlight = sampleonlight(light, rng); ray.direction = glm::normalize(pointonlight - ray.origin); float t_min = FLT_MAX; int hit_geom_index = -1; rayIntersections(&ray, geoms, num_geoms, t_min, hit_geom_index); /*if (hit_geom_index != -1) { if (geoms[hit_geom_index].materialid == light.materialid) { pathSegments[idx].color *= (materiallight.color * materiallight.emittance); } }*/ if (abs(t_min - glm::length(pointonlight - ray.origin)) < 1e-3f) { pathSegments[idx].color *= (material.color * materiallight.color * materiallight.emittance); //pathSegments[idx].color = glm::vec3(0.f,0.f,1.f); } else { //pathSegments[idx].color = glm::vec3(0.f, 1.f, 0.f); //pathSegments[idx].color == glm::vec3(0.0f); } pathSegments[idx].remainingBounces--; //} #endif #endif #if MSI float pdf_direct = 0.f, pdf_scattering = 0.f, weight; glm::vec3 wo, wi; wo = -pathSegments[idx].ray.direction; if (material.hasReflective || material.hasRefractive) { pathSegments[idx].specular = true; } //direct lighting //choose a random light thrust::uniform_int_distribution<float> u0l(0, num_lights); int lightidx = u0l(rng); Geom light = lights[lightidx]; Material materiallight = materials[light.materialid]; Ray ray; ray.origin = intersection.point + intersection.surfaceNormal * EPSILON; #endif } } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { 
int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; if (!iterationPath.remainingBounces)//gather those whose remainingBounces == 0 { image[iterationPath.pixelIndex] += iterationPath.color; } } } struct is_dead { __host__ __device__ bool operator()(const PathSegment& pathsegment) { return (pathsegment.remainingBounces == 0); } }; struct compare_material { __host__ __device__ bool operator()(const ShadeableIntersection& a, const ShadeableIntersection& b) { return (b.materialId > a.materialId); } }; /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const int num_lights = hst_scene->lights.size(); // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
// TODO: perform one iteration of path tracing #if MOTION_BLUR float step = 1 / (hst_scene->state.iterations*1.f); for (int i = 0; i < hst_scene->geoms.size(); i++) { if (hst_scene->geoms[i].motion) { hst_scene->geoms[i].translation += hst_scene->geoms[i].translate * step; hst_scene->geoms[i].rotation += hst_scene->geoms[i].rotate * step; hst_scene->geoms[i].transform = utilityCore::buildTransformationMatrix(hst_scene->geoms[i].translation, hst_scene->geoms[i].rotation, hst_scene->geoms[i].scale); hst_scene->geoms[i].inverseTransform = glm::inverse(hst_scene->geoms[i].transform); hst_scene->geoms[i].invTranspose = glm::inverseTranspose(hst_scene->geoms[i].transform); } } cudaMemcpy(dev_geoms, &(hst_scene->geoms)[0], hst_scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); #endif generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; bool iterationComplete = false; while (!iterationComplete) { cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; #if CACHE_FIRST_BOUNCE if (depth == 0 && iter == 1) { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections ); cudaMemcpy(dev_first_intersections, dev_intersections, num_paths * sizeof(dev_first_intersections[0]), cudaMemcpyDeviceToDevice); } else if (depth == 0 && iter != 1) { cudaMemcpy(dev_intersections, dev_first_intersections, num_paths * sizeof(dev_first_intersections[0]), cudaMemcpyDeviceToDevice); } else { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections ); } #else computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections ); #endif checkCUDAError("trace one bounce"); cudaDeviceSynchronize(); depth++; // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
#if SORT_MATERIAL thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_material()); #endif #if TIMER cudaEvent_t start, stop; if (iter == 1) { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); } #endif shadeFakeMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> ( iter, num_paths, num_lights, hst_scene->geoms.size(), dev_intersections, dev_paths, dev_materials, dev_lights, dev_geoms ); #if TIMER if (iter == 1) { cudaEventRecord(stop); cudaEventSynchronize(stop); float prev_elapsed_time_cpu_milliseconds = 0; cudaEventElapsedTime(&prev_elapsed_time_cpu_milliseconds, start, stop); //std::cout << "Elapsed time: " << prev_elapsed_time_cpu_milliseconds << "ms per iteration when depth = " << depth << std::endl; std::cout << prev_elapsed_time_cpu_milliseconds << std::endl; } #endif #if STREAM_COMPACTION dim3 numBlocksPixels = (num_paths + blockSize1d - 1) / blockSize1d; finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths); PathSegment* dev_path_end2 = thrust::remove_if(thrust::device, dev_paths, dev_paths + num_paths, is_dead()); num_paths = dev_path_end2 - dev_paths; iterationComplete = (!num_paths|| depth>traceDepth); // TODO: should be based off stream compaction results. #else iterationComplete = (depth > traceDepth); // TODO: should be based off stream compaction results. #endif } // Assemble this iteration and apply it to the image #if !STREAM_COMPACTION dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; finalGather<<<numBlocksPixels, blockSize1d>>>(num_paths, dev_image, dev_paths); #endif /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
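// -----------------------------------------------------------------------------------------------
// The CACHE_FIRST_BOUNCE path reuses iteration 1's camera-ray intersections for every later
// iteration, which is only valid while generateRayFromCamera is deterministic. With ANTIALIASING
// (or DOF) jitter enabled the first bounce changes every iteration, so the cache would bias the
// image; this file keeps the cache off. A minimal compile-time guard expressing that constraint
// (a sketch, not part of the original project):
#if CACHE_FIRST_BOUNCE && (ANTIALIASING || DOF)
#error "First-bounce caching assumes deterministic camera rays; disable ANTIALIASING/DOF or the cache."
#endif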
57975fd5ad66fffb184a97126600c956098da7bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __device__ void Temp( float *R, float *G, float *B, float Temp) { float r, g, b; if (Temp <= 66.0f){ r = 255.0f; } else { r = Temp - 60.0f; r = 329.698727446 * powf(r, -0.1332047592); if(r < 0.0f){r = 0.0f;} if(r > 255.0f){r = 255.0f;} } if (Temp <= 66.0f){ g = Temp; g = 99.4708025861 * log(g) - 161.1195681661; if(g < 0.0f){g = 0.0f;} if(g > 255.0f){g = 255.0f;} } else { g = Temp - 60.0f; g = 288.1221695283 * powf(g, -0.0755148492); if(g < 0.0f){g = 0.0f;} if(g > 255.0f){g = 255.0f;} } if(Temp >= 66.0f){ b = 255.0f; } else { if(Temp <= 19.0f){ b = 0.0f; } else { b = Temp - 10.0f; b = 138.5177312231 * log(b) - 305.0447927307; if(b < 0.0f){b = 0.0f;} if(b > 255.0f){b = 255.0f;} } } *R = r / 255.0f; *G = g / 255.0f; *B = b / 255.0f; } __global__ void TempReturn(float* p_Input, float* p_Temp, int p_Width, int p_Height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < p_Width) && (y < p_Height)) { const int index = (y * p_Width + x) * 4; p_Input[index + 2] = p_Temp[y * p_Width + x]; }}
57975fd5ad66fffb184a97126600c956098da7bc.cu
#include "includes.h" __device__ void Temp( float *R, float *G, float *B, float Temp) { float r, g, b; if (Temp <= 66.0f){ r = 255.0f; } else { r = Temp - 60.0f; r = 329.698727446 * powf(r, -0.1332047592); if(r < 0.0f){r = 0.0f;} if(r > 255.0f){r = 255.0f;} } if (Temp <= 66.0f){ g = Temp; g = 99.4708025861 * log(g) - 161.1195681661; if(g < 0.0f){g = 0.0f;} if(g > 255.0f){g = 255.0f;} } else { g = Temp - 60.0f; g = 288.1221695283 * powf(g, -0.0755148492); if(g < 0.0f){g = 0.0f;} if(g > 255.0f){g = 255.0f;} } if(Temp >= 66.0f){ b = 255.0f; } else { if(Temp <= 19.0f){ b = 0.0f; } else { b = Temp - 10.0f; b = 138.5177312231 * log(b) - 305.0447927307; if(b < 0.0f){b = 0.0f;} if(b > 255.0f){b = 255.0f;} } } *R = r / 255.0f; *G = g / 255.0f; *B = b / 255.0f; } __global__ void TempReturn(float* p_Input, float* p_Temp, int p_Width, int p_Height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < p_Width) && (y < p_Height)) { const int index = (y * p_Width + x) * 4; p_Input[index + 2] = p_Temp[y * p_Width + x]; }}
3586f2cca2ea666dd3f796d943823ed939499951.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 28-Jun-2012 11:01:03 // // user function __device__ #include "bres_calc.h" // CUDA kernel function __global__ void op_cuda_bres_calc( float *ind_arg0, float *ind_arg1, float *ind_arg2, float *ind_arg3, int *ind_map, short *arg_map, int *arg5, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg4_l[4]; __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ int *ind_arg1_map, ind_arg1_size; __shared__ int *ind_arg2_map, ind_arg2_size; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ float *ind_arg0_s; __shared__ float *ind_arg1_s; __shared__ float *ind_arg2_s; __shared__ float *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*4]; ind_arg1_size = ind_arg_sizes[1+blockId*4]; ind_arg2_size = ind_arg_sizes[2+blockId*4]; ind_arg3_size = ind_arg_sizes[3+blockId*4]; ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*4]; ind_arg1_map = &ind_map[2*set_size] + ind_arg_offs[1+blockId*4]; ind_arg2_map = &ind_map[3*set_size] + ind_arg_offs[2+blockId*4]; ind_arg3_map = &ind_map[4*set_size] + ind_arg_offs[3+blockId*4]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*2); ind_arg1_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg1_size*sizeof(float)*4); ind_arg2_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg2_size*sizeof(float)*1); ind_arg3_s = (float *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x) ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4]; for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x) ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1]; for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3_s[n] = ZERO_float; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) { int col2 = -1; if (n<nelem) { // initialise local variables for (int d=0; d<4; d++) arg4_l[d] = ZERO_float; // user-supplied kernel call bres_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2, ind_arg0_s+arg_map[1*set_size+n+offset_b]*2, ind_arg1_s+arg_map[2*set_size+n+offset_b]*4, ind_arg2_s+arg_map[3*set_size+n+offset_b]*1, arg4_l, arg5+(n+offset_b)*1 ); col2 = colors[n+offset_b]; } // store local variables int arg4_map; if (col2>=0) { arg4_map = arg_map[4*set_size+n+offset_b]; } for (int col=0; col<ncolor; col++) { if (col2==col) { for (int d=0; d<4; d++) ind_arg3_s[d+arg4_map*4] += arg4_l[d]; } __syncthreads(); } } // apply pointered write/increment for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } // host stub function void op_par_loop_bres_calc(char const *name, op_set set, op_arg arg0, op_arg 
arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5 ){ int nargs = 6; op_arg args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; int ninds = 4; int inds[6] = {0,0,1,2,3,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: bres_calc\n"); } // get plan #ifdef OP_PART_SIZE_3 int part_size = OP_PART_SIZE_3; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timers_core(&cpu_t1, &wall_t1); if (set->size >0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args); #ifdef OP_BLOCK_SIZE_3 int nthread = OP_BLOCK_SIZE_3; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; hipLaunchKernelGGL(( op_cuda_bres_calc), dim3(nblocks),dim3(nthread),nshared, 0, (float *)arg0.data_d, (float *)arg2.data_d, (float *)arg3.data_d, (float *)arg4.data_d, Plan->ind_map, Plan->loc_map, (int *)arg5.data_d, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set_size); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_bres_calc execution failed\n"); } block_offset += Plan->ncolblk[col]; } op_timing_realloc(3); OP_kernels[3].transfer += Plan->transfer; OP_kernels[3].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); op_timing_realloc(3); OP_kernels[3].name = name; OP_kernels[3].count += 1; OP_kernels[3].time += wall_t2 - wall_t1; }
3586f2cca2ea666dd3f796d943823ed939499951.cu
// // auto-generated by op2.m on 28-Jun-2012 11:01:03 // // user function __device__ #include "bres_calc.h" // CUDA kernel function __global__ void op_cuda_bres_calc( float *ind_arg0, float *ind_arg1, float *ind_arg2, float *ind_arg3, int *ind_map, short *arg_map, int *arg5, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg4_l[4]; __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ int *ind_arg1_map, ind_arg1_size; __shared__ int *ind_arg2_map, ind_arg2_size; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ float *ind_arg0_s; __shared__ float *ind_arg1_s; __shared__ float *ind_arg2_s; __shared__ float *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*4]; ind_arg1_size = ind_arg_sizes[1+blockId*4]; ind_arg2_size = ind_arg_sizes[2+blockId*4]; ind_arg3_size = ind_arg_sizes[3+blockId*4]; ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*4]; ind_arg1_map = &ind_map[2*set_size] + ind_arg_offs[1+blockId*4]; ind_arg2_map = &ind_map[3*set_size] + ind_arg_offs[2+blockId*4]; ind_arg3_map = &ind_map[4*set_size] + ind_arg_offs[3+blockId*4]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*2); ind_arg1_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg1_size*sizeof(float)*4); ind_arg2_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg2_size*sizeof(float)*1); ind_arg3_s = (float *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x) ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4]; for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x) ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1]; for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3_s[n] = ZERO_float; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) { int col2 = -1; if (n<nelem) { // initialise local variables for (int d=0; d<4; d++) arg4_l[d] = ZERO_float; // user-supplied kernel call bres_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2, ind_arg0_s+arg_map[1*set_size+n+offset_b]*2, ind_arg1_s+arg_map[2*set_size+n+offset_b]*4, ind_arg2_s+arg_map[3*set_size+n+offset_b]*1, arg4_l, arg5+(n+offset_b)*1 ); col2 = colors[n+offset_b]; } // store local variables int arg4_map; if (col2>=0) { arg4_map = arg_map[4*set_size+n+offset_b]; } for (int col=0; col<ncolor; col++) { if (col2==col) { for (int d=0; d<4; d++) ind_arg3_s[d+arg4_map*4] += arg4_l[d]; } __syncthreads(); } } // apply pointered write/increment for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } // host stub function void op_par_loop_bres_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5 ){ int nargs = 6; op_arg 
args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; int ninds = 4; int inds[6] = {0,0,1,2,3,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: bres_calc\n"); } // get plan #ifdef OP_PART_SIZE_3 int part_size = OP_PART_SIZE_3; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timers_core(&cpu_t1, &wall_t1); if (set->size >0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args); #ifdef OP_BLOCK_SIZE_3 int nthread = OP_BLOCK_SIZE_3; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; op_cuda_bres_calc<<<nblocks,nthread,nshared>>>( (float *)arg0.data_d, (float *)arg2.data_d, (float *)arg3.data_d, (float *)arg4.data_d, Plan->ind_map, Plan->loc_map, (int *)arg5.data_d, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set_size); cutilSafeCall(cudaThreadSynchronize()); cutilCheckMsg("op_cuda_bres_calc execution failed\n"); } block_offset += Plan->ncolblk[col]; } op_timing_realloc(3); OP_kernels[3].transfer += Plan->transfer; OP_kernels[3].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); op_timing_realloc(3); OP_kernels[3].name = name; OP_kernels[3].count += 1; OP_kernels[3].time += wall_t2 - wall_t1; }
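The pair above differs mainly in how the kernel is launched and synchronized. The stand-alone sketch below is not part of the OP2 code; the kernel, sizes, and names are invented for illustration. It shows the two call sites in plain CUDA form: a triple-chevron launch, which hipify rewrites as hipLaunchKernelGGL, and the deprecated cudaThreadSynchronize(), which it replaces with hipDeviceSynchronize().

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor;
}

int main()
{
    const int n = 1024;
    float* d = NULL;
    cudaMalloc((void**)&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA launch syntax; the .hip file above expresses the same call as
    // hipLaunchKernelGGL(( kernel ), gridDim, blockDim, sharedMem, stream, args...).
    scale_kernel<<<grid, block>>>(d, 2.0f, n);

    // cudaThreadSynchronize() in the .cu file maps to hipDeviceSynchronize() in the .hip file.
    cudaDeviceSynchronize();
    printf("last error: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d);
    return 0;
}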
b784908dee90d0b755df78a474de740573b015a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017-2020 ABBYY Production LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------------------------------------------*/ #include <NeoMathEngine/NeoMathEngineDefs.h> #ifdef NEOML_USE_CUDA #include <CudaMathEngine.h> #include <CudaMathEngineDnnConvs.h> #include <CudaCommon.h> #include <CudaDevice.h> #include <MathEngineCommon.h> #include <MemoryHandleInternal.h> #include <Kernels/CudaDnnConvKernels.h> namespace NeoML { // Temporary matrix height static inline int tempMatrixHeight( const CCudaConvolutionDescInternal& desc ) { return desc.Source.ObjectCount() * desc.Result.Height() * desc.Result.Width(); } // Temporary matrix width static inline int tempMatrixWidth( const CCudaConvolutionDescInternal& desc ) { return desc.Filter.ObjectSize(); } CConvolutionDesc* CCudaMathEngine::InitBlobConvolution( const CBlobDesc& input, int paddingHeight, int paddingWidth, int strideHeight, int strideWidth, int dilationHeight, int dilationWidth, const CBlobDesc& filter, const CBlobDesc& output ) { int totalInputChannels = input.Channels() * input.Depth(); int totalOutputChannels = output.Channels() * output.Depth(); CCudaConvolutionDesc* desc = new CCudaConvolutionDesc(); desc->Internal.Source = input; desc->Internal.Filter = filter; desc->Internal.Result = output; desc->Internal.StrideHeight = strideHeight; desc->Internal.StrideWidth = strideWidth; desc->Internal.PaddingHeight = paddingHeight; desc->Internal.PaddingWidth = paddingWidth; desc->Internal.DilationHeight = dilationHeight; desc->Internal.DilationWidth = dilationWidth; return desc; } void CCudaMathEngine::BlobConvolution( const CConvolutionDesc& convDesc, const CConstFloatHandle& sourceData, const CConstFloatHandle& filterData, const CConstFloatHandle* freeTermData, const CFloatHandle& resultData ) { SetCudaDevice( device->DeviceNumber ); const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal; const CCudaBlobDesc& source = desc.Source; const CCudaBlobDesc& filter = desc.Filter; const CCudaBlobDesc& result = desc.Result; if( filter.Height() == 3 && filter.Width() == 3 && desc.StrideHeight == 1 && desc.StrideWidth == 1 && desc.DilationHeight == 1 && desc.DilationWidth == 1 && source.Channels() * source.Depth() < 16 ) { // Use a convolution kernel of size 3*3 with stride 1 dim3 blockCount; dim3 threadCount; int widthNorm = ( desc.Result.Width() + 7 ) / 8; getCudaTaskGrid3DMinZYX( 1, 1, 1024, blockCount, threadCount, result.ObjectCount() * result.Height(), widthNorm, filter.ObjectCount(), 512 ); hipLaunchKernelGGL(( Conv3x3s1d1Kernel1x8), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( sourceData ), GetRaw( filterData ), freeTermData == 0 ? 
0 : GetRaw( *freeTermData ), GetRaw( resultData ), widthNorm ); return; } if( filter.Height() == 1 && filter.Width() == 1 && desc.StrideHeight == 1 && desc.StrideWidth == 1 && desc.PaddingHeight == 0 && desc.PaddingWidth == 0 ) { // The convolution is a matrix product anyway, without a temporary matrix if( freeTermData != 0 ) { // Fill the output matrix with the free term values SetVectorToMatrixRows( resultData, result.ObjectCount() * result.Height() * result.Width(), filter.ObjectCount(), *freeTermData ); multiplyMatrixByTransposedMatrixAndAdd( sourceData, source.ObjectCount() * result.Height() * result.Width(), filter.ObjectSize(), filter.ObjectSize(), filterData, filter.ObjectCount(), filter.ObjectSize(), resultData, filter.ObjectCount() ); } else { MultiplyMatrixByTransposedMatrix( sourceData, source.ObjectCount() * result.Height() * result.Width(), filter.ObjectSize(), filter.ObjectSize(), filterData, filter.ObjectCount(), filter.ObjectSize(), resultData, filter.ObjectCount(), result.BlobSize() ); } return; } const int tempMatrixWidth = filter.ObjectSize(); const int tempMatrixHeight = result.ObjectCount() * result.ObjectSize() / filter.ObjectCount(); const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( tempMatrixHeight, tempMatrixWidth ); CFloatHandleStackVar tempMatrix( mathEngine(), tempMatrixHeightBatchSize * tempMatrixWidth ); int tempMatrixHeightIndex = 0; while( tempMatrixHeightIndex < tempMatrixHeight ) { int curTempMatrixHeight = min( tempMatrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, source.Depth() * source.Channels() ); hipLaunchKernelGGL(( BuildTempMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( sourceData ), tempMatrixHeightIndex, curTempMatrixHeight, GetRaw( tempMatrix.GetHandle() ) ); MultiplyMatrixByTransposedMatrix( tempMatrix, curTempMatrixHeight, filter.ObjectSize(), filter.ObjectSize(), filterData, filter.ObjectCount(), filter.ObjectSize(), resultData + tempMatrixHeightIndex * filter.ObjectCount(), filter.ObjectCount(), curTempMatrixHeight * filter.ObjectCount() ); tempMatrixHeightIndex += curTempMatrixHeight; } if( freeTermData != 0 ) { // Fill the output with the free term values AddVectorToMatrixRows( 1, resultData, resultData, result.BlobSize() / filter.ObjectCount(), filter.ObjectCount(), *freeTermData ); } } void CCudaMathEngine::BlobConvolutionBackward( const CConvolutionDesc& convDesc, const CConstFloatHandle& outputDiff, const CConstFloatHandle& filter, const CConstFloatHandle* freeTerm, const CFloatHandle& inputDiff ) { SetCudaDevice( device->DeviceNumber ); const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal; const int filterCount = desc.Filter.ObjectCount(); const int filterObjectSize = desc.Filter.ObjectSize(); if( desc.Filter.Height() == 1 && desc.Filter.Width() == 1 && desc.StrideHeight == 1 && desc.StrideWidth == 1 && desc.PaddingHeight == 0 && desc.PaddingWidth == 0 ) { // The convolution backward pass is a matrix product without creating a temporary matrix MultiplyMatrixByMatrix( 1, outputDiff, desc.Result.BlobSize() / filterCount, filterCount, filter, filterObjectSize, inputDiff, desc.Source.BlobSize() ); if( freeTerm != 0 ) { AddVectorToMatrixRows( 1, inputDiff, inputDiff, desc.Source.ObjectCount() * desc.Source.Height() * desc.Source.Width(), desc.Source.Channels() * desc.Source.Depth(), *freeTerm ); } return; } if( freeTerm != 0 ) { // 
Fill the input gradients with the free terms SetVectorToMatrixRows( inputDiff, desc.Source.ObjectCount() * desc.Source.Height() * desc.Source.Width(), desc.Source.Channels() * desc.Source.Depth(), *freeTerm ); } else { VectorFill( inputDiff, 0.f, desc.Source.BlobSize() ); } TBackwardOperationType operation = BOT_AtomicAdd; if( ( desc.Filter.Width() - 1 ) * desc.DilationWidth + 1 <= desc.StrideWidth && ( desc.Filter.Height() - 1 ) * desc.DilationHeight + 1 <= desc.StrideHeight ) { // The filter areas do not intersect, so atomic operations are not needed operation = freeTerm == 0 ? BOT_Set : BOT_Add; } // Get the temporary matrix const int matrixHeight = tempMatrixHeight( desc ); const int matrixWidth = tempMatrixWidth( desc ); const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( matrixHeight, matrixWidth ); CFloatHandleStackVar tempMatrix( *this, tempMatrixHeightBatchSize * matrixWidth ); int tempMatrixHeightIndex = 0; while( tempMatrixHeightIndex < matrixHeight ) { int curTempMatrixHeight = min( matrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize ); MultiplyMatrixByMatrix( 1, outputDiff + tempMatrixHeightIndex * filterCount, curTempMatrixHeight, filterCount, filter, filterObjectSize, tempMatrix, tempMatrix.Size() ); // Get the input gradients from the temporary matrix data dim3 blockCount; dim3 threadCount; int widthNorm = ( matrixWidth + BuildInputFromTempMatrixCombine - 1 ) / BuildInputFromTempMatrixCombine; getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, widthNorm ); hipLaunchKernelGGL(( BuildInputFromTempMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( tempMatrix.GetHandle() ), curTempMatrixHeight, matrixWidth, GetRaw( inputDiff ), operation, widthNorm, tempMatrixHeightIndex ); tempMatrixHeightIndex += curTempMatrixHeight; } } void CCudaMathEngine::BlobConvolutionLearnAdd( const CConvolutionDesc& convDesc, const CConstFloatHandle& input, const CConstFloatHandle& outputDiff, const CFloatHandle& filterDiff, const CFloatHandle* freeTermDiff, bool isFreeTermDiffFromInput ) { SetCudaDevice( device->DeviceNumber ); const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal; if( freeTermDiff != 0 ) { // Get the free term gradient if( !isFreeTermDiffFromInput ) { SumMatrixRowsAdd( 1, *freeTermDiff, outputDiff, desc.Result.BlobSize() / desc.Filter.ObjectCount(), desc.Filter.ObjectCount() ); } else { SumMatrixRowsAdd( 1, *freeTermDiff, input, desc.Source.BlobSize() / desc.Source.Channels(), desc.Source.Channels() ); } } // Build the temporary matrix const int matrixHeight = tempMatrixHeight( desc ); const int matrixWidth = tempMatrixWidth( desc ); const int filterCount = desc.Filter.ObjectCount(); const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( matrixHeight, matrixWidth ); CFloatHandleStackVar tempMatrix( *this, tempMatrixHeightBatchSize * matrixWidth ); int tempMatrixHeightIndex = 0; while( tempMatrixHeightIndex < matrixHeight ) { int curTempMatrixHeight = min( matrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, desc.Source.Depth() * desc.Source.Channels() ); hipLaunchKernelGGL(( BuildTempMatrixKernel), dim3(blockCount), dim3(threadCount), 0, 0, desc, GetRaw( input ), tempMatrixHeightIndex, curTempMatrixHeight, GetRaw( tempMatrix.GetHandle() ) ); // Get the filter gradients by multiplying the temporary matrix and the output gradients 
MultiplyTransposedMatrixByMatrixAndAdd( outputDiff + tempMatrixHeightIndex * filterCount, curTempMatrixHeight, filterCount, filterCount, tempMatrix, matrixWidth, matrixWidth, filterDiff, matrixWidth, desc.Filter.BlobSize() ); tempMatrixHeightIndex += curTempMatrixHeight; } } } // namespace NeoML #endif // NEOML_USE_CUDA
b784908dee90d0b755df78a474de740573b015a0.cu
/* Copyright © 2017-2020 ABBYY Production LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------------------------------------------*/ #include <NeoMathEngine/NeoMathEngineDefs.h> #ifdef NEOML_USE_CUDA #include <CudaMathEngine.h> #include <CudaMathEngineDnnConvs.h> #include <CudaCommon.h> #include <CudaDevice.h> #include <MathEngineCommon.h> #include <MemoryHandleInternal.h> #include <Kernels/CudaDnnConvKernels.h> namespace NeoML { // Temporary matrix height static inline int tempMatrixHeight( const CCudaConvolutionDescInternal& desc ) { return desc.Source.ObjectCount() * desc.Result.Height() * desc.Result.Width(); } // Temporary matrix width static inline int tempMatrixWidth( const CCudaConvolutionDescInternal& desc ) { return desc.Filter.ObjectSize(); } CConvolutionDesc* CCudaMathEngine::InitBlobConvolution( const CBlobDesc& input, int paddingHeight, int paddingWidth, int strideHeight, int strideWidth, int dilationHeight, int dilationWidth, const CBlobDesc& filter, const CBlobDesc& output ) { int totalInputChannels = input.Channels() * input.Depth(); int totalOutputChannels = output.Channels() * output.Depth(); CCudaConvolutionDesc* desc = new CCudaConvolutionDesc(); desc->Internal.Source = input; desc->Internal.Filter = filter; desc->Internal.Result = output; desc->Internal.StrideHeight = strideHeight; desc->Internal.StrideWidth = strideWidth; desc->Internal.PaddingHeight = paddingHeight; desc->Internal.PaddingWidth = paddingWidth; desc->Internal.DilationHeight = dilationHeight; desc->Internal.DilationWidth = dilationWidth; return desc; } void CCudaMathEngine::BlobConvolution( const CConvolutionDesc& convDesc, const CConstFloatHandle& sourceData, const CConstFloatHandle& filterData, const CConstFloatHandle* freeTermData, const CFloatHandle& resultData ) { SetCudaDevice( device->DeviceNumber ); const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal; const CCudaBlobDesc& source = desc.Source; const CCudaBlobDesc& filter = desc.Filter; const CCudaBlobDesc& result = desc.Result; if( filter.Height() == 3 && filter.Width() == 3 && desc.StrideHeight == 1 && desc.StrideWidth == 1 && desc.DilationHeight == 1 && desc.DilationWidth == 1 && source.Channels() * source.Depth() < 16 ) { // Use a convolution kernel of size 3*3 with stride 1 dim3 blockCount; dim3 threadCount; int widthNorm = ( desc.Result.Width() + 7 ) / 8; getCudaTaskGrid3DMinZYX( 1, 1, 1024, blockCount, threadCount, result.ObjectCount() * result.Height(), widthNorm, filter.ObjectCount(), 512 ); Conv3x3s1d1Kernel1x8<<<blockCount, threadCount>>>( desc, GetRaw( sourceData ), GetRaw( filterData ), freeTermData == 0 ? 
0 : GetRaw( *freeTermData ), GetRaw( resultData ), widthNorm ); return; } if( filter.Height() == 1 && filter.Width() == 1 && desc.StrideHeight == 1 && desc.StrideWidth == 1 && desc.PaddingHeight == 0 && desc.PaddingWidth == 0 ) { // The convolution is a matrix product anyway, without a temporary matrix if( freeTermData != 0 ) { // Fill the output matrix with the free term values SetVectorToMatrixRows( resultData, result.ObjectCount() * result.Height() * result.Width(), filter.ObjectCount(), *freeTermData ); multiplyMatrixByTransposedMatrixAndAdd( sourceData, source.ObjectCount() * result.Height() * result.Width(), filter.ObjectSize(), filter.ObjectSize(), filterData, filter.ObjectCount(), filter.ObjectSize(), resultData, filter.ObjectCount() ); } else { MultiplyMatrixByTransposedMatrix( sourceData, source.ObjectCount() * result.Height() * result.Width(), filter.ObjectSize(), filter.ObjectSize(), filterData, filter.ObjectCount(), filter.ObjectSize(), resultData, filter.ObjectCount(), result.BlobSize() ); } return; } const int tempMatrixWidth = filter.ObjectSize(); const int tempMatrixHeight = result.ObjectCount() * result.ObjectSize() / filter.ObjectCount(); const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( tempMatrixHeight, tempMatrixWidth ); CFloatHandleStackVar tempMatrix( mathEngine(), tempMatrixHeightBatchSize * tempMatrixWidth ); int tempMatrixHeightIndex = 0; while( tempMatrixHeightIndex < tempMatrixHeight ) { int curTempMatrixHeight = min( tempMatrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, source.Depth() * source.Channels() ); BuildTempMatrixKernel<<<blockCount, threadCount>>>( desc, GetRaw( sourceData ), tempMatrixHeightIndex, curTempMatrixHeight, GetRaw( tempMatrix.GetHandle() ) ); MultiplyMatrixByTransposedMatrix( tempMatrix, curTempMatrixHeight, filter.ObjectSize(), filter.ObjectSize(), filterData, filter.ObjectCount(), filter.ObjectSize(), resultData + tempMatrixHeightIndex * filter.ObjectCount(), filter.ObjectCount(), curTempMatrixHeight * filter.ObjectCount() ); tempMatrixHeightIndex += curTempMatrixHeight; } if( freeTermData != 0 ) { // Fill the output with the free term values AddVectorToMatrixRows( 1, resultData, resultData, result.BlobSize() / filter.ObjectCount(), filter.ObjectCount(), *freeTermData ); } } void CCudaMathEngine::BlobConvolutionBackward( const CConvolutionDesc& convDesc, const CConstFloatHandle& outputDiff, const CConstFloatHandle& filter, const CConstFloatHandle* freeTerm, const CFloatHandle& inputDiff ) { SetCudaDevice( device->DeviceNumber ); const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal; const int filterCount = desc.Filter.ObjectCount(); const int filterObjectSize = desc.Filter.ObjectSize(); if( desc.Filter.Height() == 1 && desc.Filter.Width() == 1 && desc.StrideHeight == 1 && desc.StrideWidth == 1 && desc.PaddingHeight == 0 && desc.PaddingWidth == 0 ) { // The convolution backward pass is a matrix product without creating a temporary matrix MultiplyMatrixByMatrix( 1, outputDiff, desc.Result.BlobSize() / filterCount, filterCount, filter, filterObjectSize, inputDiff, desc.Source.BlobSize() ); if( freeTerm != 0 ) { AddVectorToMatrixRows( 1, inputDiff, inputDiff, desc.Source.ObjectCount() * desc.Source.Height() * desc.Source.Width(), desc.Source.Channels() * desc.Source.Depth(), *freeTerm ); } return; } if( freeTerm != 0 ) { // Fill the input gradients with the 
free terms SetVectorToMatrixRows( inputDiff, desc.Source.ObjectCount() * desc.Source.Height() * desc.Source.Width(), desc.Source.Channels() * desc.Source.Depth(), *freeTerm ); } else { VectorFill( inputDiff, 0.f, desc.Source.BlobSize() ); } TBackwardOperationType operation = BOT_AtomicAdd; if( ( desc.Filter.Width() - 1 ) * desc.DilationWidth + 1 <= desc.StrideWidth && ( desc.Filter.Height() - 1 ) * desc.DilationHeight + 1 <= desc.StrideHeight ) { // The filter areas do not intersect, so atomic operations are not needed operation = freeTerm == 0 ? BOT_Set : BOT_Add; } // Get the temporary matrix const int matrixHeight = tempMatrixHeight( desc ); const int matrixWidth = tempMatrixWidth( desc ); const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( matrixHeight, matrixWidth ); CFloatHandleStackVar tempMatrix( *this, tempMatrixHeightBatchSize * matrixWidth ); int tempMatrixHeightIndex = 0; while( tempMatrixHeightIndex < matrixHeight ) { int curTempMatrixHeight = min( matrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize ); MultiplyMatrixByMatrix( 1, outputDiff + tempMatrixHeightIndex * filterCount, curTempMatrixHeight, filterCount, filter, filterObjectSize, tempMatrix, tempMatrix.Size() ); // Get the input gradients from the temporary matrix data dim3 blockCount; dim3 threadCount; int widthNorm = ( matrixWidth + BuildInputFromTempMatrixCombine - 1 ) / BuildInputFromTempMatrixCombine; getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, widthNorm ); BuildInputFromTempMatrixKernel<<<blockCount, threadCount>>>( desc, GetRaw( tempMatrix.GetHandle() ), curTempMatrixHeight, matrixWidth, GetRaw( inputDiff ), operation, widthNorm, tempMatrixHeightIndex ); tempMatrixHeightIndex += curTempMatrixHeight; } } void CCudaMathEngine::BlobConvolutionLearnAdd( const CConvolutionDesc& convDesc, const CConstFloatHandle& input, const CConstFloatHandle& outputDiff, const CFloatHandle& filterDiff, const CFloatHandle* freeTermDiff, bool isFreeTermDiffFromInput ) { SetCudaDevice( device->DeviceNumber ); const CCudaConvolutionDescInternal& desc = static_cast<const CCudaConvolutionDesc&>( convDesc ).Internal; if( freeTermDiff != 0 ) { // Get the free term gradient if( !isFreeTermDiffFromInput ) { SumMatrixRowsAdd( 1, *freeTermDiff, outputDiff, desc.Result.BlobSize() / desc.Filter.ObjectCount(), desc.Filter.ObjectCount() ); } else { SumMatrixRowsAdd( 1, *freeTermDiff, input, desc.Source.BlobSize() / desc.Source.Channels(), desc.Source.Channels() ); } } // Build the temporary matrix const int matrixHeight = tempMatrixHeight( desc ); const int matrixWidth = tempMatrixWidth( desc ); const int filterCount = desc.Filter.ObjectCount(); const int tempMatrixHeightBatchSize = getCudaTempMatrixMaxHeight( matrixHeight, matrixWidth ); CFloatHandleStackVar tempMatrix( *this, tempMatrixHeightBatchSize * matrixWidth ); int tempMatrixHeightIndex = 0; while( tempMatrixHeightIndex < matrixHeight ) { int curTempMatrixHeight = min( matrixHeight - tempMatrixHeightIndex, tempMatrixHeightBatchSize ); dim3 blockCount; dim3 threadCount; getCudaTaskGrid2D( blockCount, threadCount, curTempMatrixHeight, desc.Source.Depth() * desc.Source.Channels() ); BuildTempMatrixKernel<<<blockCount, threadCount>>>( desc, GetRaw( input ), tempMatrixHeightIndex, curTempMatrixHeight, GetRaw( tempMatrix.GetHandle() ) ); // Get the filter gradients by multiplying the temporary matrix and the output gradients MultiplyTransposedMatrixByMatrixAndAdd( outputDiff + tempMatrixHeightIndex * filterCount, curTempMatrixHeight, filterCount, 
filterCount, tempMatrix, matrixWidth, matrixWidth, filterDiff, matrixWidth, desc.Filter.BlobSize() ); tempMatrixHeightIndex += curTempMatrixHeight; } } } // namespace NeoML #endif // NEOML_USE_CUDA
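A recurring pattern in the convolution code above is that the im2col-style temporary matrix is never materialized whole: its rows are produced and consumed in batches bounded by the available scratch memory. The sketch below shows only that batching skeleton in plain C++; the sizes and the two commented steps are placeholders, not the NeoML kernels or API.

#include <algorithm>
#include <cstdio>

int main()
{
    const int matrixHeight = 10000;   // rows of the temporary matrix (one per output position)
    const int maxBatchRows = 1536;    // cap chosen from the scratch buffer size (illustrative)
    int rowIndex = 0;
    while (rowIndex < matrixHeight) {
        int curRows = std::min(matrixHeight - rowIndex, maxBatchRows);
        // 1) build rows [rowIndex, rowIndex + curRows) of the temp matrix (BuildTempMatrixKernel above)
        // 2) multiply that slice against the filter matrix (MultiplyMatrixByTransposedMatrix above)
        printf("processing rows %d..%d\n", rowIndex, rowIndex + curRows - 1);
        rowIndex += curRows;
    }
    return 0;
}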
130fbca59587d798244d463f1104189d4bdb43eb.hip
// !!! This is a file automatically generated by hipify!!! #include "FixPressureBerendsen.h" #include "State.h" #include "Mod.h" namespace py = boost::python; const std::string BerendsenType = "Berendsen"; using namespace MD_ENGINE; FixPressureBerendsen::FixPressureBerendsen(boost::shared_ptr<State> state_, std::string handle_, double pressure_, double period_, int applyEvery_) : Interpolator(pressure_), Fix(state_, handle_, "all", BerendsenType, false, true, false, applyEvery_), pressureComputer(state, "scalar"), period(period_) { bulkModulus = 10; //lammps maxDilation = 0.00001; requiresPerAtomVirials=true; }; bool FixPressureBerendsen::prepareFinal() { turnBeginRun = state->runInit; turnFinishRun = state->runInit + state->runningFor; pressureComputer.prepareForRun(); // get rigid bodies, if any, in simulation state->findRigidBodies(); prepared = true; return prepared; } bool FixPressureBerendsen::stepFinal() { double dilationUpper = 1.0 + maxDilation; double dilationLower = 1.0 - maxDilation; pressureComputer.computeScalar_GPU(true, 1); computeCurrentVal(state->turn); double target = getCurrentVal(); hipDeviceSynchronize(); pressureComputer.computeScalar_CPU(); double pressure = pressureComputer.pressureScalar; double dilation = ::pow(1.0 - state->dt/period * (target - pressure) / bulkModulus, 1.0/3.0); if (dilation < dilationLower) { dilation = dilationLower; } else if (dilation > dilationUpper) { dilation = dilationUpper; } Mod::scaleSystem(state, make_real3(dilation, dilation, dilation)); CUT_CHECK_ERROR("Mod::scaleSystem failed!"); return true; } bool FixPressureBerendsen::postRun() { finished = true; prepared = false; return true; } void FixPressureBerendsen::setParameters(double maxDilation_) { maxDilation = maxDilation_; } void export_FixPressureBerendsen() { py::class_<FixPressureBerendsen, boost::shared_ptr<FixPressureBerendsen>, py::bases<Fix> > ( "FixPressureBerendsen", py::init<boost::shared_ptr<State>, std::string, double, double, int>( py::args("state", "handle", "pressure", "period", "applyEvery") ) ) .def("setParameters", &FixPressureBerendsen::setParameters, (py::arg("maxDilation")=-1)) .def_readwrite("bulkModulus",&FixPressureBerendsen::bulkModulus) .def_readonly("pressureComputer", &FixPressureBerendsen::pressureComputer) ; }
130fbca59587d798244d463f1104189d4bdb43eb.cu
#include "FixPressureBerendsen.h" #include "State.h" #include "Mod.h" namespace py = boost::python; const std::string BerendsenType = "Berendsen"; using namespace MD_ENGINE; FixPressureBerendsen::FixPressureBerendsen(boost::shared_ptr<State> state_, std::string handle_, double pressure_, double period_, int applyEvery_) : Interpolator(pressure_), Fix(state_, handle_, "all", BerendsenType, false, true, false, applyEvery_), pressureComputer(state, "scalar"), period(period_) { bulkModulus = 10; //lammps maxDilation = 0.00001; requiresPerAtomVirials=true; }; bool FixPressureBerendsen::prepareFinal() { turnBeginRun = state->runInit; turnFinishRun = state->runInit + state->runningFor; pressureComputer.prepareForRun(); // get rigid bodies, if any, in simulation state->findRigidBodies(); prepared = true; return prepared; } bool FixPressureBerendsen::stepFinal() { double dilationUpper = 1.0 + maxDilation; double dilationLower = 1.0 - maxDilation; pressureComputer.computeScalar_GPU(true, 1); computeCurrentVal(state->turn); double target = getCurrentVal(); cudaDeviceSynchronize(); pressureComputer.computeScalar_CPU(); double pressure = pressureComputer.pressureScalar; double dilation = std::pow(1.0 - state->dt/period * (target - pressure) / bulkModulus, 1.0/3.0); if (dilation < dilationLower) { dilation = dilationLower; } else if (dilation > dilationUpper) { dilation = dilationUpper; } Mod::scaleSystem(state, make_real3(dilation, dilation, dilation)); CUT_CHECK_ERROR("Mod::scaleSystem failed!"); return true; } bool FixPressureBerendsen::postRun() { finished = true; prepared = false; return true; } void FixPressureBerendsen::setParameters(double maxDilation_) { maxDilation = maxDilation_; } void export_FixPressureBerendsen() { py::class_<FixPressureBerendsen, boost::shared_ptr<FixPressureBerendsen>, py::bases<Fix> > ( "FixPressureBerendsen", py::init<boost::shared_ptr<State>, std::string, double, double, int>( py::args("state", "handle", "pressure", "period", "applyEvery") ) ) .def("setParameters", &FixPressureBerendsen::setParameters, (py::arg("maxDilation")=-1)) .def_readwrite("bulkModulus",&FixPressureBerendsen::bulkModulus) .def_readonly("pressureComputer", &FixPressureBerendsen::pressureComputer) ; }
548039b96c5c6deeeb387e73307043690ed200eb.hip
// !!! This is a file automatically generated by hipify!!!
//
// Created by 李亘杰 on 2021/3/1.
//
#include <string>
#include "sharedMemoryTest.cuh"

int main(){
    hipSharedMemConfig *pconfig;
    hipDeviceGetSharedMemConfig(pconfig);
    printf("%d\n",pconfig);
    hipSharedMemConfig config = hipSharedMemBankSizeFourByte;
    printf("config %d\n",config);
    hipDeviceSetSharedMemConfig(config);
    hipDeviceGetSharedMemConfig(pconfig);
//    hipError_t errorCode = hipDeviceSetCacheConfig(hipFuncCachePreferEqual);
    printf("%d\n",pconfig);
}
548039b96c5c6deeeb387e73307043690ed200eb.cu
//
// Created by 李亘杰 on 2021/3/1.
//
#include <string>
#include "sharedMemoryTest.cuh"

int main(){
    cudaSharedMemConfig *pconfig;
    cudaDeviceGetSharedMemConfig(pconfig);
    printf("%d\n",pconfig);
    cudaSharedMemConfig config = cudaSharedMemBankSizeFourByte;
    printf("config %d\n",config);
    cudaDeviceSetSharedMemConfig(config);
    cudaDeviceGetSharedMemConfig(pconfig);
//    cudaError_t errorCode = cudaDeviceSetCacheConfig(cudaFuncCachePreferEqual);
    printf("%d\n",pconfig);
}
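Both files in this pair pass an uninitialised cudaSharedMemConfig*/hipSharedMemConfig* to the query call and print the pointer rather than the value. The sketch below shows a more conventional use of the same runtime calls (a local variable passed by address, return codes checked); it is a separate illustrative snippet, not a patch to the files above.

#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    cudaSharedMemConfig config;
    cudaError_t err = cudaDeviceGetSharedMemConfig(&config);
    if (err != cudaSuccess) {
        printf("query failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("current bank size config: %d\n", (int)config);

    err = cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte);
    printf("set four-byte banks: %s\n", cudaGetErrorString(err));

    cudaDeviceGetSharedMemConfig(&config);
    printf("config after set: %d\n", (int)config);
    return 0;
}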
2b4eaf6340366e1ddc1846de58c7116e1f88646e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2014-2015 Isis Innovation Limited and the authors of InfiniTAM #include "ITMMeshingEngine_CUDA.h" #include "../../DeviceAgnostic/ITMMeshingEngine.h" #include "ITMCUDAUtils.h" #include "../../../../ORUtils/CUDADefines.h" template<class TVoxel> __global__ void meshScene_device(ITMMesh::Triangle *triangles, unsigned int *noTriangles_device, float factor, int noTotalEntries, int noMaxTriangles, const Vector4s *visibleBlockGlobalPos, const TVoxel *localVBA, const ITMHashEntry *hashTable); __global__ void findAllocateBlocks(Vector4s *visibleBlockGlobalPos, const ITMHashEntry *hashTable, int noTotalEntries); using namespace ITMLib::Engine; template<class TVoxel> ITMMeshingEngine_CUDA<TVoxel,ITMVoxelBlockHash>::ITMMeshingEngine_CUDA(void) { ITMSafeCall(hipMalloc((void**)&visibleBlockGlobalPos_device, SDF_LOCAL_BLOCK_NUM * sizeof(Vector4s))); ITMSafeCall(hipMalloc((void**)&noTriangles_device, sizeof(unsigned int))); } template<class TVoxel> ITMMeshingEngine_CUDA<TVoxel,ITMVoxelBlockHash>::~ITMMeshingEngine_CUDA(void) { ITMSafeCall(hipFree(visibleBlockGlobalPos_device)); ITMSafeCall(hipFree(noTriangles_device)); } template<class TVoxel> void ITMMeshingEngine_CUDA<TVoxel, ITMVoxelBlockHash>::MeshScene(ITMMesh *mesh, const ITMScene<TVoxel, ITMVoxelBlockHash> *scene) { ITMMesh::Triangle *triangles = mesh->triangles->GetData(MEMORYDEVICE_CUDA); const TVoxel *localVBA = scene->localVBA.GetVoxelBlocks(); const ITMHashEntry *hashTable = scene->index.GetEntries(); int noMaxTriangles = mesh->noMaxTriangles, noTotalEntries = scene->index.noTotalEntries; float factor = scene->sceneParams->voxelSize / (float)SDF_BLOCK_SIZE; ITMSafeCall(hipMemset(noTriangles_device, 0, sizeof(unsigned int))); ITMSafeCall(hipMemset(visibleBlockGlobalPos_device, 0, sizeof(Vector4s) * SDF_LOCAL_BLOCK_NUM)); { // identify used voxel blocks dim3 cudaBlockSize(256); dim3 gridSize((int)ceil((float)noTotalEntries / (float)cudaBlockSize.x)); findAllocateBlocks << <gridSize, cudaBlockSize >> >(visibleBlockGlobalPos_device, hashTable, noTotalEntries); } { // mesh used voxel blocks dim3 cudaBlockSize(SDF_BLOCK_SIZE, SDF_BLOCK_SIZE, SDF_BLOCK_SIZE); dim3 gridSize(SDF_LOCAL_BLOCK_NUM / 16, 16); meshScene_device<TVoxel> << <gridSize, cudaBlockSize >> >(triangles, noTriangles_device, factor, noTotalEntries, noMaxTriangles, visibleBlockGlobalPos_device, localVBA, hashTable); ITMSafeCall(hipMemcpy(&mesh->noTotalTriangles, noTriangles_device, sizeof(unsigned int), hipMemcpyDeviceToHost)); } } template<class TVoxel> ITMMeshingEngine_CUDA<TVoxel,ITMPlainVoxelArray>::ITMMeshingEngine_CUDA(void) {} template<class TVoxel> ITMMeshingEngine_CUDA<TVoxel,ITMPlainVoxelArray>::~ITMMeshingEngine_CUDA(void) {} template<class TVoxel> void ITMMeshingEngine_CUDA<TVoxel, ITMPlainVoxelArray>::MeshScene(ITMMesh *mesh, const ITMScene<TVoxel, ITMPlainVoxelArray> *scene) {} __global__ void findAllocateBlocks(Vector4s *visibleBlockGlobalPos, const ITMHashEntry *hashTable, int noTotalEntries) { int entryId = threadIdx.x + blockIdx.x * blockDim.x; if (entryId > noTotalEntries - 1) return; const ITMHashEntry &currentHashEntry = hashTable[entryId]; if (currentHashEntry.ptr >= 0) visibleBlockGlobalPos[currentHashEntry.ptr] = Vector4s(currentHashEntry.pos.x, currentHashEntry.pos.y, currentHashEntry.pos.z, 1); } template<class TVoxel> __global__ void meshScene_device(ITMMesh::Triangle *triangles, unsigned int *noTriangles_device, float factor, int noTotalEntries, int 
noMaxTriangles, const Vector4s *visibleBlockGlobalPos, const TVoxel *localVBA, const ITMHashEntry *hashTable) { const Vector4s globalPos_4s = visibleBlockGlobalPos[blockIdx.x + gridDim.x * blockIdx.y]; if (globalPos_4s.w == 0) return; Vector3i globalPos = Vector3i(globalPos_4s.x, globalPos_4s.y, globalPos_4s.z) * SDF_BLOCK_SIZE; Vector3f vertList[12]; int cubeIndex = buildVertList(vertList, globalPos, Vector3i(threadIdx.x, threadIdx.y, threadIdx.z), localVBA, hashTable); if (cubeIndex < 0) return; for (int i = 0; triangleTable[cubeIndex][i] != -1; i += 3) { int triangleId = atomicAdd(noTriangles_device, 1); if (triangleId < noMaxTriangles - 1) { triangles[triangleId].p0 = vertList[triangleTable[cubeIndex][i]] * factor; triangles[triangleId].p1 = vertList[triangleTable[cubeIndex][i + 1]] * factor; triangles[triangleId].p2 = vertList[triangleTable[cubeIndex][i + 2]] * factor; } } } template class ITMLib::Engine::ITMMeshingEngine_CUDA<ITMVoxel, ITMVoxelIndex>;
2b4eaf6340366e1ddc1846de58c7116e1f88646e.cu
// Copyright 2014-2015 Isis Innovation Limited and the authors of InfiniTAM #include "ITMMeshingEngine_CUDA.h" #include "../../DeviceAgnostic/ITMMeshingEngine.h" #include "ITMCUDAUtils.h" #include "../../../../ORUtils/CUDADefines.h" template<class TVoxel> __global__ void meshScene_device(ITMMesh::Triangle *triangles, unsigned int *noTriangles_device, float factor, int noTotalEntries, int noMaxTriangles, const Vector4s *visibleBlockGlobalPos, const TVoxel *localVBA, const ITMHashEntry *hashTable); __global__ void findAllocateBlocks(Vector4s *visibleBlockGlobalPos, const ITMHashEntry *hashTable, int noTotalEntries); using namespace ITMLib::Engine; template<class TVoxel> ITMMeshingEngine_CUDA<TVoxel,ITMVoxelBlockHash>::ITMMeshingEngine_CUDA(void) { ITMSafeCall(cudaMalloc((void**)&visibleBlockGlobalPos_device, SDF_LOCAL_BLOCK_NUM * sizeof(Vector4s))); ITMSafeCall(cudaMalloc((void**)&noTriangles_device, sizeof(unsigned int))); } template<class TVoxel> ITMMeshingEngine_CUDA<TVoxel,ITMVoxelBlockHash>::~ITMMeshingEngine_CUDA(void) { ITMSafeCall(cudaFree(visibleBlockGlobalPos_device)); ITMSafeCall(cudaFree(noTriangles_device)); } template<class TVoxel> void ITMMeshingEngine_CUDA<TVoxel, ITMVoxelBlockHash>::MeshScene(ITMMesh *mesh, const ITMScene<TVoxel, ITMVoxelBlockHash> *scene) { ITMMesh::Triangle *triangles = mesh->triangles->GetData(MEMORYDEVICE_CUDA); const TVoxel *localVBA = scene->localVBA.GetVoxelBlocks(); const ITMHashEntry *hashTable = scene->index.GetEntries(); int noMaxTriangles = mesh->noMaxTriangles, noTotalEntries = scene->index.noTotalEntries; float factor = scene->sceneParams->voxelSize / (float)SDF_BLOCK_SIZE; ITMSafeCall(cudaMemset(noTriangles_device, 0, sizeof(unsigned int))); ITMSafeCall(cudaMemset(visibleBlockGlobalPos_device, 0, sizeof(Vector4s) * SDF_LOCAL_BLOCK_NUM)); { // identify used voxel blocks dim3 cudaBlockSize(256); dim3 gridSize((int)ceil((float)noTotalEntries / (float)cudaBlockSize.x)); findAllocateBlocks << <gridSize, cudaBlockSize >> >(visibleBlockGlobalPos_device, hashTable, noTotalEntries); } { // mesh used voxel blocks dim3 cudaBlockSize(SDF_BLOCK_SIZE, SDF_BLOCK_SIZE, SDF_BLOCK_SIZE); dim3 gridSize(SDF_LOCAL_BLOCK_NUM / 16, 16); meshScene_device<TVoxel> << <gridSize, cudaBlockSize >> >(triangles, noTriangles_device, factor, noTotalEntries, noMaxTriangles, visibleBlockGlobalPos_device, localVBA, hashTable); ITMSafeCall(cudaMemcpy(&mesh->noTotalTriangles, noTriangles_device, sizeof(unsigned int), cudaMemcpyDeviceToHost)); } } template<class TVoxel> ITMMeshingEngine_CUDA<TVoxel,ITMPlainVoxelArray>::ITMMeshingEngine_CUDA(void) {} template<class TVoxel> ITMMeshingEngine_CUDA<TVoxel,ITMPlainVoxelArray>::~ITMMeshingEngine_CUDA(void) {} template<class TVoxel> void ITMMeshingEngine_CUDA<TVoxel, ITMPlainVoxelArray>::MeshScene(ITMMesh *mesh, const ITMScene<TVoxel, ITMPlainVoxelArray> *scene) {} __global__ void findAllocateBlocks(Vector4s *visibleBlockGlobalPos, const ITMHashEntry *hashTable, int noTotalEntries) { int entryId = threadIdx.x + blockIdx.x * blockDim.x; if (entryId > noTotalEntries - 1) return; const ITMHashEntry &currentHashEntry = hashTable[entryId]; if (currentHashEntry.ptr >= 0) visibleBlockGlobalPos[currentHashEntry.ptr] = Vector4s(currentHashEntry.pos.x, currentHashEntry.pos.y, currentHashEntry.pos.z, 1); } template<class TVoxel> __global__ void meshScene_device(ITMMesh::Triangle *triangles, unsigned int *noTriangles_device, float factor, int noTotalEntries, int noMaxTriangles, const Vector4s *visibleBlockGlobalPos, const TVoxel *localVBA, const 
ITMHashEntry *hashTable) { const Vector4s globalPos_4s = visibleBlockGlobalPos[blockIdx.x + gridDim.x * blockIdx.y]; if (globalPos_4s.w == 0) return; Vector3i globalPos = Vector3i(globalPos_4s.x, globalPos_4s.y, globalPos_4s.z) * SDF_BLOCK_SIZE; Vector3f vertList[12]; int cubeIndex = buildVertList(vertList, globalPos, Vector3i(threadIdx.x, threadIdx.y, threadIdx.z), localVBA, hashTable); if (cubeIndex < 0) return; for (int i = 0; triangleTable[cubeIndex][i] != -1; i += 3) { int triangleId = atomicAdd(noTriangles_device, 1); if (triangleId < noMaxTriangles - 1) { triangles[triangleId].p0 = vertList[triangleTable[cubeIndex][i]] * factor; triangles[triangleId].p1 = vertList[triangleTable[cubeIndex][i + 1]] * factor; triangles[triangleId].p2 = vertList[triangleTable[cubeIndex][i + 2]] * factor; } } } template class ITMLib::Engine::ITMMeshingEngine_CUDA<ITMVoxel, ITMVoxelIndex>;
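meshScene_device above appends triangles by reserving output slots with atomicAdd on a global counter and bounds-checking against the buffer capacity before writing. The minimal kernel below reproduces just that append pattern with an invented predicate and sizes; it is not the InfiniTAM code.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void append_evens(const int *in, int n, int *out, int capacity, unsigned int *count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n || (in[i] & 1)) return;
    // Reserve a slot; the counter may end up larger than capacity, so the write is guarded.
    unsigned int slot = atomicAdd(count, 1u);
    if (slot < (unsigned int)capacity) out[slot] = in[i];
}

int main()
{
    const int n = 1 << 10, capacity = 256;
    int *d_in, *d_out; unsigned int *d_count;
    cudaMalloc((void**)&d_in, n * sizeof(int));
    cudaMalloc((void**)&d_out, capacity * sizeof(int));
    cudaMalloc((void**)&d_count, sizeof(unsigned int));
    cudaMemset(d_in, 0, n * sizeof(int));          // all zeros are "even", so every thread appends
    cudaMemset(d_count, 0, sizeof(unsigned int));

    append_evens<<<(n + 255) / 256, 256>>>(d_in, n, d_out, capacity, d_count);

    unsigned int h_count = 0;
    cudaMemcpy(&h_count, d_count, sizeof(h_count), cudaMemcpyDeviceToHost);
    printf("reserved %u slots (capacity %d)\n", h_count, capacity);

    cudaFree(d_in); cudaFree(d_out); cudaFree(d_count);
    return 0;
}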
ae01b0cb7b8414ca82520c7c1782cf211fb28e39.hip
// !!! This is a file automatically generated by hipify!!! /* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #define GL_GLEXT_PROTOTYPES #include <hip/hip_runtime.h> #include <GL/glut.h> #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> #define rnd(x) (x*rand() / RAND_MAX) #define SPEED 1.0f #define MAX_TEMP 1.0f #define MIN_TEMP 0.0001f #define INF 2e10f const int window_width = 1024; const int window_height = 1024; //The pixel buffer object id that is used to address the pixel buffer that //is created on the GPU GLuint gl_PBO; //The cuda graphics resource that is used to link the OpenGL pixel buffer //to CUDA cudaGraphicsResource *cuda_pbo_resource; // the pointer used to store the GPU address to the pixel buffer object // to give to the kernel for computation texture<float,2> texConstSrc; texture<float,2> texIn; texture<float,2> texOut; float *dev_inSrc; float *dev_outSrc; float *dev_constSrc; uchar4 *d_dst = NULL; dim3 blockSize; dim3 gridSize; __global__ void copy_const_kernel( float *iptr){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float c = tex2D(texConstSrc,x,y); if(c != 0){ iptr[offset] = c; } } __global__ void transferHeat(float * out, bool dstOut){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float t, l, c, r, b, tl, tr, bl, br; if(dstOut){ t = tex2D(texIn,x,y-1); l = tex2D(texIn,x-1, y); c = tex2D(texIn,x,y); r = tex2D(texIn,x+1, y); b = tex2D(texIn,x,y+1); tl = tex2D(texIn, x-1, y-1); tr = tex2D(texIn, x+1, y-1); bl = tex2D(texIn, x-1, y+1); br = tex2D(texIn, x+1, y+1); }else{ t = tex2D(texOut,x,y-1); l = tex2D(texOut,x-1, y); c = tex2D(texOut,x,y); r = tex2D(texOut,x+1, y); b = tex2D(texOut,x,y+1); tl = tex2D(texOut, x-1, y-1); tr = tex2D(texOut, x+1, y-1); bl = tex2D(texOut, x-1, y+1); br = tex2D(texOut, x+1, y+1); } out[offset] = c + ((t + b + l + r + tl + tr + bl + br)/8 - c); } __global__ void float_to_color(uchar4 * pixels, float* in){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float num = in[offset]; pixels[offset].x = (int)(num*255); pixels[offset].y = (int)(0); pixels[offset].z = (int)((MAX_TEMP-num) * 255); pixels[offset].w = 255; } void display(void){ hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); glFlush(); hipGraphicsMapResources(1, &cuda_pbo_resource, NULL); hipEventRecord(start, 0); volatile bool dstOut = false; for(int i = 0; i < 200; i++){ float *in, *out; if(dstOut){ in = dev_inSrc; out = dev_outSrc; } else { out = dev_inSrc; in = dev_outSrc; } hipLaunchKernelGGL(( copy_const_kernel), dim3(gridSize), dim3(blockSize), 0, 0, in); hipLaunchKernelGGL(( transferHeat), dim3(gridSize), dim3(blockSize), 0, 0, out, dstOut); dstOut = !dstOut; } hipLaunchKernelGGL(( float_to_color), dim3(gridSize), dim3(blockSize), 0, 0, d_dst, dev_inSrc); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipGraphicsUnmapResources(1, &cuda_pbo_resource, 0); glDrawPixels(window_width, window_height, 
GL_RGBA, GL_UNSIGNED_BYTE, 0); glutSwapBuffers(); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); char title[20]; sprintf(title,"time: %3.2f", elapsedTime/200); glutSetWindowTitle(title); hipEventDestroy(start); hipEventDestroy(stop); } void init(){ blockSize.x = 16; blockSize.y = 16; blockSize.z = 1; gridSize.x = window_width/16; gridSize.y = window_height/16; gridSize.z = 1; glGenBuffers(1, &gl_PBO); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, gl_PBO); glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, window_width*window_height*sizeof(uchar4), NULL, GL_DYNAMIC_DRAW_ARB); hipGraphicsGLRegisterBuffer(&cuda_pbo_resource, gl_PBO, hipGraphicsMapFlagsNone); hipGraphicsMapResources(1, &cuda_pbo_resource, NULL); size_t num_bytes; hipGraphicsResourceGetMappedPointer((void**)&d_dst, &num_bytes, cuda_pbo_resource); hipGraphicsUnmapResources(1, &cuda_pbo_resource, 0); hipMalloc((void**)&dev_inSrc, window_width*window_height*sizeof(float)); hipMalloc((void**)&dev_outSrc, window_width*window_height*sizeof(float)); hipMalloc((void**)&dev_constSrc, window_width*window_height*sizeof(float)); hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); hipBindTexture2D(NULL, texConstSrc, dev_constSrc, desc, window_width, window_height, sizeof(float)*window_width); hipBindTexture2D(NULL, texIn, dev_inSrc, desc, window_width, window_height, sizeof(float)*window_width); hipBindTexture2D(NULL, texOut, dev_outSrc, desc, window_width, window_height, sizeof(float)*window_width); float *temp = (float*)malloc(window_width*window_height*sizeof(float)); for(int i = 0; i < window_width*window_height; i++){ temp[i] = 0; int x = i % window_width; int y = i / window_height; if((x > 300) && (x < 600) && (y > 310) && (y < 601)){ temp[i] = MAX_TEMP; } } temp[window_width*100+100] = (MAX_TEMP + MIN_TEMP) / 2; temp[window_width*700+100] = MIN_TEMP; temp[window_width*300+300] = MIN_TEMP; temp[window_width*200+700] = MIN_TEMP; for(int y=800; y < 900; y++){ for(int x=400; x<500; x++){ temp[x+y*window_width] = MIN_TEMP; } } hipMemcpy(dev_constSrc, temp, window_width*window_height*sizeof(float), hipMemcpyHostToDevice); for(int i = 0; i < window_width*window_height; i++){ temp[i] = MIN_TEMP; } hipMemcpy(dev_inSrc, temp, window_width*window_height*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_outSrc, dev_inSrc, window_width*window_height*sizeof(float), hipMemcpyDeviceToDevice); free(temp); } void deInit(){ hipGraphicsUnmapResources(1, &cuda_pbo_resource, 0); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0); glDeleteBuffers(1, &gl_PBO); hipUnbindTexture(texIn); hipUnbindTexture(texOut); hipUnbindTexture(texConstSrc); hipFree(dev_inSrc); hipFree(dev_outSrc); hipFree(dev_constSrc); } void keyboard(unsigned char key, int x, int y){ switch(key){ case ' ':; glutPostRedisplay(); break; } } void idle(void){ glutPostRedisplay(); } int main(int argc, char** argv) { hipDeviceProp_t prop; int dev; memset(&prop, 0, sizeof(hipDeviceProp_t)); prop.major = 1; prop.minor = 0; hipChooseDevice(&dev, &prop); hipGLSetGLDevice(dev); glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Ray Tracing Renderer"); glutIdleFunc(idle); glutDisplayFunc(display); glutKeyboardFunc(keyboard); init(); glutMainLoop(); deInit(); return 0; }
ae01b0cb7b8414ca82520c7c1782cf211fb28e39.cu
/* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #define GL_GLEXT_PROTOTYPES #include <cuda.h> #include <GL/glut.h> #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <cuda_gl_interop.h> #define rnd(x) (x*rand() / RAND_MAX) #define SPEED 1.0f #define MAX_TEMP 1.0f #define MIN_TEMP 0.0001f #define INF 2e10f const int window_width = 1024; const int window_height = 1024; //The pixel buffer object id that is used to address the pixel buffer that //is created on the GPU GLuint gl_PBO; //The cuda graphics resource that is used to link the OpenGL pixel buffer //to CUDA cudaGraphicsResource *cuda_pbo_resource; // the pointer used to store the GPU address to the pixel buffer object // to give to the kernel for computation texture<float,2> texConstSrc; texture<float,2> texIn; texture<float,2> texOut; float *dev_inSrc; float *dev_outSrc; float *dev_constSrc; uchar4 *d_dst = NULL; dim3 blockSize; dim3 gridSize; __global__ void copy_const_kernel( float *iptr){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float c = tex2D(texConstSrc,x,y); if(c != 0){ iptr[offset] = c; } } __global__ void transferHeat(float * out, bool dstOut){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float t, l, c, r, b, tl, tr, bl, br; if(dstOut){ t = tex2D(texIn,x,y-1); l = tex2D(texIn,x-1, y); c = tex2D(texIn,x,y); r = tex2D(texIn,x+1, y); b = tex2D(texIn,x,y+1); tl = tex2D(texIn, x-1, y-1); tr = tex2D(texIn, x+1, y-1); bl = tex2D(texIn, x-1, y+1); br = tex2D(texIn, x+1, y+1); }else{ t = tex2D(texOut,x,y-1); l = tex2D(texOut,x-1, y); c = tex2D(texOut,x,y); r = tex2D(texOut,x+1, y); b = tex2D(texOut,x,y+1); tl = tex2D(texOut, x-1, y-1); tr = tex2D(texOut, x+1, y-1); bl = tex2D(texOut, x-1, y+1); br = tex2D(texOut, x+1, y+1); } out[offset] = c + ((t + b + l + r + tl + tr + bl + br)/8 - c); } __global__ void float_to_color(uchar4 * pixels, float* in){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float num = in[offset]; pixels[offset].x = (int)(num*255); pixels[offset].y = (int)(0); pixels[offset].z = (int)((MAX_TEMP-num) * 255); pixels[offset].w = 255; } void display(void){ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); glFlush(); cudaGraphicsMapResources(1, &cuda_pbo_resource, NULL); cudaEventRecord(start, 0); volatile bool dstOut = false; for(int i = 0; i < 200; i++){ float *in, *out; if(dstOut){ in = dev_inSrc; out = dev_outSrc; } else { out = dev_inSrc; in = dev_outSrc; } copy_const_kernel<<<gridSize, blockSize>>>(in); transferHeat<<<gridSize, blockSize>>>(out, dstOut); dstOut = !dstOut; } float_to_color<<<gridSize, blockSize>>>(d_dst, dev_inSrc); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaGraphicsUnmapResources(1, &cuda_pbo_resource, 0); glDrawPixels(window_width, window_height, GL_RGBA, GL_UNSIGNED_BYTE, 0); glutSwapBuffers(); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); char title[20]; sprintf(title,"time: %3.2f", 
elapsedTime/200); glutSetWindowTitle(title); cudaEventDestroy(start); cudaEventDestroy(stop); } void init(){ blockSize.x = 16; blockSize.y = 16; blockSize.z = 1; gridSize.x = window_width/16; gridSize.y = window_height/16; gridSize.z = 1; glGenBuffers(1, &gl_PBO); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, gl_PBO); glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, window_width*window_height*sizeof(uchar4), NULL, GL_DYNAMIC_DRAW_ARB); cudaGraphicsGLRegisterBuffer(&cuda_pbo_resource, gl_PBO, cudaGraphicsMapFlagsNone); cudaGraphicsMapResources(1, &cuda_pbo_resource, NULL); size_t num_bytes; cudaGraphicsResourceGetMappedPointer((void**)&d_dst, &num_bytes, cuda_pbo_resource); cudaGraphicsUnmapResources(1, &cuda_pbo_resource, 0); cudaMalloc((void**)&dev_inSrc, window_width*window_height*sizeof(float)); cudaMalloc((void**)&dev_outSrc, window_width*window_height*sizeof(float)); cudaMalloc((void**)&dev_constSrc, window_width*window_height*sizeof(float)); cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(NULL, texConstSrc, dev_constSrc, desc, window_width, window_height, sizeof(float)*window_width); cudaBindTexture2D(NULL, texIn, dev_inSrc, desc, window_width, window_height, sizeof(float)*window_width); cudaBindTexture2D(NULL, texOut, dev_outSrc, desc, window_width, window_height, sizeof(float)*window_width); float *temp = (float*)malloc(window_width*window_height*sizeof(float)); for(int i = 0; i < window_width*window_height; i++){ temp[i] = 0; int x = i % window_width; int y = i / window_height; if((x > 300) && (x < 600) && (y > 310) && (y < 601)){ temp[i] = MAX_TEMP; } } temp[window_width*100+100] = (MAX_TEMP + MIN_TEMP) / 2; temp[window_width*700+100] = MIN_TEMP; temp[window_width*300+300] = MIN_TEMP; temp[window_width*200+700] = MIN_TEMP; for(int y=800; y < 900; y++){ for(int x=400; x<500; x++){ temp[x+y*window_width] = MIN_TEMP; } } cudaMemcpy(dev_constSrc, temp, window_width*window_height*sizeof(float), cudaMemcpyHostToDevice); for(int i = 0; i < window_width*window_height; i++){ temp[i] = MIN_TEMP; } cudaMemcpy(dev_inSrc, temp, window_width*window_height*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_outSrc, dev_inSrc, window_width*window_height*sizeof(float), cudaMemcpyDeviceToDevice); free(temp); } void deInit(){ cudaGraphicsUnmapResources(1, &cuda_pbo_resource, 0); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0); glDeleteBuffers(1, &gl_PBO); cudaUnbindTexture(texIn); cudaUnbindTexture(texOut); cudaUnbindTexture(texConstSrc); cudaFree(dev_inSrc); cudaFree(dev_outSrc); cudaFree(dev_constSrc); } void keyboard(unsigned char key, int x, int y){ switch(key){ case ' ':; glutPostRedisplay(); break; } } void idle(void){ glutPostRedisplay(); } int main(int argc, char** argv) { cudaDeviceProp prop; int dev; memset(&prop, 0, sizeof(cudaDeviceProp)); prop.major = 1; prop.minor = 0; cudaChooseDevice(&dev, &prop); cudaGLSetGLDevice(dev); glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Ray Tracing Renderer"); glutIdleFunc(idle); glutDisplayFunc(display); glutKeyboardFunc(keyboard); init(); glutMainLoop(); deInit(); return 0; }
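The display() loop in this pair advances the simulation by ping-ponging between two device buffers, flipping a dstOut flag each iteration instead of copying results back. The stand-alone sketch below shows only that swapping pattern with a trivial stand-in kernel; the buffer names, sizes, and iteration count are illustrative.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void add_one(const float *in, float *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i] + 1.0f;
}

int main()
{
    const int n = 1 << 16;
    float *bufA, *bufB;
    cudaMalloc((void**)&bufA, n * sizeof(float));
    cudaMalloc((void**)&bufB, n * sizeof(float));
    cudaMemset(bufA, 0, n * sizeof(float));

    bool dstOut = true;                        // same flag-flipping idea as in display()
    for (int i = 0; i < 200; ++i) {
        const float *in = dstOut ? bufA : bufB;
        float *out      = dstOut ? bufB : bufA;
        add_one<<<(n + 255) / 256, 256>>>(in, out, n);
        dstOut = !dstOut;
    }

    float first = 0.0f;
    cudaMemcpy(&first, bufA, sizeof(float), cudaMemcpyDeviceToHost);
    printf("after 200 iterations, element 0 = %.0f\n", first);   // 200 increments land in bufA
    cudaFree(bufA); cudaFree(bufB);
    return 0;
}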
c6dc190d2ac30b052386cff7e46a7ae0cbbd7b8d.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.

// TF-specific helpers.
#define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { hipError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == hipSuccess, errors::Internal("Cuda error: ", hipGetErrorName(err), "[", #CUDA_CALL, ";]")); } while (0)
#define OP_CHECK_GL_ERROR(CTX, GL_CALL) do { GL_CALL; GLenum err = glGetError(); OP_REQUIRES(CTX, err == GL_NO_ERROR, errors::Internal("OpenGL error: ", getGLErrorString(err), "[", #GL_CALL, ";]")); } while (0)

// Cuda kernels and CPP all together. What an absolute compilation unit.
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#include "../common/framework.h"
#include "../common/glutil.cpp"
#include "../common/common.h"
#include "../common/common.cpp"
#include "../common/rasterize.h"
#include "../common/rasterize_gl.cpp"
#include "../common/rasterize.cu"
#include "tf_rasterize.cu"
#include "../common/interpolate.cu"
#include "tf_interpolate.cu"
#include "../common/texture.cpp"
#include "../common/texture.cu"
#include "tf_texture.cu"
#include "../common/antialias.cu"
#include "tf_antialias.cu"
c6dc190d2ac30b052386cff7e46a7ae0cbbd7b8d.cu
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.

// TF-specific helpers.
#define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { cudaError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == cudaSuccess, errors::Internal("Cuda error: ", cudaGetErrorName(err), "[", #CUDA_CALL, ";]")); } while (0)
#define OP_CHECK_GL_ERROR(CTX, GL_CALL) do { GL_CALL; GLenum err = glGetError(); OP_REQUIRES(CTX, err == GL_NO_ERROR, errors::Internal("OpenGL error: ", getGLErrorString(err), "[", #GL_CALL, ";]")); } while (0)

// Cuda kernels and CPP all together. What an absolute compilation unit.
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#include "../common/framework.h"
#include "../common/glutil.cpp"
#include "../common/common.h"
#include "../common/common.cpp"
#include "../common/rasterize.h"
#include "../common/rasterize_gl.cpp"
#include "../common/rasterize.cu"
#include "tf_rasterize.cu"
#include "../common/interpolate.cu"
#include "tf_interpolate.cu"
#include "../common/texture.cpp"
#include "../common/texture.cu"
#include "tf_texture.cu"
#include "../common/antialias.cu"
#include "tf_antialias.cu"
70809beb5818b247cf924799135ba67f7232dadc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "cudaOverlay.h"

/*static inline __device__ __host__ bool eq_less( float a, float b, float epsilon )
{
    return (a > (b - epsilon) && a < (b + epsilon)) ? true : false;
}*/

template<typename T>
__global__ void gpuRectFill( T* input, T* output, int width, int height, float4* rects, int numRects, float4 color )
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if( x >= width || y >= height )
        return;

    const T px_in = input[ y * width + x ];
    T px_out = px_in;

    const float fx = x;
    const float fy = y;

    //const float thick = 10.0f;
    const float alpha = color.w / 255.0f;
    const float ialph = 1.0f - alpha;

    for( int nr=0; nr < numRects; nr++ )
    {
        const float4 r = rects[nr];
        //printf("%i %i %i %f %f %f %f\n", numRects, x, y, r.x, r.y, r.z, r.w);

        if( fy >= r.y && fy <= r.w /*&& (eq_less(fx, r.x, ep) || eq_less(fx, r.z, ep))*/ )
        {
            if( fx >= r.x && fx <= r.z /*&& (eq_less(fy, r.y, ep) || eq_less(fy, r.w, ep))*/ )
            {
                //printf("cuda rect %i %i\n", x, y);
                px_out.x = alpha * color.x + ialph * px_out.x;
                px_out.y = alpha * color.y + ialph * px_out.y;
                px_out.z = alpha * color.z + ialph * px_out.z;
            }
        }
    }

    output[y * width + x] = px_out;
}

// cudaRectFill
hipError_t cudaRectFill( float4* input, float4* output, uint32_t width, uint32_t height, float4* rects, int numRects, const float4& color )
{
    if( !input || !output || width == 0 || height == 0 || !rects || numRects == 0 )
        return hipErrorInvalidValue;

    // launch kernel
    const dim3 blockDim(8, 8);
    const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));

    hipLaunchKernelGGL(( gpuRectFill<float4>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, rects, numRects, color);

    return hipGetLastError();
}
70809beb5818b247cf924799135ba67f7232dadc.cu
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "cudaOverlay.h"

/*static inline __device__ __host__ bool eq_less( float a, float b, float epsilon )
{
    return (a > (b - epsilon) && a < (b + epsilon)) ? true : false;
}*/

template<typename T>
__global__ void gpuRectFill( T* input, T* output, int width, int height, float4* rects, int numRects, float4 color )
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if( x >= width || y >= height )
        return;

    const T px_in = input[ y * width + x ];
    T px_out = px_in;

    const float fx = x;
    const float fy = y;

    //const float thick = 10.0f;
    const float alpha = color.w / 255.0f;
    const float ialph = 1.0f - alpha;

    for( int nr=0; nr < numRects; nr++ )
    {
        const float4 r = rects[nr];
        //printf("%i %i %i %f %f %f %f\n", numRects, x, y, r.x, r.y, r.z, r.w);

        if( fy >= r.y && fy <= r.w /*&& (eq_less(fx, r.x, ep) || eq_less(fx, r.z, ep))*/ )
        {
            if( fx >= r.x && fx <= r.z /*&& (eq_less(fy, r.y, ep) || eq_less(fy, r.w, ep))*/ )
            {
                //printf("cuda rect %i %i\n", x, y);
                px_out.x = alpha * color.x + ialph * px_out.x;
                px_out.y = alpha * color.y + ialph * px_out.y;
                px_out.z = alpha * color.z + ialph * px_out.z;
            }
        }
    }

    output[y * width + x] = px_out;
}

// cudaRectFill
cudaError_t cudaRectFill( float4* input, float4* output, uint32_t width, uint32_t height, float4* rects, int numRects, const float4& color )
{
    if( !input || !output || width == 0 || height == 0 || !rects || numRects == 0 )
        return cudaErrorInvalidValue;

    // launch kernel
    const dim3 blockDim(8, 8);
    const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));

    gpuRectFill<float4><<<gridDim, blockDim>>>(input, output, width, height, rects, numRects, color);

    return cudaGetLastError();
}
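For context, a minimal host-side sketch of how the cudaRectFill() routine above might be driven. It is illustrative only: it assumes the iDivUp() helper and the float4 (x1, y1, x2, y2) rectangle convention from cudaOverlay.h, and the image size, rectangle, and color below are made up.

#include <cuda_runtime.h>
#include <stdint.h>
#include <stdio.h>

// Declaration matching the routine in the record above.
extern cudaError_t cudaRectFill( float4* input, float4* output, uint32_t width, uint32_t height,
                                 float4* rects, int numRects, const float4& color );

int main()
{
    const uint32_t width = 640, height = 480;   // made-up image size
    float4 *img = NULL, *out = NULL, *rects = NULL;

    // Unified memory keeps the sketch short; plain device buffers work the same way.
    // Image contents are left uninitialized here, since only the call pattern matters.
    cudaMallocManaged(&img,   width * height * sizeof(float4));
    cudaMallocManaged(&out,   width * height * sizeof(float4));
    cudaMallocManaged(&rects, sizeof(float4));

    rects[0] = make_float4(100.0f, 100.0f, 300.0f, 200.0f);       // x1, y1, x2, y2
    const float4 green = make_float4(0.0f, 255.0f, 0.0f, 128.0f); // half-transparent fill

    cudaError_t err = cudaRectFill(img, out, width, height, rects, 1, green);
    cudaDeviceSynchronize();
    printf("cudaRectFill returned %d\n", (int)err);

    cudaFree(img); cudaFree(out); cudaFree(rects);
    return 0;
}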
4ea4dfe8be1e5a0845350820caa1d884f11bd53a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>

namespace at {
namespace native {

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

template <typename scalar_t, typename IndexType, bool upper>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void triu_tril_kernel(
    cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
    const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
    const int64_t k,
    const int64_t N) {
  int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (linear_idx >= N) {
    return;
  }

  auto dims = self_info.dims;

  IndexType self_offset = 0, result_offset = 0;
  // Compute column index and corresponding offset
  IndexType col = linear_idx % self_info.sizes[dims - 1];
  linear_idx /= self_info.sizes[dims - 1];
  self_offset += self_info.strides[dims - 1] * col;
  result_offset += result_info.strides[dims - 1] * col;

  // Compute row index and corresponding offset
  IndexType row = linear_idx % self_info.sizes[dims - 2];
  linear_idx /= self_info.sizes[dims - 2];
  self_offset += self_info.strides[dims - 2] * row;
  result_offset += result_info.strides[dims - 2] * row;

  // Compute remaining offsets
  IndexType running_index;
  #pragma unroll
  for (IndexType i = dims - 3; i >= 0; --i) {
    running_index = linear_idx % self_info.sizes[i];
    linear_idx /= self_info.sizes[i];
    self_offset += running_index * self_info.strides[i];
    result_offset += running_index * result_info.strides[i];
  }

  bool mask = upper ? (col - row >= k) : (col - row <= k);
  result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}

template <bool upper>
Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) {
  int64_t N = self.numel();
  dim3 dim_block = cuda::getApplyBlock();
  dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
  AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::Bool, self.scalar_type(), name, [&]{
    if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
      auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
      hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int32_t, upper>)
        , dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
          result_info, self_info, k, N);
    } else {
      auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
      hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int64_t, upper>)
        , dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
          result_info, self_info, k, N);
    }
  });
  AT_CUDA_CHECK(hipGetLastError());
  return result;
}

Tensor& tril_cuda_(Tensor &self, int64_t k) {
  return tril_cuda_out(self, self, k);
}

Tensor& tril_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  if (result.sizes() != self.sizes()) {
    result.resize_as_(self);
  }
  if (self.numel() == 0) {
    return result;
  }
  return triu_tril_cuda_template<false>(result, self, k, "tril");
}

Tensor& triu_cuda_(Tensor &self, int64_t k) {
  return triu_cuda_out(self, self, k);
}

Tensor& triu_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  if (result.sizes() != self.sizes()) {
    result.resize_as_(self);
  }
  if (self.numel() == 0) {
    return result;
  }
  return triu_tril_cuda_template<true>(result, self, k, "triu");
}

} // namespace native
} // namespace at
4ea4dfe8be1e5a0845350820caa1d884f11bd53a.cu
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>

namespace at {
namespace native {

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

template <typename scalar_t, typename IndexType, bool upper>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void triu_tril_kernel(
    cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
    const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
    const int64_t k,
    const int64_t N) {
  int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (linear_idx >= N) {
    return;
  }

  auto dims = self_info.dims;

  IndexType self_offset = 0, result_offset = 0;
  // Compute column index and corresponding offset
  IndexType col = linear_idx % self_info.sizes[dims - 1];
  linear_idx /= self_info.sizes[dims - 1];
  self_offset += self_info.strides[dims - 1] * col;
  result_offset += result_info.strides[dims - 1] * col;

  // Compute row index and corresponding offset
  IndexType row = linear_idx % self_info.sizes[dims - 2];
  linear_idx /= self_info.sizes[dims - 2];
  self_offset += self_info.strides[dims - 2] * row;
  result_offset += result_info.strides[dims - 2] * row;

  // Compute remaining offsets
  IndexType running_index;
  #pragma unroll
  for (IndexType i = dims - 3; i >= 0; --i) {
    running_index = linear_idx % self_info.sizes[i];
    linear_idx /= self_info.sizes[i];
    self_offset += running_index * self_info.strides[i];
    result_offset += running_index * result_info.strides[i];
  }

  bool mask = upper ? (col - row >= k) : (col - row <= k);
  result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}

template <bool upper>
Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) {
  int64_t N = self.numel();
  dim3 dim_block = cuda::getApplyBlock();
  dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
  AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::Bool, self.scalar_type(), name, [&]{
    if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
      auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
      triu_tril_kernel<scalar_t, int32_t, upper>
        <<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
          result_info, self_info, k, N);
    } else {
      auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
      triu_tril_kernel<scalar_t, int64_t, upper>
        <<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
          result_info, self_info, k, N);
    }
  });
  AT_CUDA_CHECK(cudaGetLastError());
  return result;
}

Tensor& tril_cuda_(Tensor &self, int64_t k) {
  return tril_cuda_out(self, self, k);
}

Tensor& tril_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  if (result.sizes() != self.sizes()) {
    result.resize_as_(self);
  }
  if (self.numel() == 0) {
    return result;
  }
  return triu_tril_cuda_template<false>(result, self, k, "tril");
}

Tensor& triu_cuda_(Tensor &self, int64_t k) {
  return triu_cuda_out(self, self, k);
}

Tensor& triu_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  if (result.sizes() != self.sizes()) {
    result.resize_as_(self);
  }
  if (self.numel() == 0) {
    return result;
  }
  return triu_tril_cuda_template<true>(result, self, k, "triu");
}

} // namespace native
} // namespace at
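A small worked illustration of the masking rule used by triu_tril_kernel above: an element at (row, col) is kept when col - row >= k in the upper-triangular case and when col - row <= k in the lower-triangular case. The 4x4 size and k = 0 below are arbitrary choices for the sketch, not values from the record.

#include <cstdio>

int main()
{
    const int n = 4, k = 0;                        // k = 0 keeps the main diagonal
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            bool keep_upper = (col - row >= k);    // triu mask from the kernel above
            // bool keep_lower = (col - row <= k); // tril would use this predicate instead
            printf("%d ", keep_upper ? 1 : 0);
        }
        printf("\n");                              // prints an upper-triangular 0/1 pattern
    }
    return 0;
}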
f64be969416157a7878b5b00ca51001eb511f234.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion

//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.

//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.

//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.

//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.

//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:

//I = .299f * R + .587f * G + .114f * B

//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.

#include "utils.h"

#define BLOCK_SIZE 16

__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  //TODO
  //Fill in the kernel to convert from color to greyscale
  //the mapping from components of a uchar4 to RGBA is:
  // .x -> R ; .y -> G ; .z -> B ; .w -> A
  //
  //The output (greyImage) at each pixel should be the result of
  //applying the formula: output = .299f * R + .587f * G + .114f * B;
  //Note: We will be ignoring the alpha channel for this conversion

  //First create a mapping from the 2D block and grid locations
  //to an absolute 2D location in the image, then use that to
  //calculate a 1D offset
  int r = threadIdx.x;
  int rb = blockIdx.x;
  int c = threadIdx.y;
  int cb = blockIdx.y;
  int idx = (int) (numCols * (cb*numRows/BLOCK_SIZE + r) + (numCols * rb / BLOCK_SIZE) + c);
  uchar4 rgba = rgbaImage[idx];
  float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
  greyImage[idx] = channelSum;
}

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  const dim3 blockSize((int) numRows/BLOCK_SIZE + 1, (int) numCols/BLOCK_SIZE + 1, 1);
  const dim3 gridSize( BLOCK_SIZE, BLOCK_SIZE, 1);
  hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);

  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
f64be969416157a7878b5b00ca51001eb511f234.cu
// Homework 1
// Color to Greyscale Conversion

//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.

//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.

//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.

//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.

//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:

//I = .299f * R + .587f * G + .114f * B

//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.

#include "utils.h"

#define BLOCK_SIZE 16

__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  //TODO
  //Fill in the kernel to convert from color to greyscale
  //the mapping from components of a uchar4 to RGBA is:
  // .x -> R ; .y -> G ; .z -> B ; .w -> A
  //
  //The output (greyImage) at each pixel should be the result of
  //applying the formula: output = .299f * R + .587f * G + .114f * B;
  //Note: We will be ignoring the alpha channel for this conversion

  //First create a mapping from the 2D block and grid locations
  //to an absolute 2D location in the image, then use that to
  //calculate a 1D offset
  int r = threadIdx.x;
  int rb = blockIdx.x;
  int c = threadIdx.y;
  int cb = blockIdx.y;
  int idx = (int) (numCols * (cb*numRows/BLOCK_SIZE + r) + (numCols * rb / BLOCK_SIZE) + c);
  uchar4 rgba = rgbaImage[idx];
  float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
  greyImage[idx] = channelSum;
}

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  const dim3 blockSize((int) numRows/BLOCK_SIZE + 1, (int) numCols/BLOCK_SIZE + 1, 1);
  const dim3 gridSize( BLOCK_SIZE, BLOCK_SIZE, 1);
  rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);

  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
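The comments in the pair above describe mapping the 2D block/grid location to an absolute 2D position and then a 1D offset; a minimal sketch of the more conventional row-major mapping follows. The kernel name and the 16x16 launch shape are illustrative assumptions, not part of the homework files.

__global__ void rgba_to_grey_rowmajor(const uchar4* rgba, unsigned char* grey,
                                      int numRows, int numCols)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;   // x -> column
    int row = blockIdx.y * blockDim.y + threadIdx.y;   // y -> row
    if (col >= numCols || row >= numRows) return;      // guard partial blocks at the edges
    int idx = row * numCols + col;                     // row-major 1D offset
    uchar4 p = rgba[idx];
    grey[idx] = (unsigned char)(.299f * p.x + .587f * p.y + .114f * p.z);
}
// A matching launch (illustrative): dim3 block(16, 16);
// dim3 grid((numCols + 15) / 16, (numRows + 15) / 16);
// rgba_to_grey_rowmajor<<<grid, block>>>(d_rgba, d_grey, numRows, numCols);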
22693f923a6969ffaadfd65c1c50ffa111d5c5fd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>

const int TILE_SIZE = 8;
const int N_ROWS = 4;
#define N_TEST 1

__global__ void naive_transpose(const float * A, float * B, int size)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int x = index % size;
    int y = index / size;
    B[y * size + x] = A[x * size + y];
}

__global__ void fast_transpose(const float * A, float * B, const int size)
{
    __shared__ float tile[TILE_SIZE][TILE_SIZE + 1];

    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    tile[threadIdx.x][threadIdx.y] = A[row * size + col];
    __syncthreads();

    B[col * size + row] = tile[threadIdx.x][threadIdx.y];
}

void fill_host(float* host_a, const int N)
{
    int i,j;
    for(j = 0; j < N; j++){
        for(i = 0; i < N; i++)
            host_a[j * N + i] = i;
    }
}

void cpu_transpose(float * host_a, float * host_control, const int N)
{
    int i, j;
    for(j = 0; j < N; j++)
        for(i = 0; i < N; i++)
            host_control[j * N + i] = host_a[i * N + j];
}

int is_transpose(float * host_control, float * host_transpose, const int N)
{
    int i,j;
    for(j = 0; j < N; j++){
        for(i = 0; i < N; i++){
            if(host_control[j*N + i] != host_transpose[j*N + i]){
                return 0;
            }
        }
    }
    return 1;
}

void print_matrix(const float * A, const int N)
{
    int i, j;
    for(i = 0; i < N; i++){
        for(j = 0; j < N; j++)
            printf("%.1f ", A[i*N + j]);
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char * argv[])
{
    const int N = 8192;
    const int size = N * N * sizeof(float);

    dim3 dimGrid(N/TILE_SIZE, N/N_ROWS, 1);
    dim3 dimBlock(TILE_SIZE, N_ROWS, 1);

    float * host_a = (float*)malloc(size);
    float * host_naive = (float*)malloc(size);
    float * host_fast = (float*)malloc(size);
    float * host_control = (float*)malloc(size);

    float * device_a, * device_naive,* device_fast;
    hipMalloc((void**)&device_a, size);
    hipMalloc((void**)&device_naive, size);
    hipMalloc((void**)&device_fast, size);

    fill_host(host_a, N);
    cpu_transpose(host_a, host_control, N);
    //print_matrix(host_a, N);
    //print_matrix(host_control, N);

    hipMemcpy(device_a, host_a, size, hipMemcpyHostToDevice);

    hipEvent_t begin, end;
    hipEventCreate(&begin);
    hipEventCreate(&end);
    float ms;

    printf("%25s\n", "naive_transpose");
    hipEventRecord(begin,0);
    hipLaunchKernelGGL(( naive_transpose), dim3(dimGrid), dim3(dimBlock) , 0, 0, device_a, device_naive, N);
    hipEventRecord(end,0);
    hipEventSynchronize(end);
    hipEventElapsedTime(&ms, begin, end);

    hipMemcpy(host_naive, device_naive, size, hipMemcpyDeviceToHost);
    //print_matrix(host_naive, N);
    printf("Correctness Test : %d\n", is_transpose(host_control, host_naive, N));
    printf("Required time : %f\n", ms);

    hipEventRecord(begin, 0);
    hipLaunchKernelGGL(( fast_transpose), dim3(dimGrid), dim3(dimBlock) , 0, 0, device_a, device_fast, N);
    hipEventRecord(end, 0);
    hipEventSynchronize(end);
    hipEventElapsedTime(&ms, begin, end);

    hipMemcpy(host_fast, device_fast, size, hipMemcpyDeviceToHost);
    //print_matrix(host_fast, N);
    printf("Correctness Test : %d\n", is_transpose(host_control, host_fast, N));
    printf("Required Time : %f\n",ms);

    hipEventDestroy(begin);
    hipEventDestroy(end);

    hipFree(device_a);
    hipFree(device_naive);
    hipFree(device_fast);
    free(host_a);
    free(host_control);
    free(host_naive);
    free(host_fast);

    return 0;
}
22693f923a6969ffaadfd65c1c50ffa111d5c5fd.cu
#include <stdio.h>
#include <math.h>
#include <stdlib.h>

const int TILE_SIZE = 8;
const int N_ROWS = 4;
#define N_TEST 1

__global__ void naive_transpose(const float * A, float * B, int size)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int x = index % size;
    int y = index / size;
    B[y * size + x] = A[x * size + y];
}

__global__ void fast_transpose(const float * A, float * B, const int size)
{
    __shared__ float tile[TILE_SIZE][TILE_SIZE + 1];

    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    tile[threadIdx.x][threadIdx.y] = A[row * size + col];
    __syncthreads();

    B[col * size + row] = tile[threadIdx.x][threadIdx.y];
}

void fill_host(float* host_a, const int N)
{
    int i,j;
    for(j = 0; j < N; j++){
        for(i = 0; i < N; i++)
            host_a[j * N + i] = i;
    }
}

void cpu_transpose(float * host_a, float * host_control, const int N)
{
    int i, j;
    for(j = 0; j < N; j++)
        for(i = 0; i < N; i++)
            host_control[j * N + i] = host_a[i * N + j];
}

int is_transpose(float * host_control, float * host_transpose, const int N)
{
    int i,j;
    for(j = 0; j < N; j++){
        for(i = 0; i < N; i++){
            if(host_control[j*N + i] != host_transpose[j*N + i]){
                return 0;
            }
        }
    }
    return 1;
}

void print_matrix(const float * A, const int N)
{
    int i, j;
    for(i = 0; i < N; i++){
        for(j = 0; j < N; j++)
            printf("%.1f ", A[i*N + j]);
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char * argv[])
{
    const int N = 8192;
    const int size = N * N * sizeof(float);

    dim3 dimGrid(N/TILE_SIZE, N/N_ROWS, 1);
    dim3 dimBlock(TILE_SIZE, N_ROWS, 1);

    float * host_a = (float*)malloc(size);
    float * host_naive = (float*)malloc(size);
    float * host_fast = (float*)malloc(size);
    float * host_control = (float*)malloc(size);

    float * device_a, * device_naive,* device_fast;
    cudaMalloc((void**)&device_a, size);
    cudaMalloc((void**)&device_naive, size);
    cudaMalloc((void**)&device_fast, size);

    fill_host(host_a, N);
    cpu_transpose(host_a, host_control, N);
    //print_matrix(host_a, N);
    //print_matrix(host_control, N);

    cudaMemcpy(device_a, host_a, size, cudaMemcpyHostToDevice);

    cudaEvent_t begin, end;
    cudaEventCreate(&begin);
    cudaEventCreate(&end);
    float ms;

    printf("%25s\n", "naive_transpose");
    cudaEventRecord(begin,0);
    naive_transpose<<< dimGrid, dimBlock >>>(device_a, device_naive, N);
    cudaEventRecord(end,0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&ms, begin, end);

    cudaMemcpy(host_naive, device_naive, size, cudaMemcpyDeviceToHost);
    //print_matrix(host_naive, N);
    printf("Correctness Test : %d\n", is_transpose(host_control, host_naive, N));
    printf("Required time : %f\n", ms);

    cudaEventRecord(begin, 0);
    fast_transpose<<< dimGrid, dimBlock >>>(device_a, device_fast, N);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&ms, begin, end);

    cudaMemcpy(host_fast, device_fast, size, cudaMemcpyDeviceToHost);
    //print_matrix(host_fast, N);
    printf("Correctness Test : %d\n", is_transpose(host_control, host_fast, N));
    printf("Required Time : %f\n",ms);

    cudaEventDestroy(begin);
    cudaEventDestroy(end);

    cudaFree(device_a);
    cudaFree(device_naive);
    cudaFree(device_fast);
    free(host_a);
    free(host_control);
    free(host_naive);
    free(host_fast);

    return 0;
}
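The fast_transpose kernel above stages each element through a padded shared-memory tile; a common variant also swaps the block coordinates on the output side so that both the global read and the global write are coalesced. A minimal sketch of that variant follows, assuming a square 32x32 tile with one thread per element; the kernel name and tile size are illustrative, not taken from the record pair.

#define TILE 32

// One thread per element; both global accesses are coalesced and the +1
// column of padding avoids shared-memory bank conflicts.
__global__ void transpose_coalesced(const float* A, float* B, int n)
{
    __shared__ float tile[TILE][TILE + 1];

    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    if (x < n && y < n)
        tile[threadIdx.y][threadIdx.x] = A[y * n + x];   // coalesced read

    __syncthreads();

    // swap the block indices for the output tile
    x = blockIdx.y * TILE + threadIdx.x;
    y = blockIdx.x * TILE + threadIdx.y;
    if (x < n && y < n)
        B[y * n + x] = tile[threadIdx.x][threadIdx.y];   // coalesced write
}
// A matching launch (illustrative): dim3 block(TILE, TILE);
// dim3 grid((n + TILE - 1) / TILE, (n + TILE - 1) / TILE);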
f5c6addd19c799f9f9e60d92787992377d8dbef3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel3_zdir; int xdim0_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim0_advec_cell_kernel3_zdir; int ydim0_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim1_advec_cell_kernel3_zdir; int xdim1_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim1_advec_cell_kernel3_zdir; int ydim1_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim2_advec_cell_kernel3_zdir; int xdim2_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim2_advec_cell_kernel3_zdir; int ydim2_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim3_advec_cell_kernel3_zdir; int xdim3_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim3_advec_cell_kernel3_zdir; int ydim3_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim4_advec_cell_kernel3_zdir; int xdim4_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim4_advec_cell_kernel3_zdir; int ydim4_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim5_advec_cell_kernel3_zdir; int xdim5_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim5_advec_cell_kernel3_zdir; int ydim5_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim6_advec_cell_kernel3_zdir; int xdim6_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim6_advec_cell_kernel3_zdir; int ydim6_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim7_advec_cell_kernel3_zdir; int xdim7_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim7_advec_cell_kernel3_zdir; int ydim7_advec_cell_kernel3_zdir_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel3_zdir*(y)+xdim0_advec_cell_kernel3_zdir*ydim0_advec_cell_kernel3_zdir*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel3_zdir*(y)+xdim1_advec_cell_kernel3_zdir*ydim1_advec_cell_kernel3_zdir*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel3_zdir*(y)+xdim2_advec_cell_kernel3_zdir*ydim2_advec_cell_kernel3_zdir*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel3_zdir*(y)+xdim3_advec_cell_kernel3_zdir*ydim3_advec_cell_kernel3_zdir*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel3_zdir*(y)+xdim4_advec_cell_kernel3_zdir*ydim4_advec_cell_kernel3_zdir*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_advec_cell_kernel3_zdir*(y)+xdim5_advec_cell_kernel3_zdir*ydim5_advec_cell_kernel3_zdir*(z)) #define OPS_ACC6(x,y,z) (x+xdim6_advec_cell_kernel3_zdir*(y)+xdim6_advec_cell_kernel3_zdir*ydim6_advec_cell_kernel3_zdir*(z)) #define OPS_ACC7(x,y,z) (x+xdim7_advec_cell_kernel3_zdir*(y)+xdim7_advec_cell_kernel3_zdir*ydim7_advec_cell_kernel3_zdir*(z)) //user function __device__ inline void advec_cell_kernel3_zdir( const double *vol_flux_z, const double *pre_vol, const int *zz, const double *vertexdz, const double *density1, const double *energy1 , double *mass_flux_z, double *ener_flux) { double sigma, sigmat, sigmav, sigmam, sigma3, sigma4; double diffuw, diffdw, limiter; double one_by_six = 1.0/6.0; int z_max=field.z_max; int upwind,donor,downwind,dif; if(vol_flux_z[OPS_ACC0(0,0,0)] > 0.0) { upwind = -2; donor = -1; downwind = 0; dif = donor; } else if (zz[OPS_ACC2(0,0,1)] < z_max+2-2) { upwind = 1; donor = 0; downwind = -1; dif = upwind; } else { upwind = 0; donor = 0; downwind = -1; dif = upwind; } sigmat = fabs(vol_flux_z[OPS_ACC0(0,0,0)])/pre_vol[OPS_ACC1(0,0,donor)]; sigma3 = (1.0 + sigmat)*(vertexdz[OPS_ACC3(0,0,0)]/vertexdz[OPS_ACC3(0,0,dif)]); sigma4 = 2.0 - sigmat; sigma = sigmat; sigmav = sigmat; diffuw = density1[OPS_ACC4(0,0,donor)] - density1[OPS_ACC4(0,0,upwind)]; diffdw = 
density1[OPS_ACC4(0,0,downwind)] - density1[OPS_ACC4(0,0,donor)]; if( (diffuw*diffdw) > 0.0) limiter=(1.0 - sigmav) * SIGN(1.0 , diffdw) * MIN( MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3*fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter=0.0; mass_flux_z[OPS_ACC6(0,0,0)] = vol_flux_z[OPS_ACC0(0,0,0)] * ( density1[OPS_ACC4(0,0,donor)] + limiter ); sigmam = fabs(mass_flux_z[OPS_ACC6(0,0,0)])/( density1[OPS_ACC4(0,0,donor)] * pre_vol[OPS_ACC1(0,0,donor)]); diffuw = energy1[OPS_ACC5(0,0,donor)] - energy1[OPS_ACC5(0,0,upwind)]; diffdw = energy1[OPS_ACC5(0,0,downwind)] - energy1[OPS_ACC5(0,0,donor)]; if( (diffuw*diffdw) > 0.0) limiter = (1.0 - sigmam) * SIGN(1.0,diffdw) * MIN( MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter=0.0; ener_flux[OPS_ACC7(0,0,0)] = mass_flux_z[OPS_ACC6(0,0,0)] * ( energy1[OPS_ACC5(0,0,donor)] + limiter ); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 __global__ void ops_advec_cell_kernel3_zdir( const double* __restrict arg0, const double* __restrict arg1, const int* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, const double* __restrict arg5, double* __restrict arg6, double* __restrict arg7, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_advec_cell_kernel3_zdir + idx_z * 1 * xdim0_advec_cell_kernel3_zdir * ydim0_advec_cell_kernel3_zdir; arg1 += idx_x * 1 + idx_y * 1 * xdim1_advec_cell_kernel3_zdir + idx_z * 1 * xdim1_advec_cell_kernel3_zdir * ydim1_advec_cell_kernel3_zdir; arg2 += idx_x * 0 + idx_y * 0 * xdim2_advec_cell_kernel3_zdir + idx_z * 1 * xdim2_advec_cell_kernel3_zdir * ydim2_advec_cell_kernel3_zdir; arg3 += idx_x * 0 + idx_y * 0 * xdim3_advec_cell_kernel3_zdir + idx_z * 1 * xdim3_advec_cell_kernel3_zdir * ydim3_advec_cell_kernel3_zdir; arg4 += idx_x * 1 + idx_y * 1 * xdim4_advec_cell_kernel3_zdir + idx_z * 1 * xdim4_advec_cell_kernel3_zdir * ydim4_advec_cell_kernel3_zdir; arg5 += idx_x * 1 + idx_y * 1 * xdim5_advec_cell_kernel3_zdir + idx_z * 1 * xdim5_advec_cell_kernel3_zdir * ydim5_advec_cell_kernel3_zdir; arg6 += idx_x * 1 + idx_y * 1 * xdim6_advec_cell_kernel3_zdir + idx_z * 1 * xdim6_advec_cell_kernel3_zdir * ydim6_advec_cell_kernel3_zdir; arg7 += idx_x * 1 + idx_y * 1 * xdim7_advec_cell_kernel3_zdir + idx_z * 1 * xdim7_advec_cell_kernel3_zdir * ydim7_advec_cell_kernel3_zdir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel3_zdir(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function void ops_par_loop_advec_cell_kernel3_zdir(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; ops_timing_realloc(39,"advec_cell_kernel3_zdir"); OPS_kernels[39].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = 
range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]*args[4].dat->dim; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]*args[5].dat->dim; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]*args[6].dat->dim; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]*args[7].dat->dim; int ydim7 = args[7].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_advec_cell_kernel3_zdir_h || ydim0 != ydim0_advec_cell_kernel3_zdir_h || xdim1 != xdim1_advec_cell_kernel3_zdir_h || ydim1 != ydim1_advec_cell_kernel3_zdir_h || xdim2 != xdim2_advec_cell_kernel3_zdir_h || ydim2 != ydim2_advec_cell_kernel3_zdir_h || xdim3 != xdim3_advec_cell_kernel3_zdir_h || ydim3 != ydim3_advec_cell_kernel3_zdir_h || xdim4 != xdim4_advec_cell_kernel3_zdir_h || ydim4 != ydim4_advec_cell_kernel3_zdir_h || xdim5 != xdim5_advec_cell_kernel3_zdir_h || ydim5 != ydim5_advec_cell_kernel3_zdir_h || xdim6 != xdim6_advec_cell_kernel3_zdir_h || ydim6 != ydim6_advec_cell_kernel3_zdir_h || xdim7 != xdim7_advec_cell_kernel3_zdir_h || ydim7 != ydim7_advec_cell_kernel3_zdir_h) { hipMemcpyToSymbol( xdim0_advec_cell_kernel3_zdir, &xdim0, sizeof(int) ); xdim0_advec_cell_kernel3_zdir_h = xdim0; hipMemcpyToSymbol( ydim0_advec_cell_kernel3_zdir, &ydim0, sizeof(int) ); ydim0_advec_cell_kernel3_zdir_h = ydim0; hipMemcpyToSymbol( xdim1_advec_cell_kernel3_zdir, &xdim1, sizeof(int) ); xdim1_advec_cell_kernel3_zdir_h = xdim1; hipMemcpyToSymbol( ydim1_advec_cell_kernel3_zdir, &ydim1, sizeof(int) ); ydim1_advec_cell_kernel3_zdir_h = ydim1; hipMemcpyToSymbol( xdim2_advec_cell_kernel3_zdir, &xdim2, sizeof(int) ); xdim2_advec_cell_kernel3_zdir_h = xdim2; hipMemcpyToSymbol( ydim2_advec_cell_kernel3_zdir, &ydim2, sizeof(int) ); ydim2_advec_cell_kernel3_zdir_h = ydim2; hipMemcpyToSymbol( xdim3_advec_cell_kernel3_zdir, &xdim3, sizeof(int) ); xdim3_advec_cell_kernel3_zdir_h = xdim3; hipMemcpyToSymbol( ydim3_advec_cell_kernel3_zdir, &ydim3, sizeof(int) ); ydim3_advec_cell_kernel3_zdir_h = ydim3; hipMemcpyToSymbol( xdim4_advec_cell_kernel3_zdir, &xdim4, sizeof(int) ); xdim4_advec_cell_kernel3_zdir_h = xdim4; hipMemcpyToSymbol( ydim4_advec_cell_kernel3_zdir, &ydim4, sizeof(int) ); ydim4_advec_cell_kernel3_zdir_h = ydim4; hipMemcpyToSymbol( xdim5_advec_cell_kernel3_zdir, &xdim5, sizeof(int) ); xdim5_advec_cell_kernel3_zdir_h = xdim5; hipMemcpyToSymbol( ydim5_advec_cell_kernel3_zdir, &ydim5, sizeof(int) ); ydim5_advec_cell_kernel3_zdir_h = ydim5; hipMemcpyToSymbol( xdim6_advec_cell_kernel3_zdir, &xdim6, sizeof(int) ); xdim6_advec_cell_kernel3_zdir_h = xdim6; hipMemcpyToSymbol( ydim6_advec_cell_kernel3_zdir, &ydim6, sizeof(int) ); ydim6_advec_cell_kernel3_zdir_h = ydim6; 
hipMemcpyToSymbol( xdim7_advec_cell_kernel3_zdir, &xdim7, sizeof(int) ); xdim7_advec_cell_kernel3_zdir_h = xdim7; hipMemcpyToSymbol( ydim7_advec_cell_kernel3_zdir, &ydim7, sizeof(int) ); ydim7_advec_cell_kernel3_zdir_h = ydim7; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; char *p_a[8]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif //OPS_MPI int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - 
args[4].dat->base[1] - d_m[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif //OPS_MPI int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif //OPS_MPI int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif //OPS_MPI int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7+ dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7+ dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); ops_timers_core(&c1,&t1); OPS_kernels[39].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_advec_cell_kernel3_zdir), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[39].time += t2-t1; ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[6],range); ops_set_halo_dirtybit3(&args[7],range); //Update kernel record OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg3); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg4); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg5); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg6); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg7); }
f5c6addd19c799f9f9e60d92787992377d8dbef3.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel3_zdir; int xdim0_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim0_advec_cell_kernel3_zdir; int ydim0_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim1_advec_cell_kernel3_zdir; int xdim1_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim1_advec_cell_kernel3_zdir; int ydim1_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim2_advec_cell_kernel3_zdir; int xdim2_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim2_advec_cell_kernel3_zdir; int ydim2_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim3_advec_cell_kernel3_zdir; int xdim3_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim3_advec_cell_kernel3_zdir; int ydim3_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim4_advec_cell_kernel3_zdir; int xdim4_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim4_advec_cell_kernel3_zdir; int ydim4_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim5_advec_cell_kernel3_zdir; int xdim5_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim5_advec_cell_kernel3_zdir; int ydim5_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim6_advec_cell_kernel3_zdir; int xdim6_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim6_advec_cell_kernel3_zdir; int ydim6_advec_cell_kernel3_zdir_h = -1; __constant__ int xdim7_advec_cell_kernel3_zdir; int xdim7_advec_cell_kernel3_zdir_h = -1; __constant__ int ydim7_advec_cell_kernel3_zdir; int ydim7_advec_cell_kernel3_zdir_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel3_zdir*(y)+xdim0_advec_cell_kernel3_zdir*ydim0_advec_cell_kernel3_zdir*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel3_zdir*(y)+xdim1_advec_cell_kernel3_zdir*ydim1_advec_cell_kernel3_zdir*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel3_zdir*(y)+xdim2_advec_cell_kernel3_zdir*ydim2_advec_cell_kernel3_zdir*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel3_zdir*(y)+xdim3_advec_cell_kernel3_zdir*ydim3_advec_cell_kernel3_zdir*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel3_zdir*(y)+xdim4_advec_cell_kernel3_zdir*ydim4_advec_cell_kernel3_zdir*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_advec_cell_kernel3_zdir*(y)+xdim5_advec_cell_kernel3_zdir*ydim5_advec_cell_kernel3_zdir*(z)) #define OPS_ACC6(x,y,z) (x+xdim6_advec_cell_kernel3_zdir*(y)+xdim6_advec_cell_kernel3_zdir*ydim6_advec_cell_kernel3_zdir*(z)) #define OPS_ACC7(x,y,z) (x+xdim7_advec_cell_kernel3_zdir*(y)+xdim7_advec_cell_kernel3_zdir*ydim7_advec_cell_kernel3_zdir*(z)) //user function __device__ inline void advec_cell_kernel3_zdir( const double *vol_flux_z, const double *pre_vol, const int *zz, const double *vertexdz, const double *density1, const double *energy1 , double *mass_flux_z, double *ener_flux) { double sigma, sigmat, sigmav, sigmam, sigma3, sigma4; double diffuw, diffdw, limiter; double one_by_six = 1.0/6.0; int z_max=field.z_max; int upwind,donor,downwind,dif; if(vol_flux_z[OPS_ACC0(0,0,0)] > 0.0) { upwind = -2; donor = -1; downwind = 0; dif = donor; } else if (zz[OPS_ACC2(0,0,1)] < z_max+2-2) { upwind = 1; donor = 0; downwind = -1; dif = upwind; } else { upwind = 0; donor = 0; downwind = -1; dif = upwind; } sigmat = fabs(vol_flux_z[OPS_ACC0(0,0,0)])/pre_vol[OPS_ACC1(0,0,donor)]; sigma3 = (1.0 + sigmat)*(vertexdz[OPS_ACC3(0,0,0)]/vertexdz[OPS_ACC3(0,0,dif)]); sigma4 = 2.0 - sigmat; sigma = sigmat; sigmav = sigmat; diffuw = density1[OPS_ACC4(0,0,donor)] - density1[OPS_ACC4(0,0,upwind)]; diffdw = density1[OPS_ACC4(0,0,downwind)] - density1[OPS_ACC4(0,0,donor)]; if( (diffuw*diffdw) > 0.0) limiter=(1.0 - sigmav) 
* SIGN(1.0 , diffdw) * MIN( MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3*fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter=0.0; mass_flux_z[OPS_ACC6(0,0,0)] = vol_flux_z[OPS_ACC0(0,0,0)] * ( density1[OPS_ACC4(0,0,donor)] + limiter ); sigmam = fabs(mass_flux_z[OPS_ACC6(0,0,0)])/( density1[OPS_ACC4(0,0,donor)] * pre_vol[OPS_ACC1(0,0,donor)]); diffuw = energy1[OPS_ACC5(0,0,donor)] - energy1[OPS_ACC5(0,0,upwind)]; diffdw = energy1[OPS_ACC5(0,0,downwind)] - energy1[OPS_ACC5(0,0,donor)]; if( (diffuw*diffdw) > 0.0) limiter = (1.0 - sigmam) * SIGN(1.0,diffdw) * MIN( MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter=0.0; ener_flux[OPS_ACC7(0,0,0)] = mass_flux_z[OPS_ACC6(0,0,0)] * ( energy1[OPS_ACC5(0,0,donor)] + limiter ); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 __global__ void ops_advec_cell_kernel3_zdir( const double* __restrict arg0, const double* __restrict arg1, const int* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, const double* __restrict arg5, double* __restrict arg6, double* __restrict arg7, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_advec_cell_kernel3_zdir + idx_z * 1 * xdim0_advec_cell_kernel3_zdir * ydim0_advec_cell_kernel3_zdir; arg1 += idx_x * 1 + idx_y * 1 * xdim1_advec_cell_kernel3_zdir + idx_z * 1 * xdim1_advec_cell_kernel3_zdir * ydim1_advec_cell_kernel3_zdir; arg2 += idx_x * 0 + idx_y * 0 * xdim2_advec_cell_kernel3_zdir + idx_z * 1 * xdim2_advec_cell_kernel3_zdir * ydim2_advec_cell_kernel3_zdir; arg3 += idx_x * 0 + idx_y * 0 * xdim3_advec_cell_kernel3_zdir + idx_z * 1 * xdim3_advec_cell_kernel3_zdir * ydim3_advec_cell_kernel3_zdir; arg4 += idx_x * 1 + idx_y * 1 * xdim4_advec_cell_kernel3_zdir + idx_z * 1 * xdim4_advec_cell_kernel3_zdir * ydim4_advec_cell_kernel3_zdir; arg5 += idx_x * 1 + idx_y * 1 * xdim5_advec_cell_kernel3_zdir + idx_z * 1 * xdim5_advec_cell_kernel3_zdir * ydim5_advec_cell_kernel3_zdir; arg6 += idx_x * 1 + idx_y * 1 * xdim6_advec_cell_kernel3_zdir + idx_z * 1 * xdim6_advec_cell_kernel3_zdir * ydim6_advec_cell_kernel3_zdir; arg7 += idx_x * 1 + idx_y * 1 * xdim7_advec_cell_kernel3_zdir + idx_z * 1 * xdim7_advec_cell_kernel3_zdir * ydim7_advec_cell_kernel3_zdir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel3_zdir(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function void ops_par_loop_advec_cell_kernel3_zdir(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; ops_timing_realloc(39,"advec_cell_kernel3_zdir"); OPS_kernels[39].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = 
sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]*args[4].dat->dim; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]*args[5].dat->dim; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]*args[6].dat->dim; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]*args[7].dat->dim; int ydim7 = args[7].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_advec_cell_kernel3_zdir_h || ydim0 != ydim0_advec_cell_kernel3_zdir_h || xdim1 != xdim1_advec_cell_kernel3_zdir_h || ydim1 != ydim1_advec_cell_kernel3_zdir_h || xdim2 != xdim2_advec_cell_kernel3_zdir_h || ydim2 != ydim2_advec_cell_kernel3_zdir_h || xdim3 != xdim3_advec_cell_kernel3_zdir_h || ydim3 != ydim3_advec_cell_kernel3_zdir_h || xdim4 != xdim4_advec_cell_kernel3_zdir_h || ydim4 != ydim4_advec_cell_kernel3_zdir_h || xdim5 != xdim5_advec_cell_kernel3_zdir_h || ydim5 != ydim5_advec_cell_kernel3_zdir_h || xdim6 != xdim6_advec_cell_kernel3_zdir_h || ydim6 != ydim6_advec_cell_kernel3_zdir_h || xdim7 != xdim7_advec_cell_kernel3_zdir_h || ydim7 != ydim7_advec_cell_kernel3_zdir_h) { cudaMemcpyToSymbol( xdim0_advec_cell_kernel3_zdir, &xdim0, sizeof(int) ); xdim0_advec_cell_kernel3_zdir_h = xdim0; cudaMemcpyToSymbol( ydim0_advec_cell_kernel3_zdir, &ydim0, sizeof(int) ); ydim0_advec_cell_kernel3_zdir_h = ydim0; cudaMemcpyToSymbol( xdim1_advec_cell_kernel3_zdir, &xdim1, sizeof(int) ); xdim1_advec_cell_kernel3_zdir_h = xdim1; cudaMemcpyToSymbol( ydim1_advec_cell_kernel3_zdir, &ydim1, sizeof(int) ); ydim1_advec_cell_kernel3_zdir_h = ydim1; cudaMemcpyToSymbol( xdim2_advec_cell_kernel3_zdir, &xdim2, sizeof(int) ); xdim2_advec_cell_kernel3_zdir_h = xdim2; cudaMemcpyToSymbol( ydim2_advec_cell_kernel3_zdir, &ydim2, sizeof(int) ); ydim2_advec_cell_kernel3_zdir_h = ydim2; cudaMemcpyToSymbol( xdim3_advec_cell_kernel3_zdir, &xdim3, sizeof(int) ); xdim3_advec_cell_kernel3_zdir_h = xdim3; cudaMemcpyToSymbol( ydim3_advec_cell_kernel3_zdir, &ydim3, sizeof(int) ); ydim3_advec_cell_kernel3_zdir_h = ydim3; cudaMemcpyToSymbol( xdim4_advec_cell_kernel3_zdir, &xdim4, sizeof(int) ); xdim4_advec_cell_kernel3_zdir_h = xdim4; cudaMemcpyToSymbol( ydim4_advec_cell_kernel3_zdir, &ydim4, sizeof(int) ); ydim4_advec_cell_kernel3_zdir_h = ydim4; cudaMemcpyToSymbol( xdim5_advec_cell_kernel3_zdir, &xdim5, sizeof(int) ); xdim5_advec_cell_kernel3_zdir_h = xdim5; cudaMemcpyToSymbol( ydim5_advec_cell_kernel3_zdir, &ydim5, sizeof(int) ); ydim5_advec_cell_kernel3_zdir_h = ydim5; cudaMemcpyToSymbol( xdim6_advec_cell_kernel3_zdir, &xdim6, sizeof(int) ); xdim6_advec_cell_kernel3_zdir_h = xdim6; cudaMemcpyToSymbol( ydim6_advec_cell_kernel3_zdir, &ydim6, sizeof(int) ); ydim6_advec_cell_kernel3_zdir_h = ydim6; cudaMemcpyToSymbol( xdim7_advec_cell_kernel3_zdir, &xdim7, sizeof(int) ); xdim7_advec_cell_kernel3_zdir_h 
= xdim7; cudaMemcpyToSymbol( ydim7_advec_cell_kernel3_zdir, &ydim7, sizeof(int) ); ydim7_advec_cell_kernel3_zdir_h = ydim7; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; char *p_a[8]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif //OPS_MPI int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * 
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif //OPS_MPI int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif //OPS_MPI int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif //OPS_MPI int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7+ dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7+ dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); ops_timers_core(&c1,&t1); OPS_kernels[39].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_advec_cell_kernel3_zdir<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[39].time += t2-t1; ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[6],range); ops_set_halo_dirtybit3(&args[7],range); //Update kernel record OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg3); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg4); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg5); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg6); OPS_kernels[39].transfer += ops_compute_transfer(dim, range, &arg7); }
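A note on the launch configuration built in the wrapper above: the grid is obtained by ceil-dividing the x and y extents of the iteration range by the block sizes, with one grid layer per z plane. The helper below is a minimal sketch of that calculation only, not part of the generated file; the parameter names mirror x_size, y_size, z_size and OPS_block_size_x/y from the wrapper, and make_ops_grid is a hypothetical name.

#include <cuda_runtime.h>

// Sketch: reproduces dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size )
static dim3 make_ops_grid(int x_size, int y_size, int z_size, int block_size_x, int block_size_y)
{
    // ceil-divide the x and y extents; one grid slice per z plane
    return dim3((x_size - 1) / block_size_x + 1,
                (y_size - 1) / block_size_y + 1,
                z_size);
}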
4ee37ebe3eed89709685d56a4ffab5bcadb4f689.hip
// !!! This is a file automatically generated by hipify!!! /* * File: MBSet.cu * * Created on June 24, 2012 * * Purpose: This program displays Mandelbrot set using the GPU via CUDA and * OpenGL immediate mode. * */ #include <iostream> #include <stack> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <math.h> #include <time.h> #include <vector> #include "Complex.cu" #include <unistd.h> #include <hip/hip_runtime_api.h> #include <GL/freeglut.h> #include <GL/gl.h> #include <GL/glu.h> #include <GL/glut.h> // Size of window in pixels, both width and height #define WINDOW_DIM 512 //threads per block #define THREADS_PB 32 #define N (WINDOW_DIM*WINDOW_DIM) using namespace std; //==================GLOBALS======================== // Initial screen coordinates, both host and device. Complex minC(-2.0, -1.2); Complex maxC(1.0, 1.8); Complex* dev_minC; Complex* dev_maxC; const int maxIt = 2000; // Msximum Iterations float xmin = -2.0, xmax = 1.0; float ymin = -1.2, ymax = 1.8; Complex* c = new Complex[WINDOW_DIM * WINDOW_DIM]; Complex* dev_c; int w = WINDOW_DIM; int h = WINDOW_DIM; int iter_count [N]; int* dev_icount; float dx, dy, dz; bool drawing = false; //===============CLASSES/STUCTS================== class RGB { public: RGB(): r(0), g(0), b(0){ } RGB( float r0, float g0, double b0): r(r0),g(g0),b(b0){} public: float r; float g; float b; }; struct Point { int x,y; }; struct Frame { public: float minC_x; float minC_y; float maxC_x; float maxC_y; Frame(float a, float b, float c, float d) : minC_x(a), minC_y(b), maxC_x(c), maxC_y(d){} }; //============CLASS VARIABLES=================== vector <Frame> frame_vec; RGB* colors = 0; // Array of color values Point start, end; //============ MB FUNCTIONS ======================== __global__ void calcMB (Complex* dev_minC, Complex* dev_maxC, int* dev_icount, Complex* dev_c) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index / WINDOW_DIM; int j = index % WINDOW_DIM; double dr = dev_maxC->r - dev_minC->r; double di = dev_maxC->i - dev_minC->i; double nr = (double) i / WINDOW_DIM; double ni = (double) j / WINDOW_DIM; dev_c[index].r = dev_minC->r + nr * dr; dev_c[index].i = dev_minC->i + ni * di; Complex Z (0,0); Z.r = dev_c[index].r; Z.i = dev_c[index].i; dev_icount[index] = 0; while(dev_icount[index] < maxIt) { if (Z.magnitude2() < 4.0f) { dev_icount[index]++; Z = (Z*Z) + dev_c[index]; } else{ break; } } } void cuda() { //make space on the device hipMalloc((void**)&dev_icount, N *sizeof(int)); hipMalloc((void**)&dev_c, N * sizeof(Complex)); hipMalloc((void**)&dev_minC, sizeof(Complex)); hipMalloc((void**)&dev_maxC, sizeof(Complex)); //copy from host to device hipMemcpy(dev_minC, &minC, sizeof(Complex), hipMemcpyHostToDevice); hipMemcpy(dev_maxC, &maxC, sizeof(Complex), hipMemcpyHostToDevice); hipMemcpy(dev_icount, iter_count, N*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_c, c, N*sizeof(Complex), hipMemcpyHostToDevice); //do the calculation hipLaunchKernelGGL(( calcMB), dim3(N / THREADS_PB), dim3(THREADS_PB) , 0, 0, dev_minC, dev_maxC, dev_icount, dev_c); //copy from device to host hipMemcpy(iter_count, dev_icount, N*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(c, dev_c, N*sizeof(Complex), hipMemcpyDeviceToHost); } //================ OGL FUNCTIONS ================== void Square() { glColor3f(1, 1, 1); glBegin(GL_LINE_LOOP); glVertex2f(start.x, end.y); glVertex2f(start.x, start.y); glVertex2f(end.x, start.y); glVertex2f(end.x, end.y); glEnd(); } void display() { glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0, 
WINDOW_DIM, WINDOW_DIM, 0); glDisable(GL_DEPTH_TEST); glMatrixMode(GL_MODELVIEW); glClearColor(0.0, 0.0, 0.0, 0); glClear(GL_COLOR_BUFFER_BIT); glBegin(GL_POINTS); for(int i = 0; i < WINDOW_DIM; i++) { for(int j = 0; j < WINDOW_DIM; j++) { glColor3f(colors[iter_count[i*WINDOW_DIM + j]].r, colors[iter_count[i*WINDOW_DIM + j]].g, colors[iter_count[i*WINDOW_DIM + j]].b); glVertex2d(i, j); } } glEnd(); if(drawing) Square(); glutSwapBuffers(); } //================ USER INPUT ================== void mouse(int button, int state, int x, int y) { if(button == GLUT_LEFT_BUTTON){ if(state==GLUT_DOWN) { start.x = x; end.x = x; start.y = y; end.y = y; drawing = true; } if(state==GLUT_UP) { frame_vec.push_back(Frame(minC.r, minC.i, maxC.r, maxC.i)); if(x > start.x && y > start.y) { end.x = start.x + dz; end.y = start.y + dz; } else if(x < start.x && y < start.y) { end.x = start.x - dz; end.y = start.y - dz; } else if(x > start.x && y < start.y) { end.x = start.x + dz; end.y = start.y - dz; } else if(x < start.x && y > start.y) { end.x = start.x - dz; end.y = start.y + dz; } for(int i = 0; i < WINDOW_DIM; i++) { for(int j = 0; j < WINDOW_DIM; j++) { if(i == start.x && j == start.y) { minC.r = c[i*WINDOW_DIM + j].r; minC.i = c[i*WINDOW_DIM + j].i; } if(i == end.x && j == end.y) { maxC.r = c[i*WINDOW_DIM + j].r; maxC.i = c[i*WINDOW_DIM + j].i; } } } cuda(); drawing = false; glutPostRedisplay(); } } } void motion(int x, int y) { dx = abs(x - start.x); dy = abs(y - start.y); if(dx > dy) dz = dy; if(dx < dy) dz = dx; if(x > start.x && y > start.y) { end.x = start.x + dz; end.y = start.y + dz; } if(x < start.x && y < start.y) { end.x = start.x - dz; end.y = start.y - dz; } if(x < start.x && y > start.y) { end.x = start.x - dz; end.y = start.y + dz; } if(x > start.x && y < start.y) { end.x = start.x + dz; end.y = start.y - dz; } glutPostRedisplay(); } void freeMem(); void keyboard(unsigned char key, int x, int y) { if (key == 'b') { //load previous frame if(frame_vec.size() > 0 ) { Frame back = frame_vec.back(); frame_vec.pop_back(); cout<<"Vector size = "<<frame_vec.size()<<endl; minC.r = back.minC_x; minC.i = back.minC_y; maxC.r = back.maxC_x; maxC.i = back.maxC_y; cuda(); glutPostRedisplay(); } } if (key =='q') { freeMem(); exit(0); } } void InitializeColors() { time_t t = time(0); srand48(t); colors = new RGB[maxIt + 1]; for (int i = 0; i < maxIt; ++i) { if (i < 5) { // Try this.. just white for small it counts colors[i] = RGB(1, 1, 1); } else { colors[i] = RGB(drand48(), drand48(), drand48()); } } colors[maxIt] = RGB(); // black } void freeMem() { delete[] c; hipFree(dev_icount); hipFree(dev_minC); hipFree(dev_c); hipFree(dev_maxC); cout << "Exited Cleanly!" << endl; } int main(int argc, char** argv) { // Initialize OPENGL here // Set up necessary host and device buffers // set up the opengl callbacks for display, mouse and keyboard // Calculate the iteration counts // Grad students, pick the colors for the 0 .. 1999 iteration count pixels InitializeColors(); glutInit(&argc, argv); //Window glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH); glutInitWindowSize(WINDOW_DIM, WINDOW_DIM); glutInitWindowPosition(100,100); glutCreateWindow("MBSet"); cuda(); //Callbacks glutDisplayFunc(display); glutIdleFunc(display); glutMouseFunc(mouse); glutKeyboardFunc(keyboard); glutMotionFunc(motion); glutMainLoop(); // This will callback the display, keyboard and mouse return 0; }
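The cuda() routine in this file does not check the status codes returned by the HIP runtime. The fragment below is a small sketch, not part of the file, of the same allocate-and-upload pattern with the return values tested against hipSuccess; upload_ints is a hypothetical helper name, while hipMalloc, hipMemcpy and hipFree are the same entry points used above.

#include <hip/hip_runtime.h>
#include <stddef.h>

// Sketch: allocate a device buffer, copy host data into it, and fail cleanly.
static int* upload_ints(const int* host, size_t count)
{
    int* dev = NULL;
    if (hipMalloc((void**)&dev, count * sizeof(int)) != hipSuccess)
        return NULL;                                      // allocation failed
    if (hipMemcpy(dev, host, count * sizeof(int), hipMemcpyHostToDevice) != hipSuccess) {
        hipFree(dev);                                     // do not leak on a failed copy
        return NULL;
    }
    return dev;
}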
4ee37ebe3eed89709685d56a4ffab5bcadb4f689.cu
/* * File: MBSet.cu * * Created on June 24, 2012 * * Purpose: This program displays Mandelbrot set using the GPU via CUDA and * OpenGL immediate mode. * */ #include <iostream> #include <stack> #include <cuda_runtime_api.h> #include <stdio.h> #include <math.h> #include <time.h> #include <vector> #include "Complex.cu" #include <unistd.h> #include <cuda_runtime_api.h> #include <GL/freeglut.h> #include <GL/gl.h> #include <GL/glu.h> #include <GL/glut.h> // Size of window in pixels, both width and height #define WINDOW_DIM 512 //threads per block #define THREADS_PB 32 #define N (WINDOW_DIM*WINDOW_DIM) using namespace std; //==================GLOBALS======================== // Initial screen coordinates, both host and device. Complex minC(-2.0, -1.2); Complex maxC(1.0, 1.8); Complex* dev_minC; Complex* dev_maxC; const int maxIt = 2000; // Msximum Iterations float xmin = -2.0, xmax = 1.0; float ymin = -1.2, ymax = 1.8; Complex* c = new Complex[WINDOW_DIM * WINDOW_DIM]; Complex* dev_c; int w = WINDOW_DIM; int h = WINDOW_DIM; int iter_count [N]; int* dev_icount; float dx, dy, dz; bool drawing = false; //===============CLASSES/STUCTS================== class RGB { public: RGB(): r(0), g(0), b(0){ } RGB( float r0, float g0, double b0): r(r0),g(g0),b(b0){} public: float r; float g; float b; }; struct Point { int x,y; }; struct Frame { public: float minC_x; float minC_y; float maxC_x; float maxC_y; Frame(float a, float b, float c, float d) : minC_x(a), minC_y(b), maxC_x(c), maxC_y(d){} }; //============CLASS VARIABLES=================== vector <Frame> frame_vec; RGB* colors = 0; // Array of color values Point start, end; //============ MB FUNCTIONS ======================== __global__ void calcMB (Complex* dev_minC, Complex* dev_maxC, int* dev_icount, Complex* dev_c) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index / WINDOW_DIM; int j = index % WINDOW_DIM; double dr = dev_maxC->r - dev_minC->r; double di = dev_maxC->i - dev_minC->i; double nr = (double) i / WINDOW_DIM; double ni = (double) j / WINDOW_DIM; dev_c[index].r = dev_minC->r + nr * dr; dev_c[index].i = dev_minC->i + ni * di; Complex Z (0,0); Z.r = dev_c[index].r; Z.i = dev_c[index].i; dev_icount[index] = 0; while(dev_icount[index] < maxIt) { if (Z.magnitude2() < 4.0f) { dev_icount[index]++; Z = (Z*Z) + dev_c[index]; } else{ break; } } } void cuda() { //make space on the device cudaMalloc((void**)&dev_icount, N *sizeof(int)); cudaMalloc((void**)&dev_c, N * sizeof(Complex)); cudaMalloc((void**)&dev_minC, sizeof(Complex)); cudaMalloc((void**)&dev_maxC, sizeof(Complex)); //copy from host to device cudaMemcpy(dev_minC, &minC, sizeof(Complex), cudaMemcpyHostToDevice); cudaMemcpy(dev_maxC, &maxC, sizeof(Complex), cudaMemcpyHostToDevice); cudaMemcpy(dev_icount, iter_count, N*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_c, c, N*sizeof(Complex), cudaMemcpyHostToDevice); //do the calculation calcMB<<< N / THREADS_PB, THREADS_PB >>>(dev_minC, dev_maxC, dev_icount, dev_c); //copy from device to host cudaMemcpy(iter_count, dev_icount, N*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(c, dev_c, N*sizeof(Complex), cudaMemcpyDeviceToHost); } //================ OGL FUNCTIONS ================== void Square() { glColor3f(1, 1, 1); glBegin(GL_LINE_LOOP); glVertex2f(start.x, end.y); glVertex2f(start.x, start.y); glVertex2f(end.x, start.y); glVertex2f(end.x, end.y); glEnd(); } void display() { glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0, WINDOW_DIM, WINDOW_DIM, 0); glDisable(GL_DEPTH_TEST); glMatrixMode(GL_MODELVIEW); 
glClearColor(0.0, 0.0, 0.0, 0); glClear(GL_COLOR_BUFFER_BIT); glBegin(GL_POINTS); for(int i = 0; i < WINDOW_DIM; i++) { for(int j = 0; j < WINDOW_DIM; j++) { glColor3f(colors[iter_count[i*WINDOW_DIM + j]].r, colors[iter_count[i*WINDOW_DIM + j]].g, colors[iter_count[i*WINDOW_DIM + j]].b); glVertex2d(i, j); } } glEnd(); if(drawing) Square(); glutSwapBuffers(); } //================ USER INPUT ================== void mouse(int button, int state, int x, int y) { if(button == GLUT_LEFT_BUTTON){ if(state==GLUT_DOWN) { start.x = x; end.x = x; start.y = y; end.y = y; drawing = true; } if(state==GLUT_UP) { frame_vec.push_back(Frame(minC.r, minC.i, maxC.r, maxC.i)); if(x > start.x && y > start.y) { end.x = start.x + dz; end.y = start.y + dz; } else if(x < start.x && y < start.y) { end.x = start.x - dz; end.y = start.y - dz; } else if(x > start.x && y < start.y) { end.x = start.x + dz; end.y = start.y - dz; } else if(x < start.x && y > start.y) { end.x = start.x - dz; end.y = start.y + dz; } for(int i = 0; i < WINDOW_DIM; i++) { for(int j = 0; j < WINDOW_DIM; j++) { if(i == start.x && j == start.y) { minC.r = c[i*WINDOW_DIM + j].r; minC.i = c[i*WINDOW_DIM + j].i; } if(i == end.x && j == end.y) { maxC.r = c[i*WINDOW_DIM + j].r; maxC.i = c[i*WINDOW_DIM + j].i; } } } cuda(); drawing = false; glutPostRedisplay(); } } } void motion(int x, int y) { dx = abs(x - start.x); dy = abs(y - start.y); if(dx > dy) dz = dy; if(dx < dy) dz = dx; if(x > start.x && y > start.y) { end.x = start.x + dz; end.y = start.y + dz; } if(x < start.x && y < start.y) { end.x = start.x - dz; end.y = start.y - dz; } if(x < start.x && y > start.y) { end.x = start.x - dz; end.y = start.y + dz; } if(x > start.x && y < start.y) { end.x = start.x + dz; end.y = start.y - dz; } glutPostRedisplay(); } void freeMem(); void keyboard(unsigned char key, int x, int y) { if (key == 'b') { //load previous frame if(frame_vec.size() > 0 ) { Frame back = frame_vec.back(); frame_vec.pop_back(); cout<<"Vector size = "<<frame_vec.size()<<endl; minC.r = back.minC_x; minC.i = back.minC_y; maxC.r = back.maxC_x; maxC.i = back.maxC_y; cuda(); glutPostRedisplay(); } } if (key =='q') { freeMem(); exit(0); } } void InitializeColors() { time_t t = time(0); srand48(t); colors = new RGB[maxIt + 1]; for (int i = 0; i < maxIt; ++i) { if (i < 5) { // Try this.. just white for small it counts colors[i] = RGB(1, 1, 1); } else { colors[i] = RGB(drand48(), drand48(), drand48()); } } colors[maxIt] = RGB(); // black } void freeMem() { delete[] c; cudaFree(dev_icount); cudaFree(dev_minC); cudaFree(dev_c); cudaFree(dev_maxC); cout << "Exited Cleanly!" << endl; } int main(int argc, char** argv) { // Initialize OPENGL here // Set up necessary host and device buffers // set up the opengl callbacks for display, mouse and keyboard // Calculate the iteration counts // Grad students, pick the colors for the 0 .. 1999 iteration count pixels InitializeColors(); glutInit(&argc, argv); //Window glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH); glutInitWindowSize(WINDOW_DIM, WINDOW_DIM); glutInitWindowPosition(100,100); glutCreateWindow("MBSet"); cuda(); //Callbacks glutDisplayFunc(display); glutIdleFunc(display); glutMouseFunc(mouse); glutKeyboardFunc(keyboard); glutMotionFunc(motion); glutMainLoop(); // This will callback the display, keyboard and mouse return 0; }
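For spot-checking the kernel output of either version of MBSet, the escape-time loop in calcMB can be mirrored on the host. The function below is a reference sketch under the same assumptions as the kernel (Z seeded with c, escape once |Z|^2 reaches 4, capped at maxIt); it is not part of the original program and mandel_iters_ref is a hypothetical name.

// Sketch: count how many times Z = Z*Z + c is applied before |Z|^2 reaches 4.
static int mandel_iters_ref(double cr, double ci, int maxIt)
{
    double zr = cr, zi = ci;               // calcMB seeds Z with c
    int it = 0;
    while (it < maxIt && zr * zr + zi * zi < 4.0) {
        double t = zr * zr - zi * zi + cr; // real part of Z*Z + c
        zi = 2.0 * zr * zi + ci;           // imaginary part of Z*Z + c
        zr = t;
        ++it;
    }
    return it;
}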
54090f16e61b6286af2a0ed1f47164dcbc249e39.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @author Tingxing Dong @author Azzam Haidar @generated from magmablas/ztrsv.cu normal z -> d, Tue Feb 9 16:05:35 2016 */ #include "magma_internal.h" #include "magma_templates.h" #define PRECISION_d #define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched #define NUM_THREADS 128 //64 //128 #define BLOCK_SIZE_N 128 #define DIM_X_N 128 #define DIM_Y_N 1 #define BLOCK_SIZE_T 32 #define DIM_X_T 16 #define DIM_Y_T 8 #include "dtrsv_template_device.cuh" #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column extern __shared__ double shared_data[]; //============================================================================== template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag> __global__ void dtrsv_notrans_kernel_outplace( int n, const double * __restrict__ A, int lda, double *b, int incb, double *x) { dtrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } //============================================================================== template<const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag> __global__ void dtrsv_trans_kernel_outplace( int n, const double * __restrict__ A, int lda, double *b, int incb, double *x) { dtrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } //============================================================================== extern "C" void magmablas_dtrsv_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaDouble_ptr b, magma_int_t incb, magmaDouble_ptr x, magma_queue_t queue, magma_int_t flag=0) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. 
if (n == 0) return; dim3 threads( NUM_THREADS ); dim3 blocks( 1, 1, 1 ); size_t shmem = n * sizeof(double); if (trans == MagmaNoTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } else //Lower { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } } else if (trans == MagmaTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >) , 
dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } } else if (trans == MagmaConjTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } } } /* README: flag decides if the dtrsv_outplace see an updated x or not. 
0: No; other: Yes In recursive, flag must be nonzero except the 1st call */ extern "C" void magmablas_dtrsv_recursive_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaDouble_ptr b, magma_int_t incb, magmaDouble_ptr x, magma_queue_t queue) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. if (n == 0) return; //Init x with zero //magmablas_dlaset( MagmaFull, n, incb, MAGMA_D_ZERO, MAGMA_D_ZERO, x, n, queue ); magma_int_t col = n; if (trans == MagmaNoTrans) { for (magma_int_t i=0; i < n; i+= NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaUpper) { col -= jb; //assume x_array contains zero elements, magmablas_dgemv will cause slow down magma_dgemv( MagmaNoTrans, jb, i, MAGMA_D_ONE, A(col, col+jb), lda, x+col+jb, 1, MAGMA_D_ONE, x+col, 1, queue ); } else { col = i; magma_dgemv( MagmaNoTrans, jb, i, MAGMA_D_ONE, A(col, 0), lda, x, 1, MAGMA_D_ONE, x+col, 1, queue ); } magmablas_dtrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } else { for (magma_int_t i=0; i < n; i += NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaLower) { col -= jb; magma_dgemv( MagmaConjTrans, i, jb, MAGMA_D_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_D_ONE, x+col, 1, queue ); } else { col = i; magma_dgemv( MagmaConjTrans, i, jb, MAGMA_D_ONE, A(0, col), lda, x, 1, MAGMA_D_ONE, x+col, 1, queue ); } magmablas_dtrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } } //============================================================================== /** Purpose ------- dtrsv solves one of the matrix equations on gpu op(A)*x = B, or x*op(A) = B, where alpha is a scalar, X and B are vectors, A is a unit, or non-unit, upper or lower triangular matrix and op(A) is one of op(A) = A, or op(A) = A^T, or op(A) = A^H. The vector x is overwritten on b. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] trans magma_trans_t. On entry, trans specifies the form of op(A) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op(A) = A. - = MagmaTrans: op(A) = A^T. - = MagmaConjTrans: op(A) = A^H. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] n INTEGER. On entry, n N specifies the order of the matrix A. n >= 0. @param[in] dA DOUBLE PRECISION array of dimension ( lda, n ) Before entry with uplo = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with uplo = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. 
Note that when diag = MagmaUnit, the diagonal elements of A are not referenced either, but are assumed to be unity. @param[in] ldda INTEGER. On entry, lda specifies the first dimension of A. lda >= max( 1, n ). @param[in] db DOUBLE PRECISION array of dimension n On exit, b is overwritten with the solution vector X. @param[in] incb INTEGER. On entry, incb specifies the increment for the elements of b. incb must not be zero. Unchanged on exit. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_dtrsv( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr db, magma_int_t incb, magma_queue_t queue) { magma_int_t size_x = n * incb; magmaDouble_ptr dx=NULL; magma_dmalloc( &dx, size_x ); magmablas_dlaset( MagmaFull, n, 1, MAGMA_D_ZERO, MAGMA_D_ZERO, dx, n, queue ); magmablas_dtrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue ); magmablas_dlacpy( MagmaFull, n, 1, dx, n, db, n, queue ); magma_free( dx ); }
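The recursive out-of-place driver above advances in NB-wide blocks: for the lower, no-transpose case it first folds the already-solved prefix of x into the current block with magma_dgemv and then solves the diagonal block with the out-of-place kernel. The host routine below sketches that same forward substitution with explicit loops (column-major A with leading dimension lda, non-unit diagonal); it is illustrative only and blocked_lower_trsv_ref is a hypothetical name.

#include <stddef.h>

// Sketch: forward substitution in nb-wide blocks, mirroring the structure of
// magmablas_dtrsv_recursive_outofplace for uplo=Lower, trans=NoTrans, diag=NonUnit.
static void blocked_lower_trsv_ref(int n, int nb, const double* A, int lda,
                                   const double* b, double* x)
{
    for (int i = 0; i < n; i += nb) {
        int jb = (nb < n - i) ? nb : n - i;          // size of the current diagonal block
        for (int r = 0; r < jb; ++r) {
            double s = b[i + r];
            for (int k = 0; k < i; ++k)              // update with previously solved blocks (the gemv step)
                s -= A[(i + r) + (size_t)k * lda] * x[k];
            for (int k = i; k < i + r; ++k)          // within-block part handled by the trsv kernel
                s -= A[(i + r) + (size_t)k * lda] * x[k];
            x[i + r] = s / A[(i + r) + (size_t)(i + r) * lda];
        }
    }
}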
54090f16e61b6286af2a0ed1f47164dcbc249e39.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @author Tingxing Dong @author Azzam Haidar @generated from magmablas/ztrsv.cu normal z -> d, Tue Feb 9 16:05:35 2016 */ #include "magma_internal.h" #include "magma_templates.h" #define PRECISION_d #define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched #define NUM_THREADS 128 //64 //128 #define BLOCK_SIZE_N 128 #define DIM_X_N 128 #define DIM_Y_N 1 #define BLOCK_SIZE_T 32 #define DIM_X_T 16 #define DIM_Y_T 8 #include "dtrsv_template_device.cuh" #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column extern __shared__ double shared_data[]; //============================================================================== template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag> __global__ void dtrsv_notrans_kernel_outplace( int n, const double * __restrict__ A, int lda, double *b, int incb, double *x) { dtrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } //============================================================================== template<const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag> __global__ void dtrsv_trans_kernel_outplace( int n, const double * __restrict__ A, int lda, double *b, int incb, double *x) { dtrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } //============================================================================== extern "C" void magmablas_dtrsv_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaDouble_ptr b, magma_int_t incb, magmaDouble_ptr x, magma_queue_t queue, magma_int_t flag=0) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. 
if (n == 0) return; dim3 threads( NUM_THREADS ); dim3 blocks( 1, 1, 1 ); size_t shmem = n * sizeof(double); if (trans == MagmaNoTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } else //Lower { if (diag == MagmaNonUnit) { if (flag == 0) { dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit> <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit> <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } } else if (trans == MagmaTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, 
DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } } else if (trans == MagmaConjTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } } } /* README: flag decides if the dtrsv_outplace see an updated x or not. 0: No; other: Yes In recursive, flag must be nonzero except the 1st call */ extern "C" void magmablas_dtrsv_recursive_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaDouble_ptr b, magma_int_t incb, magmaDouble_ptr x, magma_queue_t queue) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. 
if (n == 0) return; //Init x with zero //magmablas_dlaset( MagmaFull, n, incb, MAGMA_D_ZERO, MAGMA_D_ZERO, x, n, queue ); magma_int_t col = n; if (trans == MagmaNoTrans) { for (magma_int_t i=0; i < n; i+= NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaUpper) { col -= jb; //assume x_array contains zero elements, magmablas_dgemv will cause slow down magma_dgemv( MagmaNoTrans, jb, i, MAGMA_D_ONE, A(col, col+jb), lda, x+col+jb, 1, MAGMA_D_ONE, x+col, 1, queue ); } else { col = i; magma_dgemv( MagmaNoTrans, jb, i, MAGMA_D_ONE, A(col, 0), lda, x, 1, MAGMA_D_ONE, x+col, 1, queue ); } magmablas_dtrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } else { for (magma_int_t i=0; i < n; i += NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaLower) { col -= jb; magma_dgemv( MagmaConjTrans, i, jb, MAGMA_D_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_D_ONE, x+col, 1, queue ); } else { col = i; magma_dgemv( MagmaConjTrans, i, jb, MAGMA_D_ONE, A(0, col), lda, x, 1, MAGMA_D_ONE, x+col, 1, queue ); } magmablas_dtrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } } //============================================================================== /** Purpose ------- dtrsv solves one of the matrix equations on gpu op(A)*x = B, or x*op(A) = B, where alpha is a scalar, X and B are vectors, A is a unit, or non-unit, upper or lower triangular matrix and op(A) is one of op(A) = A, or op(A) = A^T, or op(A) = A^H. The vector x is overwritten on b. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] trans magma_trans_t. On entry, trans specifies the form of op(A) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op(A) = A. - = MagmaTrans: op(A) = A^T. - = MagmaConjTrans: op(A) = A^H. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] n INTEGER. On entry, n N specifies the order of the matrix A. n >= 0. @param[in] dA DOUBLE PRECISION array of dimension ( lda, n ) Before entry with uplo = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with uplo = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when diag = MagmaUnit, the diagonal elements of A are not referenced either, but are assumed to be unity. @param[in] ldda INTEGER. On entry, lda specifies the first dimension of A. lda >= max( 1, n ). @param[in] db DOUBLE PRECISION array of dimension n On exit, b is overwritten with the solution vector X. @param[in] incb INTEGER. On entry, incb specifies the increment for the elements of b. incb must not be zero. Unchanged on exit. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_dtrsv( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr db, magma_int_t incb, magma_queue_t queue) { magma_int_t size_x = n * incb; magmaDouble_ptr dx=NULL; magma_dmalloc( &dx, size_x ); magmablas_dlaset( MagmaFull, n, 1, MAGMA_D_ZERO, MAGMA_D_ZERO, dx, n, queue ); magmablas_dtrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue ); magmablas_dlacpy( MagmaFull, n, 1, dx, n, db, n, queue ); magma_free( dx ); }
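As a usage note for the routine documented above, a single triangular solve only needs the arguments listed in its signature. The wrapper below is a sketch under the assumption that dA and db already reside on the device and that queue is a live magma_queue_t; solve_lower_nonunit is a hypothetical name, while the types and enum values are the ones appearing in the file.

// Sketch: overwrite db with the solution of L*x = b, where L is the lower,
// non-unit-diagonal triangle of dA, using the interface defined above.
static void solve_lower_nonunit(magma_int_t n, magmaDouble_const_ptr dA,
                                magma_int_t ldda, magmaDouble_ptr db,
                                magma_queue_t queue)
{
    magmablas_dtrsv(MagmaLower, MagmaNoTrans, MagmaNonUnit,
                    n, dA, ldda, db, /* incb */ 1, queue);
}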
6d8b1a66014cb329549602f3b57115b03a67d250.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <cutil_inline.h> #include <cudpp/cudpp.h> #include "gcompress_cuda.h" //#define NS3 int byte_num(int max_num) { if (max_num > THREE_BYTE) return 4; if (max_num > ONE_BYTE) return 2; return 1; } void gc_print_int(int* buf, int num) { int* cpu_val = (int*)malloc(num*sizeof(int)); hipMemcpy(cpu_val, buf, num*sizeof(int), hipMemcpyDeviceToHost); //hipMemcpy(cpu_val, lpartkeyVal, maxValue, hipMemcpyDeviceToHost); for (int i = 0; i < num; i++) printf("%d\n", cpu_val[i]); free(cpu_val); } void gc_print_char(char* buf, int num) { char* cpu_val = (char*)malloc(num); hipMemcpy(cpu_val, buf, num, hipMemcpyDeviceToHost); //hipMemcpy(cpu_val, lpartkeyVal, maxValue, hipMemcpyDeviceToHost); for (int i = 0; i < num; i++) printf("%d\n", cpu_val[i]); free(cpu_val); } //============================================================================= //query operators //============================================================================= __global__ void gc_sum1_kernel(float* price, float* discount, int entry_num) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; float4* out = (float4*)price; float4* raw = (float4*)discount; float4 r = raw[ttid]; float4 o = out[ttid]; o.x = o.x * (1.0f - r.x); o.y = o.y * (1.0f - r.y); o.z = o.z * (1.0f - r.z); o.w = o.w * (1.0f - r.w); out[ttid] = o; } void gc_sum1(gcStream_t stream, float* price, float* discount, int entry_num) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_sum1_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, price, discount, entry_num); CUT_CHECK_ERROR("gc_intersect"); } __global__ void gc_sum2_kernel(float* price, char* type, int* joinPos, int entry_num) { int ttid = TID; if (ttid >= entry_num) return; if (type[joinPos[ttid]] < 125) price[ttid] = 0.0f; } void gc_sum2(gcStream_t stream, float* price, char* type, int* joinPos, int entry_num) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(entry_num, blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_sum2_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, price, type, joinPos, entry_num); CUT_CHECK_ERROR("gc_intersect"); } __global__ void gc_scatter_hist_kernel(char* vector, int entry_num, int* hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int index = ttid + ttid; char c = vector[ttid]; int4* out = (int4*)hist; int4 o; if (c & 0x80) o.x = 1; else o.x = 0; if (c & 0x40) o.y = 1; else o.y = 0; if (c & 0x20) o.z = 1; else o.z = 0; if (c & 0x10) o.w = 1; else o.w = 0; out[index] = o; if (c & 0x08) o.x = 1; else o.x = 0; if (c & 0x04) o.y = 1; else o.y = 0; if (c & 0x02) o.z = 1; else o.z = 0; if (c & 0x01) o.w = 1; else o.w = 0; out[index+1] = o; } __global__ void gc_scatter_float_kernel(float* column, int* offset, int* hist, int entry_num, float* out) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* h4 = (int4*)hist; int4* o4 = (int4*)offset; float4* c4 = (float4*)column; int4 h = h4[ttid]; int4 o = o4[ttid]; float4 c = c4[ttid]; if (h.x == 1) out[o.x] = c.x; if (h.y == 1) out[o.y] = c.y; if (h.z == 1) out[o.z] = c.z; if (h.w == 1) out[o.w] = c.w; } void gc_scatter_float(gcStream_t stream, float* column, char* vector, int entry_num, float** out, int* num) { int* hist = (int*)gc_malloc(sizeof(int) * entry_num); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, 
CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_scatter_hist_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, vector, entry_num, hist); CUT_CHECK_ERROR("gc_intersect"); int* offset = (int*)gc_malloc(sizeof(int)*entry_num); *num = prefixSum(hist, entry_num, offset, EXCLUSIVE); *out = (float*)gc_malloc(*num * sizeof(float)); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_scatter_float_kernel), dim3(gridDim),dim3(blockDim), stream.stream, 0, column, offset, hist, entry_num, *out); CUT_CHECK_ERROR("gc_intersect"); gc_free(offset); gc_free(hist); } __global__ void gc_scatter_kernel(int* column, int* offset,int entry_num, int* out) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* o4 = (int4*)offset; int4* c4 = (int4*)column; int4 o = o4[ttid]; int4 c = c4[ttid]; if (o.x != -1) out[o.x] = c.x; if (o.y != -1) out[o.y] = c.y; if (o.z != -1) out[o.z] = c.z; if (o.w != -1) out[o.w] = c.w; } __global__ void gc_scatter_fix_kernel(int* offset, int entry_num, int* hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* h4 = (int4*)hist; int4* o4 = (int4*)offset; int4 h = h4[ttid]; int4 o = o4[ttid]; if (h.x == 0) o.x = -1; if (h.y == 0) o.y = -1; if (h.z == 0) o.z = -1; if (h.w == 0) o.w = -1; o4[ttid] = o; } void gc_scatter(gcStream_t stream, int* column, char* vector, int entry_num, int** out, int* num) { int* hist = (int*)gc_malloc(sizeof(int) * entry_num); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_scatter_hist_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, vector, entry_num, hist); CUT_CHECK_ERROR("gc_intersect"); int* offset = (int*)gc_malloc(sizeof(int)*entry_num); *num = prefixSum(hist, entry_num, offset, EXCLUSIVE); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_scatter_fix_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, offset, entry_num, hist); CUT_CHECK_ERROR("gc_intersect"); gc_free(hist); printf("%d\n", *num); *out = (int*)gc_malloc(*num * sizeof(int)); hipLaunchKernelGGL(( gc_scatter_kernel), dim3(gridDim),dim3(blockDim), stream.stream, 0, column, offset, entry_num, *out); CUT_CHECK_ERROR("gc_intersect"); gc_free(offset); } __global__ void gc_intersect_kernel(char* pos1, char* pos2, int entry_num, char* pos) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; char4* raw1 = (char4*)pos1; char4* raw2 = (char4*)pos2; char4* raw = (char4*)pos; char4 v1 = raw1[ttid]; char4 v2 = raw2[ttid]; char4 v; /* v.x = (v1.x & v2.x); v.y = (v1.y & v2.y); v.z = (v1.z & v2.z); v.w = (v1.w & v2.w); */ if (v1.x == 1 and v2.x == 1) v.x = 1; else v.x = 0; if (v1.y == 1 and v2.y == 1) v.y = 1; else v.y = 0; if (v1.z == 1 and v2.z == 1) v.z = 1; else v.z = 0; if (v1.w == 1 and v2.w == 1) v.w = 1; else v.w = 0; raw[ttid] = v; } void gc_intersect(gcStream_t stream, char* pos1, char* pos2, int entry_num, char* pos) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_intersect_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, pos1, pos2, entry_num, pos); CUT_CHECK_ERROR("gc_intersect"); } __global__ void gc_filter1_kernel(int* column, int* pos_list, int entry_num) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw = (int4*)column; int4 v = raw[ttid]; int4* raw1 = 
(int4*)pos_list; int4 v2 = raw1[ttid]; if (v2.x == 0) v.x = 0; if (v2.y == 0) v.y = 0; if (v2.z == 0) v.z = 0; if (v2.w == 0) v.w = 0; raw[ttid] = v; } void gc_filter1(gcStream_t stream, int* column, int* pos_list, int entry_num) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_filter1_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, column, pos_list, entry_num); CUT_CHECK_ERROR("gc_filter1"); } __global__ void gc_cal_q14_kernel(float *price, char* discount, char* type, int* lpartkey_offset_in, int* lpartkey_offset_out, int* lpartkey_len, char* pos_vector, int* lpartkey_pos_list, int* ppartkey_pos_list, int entry_num, int centry_num, float* out1, char* out2) { int ttid = TID; if (ttid >= centry_num) return; int lpos = lpartkey_pos_list[ttid]; int ppos = ppartkey_pos_list[ttid]; int llen = lpartkey_len[lpos]; int loffset = lpartkey_offset_in[lpos]; for (int i = 0; i < llen; i++) { int pos = loffset + i; char c = pos_vector[pos / 8]; float o1 = 0.0f; char o2 = 0; if (c & (0x80 >> (pos%8 - 1))) { float price1 = price[pos]; float discount1 = (float)discount[pos] / 100.0; o1 = (1 - discount1) * price1; if (type[ppos] >= 125) o2 = 1; } out1[pos] = o1; out2[pos] = o2; } } void gc_cal_q14(gcStream_t stream, float* price, char* discount, char* type, int* lpartkey_offset_in, int* lpartkey_offset_out, int* lpartkey_len, char* pos_vector, int* lpartkey_pos_list, int* ppartkey_pos_list, int entry_num, int centry_num, float* out1, char* out2) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(centry_num, blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_cal_q14_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, price, discount, type, lpartkey_offset_in, lpartkey_offset_out, lpartkey_len, pos_vector, lpartkey_pos_list, ppartkey_pos_list, entry_num, centry_num, out1, out2); CUT_CHECK_ERROR("gc_pos_vector_hist"); } __global__ void gc_cal_q14_final_kernel(float* out1, char* out2, int entry_num, float sum, float* out) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; float4* raw1 = (float4*)out1; char4* raw2 = (char4*)out2; float4 v1 = raw1[ttid]; char4 v2 = raw2[ttid]; float4 o; if (v2.x == 1) o.x = 100.0f * v1.x; else o.x = 0.0f; if (v2.y == 1) o.y = 100.0f * v1.y; else o.y = 0.0f; if (v2.z == 1) o.z = 100.0f * v1.z; else o.z = 0.0f; if (v2.w == 1) o.w = 100.0f * v1.w; else o.w = 0.0f; float4* out4 = (float4*)out; out4[ttid] = o; } void gc_cal_q14_final(gcStream_t stream, float* out1, char* out2, int entry_num, float sum, float* out) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_cal_q14_final_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, out1, out2, entry_num, sum, out); CUT_CHECK_ERROR("gc_pos_vector_hist"); } /* __global__ void gc_cal_q14_kernel(float *price, char* discount, char* type, int* lpartkey_offset_in, int* lpartkey_offset_out, int* lpartkey_len, char* pos_vector, int* lpartkey_pos_list, int* ppartkey_pos_list, int entry_num, int centry_num, float* out1, float* out2) { int ttid = TID; if (ttid >= centry_num) return; int lpos = lpartkey_pos_list[ttid]; int ppos = ppartkey_pos_list[ttid]; int llen = lpartkey_len[ttid]; int loffset = lpartkey_offset_in[lpos]; for (int i = 0; i < llen; i++) { int pos = loffset + i; char c = pos_vector[pos / 8]; float o1 = 0.0f; float o2 = 0.0f; if (c & (0x80 >> (c%8 - 1))) { float 
price1 = price[pos]; float discount1 = (float)discount[pos] / 100.0; o2 = (1 - discount1) * price1; if (type[ppos] >= 125) o1 = o2; } out1[pos] = o1; out2[pos] = o2; } } void gc_cal_q14(gcStream_t stream, float* price, char* discount, char* type, int* lpartkey_offset_in, int* lpartkey_offset_out, int* lpartkey_len, char* pos_vector, int* lpartkey_pos_list, int* ppartkey_pos_list, int entry_num, int centry_num, float* out1, float* out2) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(centry_num, blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_cal_q14_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, price, discount, type, lpartkey_offset_in, lpartkey_offset_out, lpartkey_len, pos_vector, lpartkey_pos_list, ppartkey_pos_list, entry_num, centry_num, out1, out2); CUT_CHECK_ERROR("gc_pos_vector_hist"); } */ __global__ void gc_pos_vector_hist_kernel(int* column, char* val_vector, int centry_num, int* hist) { int ttid = TID; if (ttid >= CEIL(centry_num, 4)) return; int4* raw = (int4*)column; int4 v = raw[ttid]; int4* out = (int4*)hist; int4 o; o.x = val_vector[v.x]; o.y = val_vector[v.y]; o.z = val_vector[v.z]; o.w = val_vector[v.w]; out[ttid] = o; } __global__ void gc_pos_vector_kernel(int* column, int* hist, int* offset, int centry_num, int* pos_list) { int ttid = TID; if (ttid >= CEIL(centry_num, 4)) return; int4* offset4 = (int4*)offset; int4* hist4 = (int4*)hist; int4 o = offset4[ttid]; int4 h = hist4[ttid]; if (h.x) pos_list[o.x] = ttid * 4; if (h.y) pos_list[o.y] = ttid * 4 + 1; if (h.z) pos_list[o.z] = ttid * 4 + 2; if (h.w) pos_list[o.w] = ttid * 4 + 3; } void gc_pos_vector(gcStream_t stream, int* column, char* val_vector, int centry_num, int** pos_list, int* num) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); int* hist = (int*)gc_malloc(sizeof(int) * CEIL(centry_num, 4) * 4); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(centry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_pos_vector_hist_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, column, val_vector, centry_num, hist); CUT_CHECK_ERROR("gc_pos_vector_hist"); int* offset = (int*)gc_malloc(sizeof(int) * CEIL(centry_num, 4) * 4); *num = prefixSum(hist, centry_num, offset, EXCLUSIVE); printf("%d \n", *num); *pos_list = (int*)gc_malloc(*num * sizeof(int)); hipLaunchKernelGGL(( gc_pos_vector_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, column, hist, offset, centry_num, *pos_list); CUT_CHECK_ERROR("gc_pos_vector"); gc_free(offset); gc_free(hist); } __global__ void gc_val_vector_kernel(int* gpu_column, int entry_num, char* gpu_val_vector) { int ttid = TID; if (ttid >= entry_num) return; int v = gpu_column[ttid]; gpu_val_vector[v] = 1; } void gc_val_vector(gcStream_t stream, int* gpu_column, int entry_num, int max_val, char* gpu_val_vector) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); hipMemset(gpu_val_vector, 0, max_val); THREAD_CONF(gridDim, blockDim, CEIL(entry_num, blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_val_vector_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, gpu_column, entry_num, gpu_val_vector); CUT_CHECK_ERROR("gc_val_vector"); } __global__ void gc_select_char_ng_lt(char* gpu_column, int entry_num, char low, char high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; char4* raw4 = (char4*)gpu_column; int index = ttid + ttid; char4 v = raw4[index]; char c = 0; if (v.x < high) c |= 0x80; if (v.y < high) c |= 0x40; if (v.z < high) c |= 0x20; if (v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x 
< high) c |= 0x08; if (v.y < high) c |= 0x04; if (v.z < high) c |= 0x02; if (v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } __global__ void gc_select_char_ge_le(char* gpu_column, int entry_num, char low, char high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; char4* raw4 = (char4*)gpu_column; int index = ttid + ttid; char4 v = raw4[index]; char c = 0; if (v.x >= low && v.x <= high) c |= 0x80; if (v.y >= low && v.y <= high) c |= 0x40; if (v.z >= low && v.z <= high) c |= 0x20; if (v.w >= low && v.w <= high) c |= 0x10; v = raw4[index+1]; if (v.x >= low && v.x <= high) c |= 0x08; if (v.y >= low && v.y <= high) c |= 0x04; if (v.z >= low && v.z <= high) c |= 0x02; if (v.w >= low && v.w <= high) c |= 0x01; gpu_pos_vector[ttid] = c; } __global__ void gc_select_char_gt_lt(char* gpu_column, int entry_num, char low, char high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; char4* raw4 = (char4*)gpu_column; int index = ttid + ttid; char4 v = raw4[index]; char c = 0; if (v.x > low && v.x < high) c |= 0x80; if (v.y > low && v.y < high) c |= 0x40; if (v.z > low && v.z < high) c |= 0x20; if (v.w > low && v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x > low && v.x < high) c |= 0x08; if (v.y > low && v.y < high) c |= 0x04; if (v.z > low && v.z < high) c |= 0x02; if (v.w > low && v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } void gc_select_char(gcStream_t stream, char* gpu_column, int entry_num, char low, char op_low, char high, char op_high, char* gpu_pos_vector) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); if (op_low == GT && op_high == LT) { hipLaunchKernelGGL(( gc_select_char_gt_lt), dim3(gridDim), dim3(blockDim), stream.stream, 0, gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } else if (op_low == GE && op_high == LE) { hipLaunchKernelGGL(( gc_select_char_ge_le), dim3(gridDim), dim3(blockDim), stream.stream, 0, gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } else if (op_low == NG && op_high == LT) { hipLaunchKernelGGL(( gc_select_char_ng_lt), dim3(gridDim), dim3(blockDim), stream.stream, 0, gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } } __global__ void gc_select_short_ge_lt(short* gpu_column, int entry_num, short low, short high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; short4* raw4 = (short4*)gpu_column; int index = ttid + ttid; short4 v = raw4[index]; char c = 0; if (v.x >= low && v.x < high) c |= 0x80; if (v.y >= low && v.y < high) c |= 0x40; if (v.z >= low && v.z < high) c |= 0x20; if (v.w >= low && v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x >= low && v.x < high) c |= 0x08; if (v.y >= low && v.y < high) c |= 0x04; if (v.z >= low && v.z < high) c |= 0x02; if (v.w >= low && v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } void gc_select_short(gcStream_t stream, short* gpu_column, int entry_num, short low, char op_low, short high, char op_high, char* gpu_pos_vector) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); if (op_low == GE && op_high == LT) { hipLaunchKernelGGL(( gc_select_short_ge_lt), dim3(gridDim), dim3(blockDim), stream.stream, 0, gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } } __global__ void 
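/*
 * Added commentary (not original code): every gc_select_* kernel in this
 * section packs the predicate result for 8 consecutive rows into one byte of
 * gpu_pos_vector, most-significant bit first, i.e. row (8*i + k) maps to bit
 * (0x80 >> k) of byte i.  The op codes appear to be strict/non-strict bounds
 * (GT/GE/LT/LE), while NG seems to mean "no bound on that side" - the *_ng_*
 * kernels simply ignore that argument.  For example, rows {3,7,2,9,5,1,8,4}
 * under the predicate "value < 5" produce the byte 10100101b = 0xA5.
 */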
gc_select_int_ng_lt(int* gpu_column, int entry_num, int low, int high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int4* raw4 = (int4*)gpu_column; int index = ttid + ttid; int4 v = raw4[index]; char c = 0; if (v.x < high) c |= 0x80; if (v.y < high) c |= 0x40; if (v.z < high) c |= 0x20; if (v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x < high) c |= 0x08; if (v.y < high) c |= 0x04; if (v.z < high) c |= 0x02; if (v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } __global__ void gc_select_int_ge_le(int* gpu_column, int entry_num, int low, int high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int4* raw4 = (int4*)gpu_column; int index = ttid + ttid; int4 v = raw4[index]; char c = 0; if (v.x >= low && v.x <= high) c |= 0x80; if (v.y >= low && v.y <= high) c |= 0x40; if (v.z >= low && v.z <= high) c |= 0x20; if (v.w >= low && v.w <= high) c |= 0x10; v = raw4[index+1]; if (v.x >= low && v.x <= high) c |= 0x08; if (v.y >= low && v.y <= high) c |= 0x04; if (v.z >= low && v.z <= high) c |= 0x02; if (v.w >= low && v.w <= high) c |= 0x01; gpu_pos_vector[ttid] = c; } __global__ void gc_select_int_gt_ng(int* gpu_column, int entry_num, int low, int high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int4* raw4 = (int4*)gpu_column; int index = ttid + ttid; int4 v = raw4[index]; char c = 0; if (v.x > low) c |= 0x80; if (v.y > low) c |= 0x40; if (v.z > low) c |= 0x20; if (v.w > low) c |= 0x10; v = raw4[index+1]; if (v.x > low) c |= 0x08; if (v.y > low) c |= 0x04; if (v.z > low) c |= 0x02; if (v.w > low) c |= 0x01; gpu_pos_vector[ttid] = c; } __global__ void gc_select_int_ge_lt(int* gpu_column, int entry_num, int low, int high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int4* raw4 = (int4*)gpu_column; int index = ttid + ttid; int4 v = raw4[index]; char c = 0; if (v.x >= low && v.x < high) c |= 0x80; if (v.y >= low && v.y < high) c |= 0x40; if (v.z >= low && v.z < high) c |= 0x20; if (v.w >= low && v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x >= low && v.x < high) c |= 0x08; if (v.y >= low && v.y < high) c |= 0x04; if (v.z >= low && v.z < high) c |= 0x02; if (v.w >= low && v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } void gc_select_int(gcStream_t stream, int* gpu_column, int entry_num, int low, char op_low, int high, char op_high, char* gpu_pos_vector) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); if (op_low == GE && op_high == LT) { hipLaunchKernelGGL(( gc_select_int_ge_lt), dim3(gridDim), dim3(blockDim), stream.stream, 0, gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } else if (op_low == GE && op_high == LE) { hipLaunchKernelGGL(( gc_select_int_ge_le), dim3(gridDim), dim3(blockDim), stream.stream, 0, gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } else if (op_low == NG && op_high == LT) { hipLaunchKernelGGL(( gc_select_int_ng_lt), dim3(gridDim), dim3(blockDim), stream.stream, 0, gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } else if (op_low == GT && op_high == NG) { hipLaunchKernelGGL(( gc_select_int_gt_ng), dim3(gridDim), dim3(blockDim), stream.stream, 0, gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } } __global__ void gc_select_float_gt_lt(float* gpu_column, int entry_num, float 
low, float high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; float4* raw4 = (float4*)gpu_column; int index = ttid + ttid; float4 v = raw4[index]; char c = 0; if (v.x > low && v.x < high) c |= 0x80; if (v.y > low && v.y < high) c |= 0x40; if (v.z > low && v.z < high) c |= 0x20; if (v.w > low && v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x > low && v.x < high) c |= 0x08; if (v.y > low && v.y < high) c |= 0x04; if (v.z > low && v.z < high) c |= 0x02; if (v.w > low && v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } void gc_select_float(gcStream_t stream, float* gpu_column, int entry_num, float low, char op_low, float high, char op_high, char* gpu_pos_vector) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); if (op_low == GT && op_high == LT) { hipLaunchKernelGGL(( gc_select_float_gt_lt), dim3(gridDim), dim3(blockDim), stream.stream, 0, gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } } __global__ void gc_filter_kernel(char* pos_vector1, char* pos_vector2, char* pos_vector3, int entry_num, char* pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 32)) return; char4* raw1 = (char4*)pos_vector1; char4* raw2 = (char4*)pos_vector2; char4* raw3 = (char4*)pos_vector3; char4* out = (char4*)pos_vector; char4 v1 = raw1[ttid]; char4 v2 = raw2[ttid]; char4 v3 = raw3[ttid]; char4 o; o.x = (v1.x & v2.x & v3.x); o.y = (v1.y & v2.y & v3.y); o.z = (v1.z & v2.z & v3.z); o.w = (v1.w & v2.w & v3.w); out[ttid] = o; } void gc_filter(gcStream_t stream, char* pos_vector1, char* pos_vector2, char* pos_vector3, char* pos_vector, int entry_num) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 32), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_filter_kernel), dim3(gridDim), dim3(blockDim), stream.stream, 0, pos_vector1, pos_vector2, pos_vector3, entry_num, pos_vector); CUT_CHECK_ERROR("gc_filter_kernel"); } __global__ void gc_filter_float_product(float* column1, float* column2, int entry_num, char* pos_vector, float* product) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int index = ttid + ttid; char c = pos_vector[ttid]; float4* raw1 = (float4*)column1; float4* raw2 = (float4*)column2; float4 v1 = raw1[index]; float4 v2 = raw2[index]; float4* out = (float4*)product; float4 o; if (c & 0x80) o.x = v1.x * v2.x; else o.x = 0.0f; if (c & 0x40) o.y = v1.y * v2.y; else o.y = 0.0f; if (c & 0x20) o.z = v1.z * v2.z; else o.z = 0.0f; if (c & 0x10) o.w = v1.w * v2.w; else o.w = 0.0f; out[index] = o; v1 = raw1[index+1]; v2 = raw2[index+1]; if (c & 0x08) o.x = v1.x * v2.x; else o.x = 0.0f; if (c & 0x04) o.y = v1.y * v2.y; else o.y = 0.0f; if (c & 0x02) o.z = v1.z * v2.z; else o.z = 0.0f; if (c & 0x01) o.w = v1.w * v2.w; else o.w = 0.0f; out[index+1] = o; } void gc_filter_float_value(gcStream_t stream, float* column1, float* column2, int entry_num, char* pos_vector, float* out) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_filter_float_product), dim3(gridDim), dim3(blockDim), stream.stream, 0, column1, column2, entry_num, pos_vector, out); CUT_CHECK_ERROR("gc_filter_float_product"); /* int* hist = (int*)gc_malloc(sizeof(int) * CEIL(entry_num, 8) * 8); gc_filter_float_hist_kernel<<<gridDim, blockDim, stream.stream>>>(pos_vector, hist) CUT_CHECK_ERROR("gc_filter_float_hist_kernel"); 
int* offset = (int*)gc_malloc( sizeof(int) * CEIL(entry_num,8) * 8); prefixSum(); gc_free(offset); gc_free(hist); */ } __global__ void gc_filter_float_product2(float* column1, char* column2, int entry_num, char* pos_vector, float* product) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int index = ttid + ttid; char c = pos_vector[ttid]; float4* raw1 = (float4*)column1; char4* raw2 = (char4*)column2; float4 v1 = raw1[index]; char4 v2 = raw2[index]; float4* out = (float4*)product; float4 o; int4 v22; v22.x = (int)v2.x; v22.y = (int)v2.y; v22.z = (int)v2.z; v22.w = (int)v2.w; if (c & 0x80) o.x = v1.x * (float)v22.x; else o.x = 0.0f; if (c & 0x40) o.y = v1.y * (float)v22.y; else o.y = 0.0f; if (c & 0x20) o.z = v1.z * (float)v22.z; else o.z = 0.0f; if (c & 0x10) o.w = v1.w * (float)v22.w; else o.w = 0.0f; out[index] = o; v1 = raw1[index+1]; v2 = raw2[index+1]; if (c & 0x08) o.x = v1.x * (float)v22.x; else o.x = 0.0f; if (c & 0x04) o.y = v1.y * (float)v22.y; else o.y = 0.0f; if (c & 0x02) o.z = v1.z * (float)v22.z; else o.z = 0.0f; if (c & 0x01) o.w = v1.w * (float)v22.w; else o.w = 0.0f; out[index+1] = o; } void gc_filter_float_value2(gcStream_t stream, float* column1, char* column2, int entry_num, char* pos_vector, float* out) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_filter_float_product2), dim3(gridDim), dim3(blockDim), stream.stream, 0, column1, column2, entry_num, pos_vector, out); CUT_CHECK_ERROR("gc_filter_float_product"); /* int* hist = (int*)gc_malloc(sizeof(int) * CEIL(entry_num, 8) * 8); gc_filter_float_hist_kernel<<<gridDim, blockDim, stream.stream>>>(pos_vector, hist) CUT_CHECK_ERROR("gc_filter_float_hist_kernel"); int* offset = (int*)gc_malloc( sizeof(int) * CEIL(entry_num,8) * 8); prefixSum(); gc_free(offset); gc_free(hist); */ } //============================================================================= //Utils //============================================================================= int prefixSum(int* input, int num, int* output, char flag) { CUDPPConfiguration config; config.op = CUDPP_ADD; config.datatype = CUDPP_INT; config.algorithm = CUDPP_SCAN; if (flag == EXCLUSIVE) config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; else config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; CUDPPHandle scanplan = 0; CUDPPResult result = cudppPlan(&scanplan, config, num, 1, 0); if (CUDPP_SUCCESS != result) { printf("Error creating CUDPPPlan\n"); exit(-1); } cudppScan(scanplan, (void*)output, (void*)input, num); result = cudppDestroyPlan(scanplan); if (CUDPP_SUCCESS != result) { printf("Error destroying CUDPPPlan\n"); exit(-1); } int last_offset = 0; int last_hist = 0; hipMemcpy(&last_offset, &output[num -1], sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&last_hist, &input[num -1], sizeof(int), hipMemcpyDeviceToHost); return (last_offset + last_hist); } float sumFloat2(float* input, int num, float* output) { CUDPPConfiguration config; config.op = CUDPP_ADD; config.datatype = CUDPP_FLOAT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; CUDPPHandle scanplan = 0; CUDPPResult result = cudppPlan(&scanplan, config, num, 1, 0); if (CUDPP_SUCCESS != result) { printf("Error creating CUDPPPlan\n"); exit(-1); } cudppScan(scanplan, (void*)output, (void*)input, num); result = cudppDestroyPlan(scanplan); if (CUDPP_SUCCESS != result) { printf("Error destroying CUDPPPlan\n"); exit(-1); } float 
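/*
 * Added note (not original code): prefixSum() above runs a CUDPP scan and
 * returns the total size by adding the last input value to the last output
 * value.  With an EXCLUSIVE scan of hist = {2,1,3,1} the offsets are
 * {0,2,3,6} and the returned total is 6 + 1 = 7; the compression routines
 * use this total to size their scatter buffers.
 */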
last_offset = 0; hipMemcpy(&last_offset, &output[num -1], sizeof(float), hipMemcpyDeviceToHost); return last_offset; } float sumFloat(float* input, int num) { float* input2 = 0; hipHostMalloc((void**)&input2, (sizeof(float)*num)); hipMemcpy(input2, input, sizeof(float)*num, hipMemcpyDeviceToHost); float sum = 0.0f; for (int i = 0; i < num; i++) { sum += input2[i]; //printf("%f, %f\n", input2[i], sum); } hipHostFree(input2); return sum; /* CUDPPConfiguration config; config.op = CUDPP_ADD; config.datatype = CUDPP_FLOAT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; CUDPPHandle scanplan = 0; CUDPPResult result = cudppPlan(&scanplan, config, num, 1, 0); if (CUDPP_SUCCESS != result) { printf("Error creating CUDPPPlan\n"); exit(-1); } cudppScan(scanplan, (void*)output, (void*)input, num); result = cudppDestroyPlan(scanplan); if (CUDPP_SUCCESS != result) { printf("Error destroying CUDPPPlan\n"); exit(-1); } float last_offset = 0; hipMemcpy(&last_offset, &output[num -1], sizeof(float), hipMemcpyDeviceToHost); return last_offset; */ } //============================================================================= //GPU compression //============================================================================= //----------------------------------------------------------------------------- //nsv //----------------------------------------------------------------------------- __device__ int byteNum(int num) { if (num > TWO_BYTE) return 3; if (num > ONE_BYTE) return 2; return 1; } __device__ int byteNumLong(long num) { if (num > THREE_BYTE) return 4; if (num > ONE_BYTE) return 2; return 1; } __global__ void gc_compress_nsv_long_kernel1(long* gpu_ubuf, int entry_num, int* gpu_hist) { int ttid = TID; if (ttid >= entry_num) return; long v = gpu_ubuf[ttid]; gpu_hist[ttid] = byteNumLong(v); } __global__ void gc_compress_nsv_long_kernel2(long* gpu_ubuf, int* gpu_offset, int entry_num, char* gpu_value, char* gpu_len) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; __shared__ long2 sbuf[256]; long2* raw = (long2*)gpu_ubuf; sbuf[threadIdx.x] = raw[ttid*2]; __syncthreads(); int hist; int4* offset4 = (int4*)gpu_offset; int4 offset = offset4[ttid]; char len = 0; char* src = (char*)&sbuf[threadIdx.x].x; hist = byteNumLong(sbuf[threadIdx.x].x); for (int i = 0; i < hist; ++i) gpu_value[offset.x + i] = src[i]; len |= ((char)hist << 6); src = (char*)&sbuf[threadIdx.x].y; hist = byteNumLong(sbuf[threadIdx.x].y); for (int i = 0; i < hist; ++i) gpu_value[offset.y + i] = src[i]; len |= ((char)hist << 4); sbuf[threadIdx.x] = raw[ttid*2+1]; __syncthreads(); src = (char*)&sbuf[threadIdx.x].x; hist = byteNumLong(sbuf[threadIdx.x].x); for (int i = 0; i < hist; ++i) gpu_value[offset.z + i] = src[i]; len |= ((char)hist << 2); src = (char*)&sbuf[threadIdx.x].y; hist = byteNumLong(sbuf[threadIdx.x].y); for (int i = 0; i < hist; ++i) gpu_value[offset.w + i] = src[i]; len |= ((char)hist); gpu_len[ttid] = len; } void gc_compress_nsv_long(gcStream_t Stream, long* gpu_ubuf, int entry_num, char** gpu_value, char* gpu_len, int* size) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(entry_num, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * entry_num); hipLaunchKernelGGL(( gc_compress_nsv_long_kernel1), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); //hipDeviceSynchronize(); //step 2: prefix sum 
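/*
 * Added commentary (not original code): the NSV ("null suppression with a
 * variable byte count") codecs in this section store each value in the
 * minimum number of bytes reported by byteNum()/byteNumLong(), then record
 * the per-value byte count packed four-to-a-byte (2 bits each) in gpu_len.
 * Assuming ONE_BYTE and TWO_BYTE are the usual 0xFF and 0xFFFF limits, the
 * int values {5, 300, 70000, 9} get byte counts {1,2,3,1}, exclusive-scan
 * offsets {0,1,3,6}, a 7-byte value stream, and the length byte
 * (1<<6)|(2<<4)|(3<<2)|1 = 0x6D.
 */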
int* gpu_offset = (int*)gc_malloc(sizeof(int) * entry_num); int totalSize = prefixSum(gpu_hist, entry_num, gpu_offset, EXCLUSIVE); *size = totalSize; //hipDeviceSynchronize(); /* int* cpu_offset = (int*)malloc(sizeof(int)*entry_num); int* cpu_hist = (int*)malloc(sizeof(int)*entry_num); hipMemcpy(cpu_offset, gpu_offset, sizeof(int)*entry_num, hipMemcpyDeviceToHost); hipMemcpy(cpu_hist, gpu_hist, sizeof(int)*entry_num, hipMemcpyDeviceToHost); for (int i= 0; i < 10; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); *gpu_value = (char*)gc_malloc(totalSize); hipLaunchKernelGGL(( gc_compress_nsv_long_kernel2), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, gpu_offset, entry_num, *gpu_value, gpu_len); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_hist); gc_free(gpu_offset); } __global__ void gc_compress_nsv_kernel1(int* gpu_ubuf, int entry_num, int* gpu_hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; int4 v = raw4[ttid]; int o = 0; o += byteNum(v.x); o += byteNum(v.y); o += byteNum(v.z); o += byteNum(v.w); gpu_hist[ttid] = o; } __global__ void gc_compress_nsv_kernel2(int* gpu_ubuf, int* gpu_offset, int entry_num, char* gpu_value, char* gpu_len) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; __shared__ int4 sbuf[256]; sbuf[threadIdx.x] = raw4[ttid]; __syncthreads(); int hist; int offset = gpu_offset[ttid]; char len = 0; char* src = (char*)&sbuf[threadIdx.x].x; hist = byteNum(sbuf[threadIdx.x].x); for (int i = 0; i < hist; ++i) gpu_value[offset + i] = src[i]; len |= ((char)hist << 6); offset += hist; src = (char*)&sbuf[threadIdx.x].y; hist = byteNum(sbuf[threadIdx.x].y); for (int i = 0; i < hist; ++i) gpu_value[offset + i] = src[i]; len |= ((char)hist << 4); offset += hist; src = (char*)&sbuf[threadIdx.x].z; hist = byteNum(sbuf[threadIdx.x].z); for (int i = 0; i < hist; ++i) gpu_value[offset + i] = src[i]; len |= ((char)hist << 2); offset += hist; src = (char*)&sbuf[threadIdx.x].w; hist = byteNum(sbuf[threadIdx.x].w); for (int i = 0; i < hist; ++i) gpu_value[offset + i] = src[i]; len |= ((char)hist); offset += hist; gpu_len[ttid] = len; } void gc_compress_nsv(gcStream_t Stream, int* gpu_ubuf, int entry_num, char** gpu_value, char* gpu_len, int* size) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * threadNum); hipLaunchKernelGGL(( gc_compress_nsv_kernel1), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); //step 2: prefix sum int* gpu_offset = (int*)gc_malloc(sizeof(int) * threadNum); int totalSize = prefixSum(gpu_hist, threadNum, gpu_offset, EXCLUSIVE); *size = totalSize; /* int* cpu_offset = (int*)malloc(sizeof(int)*entry_num); int* cpu_hist = (int*)malloc(sizeof(int)*entry_num); hipMemcpy(cpu_offset, gpu_offset, sizeof(int)*entry_num, hipMemcpyDeviceToHost); hipMemcpy(cpu_hist, gpu_hist, sizeof(int)*entry_num, hipMemcpyDeviceToHost); for (int i= 0; i < 10; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter *gpu_value = (char*)gc_malloc(totalSize); hipLaunchKernelGGL(( gc_compress_nsv_kernel2), dim3(gridDim), dim3(blockDim), Stream.stream, 
0, gpu_ubuf, gpu_offset, entry_num, *gpu_value, gpu_len); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_hist); gc_free(gpu_offset); } #if 0 __global__ void gc_decompress_nsv_kernel1(char* gpu_len, int entry_num, int* gpu_hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* hist = (int4*)gpu_hist; char v = gpu_len[ttid]; int4 o; o.x = ((v & 0xc0) >> 6); o.y = ((v & 0x30) >> 4); o.z = ((v & 0x0c) >> 2); o.w = ((v & 0x03)); hist[ttid] = o; } __global__ void gc_decompress_nsv_kernel2(int* gpu_ubuf, int* gpu_offset, int entry_num, char* gpu_value, int* gpu_hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; __shared__ int4 ibuf[256]; ibuf[threadIdx.x].x = 0; ibuf[threadIdx.x].y = 0; ibuf[threadIdx.x].z = 0; ibuf[threadIdx.x].w = 0; __syncthreads(); int4* hist4 = (int4*)gpu_hist; int4 h = hist4[ttid]; int4* offset4 = (int4*)gpu_offset; int4 o = offset4[ttid]; char* cbuf = NULL; cbuf = (char*)&ibuf[threadIdx.x].x; for (int i = 0; i < h.x; i++) cbuf[i] = gpu_value[o.x + i]; cbuf = (char*)&ibuf[threadIdx.x].y; for (int i = 0; i < h.y; i++) cbuf[i] = gpu_value[o.y + i]; cbuf = (char*)&ibuf[threadIdx.x].z; for (int i = 0; i < h.z; i++) cbuf[i] = gpu_value[o.z + i]; cbuf = (char*)&ibuf[threadIdx.x].w; for (int i = 0; i < h.w; i++) cbuf[i] = gpu_value[o.w + i]; int4* raw4 = (int4*)gpu_ubuf; raw4[ttid] = ibuf[threadIdx.x]; __syncthreads(); } void gc_decompress_nsv(gcStream_t Stream, int* gpu_ubuf, int entry_num, char* gpu_value, char* gpu_len) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * entry_num); hipLaunchKernelGGL(( gc_decompress_nsv_kernel1), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_len, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); //step 2: prefix sum int* gpu_offset = (int*)gc_malloc(sizeof(int) * entry_num); int totalSize = prefixSum(gpu_hist, entry_num, gpu_offset, EXCLUSIVE); /* int* cpu_offset = (int*)malloc(sizeof(int)*threadNum); int* cpu_hist = (int*)malloc(sizeof(int)*threadNum); hipMemcpy(cpu_offset, gpu_offset, sizeof(int)*threadNum, hipMemcpyDeviceToHost); hipMemcpy(cpu_hist, gpu_hist, sizeof(int)*threadNum, hipMemcpyDeviceToHost); for (int i= 0; i < threadNum; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter hipLaunchKernelGGL(( gc_decompress_nsv_kernel2), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, gpu_offset, entry_num, gpu_value, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_hist); gc_free(gpu_offset); } #endif __global__ void gc_decompress_nsv_kernel1(char* gpu_len, int entry_num, int* gpu_hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; char v = gpu_len[ttid]; int o = 0; o += ((v & 0xc0) >> 6); o += ((v & 0x30) >> 4); o += ((v & 0x0c) >> 2); o += ((v & 0x03)); gpu_hist[ttid] = o; } __global__ void gc_decompress_nsv_kernel2(int* gpu_ubuf, int* gpu_offset, int entry_num, char* gpu_value, char* gpu_len) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; __shared__ int4 ibuf[256]; ibuf[threadIdx.x].x = 0; ibuf[threadIdx.x].y = 0; ibuf[threadIdx.x].z = 0; ibuf[threadIdx.x].w = 0; __syncthreads(); char v = gpu_len[ttid]; int h = 0; int o = gpu_offset[ttid]; char* cbuf = NULL; h = ((v & 0xc0) >> 6); cbuf = (char*)&ibuf[threadIdx.x].x; for (int i = 0; i < h; i++) cbuf[i] = gpu_value[o + i]; o += h; h = ((v & 0x30) >> 
4); cbuf = (char*)&ibuf[threadIdx.x].y; for (int i = 0; i < h; i++) cbuf[i] = gpu_value[o + i]; o += h; h = ((v & 0x0c) >> 2); cbuf = (char*)&ibuf[threadIdx.x].z; for (int i = 0; i < h; i++) cbuf[i] = gpu_value[o + i]; o += h; h = ((v & 0x03)); cbuf = (char*)&ibuf[threadIdx.x].w; for (int i = 0; i < h; i++) cbuf[i] = gpu_value[o + i]; o += h; int4* raw4 = (int4*)gpu_ubuf; raw4[ttid] = ibuf[threadIdx.x]; __syncthreads(); } void gc_decompress_nsv(gcStream_t Stream, int* gpu_ubuf, int entry_num, char* gpu_value, char* gpu_len) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * threadNum); hipLaunchKernelGGL(( gc_decompress_nsv_kernel1), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_len, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); //step 2: prefix sum int* gpu_offset = (int*)gc_malloc(sizeof(int) * threadNum); int totalSize = prefixSum(gpu_hist, threadNum, gpu_offset, EXCLUSIVE); gc_free(gpu_hist); /* int* cpu_offset = (int*)malloc(sizeof(int)*threadNum); int* cpu_hist = (int*)malloc(sizeof(int)*threadNum); hipMemcpy(cpu_offset, gpu_offset, sizeof(int)*threadNum, hipMemcpyDeviceToHost); hipMemcpy(cpu_hist, gpu_hist, sizeof(int)*threadNum, hipMemcpyDeviceToHost); for (int i= 0; i < threadNum; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter hipLaunchKernelGGL(( gc_decompress_nsv_kernel2), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, gpu_offset, entry_num, gpu_value, gpu_len); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_offset); } #if 0 __global__ void gc_decompress_nsv_kernel1(char* gpu_len, int entry_num, int* gpu_hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; char v = gpu_len[ttid]; int4* hist4 = (int4*)gpu_hist; int4 o; o.x = ((v & 0xc0) >> 6); o.y = ((v & 0x30) >> 4); o.z = ((v & 0x0c) >> 2); o.w = ((v & 0x03)); hist4[ttid] = o; } __global__ void gc_decompress_nsv_kernel2(int* gpu_ubuf, int* gpu_offset, int* gpu_hist, int entry_num, char* gpu_value) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; __shared__ int4 ibuf[256]; ibuf[threadIdx.x].x = 0; ibuf[threadIdx.x].y = 0; ibuf[threadIdx.x].z = 0; ibuf[threadIdx.x].w = 0; __syncthreads(); int4* hist = (int4*)gpu_hist; int4 h = hist[ttid]; int4* offset = (int4*)gpu_offset; int4 o = offset[ttid]; char* cbuf = NULL; cbuf = (char*)&ibuf[threadIdx.x].x; for (int i = 0; i < h.x; i--) cbuf[3 - i] = gpu_value[o.x + h.x - 1 - i]; cbuf = (char*)&ibuf[threadIdx.x].y; for (int i = 0; i < h.y; i--) cbuf[3 - i] = gpu_value[o.y + h.y - 1 - i]; cbuf = (char*)&ibuf[threadIdx.x].z; for (int i = 0; i < h.z; i--) cbuf[3 - i] = gpu_value[o.z + h.z - 1 - i]; cbuf = (char*)&ibuf[threadIdx.x].w; for (int i = 0; i < h.w; i--) cbuf[3 - i] = gpu_value[o.w + h.w - 1 - i]; int4* raw4 = (int4*)gpu_ubuf; raw4[ttid] = ibuf[threadIdx.x]; } void gc_decompress_nsv(gcStream_t Stream, int* gpu_ubuf, int entry_num, char* gpu_value, char* gpu_len) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * entry_num); hipLaunchKernelGGL(( gc_decompress_nsv_kernel1), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_len, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); //step 2: prefix 
sum int* gpu_offset = (int*)gc_malloc(sizeof(int) * entry_num); int totalSize = prefixSum(gpu_hist, entry_num, gpu_offset, EXCLUSIVE); /* int* cpu_offset = (int*)malloc(sizeof(int)*entry_num); int* cpu_hist = (int*)malloc(sizeof(int)*entry_num); hipMemcpy(cpu_offset, gpu_offset, sizeof(int)*entry_num, hipMemcpyDeviceToHost); hipMemcpy(cpu_hist, gpu_hist, sizeof(int)*entry_num, hipMemcpyDeviceToHost); for (int i= 0; i < 10; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter return; hipLaunchKernelGGL(( gc_decompress_nsv_kernel2), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, gpu_offset, gpu_hist, entry_num, gpu_value); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_offset); gc_free(gpu_hist); } #endif #if 0 __global__ void gc_compress_nsv_kernel1(int* gpu_ubuf, int entry_num, int* gpu_hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; int4 v = raw4[ttid]; int4 o; int4* out4 = (int4*)gpu_hist; o.x = byteNum(v.x); o.y = byteNum(v.y); o.z = byteNum(v.z); o.w = byteNum(v.w); out4[ttid] = o; } __global__ void gc_compress_nsv_kernel2(int* gpu_ubuf, int* gpu_offset, int* gpu_hist, int entry_num, char* gpu_value, char* gpu_len) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; __shared__ int4 sbuf[256]; sbuf[threadIdx.x] = raw4[ttid]; __syncthreads(); int4* hist4 = (int4*)gpu_hist; int4 hist = hist4[ttid]; int4* offset4 = (int4*)gpu_offset; int4 offset = offset4[ttid]; char len = 0; char* src = (char*)&sbuf[threadIdx.x].x; for (int i = 0; i < hist.x; ++i) gpu_value[offset.x + i] = src[i]; len |= ((char)hist.x << 6); src = (char*)&sbuf[threadIdx.x].y; for (int i = 0; i < hist.y; ++i) gpu_value[offset.y + i] = src[i]; len |= ((char)hist.x << 4); src = (char*)&sbuf[threadIdx.x].z; for (int i = 0; i < hist.z; ++i) gpu_value[offset.z + i] = src[i]; len |= ((char)hist.x << 2); src = (char*)&sbuf[threadIdx.x].w; for (int i = 0; i < hist.w; ++i) gpu_value[offset.w + i] = src[i]; len |= ((char)hist.x); gpu_len[ttid] = len; } void gc_compress_nsv(gcStream_t Stream, int* gpu_ubuf, int entry_num, char** gpu_value, char* gpu_len) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * entry_num); hipLaunchKernelGGL(( gc_compress_nsv_kernel1), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); return; //step 2: prefix sum int* gpu_offset = (int*)gc_malloc(sizeof(int) * entry_num); int totalSize = prefixSum(gpu_hist, entry_num, gpu_offset, EXCLUSIVE); /* int* cpu_offset = (int*)malloc(sizeof(int)*entry_num); int* cpu_hist = (int*)malloc(sizeof(int)*entry_num); hipMemcpy(cpu_offset, gpu_offset, sizeof(int)*entry_num, hipMemcpyDeviceToHost); hipMemcpy(cpu_hist, gpu_hist, sizeof(int)*entry_num, hipMemcpyDeviceToHost); for (int i= 0; i < 10; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter *gpu_value = (char*)gc_malloc(totalSize); hipLaunchKernelGGL(( gc_compress_nsv_kernel2), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, gpu_offset, gpu_hist, entry_num, *gpu_value, gpu_len); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_hist); gc_free(gpu_offset); } #endif 
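/*
 * Illustrative usage sketch (added, not part of the original code): a
 * minimal NSV round trip on device data, using the gc_compress_nsv /
 * gc_decompress_nsv signatures defined above.  The function name is made up
 * for the example; kept under #if 0 in the same style as the disabled
 * variants in this file.
 */
#if 0
static void nsv_roundtrip_example(gcStream_t stream, int* d_column, int entry_num)
{
    char* d_value = NULL;                                 /* packed byte stream, allocated by the codec */
    char* d_len   = (char*)gc_malloc(CEIL(entry_num, 4)); /* 2-bit lengths, four per byte               */
    int   packed_size = 0;

    gc_compress_nsv(stream, d_column, entry_num, &d_value, d_len, &packed_size);

    int* d_restored = (int*)gc_malloc(sizeof(int) * entry_num);
    gc_decompress_nsv(stream, d_restored, entry_num, d_value, d_len);

    gc_free(d_restored);
    gc_free(d_value);
    gc_free(d_len);
}
#endif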
//----------------------------------------------------------------------------- //bitmap //----------------------------------------------------------------------------- //__device__ __constant__ char gpu_mode[84]; __global__ void gc_compress_bitmap_kernel(char* gpu_ubuf, int entry_num, char* gpu_r, char* gpu_a, char* gpu_n) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int2* raw2 = (int2*)gpu_ubuf; __shared__ int2 sbuf[256]; sbuf[threadIdx.x] = raw2[ttid]; __syncthreads(); char* raw = (char*)&sbuf[threadIdx.x]; char b = 0; if (raw[0] == 'R') b |= 0x80; if (raw[1] == 'R') b |= 0x40; if (raw[2] == 'R') b |= 0x20; if (raw[3] == 'R') b |= 0x10; if (raw[4] == 'R') b |= 0x08; if (raw[5] == 'R') b |= 0x04; if (raw[6] == 'R') b |= 0x02; if (raw[7] == 'R') b |= 0x01; gpu_r[ttid] = b; b = 0; if (raw[0] == 'A') b |= 0x80; if (raw[1] == 'A') b |= 0x40; if (raw[2] == 'A') b |= 0x20; if (raw[3] == 'A') b |= 0x10; if (raw[4] == 'A') b |= 0x08; if (raw[5] == 'A') b |= 0x04; if (raw[6] == 'A') b |= 0x02; if (raw[7] == 'A') b |= 0x01; gpu_a[ttid] = b; b = 0; if (raw[0] == 'N') b |= 0x80; if (raw[1] == 'N') b |= 0x40; if (raw[2] == 'N') b |= 0x20; if (raw[3] == 'N') b |= 0x10; if (raw[4] == 'N') b |= 0x08; if (raw[5] == 'N') b |= 0x04; if (raw[6] == 'N') b |= 0x02; if (raw[7] == 'N') b |= 0x01; gpu_n[ttid] = b; } void gc_compress_bitmap(gcStream_t Stream, char* gpu_ubuf, int entry_num, char* gpu_r, char* gpu_a, char* gpu_n) { // char mode[84] = {0}; // mode['R'] = 0; // mode['A'] = 1; // mode['N'] = 2; // hipMemcpyToSymbol("gpu_mode", mode, 84, 0, hipMemcpyHostToDevice); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_compress_bitmap_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_r, gpu_a, gpu_n); CUT_CHECK_ERROR("gc_compress_bitmap_kernel"); } __global__ void gc_decompress_bitmap_kernel(char* gpu_ubuf, int entry_num, char* gpu_r, char* gpu_a, char* gpu_n) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int2* raw2 = (int2*)gpu_ubuf; __shared__ int2 sbuf[256]; char* raw = (char*)&sbuf[threadIdx.x]; char r = gpu_r[ttid]; char a = gpu_a[ttid]; char n = gpu_n[ttid]; if (r & 0x80) raw[0] = 'R'; if (r & 0x40) raw[1] = 'R'; if (r & 0x20) raw[2] = 'R'; if (r & 0x10) raw[3] = 'R'; if (r & 0x08) raw[4] = 'R'; if (r & 0x04) raw[5] = 'R'; if (r & 0x02) raw[6] = 'R'; if (r & 0x01) raw[7] = 'R'; if (a & 0x80) raw[0] = 'A'; if (a & 0x40) raw[1] = 'A'; if (a & 0x20) raw[2] = 'A'; if (a & 0x10) raw[3] = 'A'; if (a & 0x08) raw[4] = 'A'; if (a & 0x04) raw[5] = 'A'; if (a & 0x02) raw[6] = 'A'; if (a & 0x01) raw[7] = 'A'; if (n & 0x80) raw[0] = 'N'; if (n & 0x40) raw[1] = 'N'; if (n & 0x20) raw[2] = 'N'; if (n & 0x10) raw[3] = 'N'; if (n & 0x08) raw[4] = 'N'; if (n & 0x04) raw[5] = 'N'; if (n & 0x02) raw[6] = 'N'; if (n & 0x01) raw[7] = 'N'; __syncthreads(); raw2[ttid]=sbuf[threadIdx.x]; __syncthreads(); } void gc_decompress_bitmap(gcStream_t Stream, char* gpu_ubuf, int entry_num, char* gpu_r, char* gpu_a, char* gpu_n) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_decompress_bitmap_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_r, gpu_a, gpu_n); CUT_CHECK_ERROR("gc_compress_bitmap_kernel"); } //----------------------------------------------------------------------------- //RLE 
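/*
 * Added commentary (not original code): the RLE codec below keeps one entry
 * per run - gpu_valbuf holds the run value and gpu_lenbuf the run length.
 * Run boundaries are detected where a value differs from its successor (the
 * last element is always a boundary); an exclusive prefix sum of that
 * boundary histogram assigns each run an output slot.  For the input
 * {7,7,7,4,4,9} this yields values {7,4,9} and lengths {3,2,1}.
 */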
//----------------------------------------------------------------------------- __global__ void gc_compress_rle_kernel1(int* gpu_ubuf, int entry_num, int* hist, int* pos) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; int4* hist4 = (int4*)hist; int4* pos4 = (int4*)pos; int4 v = raw4[ttid]; int4 p; int4 h; if (v.x != v.y) { h.x = 1; p.x = ttid * 4; } else { h.x = 0; p.x = 0; } if (v.y != v.z) { h.y = 1; p.y = ttid * 4 + 1; } else { h.y = 0; p.y = 0; } if (v.z != v.w) { h.z = 1; p.z = ttid * 4 + 2; } else { h.z = 0; p.z = 0; } hist4[ttid] = h; pos4[ttid] = p; } __global__ void gc_compress_rle_kernel2(int* gpu_ubuf, int entry_num, int* hist, int* pos) { int ttid = TID; if (ttid > (CEIL(entry_num, 4) - 1)) return; if (ttid == (CEIL(entry_num, 4) - 1)) { hist[entry_num - 1] = 1; pos[entry_num - 1] = entry_num - 1; return; } int4* raw4 = (int4*)(gpu_ubuf + 1); int4* hist4 = (int4*)(hist + 1); int4* pos4 = (int4*)(pos + 1); int4 v = raw4[ttid]; if (v.w != v.z) { hist4[ttid].z = 1; pos4[ttid].z = ttid * 4 + 3; } else { hist4[ttid].z = 0; pos4[ttid].z = 0; } } __global__ void gc_compress_rle_kernel3(int* gpu_ubuf, int* hist, int* offset, int entry_num, int* gpu_valbuf, int* gpu_lenbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; int4* hist4 = (int4*)hist; int4* offset4 = (int4*)offset; int4 v = raw4[ttid]; int4 h = hist4[ttid]; int4 o = offset4[ttid]; if (h.x == 1) { gpu_valbuf[o.x] = v.x; gpu_lenbuf[o.x] = ttid * 4; } if (h.y == 1) { gpu_valbuf[o.y] = v.y; gpu_lenbuf[o.y] = ttid * 4 + 1; } if (h.z == 1) { gpu_valbuf[o.z] = v.z; gpu_lenbuf[o.z] = ttid * 4 + 2; } if (h.w == 1) { gpu_valbuf[o.w] = v.w; gpu_lenbuf[o.w] = ttid * 4 + 3; } } __global__ void gc_compress_rle_kernel4(int* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; int4* out4 = (int4*)gpu_cbuf; int4 v = raw4[ttid]; int4 o; o.x = v.x; o.y = v.y - v.x; o.z = v.z - v.y; o.w = v.w - v.z; out4[ttid] = o; } __global__ void gc_compress_rle_kernel5(int* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= (CEIL(entry_num, 4) - 1)) return; if (ttid == 0) gpu_cbuf[0] = gpu_ubuf[0] + 1; int4* raw4 = (int4*)(gpu_ubuf + 1); int4* out4 = (int4*)(gpu_cbuf + 1); int4 v = raw4[ttid]; out4[ttid].w = v.w - v.z; } void gc_compress_rle(gcStream_t Stream, int* gpu_ubuf, int entry_num, int** gpu_valbuf, int** gpu_lenbuf, int* centry_num) { //step 1: get hist and pos int* hist = (int*)gc_malloc(sizeof(int)*entry_num); int* pos = (int*)gc_malloc(sizeof(int)*entry_num); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_compress_rle_kernel1), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, hist, pos); CUT_CHECK_ERROR("gc_compress_rle_kernel1"); hipLaunchKernelGGL(( gc_compress_rle_kernel2), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, hist, pos); CUT_CHECK_ERROR("gc_compress_rle_kernel2"); gc_free(pos); //step 2: get offset int* offset = (int*)gc_malloc(sizeof(int)*entry_num); hipMemset(offset, 0, sizeof(int)*entry_num); *centry_num = prefixSum(hist, entry_num, offset, EXCLUSIVE); printf("centry_num:%d\n", *centry_num); //Step 3: get value and pos2 int* gpu_pos2 = (int*)gc_malloc(sizeof(int) * (*centry_num)); * gpu_valbuf = (int*)gc_malloc(sizeof(int)*(*centry_num)); * gpu_lenbuf = (int*)gc_malloc(sizeof(int)*(*centry_num)); 
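/* Added note (not original code): kernel3 below scatters each run's value
 * together with the index of its last element (gpu_pos2); kernels 4 and 5
 * then turn those end positions into run lengths by taking adjacent
 * differences, e.g. end positions {2,4,5} become lengths {3,2,1}. */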
hipLaunchKernelGGL(( gc_compress_rle_kernel3), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, hist, offset, entry_num, *gpu_valbuf, gpu_pos2); CUT_CHECK_ERROR("gc_compress_rle_kernel3"); //Step 4: get len int cnum = *centry_num; THREAD_CONF(gridDim, blockDim, CEIL(CEIL(cnum, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_compress_rle_kernel4), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_pos2, *centry_num, *gpu_lenbuf); CUT_CHECK_ERROR("gc_compress_rle_kernel4"); hipLaunchKernelGGL(( gc_compress_rle_kernel5), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_pos2, *centry_num, *gpu_lenbuf); CUT_CHECK_ERROR("gc_compress_rle_kernel5"); gc_free(hist); gc_free(gpu_pos2); gc_free(offset); } __global__ void gc_decompress_rle_kernel(int* gpu_val, int* gpu_len, int centry_num, int* offset, int* gpu_ubuf) { int ttid = TID; if (ttid >= centry_num) return; int val = gpu_val[ttid]; int len = gpu_len[ttid]; int off = offset[ttid]; int* base = gpu_ubuf + off; for (int i = 0; i < len; i++) base[i] = val; } __global__ void gc_decompress_rle_kernel1(int* offset, int* hist, int centry_num) { int ttid = TID; if (ttid >= centry_num) return; hist[offset[ttid]] = 1; } __global__ void gc_decompress_rle_kernel2(int* gpu_ubuf, int* gpu_val, int* offset, int entry_num) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* offset4 = (int4*)offset; int4 o4 = offset4[ttid]; int4 o; o.x = gpu_val[o4.x]; o.y = gpu_val[o4.y]; o.z = gpu_val[o4.z]; o.w = gpu_val[o4.w]; int4* ubuf4 = (int4*)gpu_ubuf; ubuf4[ttid] = o; } void gc_decompress_rle(gcStream_t Stream, int** gpu_ubuf, int* entry_num, int* gpu_valbuf, int* gpu_lenbuf, int centry_num) { //step 1: get offset for centry int* offset = (int*)gc_malloc(sizeof(int)*centry_num); *entry_num = prefixSum(gpu_lenbuf, centry_num, offset, EXCLUSIVE); printf("%d\n", *entry_num); //step 2: mark boundary int* hist = (int*)gc_malloc(sizeof(int) * (*entry_num)); hipMemset(hist, 0, sizeof(int) * (*entry_num)); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(centry_num, blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_decompress_rle_kernel1), dim3(gridDim), dim3(blockDim), Stream.stream, 0, offset, hist, centry_num); CUT_CHECK_ERROR("gc_compress_rle_kernel"); gc_free(offset); offset = (int*)gc_malloc(sizeof(int) * (*entry_num)); prefixSum(hist, *entry_num, offset, INCLUSIVE); hipFree(hist); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(*entry_num,4), blockDim.x), blockDim.x); *gpu_ubuf = (int*)gc_malloc(*entry_num * sizeof(int)); hipLaunchKernelGGL(( gc_decompress_rle_kernel2), dim3(gridDim), dim3(blockDim), Stream.stream, 0, *gpu_ubuf, gpu_valbuf, offset, *entry_num); hipFree(offset); } /* void gc_decompress_rle(gcStream_t Stream, int** gpu_ubuf, int* entry_num, int* gpu_valbuf, int* gpu_lenbuf, int centry_num) { //step 1: get offset for centry int* offset = (int*)gc_malloc(sizeof(int)*centry_num); *entry_num = prefixSum(gpu_lenbuf, centry_num, offset, EXCLUSIVE); *gpu_ubuf = (int*)gc_malloc(sizeof(int)* (*entry_num)); //step 2: scatter dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(centry_num, blockDim.x), blockDim.x); gc_decompress_rle_kernel<<<gridDim, blockDim>>>(gpu_valbuf, gpu_lenbuf, centry_num, offset, *gpu_ubuf); CUT_CHECK_ERROR("gc_compress_rle_kernel"); gc_free(offset); } */ //----------------------------------------------------------------------------- //Scale //----------------------------------------------------------------------------- __global__ void 
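/*
 * Added commentary (not original code): the "scale" codec stores a
 * two-decimal float column as integers scaled by 100 (e.g. 12.34f is kept
 * as 1234) and divides by 100 again on decompression; note that the (int)
 * cast in the compressor truncates rather than rounds.
 */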
gc_decompress_scale_kernel(float* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; float4* raw4 = (float4*)gpu_ubuf; int4* out = (int4*)gpu_cbuf; int4 v = out[ttid]; float4 o; o.x = (float)(v.x) / 100.0f; o.y = (float)(v.y) / 100.0f; o.z = (float)(v.z) / 100.0f; o.w = (float)(v.w) / 100.0f; raw4[ttid] = o; } void gc_decompress_scale(gcStream_t Stream, float* gpu_ubuf, int entry_num, int* gpu_cbuf) { //printf("max_num:%d, byteNum:%d\n", max_num, byteNum); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_decompress_scale_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_ns_kernel"); } __global__ void gc_compress_scale_kernel(float* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; float4* raw4 = (float4*)gpu_ubuf; int4* out = (int4*)gpu_cbuf; float4 v = raw4[ttid]; int4 o; o.x = (int)(v.x * 100.0f); o.y = (int)(v.y * 100.0f); o.z = (int)(v.z * 100.0f); o.w = (int)(v.w * 100.0f); out[ttid] = o; } void gc_compress_scale(gcStream_t Stream, float* gpu_ubuf, int entry_num, int* gpu_cbuf) { //printf("max_num:%d, byteNum:%d\n", max_num, byteNum); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_compress_scale_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_ns_kernel"); } //----------------------------------------------------------------------------- //NS //----------------------------------------------------------------------------- __global__ void gc_compress_ns_kernel(int* gpu_ubuf, int entry_num, short* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; short4* out = (short4*)gpu_cbuf; int4 v = raw4[ttid]; short4 o; o.x = (short)v.x; o.y = (short)v.y; o.z = (short)v.z; o.w = (short)v.w; out[ttid] = o; } void gc_compress_ns2(gcStream_t Stream, int* gpu_ubuf, int entry_num, short* gpu_cbuf, int byteNum) { //printf("max_num:%d, byteNum:%d\n", max_num, byteNum); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_compress_ns_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_ns_kernel"); } __global__ void gc_decompress_ns2_kernel(int* gpu_ubuf, int entry_num, short* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; short4* out = (short4*)gpu_cbuf; short4 v = out[ttid]; int4 o; o.x = (int)v.x; o.y = (int)v.y; o.z = (int)v.z; o.w = (int)v.w; raw4[ttid] = o; } void gc_decompress_ns2(gcStream_t Stream, int* gpu_ubuf, int entry_num, short* gpu_cbuf, int byteNum) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_decompress_ns2_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_decompress_ns_kernel"); } __global__ void gc_compress_ns_long_kernel(long* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 2)) return; long2* raw4 = (long2*)gpu_ubuf; int2* out = (int2*)gpu_cbuf; long2 v = raw4[ttid]; int2 o; o.x = 
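/* Added note (not original code): the NS ("null suppression") kernels in
   this section simply narrow the element type (long -> int here, int ->
   short in gc_compress_ns2, int -> char in gc_compress_ns) and widen it
   back on decompression, so they are only value-preserving when every
   element fits in the narrower type. */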
(int)v.x; o.y = (int)v.y; out[ttid] = o; } void gc_compress_ns_long(gcStream_t Stream, long* gpu_ubuf, int entry_num, int* gpu_cbuf, int byteNum) { //printf("max_num:%d, byteNum:%d\n", max_num, byteNum); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 2), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_compress_ns_long_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_ns_kernel"); } __global__ void gc_compress_ns_kernel(int* gpu_ubuf, int entry_num, char* gpu_cbuf, int byteNum) { int ttid = TID; #ifdef NS3 if (ttid >= entry_num) return; #else if (ttid >= CEIL(entry_num, 4)) return; #endif int4* raw4 = (int4*)gpu_ubuf; #ifndef NS3 char4* out = (char4*)gpu_cbuf; char4 o; int4 v = raw4[ttid]; o.x = (char)v.x; o.y = (char)v.y; o.z = (char)v.z; o.w = (char)v.w; out[ttid] = o; #else char* src = (char*)&gpu_ubuf[ttid]; char* dest = (char*)&gpu_cbuf[ttid*byteNum]; for (int i = 0; i < byteNum; i++) dest[i] = src[i]; #endif } void gc_compress_ns(gcStream_t Stream, int* gpu_ubuf, int entry_num, char* gpu_cbuf, int byteNum) { //printf("max_num:%d, byteNum:%d\n", max_num, byteNum); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); #ifdef NS3 THREAD_CONF(gridDim, blockDim, CEIL(entry_num , blockDim.x), blockDim.x); #else THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num , 4), blockDim.x), blockDim.x); #endif hipLaunchKernelGGL(( gc_compress_ns_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf, byteNum); CUT_CHECK_ERROR("gc_compress_ns_kernel"); } __global__ void gc_decompress_ns_long_kernel(long* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 2)) return; long2* raw4 = (long2*)gpu_ubuf; int2* out = (int2*)gpu_cbuf; int2 v = out[ttid]; long2 o; o.x = (long)v.x; o.y = (long)v.y; raw4[ttid] = o; } void gc_decompress_ns_long(gcStream_t Stream, long* gpu_ubuf, int entry_num, int* gpu_cbuf, int byteNum) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 2), blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_decompress_ns_long_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_decompress_ns_kernel"); } __global__ void gc_decompress_ns_kernel(int* gpu_ubuf, int entry_num, char* gpu_cbuf, int byteNum) { int ttid = TID; #ifdef NS3 if (ttid >= entry_num) return; #else if (ttid >= CEIL(entry_num,4)) return; #endif #ifdef NS3 char* dest = (char*)&gpu_ubuf[ttid]; char* src = (char*)&gpu_cbuf[ttid*byteNum]; //int4* raw4 = (int4*)gpu_ubuf; for (int i = 0; i < byteNum; i++) dest[i] = src[i]; #else char4* out = (char4*)gpu_cbuf; int4* raw4 = (int4*)gpu_ubuf; char4 v = out[ttid]; v.x = v.y = v.z = v.w = 0; int4 o; o.x = o.y = o.z = o.w = 0; /* o.x = (int)v.x; o.y = (int)v.y; o.z = (int)v.z; o.w = (int)v.w; */ out[ttid] = v; raw4[ttid] = o; #endif /* char3* src = (char3*)gpu_cbuf; __shared__ char s[7200]; char3* sbuf = (char3*)src; sbuf[threadIdx.x*4] = src[ttid*4]; sbuf[threadIdx.x*4+1] = src[ttid*4+1]; sbuf[threadIdx.x*4+2] = src[ttid*4+2]; sbuf[threadIdx.x*4+3] = src[ttid*4+3]; char4* dbuf = (char4*)(s + 256 * 3); __syncthreads(); int4* ddbuf = (int4*)dbuf; int4* out = (int4*)gpu_ubuf; char3* dest = (char3*)&ddbuf[threadIdx.x].x; dest[0] = sbuf[threadIdx.x*4]; dest = (char3*)&ddbuf[threadIdx.x].y; dest[0] = sbuf[threadIdx.x*4+1]; dest = (char3*)&ddbuf[threadIdx.x].z; dest[0] = sbuf[threadIdx.x*4+2]; dest = 
(char3*)&ddbuf[threadIdx.x].w; dest[0] = sbuf[threadIdx.x*4+3]; __syncthreads(); out[ttid] = ddbuf[threadIdx.x]; */ } void gc_decompress_ns(gcStream_t Stream, int* gpu_ubuf, int entry_num, char* gpu_cbuf, int byteNum) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); #ifdef NS3 THREAD_CONF(gridDim, blockDim, CEIL(entry_num, blockDim.x), blockDim.x); #else THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); #endif hipLaunchKernelGGL(( gc_decompress_ns_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf, byteNum); CUT_CHECK_ERROR("gc_decompress_ns_kernel"); } //----------------------------------------------------------------------------- //dict //----------------------------------------------------------------------------- __device__ __constant__ char gpu_dict[56]; __global__ void gc_decompress_dict_kernel(char* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= entry_num) return; int* raw = (int*)gpu_cbuf; int v = raw[ttid]; long* dict4 = (long*)gpu_dict; long* out4 = (long*)gpu_ubuf; long* str; str = dict4 + v; out4[ttid] = str[0]; /* int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_cbuf; int4 v = raw4[ttid]; long* dict4 = (long*)gpu_dict; long* out4 = (long*)gpu_ubuf + (ttid << 2); out4[0] = dict4[v.x]; out4[1] = dict4[v.y]; out4[2] = dict4[v.z]; out4[3] = dict4[v.w]; */ /* int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_cbuf; int4 v = raw4[ttid]; long* dict4 = (long*)gpu_dict; long* out4 = (long*)gpu_ubuf + (BLOCK_ID << 10); __shared__ long sbuf[1024]; sbuf[threadIdx.x] = dict4[v.x]; sbuf[threadIdx.x+1] = dict4[v.y]; sbuf[threadIdx.x+2] = dict4[v.z]; sbuf[threadIdx.x+3] = dict4[v.w]; __syncthreads(); //for (int i = threadIdx.x; i < 1024; i+=256) for (int i = 0; i < 4; i++) out4[threadIdx.x + i] = sbuf[threadIdx.x+i]; __syncthreads(); */ } void gc_decompress_dict(gcStream_t Stream, char* gpu_ubuf, int entry_num, int* gpu_cbuf, char* dict) { hipMemcpyToSymbol("gpu_dict", dict, 56, 0, hipMemcpyHostToDevice); //printf("%d\n", sizeof(long)); // for (int i = 0; i < 7; i++) // printf("%s\n", &dict[i * 8]); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); //THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); THREAD_CONF(gridDim, blockDim, CEIL(entry_num, blockDim.x), blockDim.x); hipLaunchKernelGGL(( gc_decompress_dict_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf); //int ssize = sizeof(long) * 4 * 256; //gc_decompress_dict_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_dict_kernel"); } //----------------------------------------------------------------------------- //SEP //----------------------------------------------------------------------------- __global__ void gc_decompress_sep_kernel(float* gpu_ubuf, int entry_num, int* gpu_left, int* gpu_right) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; float4* raw4 = (float4*)gpu_ubuf; int4* left4 = (int4*)gpu_left; int4* right4 = (int4*)gpu_right; int4 l = left4[ttid]; int4 r = right4[ttid]; float4 v; v.x = (float)l.x + (float)r.x / 100.0f; v.y = (float)l.y + (float)r.y / 100.0f; v.z = (float)l.z + (float)r.z / 100.0f; v.w = (float)l.w + (float)r.w / 100.0f; raw4[ttid] = v; } void gc_decompress_sep(gcStream_t Stream, float* gpu_ubuf, int entry_num, int* gpu_left, int* gpu_right) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, 
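/* Added note (not original code): the SEP codec splits a price-style float
   into an integer part and a two-digit fractional part, e.g. 12.34f is
   stored as left=12, right=34, and the kernel above reassembles it as
   left + right/100. */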
CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x);
    hipLaunchKernelGGL(( gc_decompress_sep_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_left, gpu_right);
    CUT_CHECK_ERROR("gc_compress_sep_kernel");
}

__global__ void gc_compress_sep_kernel(float* gpu_ubuf, int entry_num, int* gpu_left, int* gpu_right) {
    int ttid = TID;
    if (ttid >= CEIL(entry_num, 4)) return;

    float4* raw4 = (float4*)gpu_ubuf;
    float4 v = raw4[ttid];
    int4 l;
    int4 r;
    int4* left4 = (int4*)gpu_left;
    int4* right4 = (int4*)gpu_right;

    l.x = (int)v.x;
    r.x = (int)((v.x - (float)l.x) * 100.0f);
    l.y = (int)v.y;
    r.y = (int)((v.y - (float)l.y) * 100.0f);
    l.z = (int)v.z;
    r.z = (int)((v.z - (float)l.z) * 100.0f);
    l.w = (int)v.w;
    r.w = (int)((v.w - (float)l.w) * 100.0f);

    left4[ttid] = l;
    right4[ttid] = r;
}

void gc_compress_sep(gcStream_t Stream, float* gpu_ubuf, int entry_num, int* gpu_left, int* gpu_right) {
    dim3 blockDim(256, 1, 1);
    dim3 gridDim(1, 1, 1);
    THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x);
    hipLaunchKernelGGL(( gc_compress_sep_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_left, gpu_right);
    CUT_CHECK_ERROR("gc_compress_sep_kernel");
}

//-----------------------------------------------------------------------------
//DELTA
//-----------------------------------------------------------------------------
__global__ void gc_compress_delta_kernel1(int* gpu_ubuf, int entry_num, int* gpu_cbuf) {
    int ttid = TID;
    if (ttid >= CEIL(entry_num, 4)) return;

    int4* raw4 = (int4*)gpu_ubuf;
    int4* out4 = (int4*)gpu_cbuf;
    int4 v = raw4[ttid];
    int4 o;
    o.x = v.x;
    o.y = v.y - v.x;
    o.z = v.z - v.y;
    o.w = v.w - v.z;
    out4[ttid] = o;
}

__global__ void gc_compress_delta_kernel2(int* gpu_ubuf, int entry_num, int* gpu_cbuf) {
    int ttid = TID;
    if (ttid >= (CEIL(entry_num, 4))) return;

    /*
    int2* raw4 = (int2*)(gpu_ubuf + 3);
    int* out4 = (gpu_cbuf + 4);
    int2 v = raw4[ttid];
    out4[ttid] = v.y - v.x;
    */
    int4* raw4 = (int4*)(gpu_ubuf + 1);
    int4* out4 = (int4*)(gpu_cbuf + 1);
    int4 v = raw4[ttid];
    out4[ttid].w = v.w - v.z;
}

void gc_compress_delta(gcStream_t Stream, int* gpu_ubuf, int entry_num, int* gpu_cbuf, int* first_elem) {
    dim3 blockDim(256, 1, 1);
    dim3 gridDim(1, 1, 1);
    THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x);
    //printf("grid.x:%d, grid.y:%d, block.x:%d, threadNum:%d\n", gridDim.x, gridDim.y, blockDim.x, blockDim.x * gridDim.x * gridDim.y);
    hipLaunchKernelGGL(( gc_compress_delta_kernel1), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf);
    CUT_CHECK_ERROR("gc_compress_delta_kernel1");
    hipLaunchKernelGGL(( gc_compress_delta_kernel2), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf);
    CUT_CHECK_ERROR("gc_compress_delta_kernel2");
}

void gc_decompress_delta(gcStream_t stream, int* gpu_ubuf, int entry_num, int* gpu_cbuf, int first_elem) {
    int* first = (int*)malloc(sizeof(int));
    hipMemcpy(first, gpu_cbuf, sizeof(int), hipMemcpyDeviceToHost);
    *first += first_elem;
    //printf("cur_first:%d, first:%d\n", *first, first_elem);
    hipMemcpy(gpu_cbuf, first, sizeof(int), hipMemcpyHostToDevice);
    free(first);

    prefixSum(gpu_cbuf, entry_num, gpu_ubuf, INCLUSIVE);
    /*
    CUDPPConfiguration config;
    config.op = CUDPP_ADD;
    config.datatype = CUDPP_INT;
    config.algorithm = CUDPP_SCAN;
    config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE;
    CUDPPHandle scanplan = 0;
    CUDPPResult result = cudppPlan(&scanplan, config, entry_num, 1, 0);
    if (CUDPP_SUCCESS != result) {
        printf("Error creating CUDPPPlan\n");
        exit(-1);
    }
    cudppScan(scanplan, (void*)gpu_ubuf, (void*)gpu_cbuf, entry_num);
    result = cudppDestroyPlan(scanplan);
    if (CUDPP_SUCCESS != result) {
        printf("Error destroying CUDPPPlan\n");
        exit(-1);
    }
    */
}

//-----------------------------------------------------------------------------
//FOR
//-----------------------------------------------------------------------------
__global__ void gc_compress_for_kernel(int* gpu_ubuf, int entry_num, int* gpu_cbuf, int reference) {
    int ttid = TID;
    if (ttid >= CEIL(entry_num, 4)) return;

    int lastid = entry_num/4;
    if (ttid != lastid) {
        int4* gpu_ubuf4 = (int4*)gpu_ubuf;
        int4* gpu_cbuf4 = (int4*)gpu_cbuf;
        int4 v = gpu_ubuf4[ttid];
        v.x -= reference;
        v.y -= reference;
        v.z -= reference;
        v.w -= reference;
        gpu_cbuf4[ttid] = v;
    } else {
        // printf("**wenbin: lastid - %d - (%x, %x), %d\n", lastid, gridDim.x, gridDim.y, blockDim.x);
        int leftNum = entry_num % 4;
        int* gpu_ubuf_left = gpu_ubuf + ttid * 4;
        int* gpu_cbuf_left = gpu_cbuf + ttid * 4;
        for (int i = 0; i < leftNum; ++i)
            gpu_cbuf_left[i] = gpu_ubuf_left[i] - reference;
    }
}

void gc_compress_for(gcStream_t Stream, int* gpu_ubuf, int entry_num, int* gpu_cbuf, int reference) {
    dim3 blockDim(256, 1, 1);
    dim3 gridDim(1, 1, 1);
    THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x);
    hipLaunchKernelGGL(( gc_compress_for_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_ubuf, entry_num, gpu_cbuf, reference);
    CUT_CHECK_ERROR("gc_compress_for_kernel");
}

void gc_decompress_for(gcStream_t Stream, int* gpu_ubuf, int entry_num, int* gpu_cbuf, int reference) {
    dim3 blockDim(256, 1, 1);
    dim3 gridDim(1, 1, 1);
    THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x);
    int ref = -reference;
    hipLaunchKernelGGL(( gc_compress_for_kernel), dim3(gridDim), dim3(blockDim), Stream.stream, 0, gpu_cbuf, entry_num, gpu_ubuf, ref);
    CUT_CHECK_ERROR("gc_compress_for_kernel");
}

//=============================================================================
//Stream Management
//=============================================================================
void gc_stream_start(gcStream_t* Stream) {
    CUDA_SAFE_CALL(hipStreamCreate((hipStream_t*)&Stream->stream));
    CUDA_SAFE_CALL(hipEventCreate((hipEvent_t*)&Stream->event));
    CUDA_SAFE_CALL(hipEventCreate((hipEvent_t*)&Stream->start));
    CUDA_SAFE_CALL(hipEventRecord((hipEvent_t)Stream->start, (hipStream_t)Stream->stream));
}

void gc_stream_stop(gcStream_t* Stream) {
    CUDA_SAFE_CALL(hipEventRecord((hipEvent_t)Stream->event, (hipStream_t)Stream->stream));
    CUDA_SAFE_CALL(hipEventSynchronize((hipEvent_t)Stream->event));
    float etime = 0.0f;
    hipEventElapsedTime(&etime, Stream->start, Stream->event);
    printf("***%f ms\n", etime);
    CUDA_SAFE_CALL(hipEventDestroy((hipEvent_t)Stream->event));
    CUDA_SAFE_CALL(hipEventDestroy((hipEvent_t)Stream->start));
    CUDA_SAFE_CALL(hipStreamDestroy((hipStream_t)Stream->stream));
}

//=============================================================================
//Memory Management
//=============================================================================
void* gc_malloc(size_t bufsize) {
    void* gpu_buf = NULL;
    CUDA_SAFE_CALL(hipMalloc((void**)&gpu_buf, bufsize));
    return gpu_buf;
}

void gc_free(void* gpu_buf) {
    CUDA_SAFE_CALL(hipFree(gpu_buf));
}

void* gc_host2device(gcStream_t Stream, void* cpu_buf, size_t bufsize) {
    void* gpu_buf = NULL;
    int round_bufsize = CEIL(bufsize, 16) * 16 + 4;
    CUDA_SAFE_CALL(hipMalloc((void**)&gpu_buf, round_bufsize));
    CUDA_SAFE_CALL(hipMemcpyAsync(gpu_buf, cpu_buf, bufsize, hipMemcpyHostToDevice, Stream.stream));
    return gpu_buf;
}

void* gc_device2host(gcStream_t Stream, void* gpu_buf, size_t bufsize) {
    void* pinned = NULL;
    CUDA_SAFE_CALL(hipHostMalloc((void**)&pinned, bufsize));
    CUDA_SAFE_CALL(hipMemcpyAsync(pinned, gpu_buf, bufsize, hipMemcpyDeviceToHost, Stream.stream));
    //void* cpu_buf = malloc(bufsize);
    //memcpy(cpu_buf, pinned, bufsize);
    //CUDA_SAFE_CALL(hipHostFree(pinned));
    //return cpu_buf;
    return pinned;
}

//=============================================================================
//Testing
//=============================================================================
__global__ void test_kernel(int* d_input, int num) {
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= num) return;
    d_input[tid] = d_input[tid] * 2;
}

extern "C" void test_gpu(int num, int print_num) {
    if (num < print_num) return;

    int *d_input = NULL;
    CUDA_SAFE_CALL(hipMalloc((void**)&d_input, num * sizeof(int)));
    int *h_input = (int*)malloc(num * sizeof(int));
    for (int i = 0; i < num; i++)
        h_input[i] = i;
    CUDA_SAFE_CALL(hipMemcpy(d_input, h_input, sizeof(int)*num, hipMemcpyHostToDevice));

    int block_dim = 256;
    int grid_dim = (num / 256 + (int)(num % 256 != 0));
    hipLaunchKernelGGL(( test_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, d_input, num);

    CUDA_SAFE_CALL(hipMemcpy(h_input, d_input, sizeof(int)*num, hipMemcpyDeviceToHost));
    for (int i = 0; i < print_num; i++)
        printf("%d - %d\n", i, h_input[i]);

    CUDA_SAFE_CALL(hipFree(d_input));
    free(h_input);
}

void test_malloc(int size, int nloop) {
    unsigned timer;
    cutCreateTimer(&timer);
    cutStartTimer(timer);
    for (int i = 0; i < nloop; i++) {
        char* buf;
        hipMalloc((void**)&buf, size);
        hipFree(buf);
    }
    cutStopTimer(timer);
    double ctime = cutGetTimerValue(timer);
    printf("allocate&free %d bytes buf: %f ms\n", size, ctime / (double)nloop);
}

__global__ void int2char(int* intSrc, char* charSrc, int num) {
    int ttid = TID;
    if (ttid >= num) return;

    __shared__ int s[256];
    //__shared__ char s[256];
    // intSrc[ttid] = (int)charSrc[ttid];
    // charSrc[ttid] = (char)intSrc[ttid];
    {
        int d;
        s[threadIdx.x] = d;
        //charSrc[ttid] = s[threadIdx.x];
        //intSrc[ttid] = charSrc[ttid];
        //charSrc[ttid] = intSrc[ttid];
        //s[threadIdx.x] = intSrc[ttid];
        //intSrc[ttid] = s[threadIdx.x];
        // intSrc[ttid] = v;
        //char v = charSrc[ttid];
        //charSrc[ttid] = ttid;
        //intSrc[ttid] = i;
        // charSrc[ttid] = i;
    }
}

#define NUM 100000000
void test_int2char() {
    int* intSrc = (int*)gc_malloc(NUM * sizeof(int));
    char* charSrc = (char*)gc_malloc(NUM * sizeof(char));

    unsigned timer;
    cutCreateTimer(&timer);
    cutStartTimer(timer);

    dim3 blockDim(256, 1, 1);
    dim3 gridDim(1, 1, 1);
    THREAD_CONF(gridDim, blockDim, CEIL(NUM, blockDim.x), blockDim.x);
    hipLaunchKernelGGL(( int2char), dim3(gridDim), dim3(blockDim), 0, 0, intSrc, charSrc, NUM);
    hipDeviceSynchronize();
    CUT_CHECK_ERROR("gc_intersect");

    cutStopTimer(timer);
    double atime = cutGetTimerValue(timer);
    printf("%f ms\n", atime);

    gc_free(intSrc);
    gc_free(charSrc);
}
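//-----------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original gcompress sources): how
// the host-side helpers defined above fit together for a frame-of-reference
// (FOR) compression round trip. The function name `example_for_roundtrip` and
// its parameters are hypothetical, and error handling is omitted.
//-----------------------------------------------------------------------------
static void example_for_roundtrip(int* cpu_vals, int entry_num, int reference) {
    gcStream_t stream;
    gc_stream_start(&stream);

    // Copy the uncompressed column to the GPU; gc_host2device rounds the
    // allocation up to a 16-byte multiple, which the int4-based kernels need.
    int* gpu_raw = (int*)gc_host2device(stream, cpu_vals, entry_num * sizeof(int));

    // Compress against the reference value, then decompress into a new buffer.
    int* gpu_cmp = (int*)gc_malloc(entry_num * sizeof(int));
    gc_compress_for(stream, gpu_raw, entry_num, gpu_cmp, reference);

    int* gpu_out = (int*)gc_malloc(entry_num * sizeof(int));
    gc_decompress_for(stream, gpu_out, entry_num, gpu_cmp, reference);

    // gc_device2host returns pinned host memory; gc_stream_stop synchronizes
    // the stream (and prints its elapsed time) before the result is used.
    int* cpu_out = (int*)gc_device2host(stream, gpu_out, entry_num * sizeof(int));
    gc_stream_stop(&stream);

    gc_free(gpu_raw);
    gc_free(gpu_cmp);
    gc_free(gpu_out);
    hipHostFree(cpu_out);
}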
6d8b1a66014cb329549602f3b57115b03a67d250.cu
#include <stdio.h>
#include <cutil_inline.h>
#include <cudpp/cudpp.h>
#include "gcompress_cuda.h"

//#define NS3

int byte_num(int max_num) {
    if (max_num > THREE_BYTE) return 4;
    if (max_num > ONE_BYTE) return 2;
    return 1;
}

void gc_print_int(int* buf, int num) {
    int* cpu_val = (int*)malloc(num*sizeof(int));
    cudaMemcpy(cpu_val, buf, num*sizeof(int), cudaMemcpyDeviceToHost);
    //cudaMemcpy(cpu_val, lpartkeyVal, maxValue, cudaMemcpyDeviceToHost);
    for (int i = 0; i < num; i++)
        printf("%d\n", cpu_val[i]);
    free(cpu_val);
}

void gc_print_char(char* buf, int num) {
    char* cpu_val = (char*)malloc(num);
    cudaMemcpy(cpu_val, buf, num, cudaMemcpyDeviceToHost);
    //cudaMemcpy(cpu_val, lpartkeyVal, maxValue, cudaMemcpyDeviceToHost);
    for (int i = 0; i < num; i++)
        printf("%d\n", cpu_val[i]);
    free(cpu_val);
}

//=============================================================================
//query operators
//=============================================================================
__global__ void gc_sum1_kernel(float* price, float* discount, int entry_num) {
    int ttid = TID;
    if (ttid >= CEIL(entry_num, 4)) return;

    float4* out = (float4*)price;
    float4* raw = (float4*)discount;
    float4 r = raw[ttid];
    float4 o = out[ttid];
    o.x = o.x * (1.0f - r.x);
    o.y = o.y * (1.0f - r.y);
    o.z = o.z * (1.0f - r.z);
    o.w = o.w * (1.0f - r.w);
    out[ttid] = o;
}

void gc_sum1(gcStream_t stream, float* price, float* discount, int entry_num) {
    dim3 blockDim(256, 1, 1);
    dim3 gridDim(1, 1, 1);
    THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x);
    gc_sum1_kernel<<<gridDim, blockDim, stream.stream>>>(price, discount, entry_num);
    CUT_CHECK_ERROR("gc_intersect");
}

__global__ void gc_sum2_kernel(float* price, char* type, int* joinPos, int entry_num) {
    int ttid = TID;
    if (ttid >= entry_num) return;
    if (type[joinPos[ttid]] < 125)
        price[ttid] = 0.0f;
}

void gc_sum2(gcStream_t stream, float* price, char* type, int* joinPos, int entry_num) {
    dim3 blockDim(256, 1, 1);
    dim3 gridDim(1, 1, 1);
    THREAD_CONF(gridDim, blockDim, CEIL(entry_num, blockDim.x), blockDim.x);
    gc_sum2_kernel<<<gridDim, blockDim, stream.stream>>>(price, type, joinPos, entry_num);
    CUT_CHECK_ERROR("gc_intersect");
}

__global__ void gc_scatter_hist_kernel(char* vector, int entry_num, int* hist) {
    int ttid = TID;
    if (ttid >= CEIL(entry_num, 8)) return;

    int index = ttid + ttid;
    char c = vector[ttid];
    int4* out = (int4*)hist;
    int4 o;
    if (c & 0x80) o.x = 1; else o.x = 0;
    if (c & 0x40) o.y = 1; else o.y = 0;
    if (c & 0x20) o.z = 1; else o.z = 0;
    if (c & 0x10) o.w = 1; else o.w = 0;
    out[index] = o;
    if (c & 0x08) o.x = 1; else o.x = 0;
    if (c & 0x04) o.y = 1; else o.y = 0;
    if (c & 0x02) o.z = 1; else o.z = 0;
    if (c & 0x01) o.w = 1; else o.w = 0;
    out[index+1] = o;
}

__global__ void gc_scatter_float_kernel(float* column, int* offset, int* hist, int entry_num, float* out) {
    int ttid = TID;
    if (ttid >= CEIL(entry_num, 4)) return;

    int4* h4 = (int4*)hist;
    int4* o4 = (int4*)offset;
    float4* c4 = (float4*)column;
    int4 h = h4[ttid];
    int4 o = o4[ttid];
    float4 c = c4[ttid];
    if (h.x == 1) out[o.x] = c.x;
    if (h.y == 1) out[o.y] = c.y;
    if (h.z == 1) out[o.z] = c.z;
    if (h.w == 1) out[o.w] = c.w;
}

void gc_scatter_float(gcStream_t stream, float* column, char* vector, int entry_num, float** out, int* num) {
    int* hist = (int*)gc_malloc(sizeof(int) * entry_num);
    dim3 blockDim(256, 1, 1);
    dim3 gridDim(1, 1, 1);
    THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x);
    gc_scatter_hist_kernel<<<gridDim, blockDim, stream.stream>>>(vector, entry_num, hist);
CUT_CHECK_ERROR("gc_intersect"); int* offset = (int*)gc_malloc(sizeof(int)*entry_num); *num = prefixSum(hist, entry_num, offset, EXCLUSIVE); *out = (float*)gc_malloc(*num * sizeof(float)); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_scatter_float_kernel<<<gridDim,blockDim, stream.stream>>>(column, offset, hist, entry_num, *out); CUT_CHECK_ERROR("gc_intersect"); gc_free(offset); gc_free(hist); } __global__ void gc_scatter_kernel(int* column, int* offset,int entry_num, int* out) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* o4 = (int4*)offset; int4* c4 = (int4*)column; int4 o = o4[ttid]; int4 c = c4[ttid]; if (o.x != -1) out[o.x] = c.x; if (o.y != -1) out[o.y] = c.y; if (o.z != -1) out[o.z] = c.z; if (o.w != -1) out[o.w] = c.w; } __global__ void gc_scatter_fix_kernel(int* offset, int entry_num, int* hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* h4 = (int4*)hist; int4* o4 = (int4*)offset; int4 h = h4[ttid]; int4 o = o4[ttid]; if (h.x == 0) o.x = -1; if (h.y == 0) o.y = -1; if (h.z == 0) o.z = -1; if (h.w == 0) o.w = -1; o4[ttid] = o; } void gc_scatter(gcStream_t stream, int* column, char* vector, int entry_num, int** out, int* num) { int* hist = (int*)gc_malloc(sizeof(int) * entry_num); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); gc_scatter_hist_kernel<<<gridDim, blockDim, stream.stream>>>(vector, entry_num, hist); CUT_CHECK_ERROR("gc_intersect"); int* offset = (int*)gc_malloc(sizeof(int)*entry_num); *num = prefixSum(hist, entry_num, offset, EXCLUSIVE); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_scatter_fix_kernel<<<gridDim, blockDim, stream.stream>>>(offset, entry_num, hist); CUT_CHECK_ERROR("gc_intersect"); gc_free(hist); printf("%d\n", *num); *out = (int*)gc_malloc(*num * sizeof(int)); gc_scatter_kernel<<<gridDim,blockDim, stream.stream>>>(column, offset, entry_num, *out); CUT_CHECK_ERROR("gc_intersect"); gc_free(offset); } __global__ void gc_intersect_kernel(char* pos1, char* pos2, int entry_num, char* pos) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; char4* raw1 = (char4*)pos1; char4* raw2 = (char4*)pos2; char4* raw = (char4*)pos; char4 v1 = raw1[ttid]; char4 v2 = raw2[ttid]; char4 v; /* v.x = (v1.x & v2.x); v.y = (v1.y & v2.y); v.z = (v1.z & v2.z); v.w = (v1.w & v2.w); */ if (v1.x == 1 and v2.x == 1) v.x = 1; else v.x = 0; if (v1.y == 1 and v2.y == 1) v.y = 1; else v.y = 0; if (v1.z == 1 and v2.z == 1) v.z = 1; else v.z = 0; if (v1.w == 1 and v2.w == 1) v.w = 1; else v.w = 0; raw[ttid] = v; } void gc_intersect(gcStream_t stream, char* pos1, char* pos2, int entry_num, char* pos) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_intersect_kernel<<<gridDim, blockDim, stream.stream>>>(pos1, pos2, entry_num, pos); CUT_CHECK_ERROR("gc_intersect"); } __global__ void gc_filter1_kernel(int* column, int* pos_list, int entry_num) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw = (int4*)column; int4 v = raw[ttid]; int4* raw1 = (int4*)pos_list; int4 v2 = raw1[ttid]; if (v2.x == 0) v.x = 0; if (v2.y == 0) v.y = 0; if (v2.z == 0) v.z = 0; if (v2.w == 0) v.w = 0; raw[ttid] = v; } void gc_filter1(gcStream_t stream, int* column, int* pos_list, int entry_num) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), 
blockDim.x); gc_filter1_kernel<<<gridDim, blockDim, stream.stream>>>(column, pos_list, entry_num); CUT_CHECK_ERROR("gc_filter1"); } __global__ void gc_cal_q14_kernel(float *price, char* discount, char* type, int* lpartkey_offset_in, int* lpartkey_offset_out, int* lpartkey_len, char* pos_vector, int* lpartkey_pos_list, int* ppartkey_pos_list, int entry_num, int centry_num, float* out1, char* out2) { int ttid = TID; if (ttid >= centry_num) return; int lpos = lpartkey_pos_list[ttid]; int ppos = ppartkey_pos_list[ttid]; int llen = lpartkey_len[lpos]; int loffset = lpartkey_offset_in[lpos]; for (int i = 0; i < llen; i++) { int pos = loffset + i; char c = pos_vector[pos / 8]; float o1 = 0.0f; char o2 = 0; if (c & (0x80 >> (pos%8 - 1))) { float price1 = price[pos]; float discount1 = (float)discount[pos] / 100.0; o1 = (1 - discount1) * price1; if (type[ppos] >= 125) o2 = 1; } out1[pos] = o1; out2[pos] = o2; } } void gc_cal_q14(gcStream_t stream, float* price, char* discount, char* type, int* lpartkey_offset_in, int* lpartkey_offset_out, int* lpartkey_len, char* pos_vector, int* lpartkey_pos_list, int* ppartkey_pos_list, int entry_num, int centry_num, float* out1, char* out2) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(centry_num, blockDim.x), blockDim.x); gc_cal_q14_kernel<<<gridDim, blockDim, stream.stream>>>(price, discount, type, lpartkey_offset_in, lpartkey_offset_out, lpartkey_len, pos_vector, lpartkey_pos_list, ppartkey_pos_list, entry_num, centry_num, out1, out2); CUT_CHECK_ERROR("gc_pos_vector_hist"); } __global__ void gc_cal_q14_final_kernel(float* out1, char* out2, int entry_num, float sum, float* out) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; float4* raw1 = (float4*)out1; char4* raw2 = (char4*)out2; float4 v1 = raw1[ttid]; char4 v2 = raw2[ttid]; float4 o; if (v2.x == 1) o.x = 100.0f * v1.x; else o.x = 0.0f; if (v2.y == 1) o.y = 100.0f * v1.y; else o.y = 0.0f; if (v2.z == 1) o.z = 100.0f * v1.z; else o.z = 0.0f; if (v2.w == 1) o.w = 100.0f * v1.w; else o.w = 0.0f; float4* out4 = (float4*)out; out4[ttid] = o; } void gc_cal_q14_final(gcStream_t stream, float* out1, char* out2, int entry_num, float sum, float* out) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_cal_q14_final_kernel<<<gridDim, blockDim, stream.stream>>>(out1, out2, entry_num, sum, out); CUT_CHECK_ERROR("gc_pos_vector_hist"); } /* __global__ void gc_cal_q14_kernel(float *price, char* discount, char* type, int* lpartkey_offset_in, int* lpartkey_offset_out, int* lpartkey_len, char* pos_vector, int* lpartkey_pos_list, int* ppartkey_pos_list, int entry_num, int centry_num, float* out1, float* out2) { int ttid = TID; if (ttid >= centry_num) return; int lpos = lpartkey_pos_list[ttid]; int ppos = ppartkey_pos_list[ttid]; int llen = lpartkey_len[ttid]; int loffset = lpartkey_offset_in[lpos]; for (int i = 0; i < llen; i++) { int pos = loffset + i; char c = pos_vector[pos / 8]; float o1 = 0.0f; float o2 = 0.0f; if (c & (0x80 >> (c%8 - 1))) { float price1 = price[pos]; float discount1 = (float)discount[pos] / 100.0; o2 = (1 - discount1) * price1; if (type[ppos] >= 125) o1 = o2; } out1[pos] = o1; out2[pos] = o2; } } void gc_cal_q14(gcStream_t stream, float* price, char* discount, char* type, int* lpartkey_offset_in, int* lpartkey_offset_out, int* lpartkey_len, char* pos_vector, int* lpartkey_pos_list, int* ppartkey_pos_list, int entry_num, int centry_num, float* out1, float* out2) { dim3 
blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(centry_num, blockDim.x), blockDim.x); gc_cal_q14_kernel<<<gridDim, blockDim, stream.stream>>>(price, discount, type, lpartkey_offset_in, lpartkey_offset_out, lpartkey_len, pos_vector, lpartkey_pos_list, ppartkey_pos_list, entry_num, centry_num, out1, out2); CUT_CHECK_ERROR("gc_pos_vector_hist"); } */ __global__ void gc_pos_vector_hist_kernel(int* column, char* val_vector, int centry_num, int* hist) { int ttid = TID; if (ttid >= CEIL(centry_num, 4)) return; int4* raw = (int4*)column; int4 v = raw[ttid]; int4* out = (int4*)hist; int4 o; o.x = val_vector[v.x]; o.y = val_vector[v.y]; o.z = val_vector[v.z]; o.w = val_vector[v.w]; out[ttid] = o; } __global__ void gc_pos_vector_kernel(int* column, int* hist, int* offset, int centry_num, int* pos_list) { int ttid = TID; if (ttid >= CEIL(centry_num, 4)) return; int4* offset4 = (int4*)offset; int4* hist4 = (int4*)hist; int4 o = offset4[ttid]; int4 h = hist4[ttid]; if (h.x) pos_list[o.x] = ttid * 4; if (h.y) pos_list[o.y] = ttid * 4 + 1; if (h.z) pos_list[o.z] = ttid * 4 + 2; if (h.w) pos_list[o.w] = ttid * 4 + 3; } void gc_pos_vector(gcStream_t stream, int* column, char* val_vector, int centry_num, int** pos_list, int* num) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); int* hist = (int*)gc_malloc(sizeof(int) * CEIL(centry_num, 4) * 4); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(centry_num, 4), blockDim.x), blockDim.x); gc_pos_vector_hist_kernel<<<gridDim, blockDim, stream.stream>>>(column, val_vector, centry_num, hist); CUT_CHECK_ERROR("gc_pos_vector_hist"); int* offset = (int*)gc_malloc(sizeof(int) * CEIL(centry_num, 4) * 4); *num = prefixSum(hist, centry_num, offset, EXCLUSIVE); printf("%d \n", *num); *pos_list = (int*)gc_malloc(*num * sizeof(int)); gc_pos_vector_kernel<<<gridDim, blockDim, stream.stream>>>(column, hist, offset, centry_num, *pos_list); CUT_CHECK_ERROR("gc_pos_vector"); gc_free(offset); gc_free(hist); } __global__ void gc_val_vector_kernel(int* gpu_column, int entry_num, char* gpu_val_vector) { int ttid = TID; if (ttid >= entry_num) return; int v = gpu_column[ttid]; gpu_val_vector[v] = 1; } void gc_val_vector(gcStream_t stream, int* gpu_column, int entry_num, int max_val, char* gpu_val_vector) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); cudaMemset(gpu_val_vector, 0, max_val); THREAD_CONF(gridDim, blockDim, CEIL(entry_num, blockDim.x), blockDim.x); gc_val_vector_kernel<<<gridDim, blockDim, stream.stream>>>(gpu_column, entry_num, gpu_val_vector); CUT_CHECK_ERROR("gc_val_vector"); } __global__ void gc_select_char_ng_lt(char* gpu_column, int entry_num, char low, char high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; char4* raw4 = (char4*)gpu_column; int index = ttid + ttid; char4 v = raw4[index]; char c = 0; if (v.x < high) c |= 0x80; if (v.y < high) c |= 0x40; if (v.z < high) c |= 0x20; if (v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x < high) c |= 0x08; if (v.y < high) c |= 0x04; if (v.z < high) c |= 0x02; if (v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } __global__ void gc_select_char_ge_le(char* gpu_column, int entry_num, char low, char high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; char4* raw4 = (char4*)gpu_column; int index = ttid + ttid; char4 v = raw4[index]; char c = 0; if (v.x >= low && v.x <= high) c |= 0x80; if (v.y >= low && v.y <= high) c |= 0x40; if (v.z >= low && v.z <= high) c |= 0x20; if (v.w >= low && v.w <= high) c |= 0x10; v = raw4[index+1]; if 
(v.x >= low && v.x <= high) c |= 0x08; if (v.y >= low && v.y <= high) c |= 0x04; if (v.z >= low && v.z <= high) c |= 0x02; if (v.w >= low && v.w <= high) c |= 0x01; gpu_pos_vector[ttid] = c; } __global__ void gc_select_char_gt_lt(char* gpu_column, int entry_num, char low, char high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; char4* raw4 = (char4*)gpu_column; int index = ttid + ttid; char4 v = raw4[index]; char c = 0; if (v.x > low && v.x < high) c |= 0x80; if (v.y > low && v.y < high) c |= 0x40; if (v.z > low && v.z < high) c |= 0x20; if (v.w > low && v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x > low && v.x < high) c |= 0x08; if (v.y > low && v.y < high) c |= 0x04; if (v.z > low && v.z < high) c |= 0x02; if (v.w > low && v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } void gc_select_char(gcStream_t stream, char* gpu_column, int entry_num, char low, char op_low, char high, char op_high, char* gpu_pos_vector) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); if (op_low == GT && op_high == LT) { gc_select_char_gt_lt<<<gridDim, blockDim, stream.stream>>>(gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } else if (op_low == GE && op_high == LE) { gc_select_char_ge_le<<<gridDim, blockDim, stream.stream>>>(gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } else if (op_low == NG && op_high == LT) { gc_select_char_ng_lt<<<gridDim, blockDim, stream.stream>>>(gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } } __global__ void gc_select_short_ge_lt(short* gpu_column, int entry_num, short low, short high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; short4* raw4 = (short4*)gpu_column; int index = ttid + ttid; short4 v = raw4[index]; char c = 0; if (v.x >= low && v.x < high) c |= 0x80; if (v.y >= low && v.y < high) c |= 0x40; if (v.z >= low && v.z < high) c |= 0x20; if (v.w >= low && v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x >= low && v.x < high) c |= 0x08; if (v.y >= low && v.y < high) c |= 0x04; if (v.z >= low && v.z < high) c |= 0x02; if (v.w >= low && v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } void gc_select_short(gcStream_t stream, short* gpu_column, int entry_num, short low, char op_low, short high, char op_high, char* gpu_pos_vector) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); if (op_low == GE && op_high == LT) { gc_select_short_ge_lt<<<gridDim, blockDim, stream.stream>>>(gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } } __global__ void gc_select_int_ng_lt(int* gpu_column, int entry_num, int low, int high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int4* raw4 = (int4*)gpu_column; int index = ttid + ttid; int4 v = raw4[index]; char c = 0; if (v.x < high) c |= 0x80; if (v.y < high) c |= 0x40; if (v.z < high) c |= 0x20; if (v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x < high) c |= 0x08; if (v.y < high) c |= 0x04; if (v.z < high) c |= 0x02; if (v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } __global__ void gc_select_int_ge_le(int* gpu_column, int entry_num, int low, int high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int4* raw4 = (int4*)gpu_column; int index = ttid + ttid; int4 
v = raw4[index]; char c = 0; if (v.x >= low && v.x <= high) c |= 0x80; if (v.y >= low && v.y <= high) c |= 0x40; if (v.z >= low && v.z <= high) c |= 0x20; if (v.w >= low && v.w <= high) c |= 0x10; v = raw4[index+1]; if (v.x >= low && v.x <= high) c |= 0x08; if (v.y >= low && v.y <= high) c |= 0x04; if (v.z >= low && v.z <= high) c |= 0x02; if (v.w >= low && v.w <= high) c |= 0x01; gpu_pos_vector[ttid] = c; } __global__ void gc_select_int_gt_ng(int* gpu_column, int entry_num, int low, int high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int4* raw4 = (int4*)gpu_column; int index = ttid + ttid; int4 v = raw4[index]; char c = 0; if (v.x > low) c |= 0x80; if (v.y > low) c |= 0x40; if (v.z > low) c |= 0x20; if (v.w > low) c |= 0x10; v = raw4[index+1]; if (v.x > low) c |= 0x08; if (v.y > low) c |= 0x04; if (v.z > low) c |= 0x02; if (v.w > low) c |= 0x01; gpu_pos_vector[ttid] = c; } __global__ void gc_select_int_ge_lt(int* gpu_column, int entry_num, int low, int high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int4* raw4 = (int4*)gpu_column; int index = ttid + ttid; int4 v = raw4[index]; char c = 0; if (v.x >= low && v.x < high) c |= 0x80; if (v.y >= low && v.y < high) c |= 0x40; if (v.z >= low && v.z < high) c |= 0x20; if (v.w >= low && v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x >= low && v.x < high) c |= 0x08; if (v.y >= low && v.y < high) c |= 0x04; if (v.z >= low && v.z < high) c |= 0x02; if (v.w >= low && v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } void gc_select_int(gcStream_t stream, int* gpu_column, int entry_num, int low, char op_low, int high, char op_high, char* gpu_pos_vector) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); if (op_low == GE && op_high == LT) { gc_select_int_ge_lt<<<gridDim, blockDim, stream.stream>>>(gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } else if (op_low == GE && op_high == LE) { gc_select_int_ge_le<<<gridDim, blockDim, stream.stream>>>(gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } else if (op_low == NG && op_high == LT) { gc_select_int_ng_lt<<<gridDim, blockDim, stream.stream>>>(gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } else if (op_low == GT && op_high == NG) { gc_select_int_gt_ng<<<gridDim, blockDim, stream.stream>>>(gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } } __global__ void gc_select_float_gt_lt(float* gpu_column, int entry_num, float low, float high, char* gpu_pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; float4* raw4 = (float4*)gpu_column; int index = ttid + ttid; float4 v = raw4[index]; char c = 0; if (v.x > low && v.x < high) c |= 0x80; if (v.y > low && v.y < high) c |= 0x40; if (v.z > low && v.z < high) c |= 0x20; if (v.w > low && v.w < high) c |= 0x10; v = raw4[index+1]; if (v.x > low && v.x < high) c |= 0x08; if (v.y > low && v.y < high) c |= 0x04; if (v.z > low && v.z < high) c |= 0x02; if (v.w > low && v.w < high) c |= 0x01; gpu_pos_vector[ttid] = c; } void gc_select_float(gcStream_t stream, float* gpu_column, int entry_num, float low, char op_low, float high, char op_high, char* gpu_pos_vector) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); if (op_low == GT && op_high == 
LT) { gc_select_float_gt_lt<<<gridDim, blockDim, stream.stream>>>(gpu_column, entry_num, low, high, gpu_pos_vector); CUT_CHECK_ERROR("gc_select_float_gt_lt"); } } __global__ void gc_filter_kernel(char* pos_vector1, char* pos_vector2, char* pos_vector3, int entry_num, char* pos_vector) { int ttid = TID; if (ttid >= CEIL(entry_num, 32)) return; char4* raw1 = (char4*)pos_vector1; char4* raw2 = (char4*)pos_vector2; char4* raw3 = (char4*)pos_vector3; char4* out = (char4*)pos_vector; char4 v1 = raw1[ttid]; char4 v2 = raw2[ttid]; char4 v3 = raw3[ttid]; char4 o; o.x = (v1.x & v2.x & v3.x); o.y = (v1.y & v2.y & v3.y); o.z = (v1.z & v2.z & v3.z); o.w = (v1.w & v2.w & v3.w); out[ttid] = o; } void gc_filter(gcStream_t stream, char* pos_vector1, char* pos_vector2, char* pos_vector3, char* pos_vector, int entry_num) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 32), blockDim.x), blockDim.x); gc_filter_kernel<<<gridDim, blockDim, stream.stream>>>(pos_vector1, pos_vector2, pos_vector3, entry_num, pos_vector); CUT_CHECK_ERROR("gc_filter_kernel"); } __global__ void gc_filter_float_product(float* column1, float* column2, int entry_num, char* pos_vector, float* product) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int index = ttid + ttid; char c = pos_vector[ttid]; float4* raw1 = (float4*)column1; float4* raw2 = (float4*)column2; float4 v1 = raw1[index]; float4 v2 = raw2[index]; float4* out = (float4*)product; float4 o; if (c & 0x80) o.x = v1.x * v2.x; else o.x = 0.0f; if (c & 0x40) o.y = v1.y * v2.y; else o.y = 0.0f; if (c & 0x20) o.z = v1.z * v2.z; else o.z = 0.0f; if (c & 0x10) o.w = v1.w * v2.w; else o.w = 0.0f; out[index] = o; v1 = raw1[index+1]; v2 = raw2[index+1]; if (c & 0x08) o.x = v1.x * v2.x; else o.x = 0.0f; if (c & 0x04) o.y = v1.y * v2.y; else o.y = 0.0f; if (c & 0x02) o.z = v1.z * v2.z; else o.z = 0.0f; if (c & 0x01) o.w = v1.w * v2.w; else o.w = 0.0f; out[index+1] = o; } void gc_filter_float_value(gcStream_t stream, float* column1, float* column2, int entry_num, char* pos_vector, float* out) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); gc_filter_float_product<<<gridDim, blockDim, stream.stream>>>(column1, column2, entry_num, pos_vector, out); CUT_CHECK_ERROR("gc_filter_float_product"); /* int* hist = (int*)gc_malloc(sizeof(int) * CEIL(entry_num, 8) * 8); gc_filter_float_hist_kernel<<<gridDim, blockDim, stream.stream>>>(pos_vector, hist) CUT_CHECK_ERROR("gc_filter_float_hist_kernel"); int* offset = (int*)gc_malloc( sizeof(int) * CEIL(entry_num,8) * 8); prefixSum(); gc_free(offset); gc_free(hist); */ } __global__ void gc_filter_float_product2(float* column1, char* column2, int entry_num, char* pos_vector, float* product) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int index = ttid + ttid; char c = pos_vector[ttid]; float4* raw1 = (float4*)column1; char4* raw2 = (char4*)column2; float4 v1 = raw1[index]; char4 v2 = raw2[index]; float4* out = (float4*)product; float4 o; int4 v22; v22.x = (int)v2.x; v22.y = (int)v2.y; v22.z = (int)v2.z; v22.w = (int)v2.w; if (c & 0x80) o.x = v1.x * (float)v22.x; else o.x = 0.0f; if (c & 0x40) o.y = v1.y * (float)v22.y; else o.y = 0.0f; if (c & 0x20) o.z = v1.z * (float)v22.z; else o.z = 0.0f; if (c & 0x10) o.w = v1.w * (float)v22.w; else o.w = 0.0f; out[index] = o; v1 = raw1[index+1]; v2 = raw2[index+1]; if (c & 0x08) o.x = v1.x * (float)v22.x; else o.x = 0.0f; if (c & 0x04) o.y = v1.y * 
(float)v22.y; else o.y = 0.0f; if (c & 0x02) o.z = v1.z * (float)v22.z; else o.z = 0.0f; if (c & 0x01) o.w = v1.w * (float)v22.w; else o.w = 0.0f; out[index+1] = o; } void gc_filter_float_value2(gcStream_t stream, float* column1, char* column2, int entry_num, char* pos_vector, float* out) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); gc_filter_float_product2<<<gridDim, blockDim, stream.stream>>>(column1, column2, entry_num, pos_vector, out); CUT_CHECK_ERROR("gc_filter_float_product"); /* int* hist = (int*)gc_malloc(sizeof(int) * CEIL(entry_num, 8) * 8); gc_filter_float_hist_kernel<<<gridDim, blockDim, stream.stream>>>(pos_vector, hist) CUT_CHECK_ERROR("gc_filter_float_hist_kernel"); int* offset = (int*)gc_malloc( sizeof(int) * CEIL(entry_num,8) * 8); prefixSum(); gc_free(offset); gc_free(hist); */ } //============================================================================= //Utils //============================================================================= int prefixSum(int* input, int num, int* output, char flag) { CUDPPConfiguration config; config.op = CUDPP_ADD; config.datatype = CUDPP_INT; config.algorithm = CUDPP_SCAN; if (flag == EXCLUSIVE) config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; else config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; CUDPPHandle scanplan = 0; CUDPPResult result = cudppPlan(&scanplan, config, num, 1, 0); if (CUDPP_SUCCESS != result) { printf("Error creating CUDPPPlan\n"); exit(-1); } cudppScan(scanplan, (void*)output, (void*)input, num); result = cudppDestroyPlan(scanplan); if (CUDPP_SUCCESS != result) { printf("Error destroying CUDPPPlan\n"); exit(-1); } int last_offset = 0; int last_hist = 0; cudaMemcpy(&last_offset, &output[num -1], sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&last_hist, &input[num -1], sizeof(int), cudaMemcpyDeviceToHost); return (last_offset + last_hist); } float sumFloat2(float* input, int num, float* output) { CUDPPConfiguration config; config.op = CUDPP_ADD; config.datatype = CUDPP_FLOAT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; CUDPPHandle scanplan = 0; CUDPPResult result = cudppPlan(&scanplan, config, num, 1, 0); if (CUDPP_SUCCESS != result) { printf("Error creating CUDPPPlan\n"); exit(-1); } cudppScan(scanplan, (void*)output, (void*)input, num); result = cudppDestroyPlan(scanplan); if (CUDPP_SUCCESS != result) { printf("Error destroying CUDPPPlan\n"); exit(-1); } float last_offset = 0; cudaMemcpy(&last_offset, &output[num -1], sizeof(float), cudaMemcpyDeviceToHost); return last_offset; } float sumFloat(float* input, int num) { float* input2 = 0; cudaMallocHost((void**)&input2, (sizeof(float)*num)); cudaMemcpy(input2, input, sizeof(float)*num, cudaMemcpyDeviceToHost); float sum = 0.0f; for (int i = 0; i < num; i++) { sum += input2[i]; //printf("%f, %f\n", input2[i], sum); } cudaFreeHost(input2); return sum; /* CUDPPConfiguration config; config.op = CUDPP_ADD; config.datatype = CUDPP_FLOAT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; CUDPPHandle scanplan = 0; CUDPPResult result = cudppPlan(&scanplan, config, num, 1, 0); if (CUDPP_SUCCESS != result) { printf("Error creating CUDPPPlan\n"); exit(-1); } cudppScan(scanplan, (void*)output, (void*)input, num); result = cudppDestroyPlan(scanplan); if (CUDPP_SUCCESS != result) { printf("Error destroying CUDPPPlan\n"); exit(-1); } float last_offset = 0; 
cudaMemcpy(&last_offset, &output[num -1], sizeof(float), cudaMemcpyDeviceToHost); return last_offset; */ } //============================================================================= //GPU compression //============================================================================= //----------------------------------------------------------------------------- //nsv //----------------------------------------------------------------------------- __device__ int byteNum(int num) { if (num > TWO_BYTE) return 3; if (num > ONE_BYTE) return 2; return 1; } __device__ int byteNumLong(long num) { if (num > THREE_BYTE) return 4; if (num > ONE_BYTE) return 2; return 1; } __global__ void gc_compress_nsv_long_kernel1(long* gpu_ubuf, int entry_num, int* gpu_hist) { int ttid = TID; if (ttid >= entry_num) return; long v = gpu_ubuf[ttid]; gpu_hist[ttid] = byteNumLong(v); } __global__ void gc_compress_nsv_long_kernel2(long* gpu_ubuf, int* gpu_offset, int entry_num, char* gpu_value, char* gpu_len) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; __shared__ long2 sbuf[256]; long2* raw = (long2*)gpu_ubuf; sbuf[threadIdx.x] = raw[ttid*2]; __syncthreads(); int hist; int4* offset4 = (int4*)gpu_offset; int4 offset = offset4[ttid]; char len = 0; char* src = (char*)&sbuf[threadIdx.x].x; hist = byteNumLong(sbuf[threadIdx.x].x); for (int i = 0; i < hist; ++i) gpu_value[offset.x + i] = src[i]; len |= ((char)hist << 6); src = (char*)&sbuf[threadIdx.x].y; hist = byteNumLong(sbuf[threadIdx.x].y); for (int i = 0; i < hist; ++i) gpu_value[offset.y + i] = src[i]; len |= ((char)hist << 4); sbuf[threadIdx.x] = raw[ttid*2+1]; __syncthreads(); src = (char*)&sbuf[threadIdx.x].x; hist = byteNumLong(sbuf[threadIdx.x].x); for (int i = 0; i < hist; ++i) gpu_value[offset.z + i] = src[i]; len |= ((char)hist << 2); src = (char*)&sbuf[threadIdx.x].y; hist = byteNumLong(sbuf[threadIdx.x].y); for (int i = 0; i < hist; ++i) gpu_value[offset.w + i] = src[i]; len |= ((char)hist); gpu_len[ttid] = len; } void gc_compress_nsv_long(gcStream_t Stream, long* gpu_ubuf, int entry_num, char** gpu_value, char* gpu_len, int* size) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(entry_num, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * entry_num); gc_compress_nsv_long_kernel1<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); //cudaThreadSynchronize(); //step 2: prefix sum int* gpu_offset = (int*)gc_malloc(sizeof(int) * entry_num); int totalSize = prefixSum(gpu_hist, entry_num, gpu_offset, EXCLUSIVE); *size = totalSize; //cudaThreadSynchronize(); /* int* cpu_offset = (int*)malloc(sizeof(int)*entry_num); int* cpu_hist = (int*)malloc(sizeof(int)*entry_num); cudaMemcpy(cpu_offset, gpu_offset, sizeof(int)*entry_num, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_hist, gpu_hist, sizeof(int)*entry_num, cudaMemcpyDeviceToHost); for (int i= 0; i < 10; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); *gpu_value = (char*)gc_malloc(totalSize); gc_compress_nsv_long_kernel2<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, gpu_offset, entry_num, *gpu_value, gpu_len); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_hist); gc_free(gpu_offset); } __global__ void gc_compress_nsv_kernel1(int* gpu_ubuf, int entry_num, int* gpu_hist) { int ttid = 
TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; int4 v = raw4[ttid]; int o = 0; o += byteNum(v.x); o += byteNum(v.y); o += byteNum(v.z); o += byteNum(v.w); gpu_hist[ttid] = o; } __global__ void gc_compress_nsv_kernel2(int* gpu_ubuf, int* gpu_offset, int entry_num, char* gpu_value, char* gpu_len) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; __shared__ int4 sbuf[256]; sbuf[threadIdx.x] = raw4[ttid]; __syncthreads(); int hist; int offset = gpu_offset[ttid]; char len = 0; char* src = (char*)&sbuf[threadIdx.x].x; hist = byteNum(sbuf[threadIdx.x].x); for (int i = 0; i < hist; ++i) gpu_value[offset + i] = src[i]; len |= ((char)hist << 6); offset += hist; src = (char*)&sbuf[threadIdx.x].y; hist = byteNum(sbuf[threadIdx.x].y); for (int i = 0; i < hist; ++i) gpu_value[offset + i] = src[i]; len |= ((char)hist << 4); offset += hist; src = (char*)&sbuf[threadIdx.x].z; hist = byteNum(sbuf[threadIdx.x].z); for (int i = 0; i < hist; ++i) gpu_value[offset + i] = src[i]; len |= ((char)hist << 2); offset += hist; src = (char*)&sbuf[threadIdx.x].w; hist = byteNum(sbuf[threadIdx.x].w); for (int i = 0; i < hist; ++i) gpu_value[offset + i] = src[i]; len |= ((char)hist); offset += hist; gpu_len[ttid] = len; } void gc_compress_nsv(gcStream_t Stream, int* gpu_ubuf, int entry_num, char** gpu_value, char* gpu_len, int* size) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * threadNum); gc_compress_nsv_kernel1<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); //step 2: prefix sum int* gpu_offset = (int*)gc_malloc(sizeof(int) * threadNum); int totalSize = prefixSum(gpu_hist, threadNum, gpu_offset, EXCLUSIVE); *size = totalSize; /* int* cpu_offset = (int*)malloc(sizeof(int)*entry_num); int* cpu_hist = (int*)malloc(sizeof(int)*entry_num); cudaMemcpy(cpu_offset, gpu_offset, sizeof(int)*entry_num, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_hist, gpu_hist, sizeof(int)*entry_num, cudaMemcpyDeviceToHost); for (int i= 0; i < 10; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter *gpu_value = (char*)gc_malloc(totalSize); gc_compress_nsv_kernel2<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, gpu_offset, entry_num, *gpu_value, gpu_len); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_hist); gc_free(gpu_offset); } #if 0 __global__ void gc_decompress_nsv_kernel1(char* gpu_len, int entry_num, int* gpu_hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* hist = (int4*)gpu_hist; char v = gpu_len[ttid]; int4 o; o.x = ((v & 0xc0) >> 6); o.y = ((v & 0x30) >> 4); o.z = ((v & 0x0c) >> 2); o.w = ((v & 0x03)); hist[ttid] = o; } __global__ void gc_decompress_nsv_kernel2(int* gpu_ubuf, int* gpu_offset, int entry_num, char* gpu_value, int* gpu_hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; __shared__ int4 ibuf[256]; ibuf[threadIdx.x].x = 0; ibuf[threadIdx.x].y = 0; ibuf[threadIdx.x].z = 0; ibuf[threadIdx.x].w = 0; __syncthreads(); int4* hist4 = (int4*)gpu_hist; int4 h = hist4[ttid]; int4* offset4 = (int4*)gpu_offset; int4 o = offset4[ttid]; char* cbuf = NULL; cbuf = (char*)&ibuf[threadIdx.x].x; for (int i = 0; i < h.x; i++) cbuf[i] = gpu_value[o.x + i]; cbuf = (char*)&ibuf[threadIdx.x].y; for (int i = 0; i < h.y; i++) cbuf[i] = 
gpu_value[o.y + i]; cbuf = (char*)&ibuf[threadIdx.x].z; for (int i = 0; i < h.z; i++) cbuf[i] = gpu_value[o.z + i]; cbuf = (char*)&ibuf[threadIdx.x].w; for (int i = 0; i < h.w; i++) cbuf[i] = gpu_value[o.w + i]; int4* raw4 = (int4*)gpu_ubuf; raw4[ttid] = ibuf[threadIdx.x]; __syncthreads(); } void gc_decompress_nsv(gcStream_t Stream, int* gpu_ubuf, int entry_num, char* gpu_value, char* gpu_len) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * entry_num); gc_decompress_nsv_kernel1<<<gridDim, blockDim, Stream.stream>>>(gpu_len, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); //step 2: prefix sum int* gpu_offset = (int*)gc_malloc(sizeof(int) * entry_num); int totalSize = prefixSum(gpu_hist, entry_num, gpu_offset, EXCLUSIVE); /* int* cpu_offset = (int*)malloc(sizeof(int)*threadNum); int* cpu_hist = (int*)malloc(sizeof(int)*threadNum); cudaMemcpy(cpu_offset, gpu_offset, sizeof(int)*threadNum, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_hist, gpu_hist, sizeof(int)*threadNum, cudaMemcpyDeviceToHost); for (int i= 0; i < threadNum; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter gc_decompress_nsv_kernel2<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, gpu_offset, entry_num, gpu_value, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_hist); gc_free(gpu_offset); } #endif __global__ void gc_decompress_nsv_kernel1(char* gpu_len, int entry_num, int* gpu_hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; char v = gpu_len[ttid]; int o = 0; o += ((v & 0xc0) >> 6); o += ((v & 0x30) >> 4); o += ((v & 0x0c) >> 2); o += ((v & 0x03)); gpu_hist[ttid] = o; } __global__ void gc_decompress_nsv_kernel2(int* gpu_ubuf, int* gpu_offset, int entry_num, char* gpu_value, char* gpu_len) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; __shared__ int4 ibuf[256]; ibuf[threadIdx.x].x = 0; ibuf[threadIdx.x].y = 0; ibuf[threadIdx.x].z = 0; ibuf[threadIdx.x].w = 0; __syncthreads(); char v = gpu_len[ttid]; int h = 0; int o = gpu_offset[ttid]; char* cbuf = NULL; h = ((v & 0xc0) >> 6); cbuf = (char*)&ibuf[threadIdx.x].x; for (int i = 0; i < h; i++) cbuf[i] = gpu_value[o + i]; o += h; h = ((v & 0x30) >> 4); cbuf = (char*)&ibuf[threadIdx.x].y; for (int i = 0; i < h; i++) cbuf[i] = gpu_value[o + i]; o += h; h = ((v & 0x0c) >> 2); cbuf = (char*)&ibuf[threadIdx.x].z; for (int i = 0; i < h; i++) cbuf[i] = gpu_value[o + i]; o += h; h = ((v & 0x03)); cbuf = (char*)&ibuf[threadIdx.x].w; for (int i = 0; i < h; i++) cbuf[i] = gpu_value[o + i]; o += h; int4* raw4 = (int4*)gpu_ubuf; raw4[ttid] = ibuf[threadIdx.x]; __syncthreads(); } void gc_decompress_nsv(gcStream_t Stream, int* gpu_ubuf, int entry_num, char* gpu_value, char* gpu_len) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * threadNum); gc_decompress_nsv_kernel1<<<gridDim, blockDim, Stream.stream>>>(gpu_len, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); //step 2: prefix sum int* gpu_offset = (int*)gc_malloc(sizeof(int) * threadNum); int totalSize = prefixSum(gpu_hist, threadNum, gpu_offset, EXCLUSIVE); gc_free(gpu_hist); /* int* cpu_offset = (int*)malloc(sizeof(int)*threadNum); int* cpu_hist = 
(int*)malloc(sizeof(int)*threadNum); cudaMemcpy(cpu_offset, gpu_offset, sizeof(int)*threadNum, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_hist, gpu_hist, sizeof(int)*threadNum, cudaMemcpyDeviceToHost); for (int i= 0; i < threadNum; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter gc_decompress_nsv_kernel2<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, gpu_offset, entry_num, gpu_value, gpu_len); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_offset); } #if 0 __global__ void gc_decompress_nsv_kernel1(char* gpu_len, int entry_num, int* gpu_hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; char v = gpu_len[ttid]; int4* hist4 = (int4*)gpu_hist; int4 o; o.x = ((v & 0xc0) >> 6); o.y = ((v & 0x30) >> 4); o.z = ((v & 0x0c) >> 2); o.w = ((v & 0x03)); hist4[ttid] = o; } __global__ void gc_decompress_nsv_kernel2(int* gpu_ubuf, int* gpu_offset, int* gpu_hist, int entry_num, char* gpu_value) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; __shared__ int4 ibuf[256]; ibuf[threadIdx.x].x = 0; ibuf[threadIdx.x].y = 0; ibuf[threadIdx.x].z = 0; ibuf[threadIdx.x].w = 0; __syncthreads(); int4* hist = (int4*)gpu_hist; int4 h = hist[ttid]; int4* offset = (int4*)gpu_offset; int4 o = offset[ttid]; char* cbuf = NULL; cbuf = (char*)&ibuf[threadIdx.x].x; for (int i = 0; i < h.x; i--) cbuf[3 - i] = gpu_value[o.x + h.x - 1 - i]; cbuf = (char*)&ibuf[threadIdx.x].y; for (int i = 0; i < h.y; i--) cbuf[3 - i] = gpu_value[o.y + h.y - 1 - i]; cbuf = (char*)&ibuf[threadIdx.x].z; for (int i = 0; i < h.z; i--) cbuf[3 - i] = gpu_value[o.z + h.z - 1 - i]; cbuf = (char*)&ibuf[threadIdx.x].w; for (int i = 0; i < h.w; i--) cbuf[3 - i] = gpu_value[o.w + h.w - 1 - i]; int4* raw4 = (int4*)gpu_ubuf; raw4[ttid] = ibuf[threadIdx.x]; } void gc_decompress_nsv(gcStream_t Stream, int* gpu_ubuf, int entry_num, char* gpu_value, char* gpu_len) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * entry_num); gc_decompress_nsv_kernel1<<<gridDim, blockDim, Stream.stream>>>(gpu_len, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); //step 2: prefix sum int* gpu_offset = (int*)gc_malloc(sizeof(int) * entry_num); int totalSize = prefixSum(gpu_hist, entry_num, gpu_offset, EXCLUSIVE); /* int* cpu_offset = (int*)malloc(sizeof(int)*entry_num); int* cpu_hist = (int*)malloc(sizeof(int)*entry_num); cudaMemcpy(cpu_offset, gpu_offset, sizeof(int)*entry_num, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_hist, gpu_hist, sizeof(int)*entry_num, cudaMemcpyDeviceToHost); for (int i= 0; i < 10; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter return; gc_decompress_nsv_kernel2<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, gpu_offset, gpu_hist, entry_num, gpu_value); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_offset); gc_free(gpu_hist); } #endif #if 0 __global__ void gc_compress_nsv_kernel1(int* gpu_ubuf, int entry_num, int* gpu_hist) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; int4 v = raw4[ttid]; int4 o; int4* out4 = (int4*)gpu_hist; o.x = byteNum(v.x); o.y = byteNum(v.y); o.z = byteNum(v.z); o.w = byteNum(v.w); out4[ttid] = o; } __global__ void gc_compress_nsv_kernel2(int* gpu_ubuf, int* gpu_offset, int* gpu_hist, int entry_num, char* gpu_value, char* gpu_len) { 
int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; __shared__ int4 sbuf[256]; sbuf[threadIdx.x] = raw4[ttid]; __syncthreads(); int4* hist4 = (int4*)gpu_hist; int4 hist = hist4[ttid]; int4* offset4 = (int4*)gpu_offset; int4 offset = offset4[ttid]; char len = 0; char* src = (char*)&sbuf[threadIdx.x].x; for (int i = 0; i < hist.x; ++i) gpu_value[offset.x + i] = src[i]; len |= ((char)hist.x << 6); src = (char*)&sbuf[threadIdx.x].y; for (int i = 0; i < hist.y; ++i) gpu_value[offset.y + i] = src[i]; len |= ((char)hist.x << 4); src = (char*)&sbuf[threadIdx.x].z; for (int i = 0; i < hist.z; ++i) gpu_value[offset.z + i] = src[i]; len |= ((char)hist.x << 2); src = (char*)&sbuf[threadIdx.x].w; for (int i = 0; i < hist.w; ++i) gpu_value[offset.w + i] = src[i]; len |= ((char)hist.x); gpu_len[ttid] = len; } void gc_compress_nsv(gcStream_t Stream, int* gpu_ubuf, int entry_num, char** gpu_value, char* gpu_len) { int threadNum = CEIL(entry_num, 4); //step 1: hist dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(threadNum, blockDim.x), blockDim.x); int* gpu_hist = (int*)gc_malloc(sizeof(int) * entry_num); gc_compress_nsv_kernel1<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_hist); CUT_CHECK_ERROR("gc_compress_nsv_kernel1"); return; //step 2: prefix sum int* gpu_offset = (int*)gc_malloc(sizeof(int) * entry_num); int totalSize = prefixSum(gpu_hist, entry_num, gpu_offset, EXCLUSIVE); /* int* cpu_offset = (int*)malloc(sizeof(int)*entry_num); int* cpu_hist = (int*)malloc(sizeof(int)*entry_num); cudaMemcpy(cpu_offset, gpu_offset, sizeof(int)*entry_num, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_hist, gpu_hist, sizeof(int)*entry_num, cudaMemcpyDeviceToHost); for (int i= 0; i < 10; i++) printf("hist:%d, offset:%d\n", cpu_hist[i], cpu_offset[i]); free(cpu_hist); free(cpu_offset); */ //step 3: scatter *gpu_value = (char*)gc_malloc(totalSize); gc_compress_nsv_kernel2<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, gpu_offset, gpu_hist, entry_num, *gpu_value, gpu_len); CUT_CHECK_ERROR("gc_compress_nsv_kernel2"); gc_free(gpu_hist); gc_free(gpu_offset); } #endif //----------------------------------------------------------------------------- //bitmap //----------------------------------------------------------------------------- //__device__ __constant__ char gpu_mode[84]; __global__ void gc_compress_bitmap_kernel(char* gpu_ubuf, int entry_num, char* gpu_r, char* gpu_a, char* gpu_n) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int2* raw2 = (int2*)gpu_ubuf; __shared__ int2 sbuf[256]; sbuf[threadIdx.x] = raw2[ttid]; __syncthreads(); char* raw = (char*)&sbuf[threadIdx.x]; char b = 0; if (raw[0] == 'R') b |= 0x80; if (raw[1] == 'R') b |= 0x40; if (raw[2] == 'R') b |= 0x20; if (raw[3] == 'R') b |= 0x10; if (raw[4] == 'R') b |= 0x08; if (raw[5] == 'R') b |= 0x04; if (raw[6] == 'R') b |= 0x02; if (raw[7] == 'R') b |= 0x01; gpu_r[ttid] = b; b = 0; if (raw[0] == 'A') b |= 0x80; if (raw[1] == 'A') b |= 0x40; if (raw[2] == 'A') b |= 0x20; if (raw[3] == 'A') b |= 0x10; if (raw[4] == 'A') b |= 0x08; if (raw[5] == 'A') b |= 0x04; if (raw[6] == 'A') b |= 0x02; if (raw[7] == 'A') b |= 0x01; gpu_a[ttid] = b; b = 0; if (raw[0] == 'N') b |= 0x80; if (raw[1] == 'N') b |= 0x40; if (raw[2] == 'N') b |= 0x20; if (raw[3] == 'N') b |= 0x10; if (raw[4] == 'N') b |= 0x08; if (raw[5] == 'N') b |= 0x04; if (raw[6] == 'N') b |= 0x02; if (raw[7] == 'N') b |= 0x01; gpu_n[ttid] = b; } void gc_compress_bitmap(gcStream_t Stream, char* gpu_ubuf, int entry_num, 
char* gpu_r, char* gpu_a, char* gpu_n) { // char mode[84] = {0}; // mode['R'] = 0; // mode['A'] = 1; // mode['N'] = 2; // cudaMemcpyToSymbol("gpu_mode", mode, 84, 0, cudaMemcpyHostToDevice); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); gc_compress_bitmap_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_r, gpu_a, gpu_n); CUT_CHECK_ERROR("gc_compress_bitmap_kernel"); } __global__ void gc_decompress_bitmap_kernel(char* gpu_ubuf, int entry_num, char* gpu_r, char* gpu_a, char* gpu_n) { int ttid = TID; if (ttid >= CEIL(entry_num, 8)) return; int2* raw2 = (int2*)gpu_ubuf; __shared__ int2 sbuf[256]; char* raw = (char*)&sbuf[threadIdx.x]; char r = gpu_r[ttid]; char a = gpu_a[ttid]; char n = gpu_n[ttid]; if (r & 0x80) raw[0] = 'R'; if (r & 0x40) raw[1] = 'R'; if (r & 0x20) raw[2] = 'R'; if (r & 0x10) raw[3] = 'R'; if (r & 0x08) raw[4] = 'R'; if (r & 0x04) raw[5] = 'R'; if (r & 0x02) raw[6] = 'R'; if (r & 0x01) raw[7] = 'R'; if (a & 0x80) raw[0] = 'A'; if (a & 0x40) raw[1] = 'A'; if (a & 0x20) raw[2] = 'A'; if (a & 0x10) raw[3] = 'A'; if (a & 0x08) raw[4] = 'A'; if (a & 0x04) raw[5] = 'A'; if (a & 0x02) raw[6] = 'A'; if (a & 0x01) raw[7] = 'A'; if (n & 0x80) raw[0] = 'N'; if (n & 0x40) raw[1] = 'N'; if (n & 0x20) raw[2] = 'N'; if (n & 0x10) raw[3] = 'N'; if (n & 0x08) raw[4] = 'N'; if (n & 0x04) raw[5] = 'N'; if (n & 0x02) raw[6] = 'N'; if (n & 0x01) raw[7] = 'N'; __syncthreads(); raw2[ttid]=sbuf[threadIdx.x]; __syncthreads(); } void gc_decompress_bitmap(gcStream_t Stream, char* gpu_ubuf, int entry_num, char* gpu_r, char* gpu_a, char* gpu_n) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 8), blockDim.x), blockDim.x); gc_decompress_bitmap_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_r, gpu_a, gpu_n); CUT_CHECK_ERROR("gc_compress_bitmap_kernel"); } //----------------------------------------------------------------------------- //RLE //----------------------------------------------------------------------------- __global__ void gc_compress_rle_kernel1(int* gpu_ubuf, int entry_num, int* hist, int* pos) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; int4* hist4 = (int4*)hist; int4* pos4 = (int4*)pos; int4 v = raw4[ttid]; int4 p; int4 h; if (v.x != v.y) { h.x = 1; p.x = ttid * 4; } else { h.x = 0; p.x = 0; } if (v.y != v.z) { h.y = 1; p.y = ttid * 4 + 1; } else { h.y = 0; p.y = 0; } if (v.z != v.w) { h.z = 1; p.z = ttid * 4 + 2; } else { h.z = 0; p.z = 0; } hist4[ttid] = h; pos4[ttid] = p; } __global__ void gc_compress_rle_kernel2(int* gpu_ubuf, int entry_num, int* hist, int* pos) { int ttid = TID; if (ttid > (CEIL(entry_num, 4) - 1)) return; if (ttid == (CEIL(entry_num, 4) - 1)) { hist[entry_num - 1] = 1; pos[entry_num - 1] = entry_num - 1; return; } int4* raw4 = (int4*)(gpu_ubuf + 1); int4* hist4 = (int4*)(hist + 1); int4* pos4 = (int4*)(pos + 1); int4 v = raw4[ttid]; if (v.w != v.z) { hist4[ttid].z = 1; pos4[ttid].z = ttid * 4 + 3; } else { hist4[ttid].z = 0; pos4[ttid].z = 0; } } __global__ void gc_compress_rle_kernel3(int* gpu_ubuf, int* hist, int* offset, int entry_num, int* gpu_valbuf, int* gpu_lenbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; int4* hist4 = (int4*)hist; int4* offset4 = (int4*)offset; int4 v = raw4[ttid]; int4 h = hist4[ttid]; int4 o = offset4[ttid]; if (h.x == 1) { gpu_valbuf[o.x] = v.x; gpu_lenbuf[o.x] = ttid * 
4; } if (h.y == 1) { gpu_valbuf[o.y] = v.y; gpu_lenbuf[o.y] = ttid * 4 + 1; } if (h.z == 1) { gpu_valbuf[o.z] = v.z; gpu_lenbuf[o.z] = ttid * 4 + 2; } if (h.w == 1) { gpu_valbuf[o.w] = v.w; gpu_lenbuf[o.w] = ttid * 4 + 3; } } __global__ void gc_compress_rle_kernel4(int* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; int4* out4 = (int4*)gpu_cbuf; int4 v = raw4[ttid]; int4 o; o.x = v.x; o.y = v.y - v.x; o.z = v.z - v.y; o.w = v.w - v.z; out4[ttid] = o; } __global__ void gc_compress_rle_kernel5(int* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= (CEIL(entry_num, 4) - 1)) return; if (ttid == 0) gpu_cbuf[0] = gpu_ubuf[0] + 1; int4* raw4 = (int4*)(gpu_ubuf + 1); int4* out4 = (int4*)(gpu_cbuf + 1); int4 v = raw4[ttid]; out4[ttid].w = v.w - v.z; } void gc_compress_rle(gcStream_t Stream, int* gpu_ubuf, int entry_num, int** gpu_valbuf, int** gpu_lenbuf, int* centry_num) { //step 1: get hist and pos int* hist = (int*)gc_malloc(sizeof(int)*entry_num); int* pos = (int*)gc_malloc(sizeof(int)*entry_num); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_compress_rle_kernel1<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, hist, pos); CUT_CHECK_ERROR("gc_compress_rle_kernel1"); gc_compress_rle_kernel2<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, hist, pos); CUT_CHECK_ERROR("gc_compress_rle_kernel2"); gc_free(pos); //step 2: get offset int* offset = (int*)gc_malloc(sizeof(int)*entry_num); cudaMemset(offset, 0, sizeof(int)*entry_num); *centry_num = prefixSum(hist, entry_num, offset, EXCLUSIVE); printf("centry_num:%d\n", *centry_num); //Step 3: get value and pos2 int* gpu_pos2 = (int*)gc_malloc(sizeof(int) * (*centry_num)); * gpu_valbuf = (int*)gc_malloc(sizeof(int)*(*centry_num)); * gpu_lenbuf = (int*)gc_malloc(sizeof(int)*(*centry_num)); gc_compress_rle_kernel3<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, hist, offset, entry_num, *gpu_valbuf, gpu_pos2); CUT_CHECK_ERROR("gc_compress_rle_kernel3"); //Step 4: get len int cnum = *centry_num; THREAD_CONF(gridDim, blockDim, CEIL(CEIL(cnum, 4), blockDim.x), blockDim.x); gc_compress_rle_kernel4<<<gridDim, blockDim, Stream.stream>>>(gpu_pos2, *centry_num, *gpu_lenbuf); CUT_CHECK_ERROR("gc_compress_rle_kernel4"); gc_compress_rle_kernel5<<<gridDim, blockDim, Stream.stream>>>(gpu_pos2, *centry_num, *gpu_lenbuf); CUT_CHECK_ERROR("gc_compress_rle_kernel5"); gc_free(hist); gc_free(gpu_pos2); gc_free(offset); } __global__ void gc_decompress_rle_kernel(int* gpu_val, int* gpu_len, int centry_num, int* offset, int* gpu_ubuf) { int ttid = TID; if (ttid >= centry_num) return; int val = gpu_val[ttid]; int len = gpu_len[ttid]; int off = offset[ttid]; int* base = gpu_ubuf + off; for (int i = 0; i < len; i++) base[i] = val; } __global__ void gc_decompress_rle_kernel1(int* offset, int* hist, int centry_num) { int ttid = TID; if (ttid >= centry_num) return; hist[offset[ttid]] = 1; } __global__ void gc_decompress_rle_kernel2(int* gpu_ubuf, int* gpu_val, int* offset, int entry_num) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* offset4 = (int4*)offset; int4 o4 = offset4[ttid]; int4 o; o.x = gpu_val[o4.x]; o.y = gpu_val[o4.y]; o.z = gpu_val[o4.z]; o.w = gpu_val[o4.w]; int4* ubuf4 = (int4*)gpu_ubuf; ubuf4[ttid] = o; } void gc_decompress_rle(gcStream_t Stream, int** gpu_ubuf, int* entry_num, int* gpu_valbuf, int* gpu_lenbuf, int centry_num) { //step 1: get 
offset for centry int* offset = (int*)gc_malloc(sizeof(int)*centry_num); *entry_num = prefixSum(gpu_lenbuf, centry_num, offset, EXCLUSIVE); printf("%d\n", *entry_num); //step 2: mark boundary int* hist = (int*)gc_malloc(sizeof(int) * (*entry_num)); cudaMemset(hist, 0, sizeof(int) * (*entry_num)); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(centry_num, blockDim.x), blockDim.x); gc_decompress_rle_kernel1<<<gridDim, blockDim, Stream.stream>>>(offset, hist, centry_num); CUT_CHECK_ERROR("gc_compress_rle_kernel"); gc_free(offset); offset = (int*)gc_malloc(sizeof(int) * (*entry_num)); prefixSum(hist, *entry_num, offset, INCLUSIVE); cudaFree(hist); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(*entry_num,4), blockDim.x), blockDim.x); *gpu_ubuf = (int*)gc_malloc(*entry_num * sizeof(int)); gc_decompress_rle_kernel2<<<gridDim, blockDim, Stream.stream>>>(*gpu_ubuf, gpu_valbuf, offset, *entry_num); cudaFree(offset); } /* void gc_decompress_rle(gcStream_t Stream, int** gpu_ubuf, int* entry_num, int* gpu_valbuf, int* gpu_lenbuf, int centry_num) { //step 1: get offset for centry int* offset = (int*)gc_malloc(sizeof(int)*centry_num); *entry_num = prefixSum(gpu_lenbuf, centry_num, offset, EXCLUSIVE); *gpu_ubuf = (int*)gc_malloc(sizeof(int)* (*entry_num)); //step 2: scatter dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(centry_num, blockDim.x), blockDim.x); gc_decompress_rle_kernel<<<gridDim, blockDim>>>(gpu_valbuf, gpu_lenbuf, centry_num, offset, *gpu_ubuf); CUT_CHECK_ERROR("gc_compress_rle_kernel"); gc_free(offset); } */ //----------------------------------------------------------------------------- //Scale //----------------------------------------------------------------------------- __global__ void gc_decompress_scale_kernel(float* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; float4* raw4 = (float4*)gpu_ubuf; int4* out = (int4*)gpu_cbuf; int4 v = out[ttid]; float4 o; o.x = (float)(v.x) / 100.0f; o.y = (float)(v.y) / 100.0f; o.z = (float)(v.z) / 100.0f; o.w = (float)(v.w) / 100.0f; raw4[ttid] = o; } void gc_decompress_scale(gcStream_t Stream, float* gpu_ubuf, int entry_num, int* gpu_cbuf) { //printf("max_num:%d, byteNum:%d\n", max_num, byteNum); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_decompress_scale_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_ns_kernel"); } __global__ void gc_compress_scale_kernel(float* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; float4* raw4 = (float4*)gpu_ubuf; int4* out = (int4*)gpu_cbuf; float4 v = raw4[ttid]; int4 o; o.x = (int)(v.x * 100.0f); o.y = (int)(v.y * 100.0f); o.z = (int)(v.z * 100.0f); o.w = (int)(v.w * 100.0f); out[ttid] = o; } void gc_compress_scale(gcStream_t Stream, float* gpu_ubuf, int entry_num, int* gpu_cbuf) { //printf("max_num:%d, byteNum:%d\n", max_num, byteNum); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_compress_scale_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_ns_kernel"); } //----------------------------------------------------------------------------- //NS //----------------------------------------------------------------------------- __global__ void 
gc_compress_ns_kernel(int* gpu_ubuf, int entry_num, short* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; short4* out = (short4*)gpu_cbuf; int4 v = raw4[ttid]; short4 o; o.x = (short)v.x; o.y = (short)v.y; o.z = (short)v.z; o.w = (short)v.w; out[ttid] = o; } void gc_compress_ns2(gcStream_t Stream, int* gpu_ubuf, int entry_num, short* gpu_cbuf, int byteNum) { //printf("max_num:%d, byteNum:%d\n", max_num, byteNum); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_compress_ns_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_ns_kernel"); } __global__ void gc_decompress_ns2_kernel(int* gpu_ubuf, int entry_num, short* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; short4* out = (short4*)gpu_cbuf; short4 v = out[ttid]; int4 o; o.x = (int)v.x; o.y = (int)v.y; o.z = (int)v.z; o.w = (int)v.w; raw4[ttid] = o; } void gc_decompress_ns2(gcStream_t Stream, int* gpu_ubuf, int entry_num, short* gpu_cbuf, int byteNum) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_decompress_ns2_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_decompress_ns_kernel"); } __global__ void gc_compress_ns_long_kernel(long* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 2)) return; long2* raw4 = (long2*)gpu_ubuf; int2* out = (int2*)gpu_cbuf; long2 v = raw4[ttid]; int2 o; o.x = (int)v.x; o.y = (int)v.y; out[ttid] = o; } void gc_compress_ns_long(gcStream_t Stream, long* gpu_ubuf, int entry_num, int* gpu_cbuf, int byteNum) { //printf("max_num:%d, byteNum:%d\n", max_num, byteNum); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 2), blockDim.x), blockDim.x); gc_compress_ns_long_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_ns_kernel"); } __global__ void gc_compress_ns_kernel(int* gpu_ubuf, int entry_num, char* gpu_cbuf, int byteNum) { int ttid = TID; #ifdef NS3 if (ttid >= entry_num) return; #else if (ttid >= CEIL(entry_num, 4)) return; #endif int4* raw4 = (int4*)gpu_ubuf; #ifndef NS3 char4* out = (char4*)gpu_cbuf; char4 o; int4 v = raw4[ttid]; o.x = (char)v.x; o.y = (char)v.y; o.z = (char)v.z; o.w = (char)v.w; out[ttid] = o; #else char* src = (char*)&gpu_ubuf[ttid]; char* dest = (char*)&gpu_cbuf[ttid*byteNum]; for (int i = 0; i < byteNum; i++) dest[i] = src[i]; #endif } void gc_compress_ns(gcStream_t Stream, int* gpu_ubuf, int entry_num, char* gpu_cbuf, int byteNum) { //printf("max_num:%d, byteNum:%d\n", max_num, byteNum); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); #ifdef NS3 THREAD_CONF(gridDim, blockDim, CEIL(entry_num , blockDim.x), blockDim.x); #else THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num , 4), blockDim.x), blockDim.x); #endif gc_compress_ns_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf, byteNum); CUT_CHECK_ERROR("gc_compress_ns_kernel"); } __global__ void gc_decompress_ns_long_kernel(long* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 2)) return; long2* raw4 = (long2*)gpu_ubuf; int2* out = (int2*)gpu_cbuf; int2 v = out[ttid]; long2 o; o.x = (long)v.x; o.y = (long)v.y; raw4[ttid] = o; } void 
gc_decompress_ns_long(gcStream_t Stream, long* gpu_ubuf, int entry_num, int* gpu_cbuf, int byteNum) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 2), blockDim.x), blockDim.x); gc_decompress_ns_long_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_decompress_ns_kernel"); } __global__ void gc_decompress_ns_kernel(int* gpu_ubuf, int entry_num, char* gpu_cbuf, int byteNum) { int ttid = TID; #ifdef NS3 if (ttid >= entry_num) return; #else if (ttid >= CEIL(entry_num,4)) return; #endif #ifdef NS3 char* dest = (char*)&gpu_ubuf[ttid]; char* src = (char*)&gpu_cbuf[ttid*byteNum]; //int4* raw4 = (int4*)gpu_ubuf; for (int i = 0; i < byteNum; i++) dest[i] = src[i]; #else char4* out = (char4*)gpu_cbuf; int4* raw4 = (int4*)gpu_ubuf; char4 v = out[ttid]; v.x = v.y = v.z = v.w = 0; int4 o; o.x = o.y = o.z = o.w = 0; /* o.x = (int)v.x; o.y = (int)v.y; o.z = (int)v.z; o.w = (int)v.w; */ out[ttid] = v; raw4[ttid] = o; #endif /* char3* src = (char3*)gpu_cbuf; __shared__ char s[7200]; char3* sbuf = (char3*)src; sbuf[threadIdx.x*4] = src[ttid*4]; sbuf[threadIdx.x*4+1] = src[ttid*4+1]; sbuf[threadIdx.x*4+2] = src[ttid*4+2]; sbuf[threadIdx.x*4+3] = src[ttid*4+3]; char4* dbuf = (char4*)(s + 256 * 3); __syncthreads(); int4* ddbuf = (int4*)dbuf; int4* out = (int4*)gpu_ubuf; char3* dest = (char3*)&ddbuf[threadIdx.x].x; dest[0] = sbuf[threadIdx.x*4]; dest = (char3*)&ddbuf[threadIdx.x].y; dest[0] = sbuf[threadIdx.x*4+1]; dest = (char3*)&ddbuf[threadIdx.x].z; dest[0] = sbuf[threadIdx.x*4+2]; dest = (char3*)&ddbuf[threadIdx.x].w; dest[0] = sbuf[threadIdx.x*4+3]; __syncthreads(); out[ttid] = ddbuf[threadIdx.x]; */ } void gc_decompress_ns(gcStream_t Stream, int* gpu_ubuf, int entry_num, char* gpu_cbuf, int byteNum) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); #ifdef NS3 THREAD_CONF(gridDim, blockDim, CEIL(entry_num, blockDim.x), blockDim.x); #else THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); #endif gc_decompress_ns_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf, byteNum); CUT_CHECK_ERROR("gc_decompress_ns_kernel"); } //----------------------------------------------------------------------------- //dict //----------------------------------------------------------------------------- __device__ __constant__ char gpu_dict[56]; __global__ void gc_decompress_dict_kernel(char* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= entry_num) return; int* raw = (int*)gpu_cbuf; int v = raw[ttid]; long* dict4 = (long*)gpu_dict; long* out4 = (long*)gpu_ubuf; long* str; str = dict4 + v; out4[ttid] = str[0]; /* int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_cbuf; int4 v = raw4[ttid]; long* dict4 = (long*)gpu_dict; long* out4 = (long*)gpu_ubuf + (ttid << 2); out4[0] = dict4[v.x]; out4[1] = dict4[v.y]; out4[2] = dict4[v.z]; out4[3] = dict4[v.w]; */ /* int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_cbuf; int4 v = raw4[ttid]; long* dict4 = (long*)gpu_dict; long* out4 = (long*)gpu_ubuf + (BLOCK_ID << 10); __shared__ long sbuf[1024]; sbuf[threadIdx.x] = dict4[v.x]; sbuf[threadIdx.x+1] = dict4[v.y]; sbuf[threadIdx.x+2] = dict4[v.z]; sbuf[threadIdx.x+3] = dict4[v.w]; __syncthreads(); //for (int i = threadIdx.x; i < 1024; i+=256) for (int i = 0; i < 4; i++) out4[threadIdx.x + i] = sbuf[threadIdx.x+i]; __syncthreads(); */ } void gc_decompress_dict(gcStream_t Stream, char* gpu_ubuf, int 
entry_num, int* gpu_cbuf, char* dict) { cudaMemcpyToSymbol("gpu_dict", dict, 56, 0, cudaMemcpyHostToDevice); //printf("%d\n", sizeof(long)); // for (int i = 0; i < 7; i++) // printf("%s\n", &dict[i * 8]); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); //THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); THREAD_CONF(gridDim, blockDim, CEIL(entry_num, blockDim.x), blockDim.x); gc_decompress_dict_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf); //int ssize = sizeof(long) * 4 * 256; //gc_decompress_dict_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_dict_kernel"); } //----------------------------------------------------------------------------- //SEP //----------------------------------------------------------------------------- __global__ void gc_decompress_sep_kernel(float* gpu_ubuf, int entry_num, int* gpu_left, int* gpu_right) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; float4* raw4 = (float4*)gpu_ubuf; int4* left4 = (int4*)gpu_left; int4* right4 = (int4*)gpu_right; int4 l = left4[ttid]; int4 r = right4[ttid]; float4 v; v.x = (float)l.x + (float)r.x / 100.0f; v.y = (float)l.y + (float)r.y / 100.0f; v.z = (float)l.z + (float)r.z / 100.0f; v.w = (float)l.w + (float)r.w / 100.0f; raw4[ttid] = v; } void gc_decompress_sep(gcStream_t Stream, float* gpu_ubuf, int entry_num, int* gpu_left, int* gpu_right) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_decompress_sep_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_left, gpu_right); CUT_CHECK_ERROR("gc_compress_sep_kernel"); } __global__ void gc_compress_sep_kernel(float* gpu_ubuf, int entry_num, int* gpu_left, int* gpu_right) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; float4* raw4 = (float4*)gpu_ubuf; float4 v = raw4[ttid]; int4 l; int4 r; int4* left4 = (int4*)gpu_left; int4* right4 = (int4*)gpu_right; l.x = (int)v.x; r.x = (int)((v.x - (float)l.x) * 100.0f); l.y = (int)v.y; r.y = (int)((v.y - (float)l.y) * 100.0f); l.z = (int)v.z; r.z = (int)((v.z - (float)l.z) * 100.0f); l.w = (int)v.w; r.w = (int)((v.w - (float)l.w) * 100.0f); left4[ttid] = l; right4[ttid] = r; } void gc_compress_sep(gcStream_t Stream, float* gpu_ubuf, int entry_num, int* gpu_left, int* gpu_right) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_compress_sep_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_left, gpu_right); CUT_CHECK_ERROR("gc_compress_sep_kernel"); } //----------------------------------------------------------------------------- //DELTA //----------------------------------------------------------------------------- __global__ void gc_compress_delta_kernel1(int* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int4* raw4 = (int4*)gpu_ubuf; int4* out4 = (int4*)gpu_cbuf; int4 v = raw4[ttid]; int4 o; o.x = v.x; o.y = v.y - v.x; o.z = v.z - v.y; o.w = v.w - v.z; out4[ttid] = o; } __global__ void gc_compress_delta_kernel2(int* gpu_ubuf, int entry_num, int* gpu_cbuf) { int ttid = TID; if (ttid >= (CEIL(entry_num, 4))) return; /* int2* raw4 = (int2*)(gpu_ubuf + 3); int* out4 = (gpu_cbuf + 4); int2 v = raw4[ttid]; out4[ttid] = v.y - v.x; */ int4* raw4 = (int4*)(gpu_ubuf + 1); int4* out4 = (int4*)(gpu_cbuf + 1); int4 v = raw4[ttid]; out4[ttid].w = v.w - v.z; } 
void gc_compress_delta(gcStream_t Stream, int* gpu_ubuf, int entry_num, int* gpu_cbuf, int* first_elem) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); //printf("grid.x:%d, grid.y:%d, block.x:%d, threadNum:%d\n", gridDim.x, gridDim.y, blockDim.x, blockDim.x * gridDim.x * gridDim.y); gc_compress_delta_kernel1<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_delta_kernel1"); gc_compress_delta_kernel2<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf); CUT_CHECK_ERROR("gc_compress_delta_kernel2"); } void gc_decompress_delta(gcStream_t stream, int* gpu_ubuf, int entry_num, int* gpu_cbuf, int first_elem) { int* first = (int*)malloc(sizeof(int)); cudaMemcpy(first, gpu_cbuf, sizeof(int), cudaMemcpyDeviceToHost); *first += first_elem; //printf("cur_first:%d, first:%d\n", *first, first_elem); cudaMemcpy(gpu_cbuf, first, sizeof(int), cudaMemcpyHostToDevice); free(first); prefixSum(gpu_cbuf, entry_num, gpu_ubuf, INCLUSIVE); /* CUDPPConfiguration config; config.op = CUDPP_ADD; config.datatype = CUDPP_INT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; CUDPPHandle scanplan = 0; CUDPPResult result = cudppPlan(&scanplan, config, entry_num, 1, 0); if (CUDPP_SUCCESS != result) { printf("Error creating CUDPPPlan\n"); exit(-1); } cudppScan(scanplan, (void*)gpu_ubuf, (void*)gpu_cbuf, entry_num); result = cudppDestroyPlan(scanplan); if (CUDPP_SUCCESS != result) { printf("Error destroying CUDPPPlan\n"); exit(-1); } */ } //----------------------------------------------------------------------------- //FOR //----------------------------------------------------------------------------- __global__ void gc_compress_for_kernel(int* gpu_ubuf, int entry_num, int* gpu_cbuf, int reference) { int ttid = TID; if (ttid >= CEIL(entry_num, 4)) return; int lastid = entry_num/4; if (ttid != lastid) { int4* gpu_ubuf4 = (int4*)gpu_ubuf; int4* gpu_cbuf4 = (int4*)gpu_cbuf; int4 v = gpu_ubuf4[ttid]; v.x -= reference; v.y -= reference; v.z -= reference; v.w -= reference; gpu_cbuf4[ttid] = v; } else { // printf("**wenbin: lastid - %d - (%x, %x), %d\n", lastid, gridDim.x, gridDim.y, blockDim.x); int leftNum = entry_num % 4; int* gpu_ubuf_left = gpu_ubuf + ttid * 4; int* gpu_cbuf_left = gpu_cbuf + ttid * 4; for (int i = 0; i < leftNum; ++i) gpu_cbuf_left[i] = gpu_ubuf_left[i] - reference; } } void gc_compress_for(gcStream_t Stream, int* gpu_ubuf, int entry_num, int* gpu_cbuf, int reference) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); gc_compress_for_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf, reference); CUT_CHECK_ERROR("gc_compress_for_kernel"); } void gc_decompress_for(gcStream_t Stream, int* gpu_ubuf, int entry_num, int* gpu_cbuf, int reference) { dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x); int ref = -reference; gc_compress_for_kernel<<<gridDim, blockDim, Stream.stream>>>(gpu_cbuf, entry_num, gpu_ubuf, ref); CUT_CHECK_ERROR("gc_compress_for_kernel"); } //============================================================================= //Stream Management //============================================================================= void gc_stream_start(gcStream_t* Stream) { 
CUDA_SAFE_CALL(cudaStreamCreate((cudaStream_t*)&Stream->stream)); CUDA_SAFE_CALL(cudaEventCreate((cudaEvent_t*)&Stream->event)); CUDA_SAFE_CALL(cudaEventCreate((cudaEvent_t*)&Stream->start)); CUDA_SAFE_CALL(cudaEventRecord((cudaEvent_t)Stream->start, (cudaStream_t)Stream->stream)); } void gc_stream_stop(gcStream_t* Stream) { CUDA_SAFE_CALL(cudaEventRecord((cudaEvent_t)Stream->event, (cudaStream_t)Stream->stream)); CUDA_SAFE_CALL(cudaEventSynchronize((cudaEvent_t)Stream->event)); float etime = 0.0f; cudaEventElapsedTime(&etime, Stream->start, Stream->event); printf("***%f ms\n", etime); CUDA_SAFE_CALL(cudaEventDestroy((cudaEvent_t)Stream->event)); CUDA_SAFE_CALL(cudaEventDestroy((cudaEvent_t)Stream->start)); CUDA_SAFE_CALL(cudaStreamDestroy((cudaStream_t)Stream->stream)); } //============================================================================= //Memory Management //============================================================================= void* gc_malloc(size_t bufsize) { void* gpu_buf = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_buf, bufsize)); return gpu_buf; } void gc_free(void* gpu_buf) { CUDA_SAFE_CALL(cudaFree(gpu_buf)); } void* gc_host2device(gcStream_t Stream, void* cpu_buf, size_t bufsize) { void* gpu_buf = NULL; int round_bufsize = CEIL(bufsize, 16) * 16 + 4; CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_buf, round_bufsize)); CUDA_SAFE_CALL(cudaMemcpyAsync(gpu_buf, cpu_buf, bufsize, cudaMemcpyHostToDevice, Stream.stream)); return gpu_buf; } void* gc_device2host(gcStream_t Stream, void* gpu_buf, size_t bufsize) { void* pinned = NULL; CUDA_SAFE_CALL(cudaMallocHost((void**)&pinned, bufsize)); CUDA_SAFE_CALL(cudaMemcpyAsync(pinned, gpu_buf, bufsize, cudaMemcpyDeviceToHost, Stream.stream)); //void* cpu_buf = malloc(bufsize); //memcpy(cpu_buf, pinned, bufsize); //CUDA_SAFE_CALL(cudaFreeHost(pinned)); //return cpu_buf; return pinned; } //============================================================================= //Testing //============================================================================= __global__ void test_kernel(int* d_input, int num) { int tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid >= num) return; d_input[tid] = d_input[tid] * 2; } extern "C" void test_gpu(int num, int print_num) { if (num < print_num) return; int *d_input = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_input, num * sizeof(int))); int *h_input = (int*)malloc(num * sizeof(int)); for (int i = 0; i < num; i++) h_input[i] = i; CUDA_SAFE_CALL(cudaMemcpy(d_input, h_input, sizeof(int)*num, cudaMemcpyHostToDevice)); int block_dim = 256; int grid_dim = (num / 256 + (int)(num % 256 != 0)); test_kernel<<<grid_dim, block_dim>>>(d_input, num); CUDA_SAFE_CALL(cudaMemcpy(h_input, d_input, sizeof(int)*num, cudaMemcpyDeviceToHost)); for (int i = 0; i < print_num; i++) printf("%d - %d\n", i, h_input[i]); CUDA_SAFE_CALL(cudaFree(d_input)); free(h_input); } void test_malloc(int size, int nloop) { unsigned timer; cutCreateTimer(&timer); cutStartTimer(timer); for (int i = 0; i < nloop; i++) { char* buf; cudaMalloc((void**)&buf, size); cudaFree(buf); } cutStopTimer(timer); double ctime = cutGetTimerValue(timer); printf("allocate&free %d bytes buf: %f ms\n", size, ctime / (double)nloop); } __global__ void int2char(int* intSrc, char* charSrc, int num) { int ttid = TID; if (ttid >= num) return; __shared__ int s[256]; //__shared__ char s[256]; // intSrc[ttid] = (int)charSrc[ttid]; // charSrc[ttid] = (char)intSrc[ttid]; { int d; s[threadIdx.x] = d; //charSrc[ttid] = s[threadIdx.x]; //intSrc[ttid] = charSrc[ttid]; 
//charSrc[ttid] = intSrc[ttid]; //s[threadIdx.x] = intSrc[ttid]; //intSrc[ttid] = s[threadIdx.x]; // intSrc[ttid] = v; //char v = charSrc[ttid]; //charSrc[ttid] = ttid; //intSrc[ttid] = i; // charSrc[ttid] = i; } } #define NUM 100000000 void test_int2char() { int* intSrc = (int*)gc_malloc(NUM * sizeof(int)); char* charSrc = (char*)gc_malloc(NUM * sizeof(char)); unsigned timer; cutCreateTimer(&timer); cutStartTimer(timer); dim3 blockDim(256, 1, 1); dim3 gridDim(1, 1, 1); THREAD_CONF(gridDim, blockDim, CEIL(NUM, blockDim.x), blockDim.x); int2char<<<gridDim, blockDim>>>(intSrc, charSrc, NUM); cudaThreadSynchronize(); CUT_CHECK_ERROR("gc_intersect"); cutStopTimer(timer); double atime = cutGetTimerValue(timer); printf("%f ms\n", atime); gc_free(intSrc); gc_free(charSrc); }
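/*
 * A side note on the launch form used by the wrappers above: the three-argument
 * configuration kernel<<<gridDim, blockDim, Stream.stream>>> places the stream
 * handle in the slot CUDA reserves for the dynamic shared-memory byte count, so
 * no stream is actually passed to the launch. The sketch below is an
 * illustrative stream-aware variant of one wrapper, not the library's original
 * code; it assumes Stream.stream can be cast to a cudaStream_t (as
 * gc_stream_start/gc_stream_stop already do) and reuses gc_compress_for_kernel
 * and the THREAD_CONF/CEIL macros unchanged.
 */
void gc_compress_for_streamed(gcStream_t Stream, int* gpu_ubuf, int entry_num, int* gpu_cbuf, int reference)
{
    dim3 blockDim(256, 1, 1);
    dim3 gridDim(1, 1, 1);
    THREAD_CONF(gridDim, blockDim, CEIL(CEIL(entry_num, 4), blockDim.x), blockDim.x);
    // Four-argument launch configuration: <<<grid, block, sharedMemBytes, stream>>>.
    gc_compress_for_kernel<<<gridDim, blockDim, 0, (cudaStream_t)Stream.stream>>>(gpu_ubuf, entry_num, gpu_cbuf, reference);
    CUT_CHECK_ERROR("gc_compress_for_kernel");
}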
293290efb4afd33b3303153e9762892ab0c4bff8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "box2d3r-256-2-128_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 97 #define BENCH_RAD 3 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 7 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice)); #ifdef STENCILBENCH hipDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 - 3); const AN5D_TYPE __c1Pad = (3); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), 
dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH hipDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = 0.01530f * A[t%2][i-3][j-3] + 0.01531f * A[t%2][i-3][j-2] + 0.01532f * A[t%2][i-3][j-1] + 0.01533f * A[t%2][i-3][j] + 0.01534f * A[t%2][i-3][j+1] + 0.01535f * A[t%2][i-3][j+2] + 0.01536f * A[t%2][i-3][j+3] + 0.01537f * A[t%2][i-2][j-3] + 0.01538f * A[t%2][i-2][j-2] + 0.01539f * A[t%2][i-2][j-1] + 0.01540f * A[t%2][i-2][j] + 0.01541f * A[t%2][i-2][j+1] + 0.01542f * A[t%2][i-2][j+2] + 0.01543f * A[t%2][i-2][j+3] + 0.01544f * A[t%2][i-1][j-3] + 0.01545f * A[t%2][i-1][j-2] + 0.01546f * A[t%2][i-1][j-1] + 0.01546f * A[t%2][i-1][j] + 0.01547f * A[t%2][i-1][j+1] + 0.01548f * A[t%2][i-1][j+2] + 0.01549f * A[t%2][i-1][j+3] + 0.01550f * A[t%2][i][j-3] + 0.01551f * A[t%2][i][j-2] + 0.01552f * A[t%2][i][j-1] + 0.25424f * A[t%2][i][j] + 0.01554f * A[t%2][i][j+1] + 0.01555f * A[t%2][i][j+2] + 0.01556f * A[t%2][i][j+3] + 0.01557f * A[t%2][i+1][j-3] + 0.01558f * A[t%2][i+1][j-2] + 0.01559f * A[t%2][i+1][j-1] + 0.01560f * A[t%2][i+1][j] + 0.01561f * A[t%2][i+1][j+1] + 0.01562f * A[t%2][i+1][j+2] + 0.01564f * A[t%2][i+1][j+3] + 0.01565f * A[t%2][i+2][j-3] + 0.01566f * A[t%2][i+2][j-2] + 0.01567f * A[t%2][i+2][j-1] + 0.01568f * A[t%2][i+2][j] + 0.01569f * A[t%2][i+2][j+1] + 0.01570f * A[t%2][i+2][j+2] + 0.01571f * A[t%2][i+2][j+3] + 0.01572f * A[t%2][i+3][j-3] + 0.01573f * A[t%2][i+3][j-2] + 0.01574f * A[t%2][i+3][j-1] + 0.01575f * A[t%2][i+3][j] + 
0.01576f * A[t%2][i+3][j+1] + 0.01577f * A[t%2][i+3][j+2] + 0.01578f * A[t%2][i+3][j+3]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
293290efb4afd33b3303153e9762892ab0c4bff8.cu
#include <assert.h> #include <stdio.h> #include "box2d3r-256-2-128_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 97 #define BENCH_RAD 3 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 7 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice)); #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 - 3); const AN5D_TYPE __c1Pad = (3); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 
= (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = 0.01530f * A[t%2][i-3][j-3] + 0.01531f * A[t%2][i-3][j-2] + 0.01532f * A[t%2][i-3][j-1] + 0.01533f * A[t%2][i-3][j] + 0.01534f * A[t%2][i-3][j+1] + 0.01535f * A[t%2][i-3][j+2] + 0.01536f * A[t%2][i-3][j+3] + 0.01537f * A[t%2][i-2][j-3] + 0.01538f * A[t%2][i-2][j-2] + 0.01539f * A[t%2][i-2][j-1] + 0.01540f * A[t%2][i-2][j] + 0.01541f * A[t%2][i-2][j+1] + 0.01542f * A[t%2][i-2][j+2] + 0.01543f * A[t%2][i-2][j+3] + 0.01544f * A[t%2][i-1][j-3] + 0.01545f * A[t%2][i-1][j-2] + 0.01546f * A[t%2][i-1][j-1] + 0.01546f * A[t%2][i-1][j] + 0.01547f * A[t%2][i-1][j+1] + 0.01548f * A[t%2][i-1][j+2] + 0.01549f * A[t%2][i-1][j+3] + 0.01550f * A[t%2][i][j-3] + 0.01551f * A[t%2][i][j-2] + 0.01552f * A[t%2][i][j-1] + 0.25424f * A[t%2][i][j] + 0.01554f * A[t%2][i][j+1] + 0.01555f * A[t%2][i][j+2] + 0.01556f * A[t%2][i][j+3] + 0.01557f * A[t%2][i+1][j-3] + 0.01558f * A[t%2][i+1][j-2] + 0.01559f * A[t%2][i+1][j-1] + 0.01560f * A[t%2][i+1][j] + 0.01561f * A[t%2][i+1][j+1] + 0.01562f * A[t%2][i+1][j+2] + 0.01564f * A[t%2][i+1][j+3] + 0.01565f * A[t%2][i+2][j-3] + 0.01566f * A[t%2][i+2][j-2] + 0.01567f * A[t%2][i+2][j-1] + 0.01568f * A[t%2][i+2][j] + 0.01569f * A[t%2][i+2][j+1] + 0.01570f * A[t%2][i+2][j+2] + 0.01571f * A[t%2][i+2][j+3] + 0.01572f * A[t%2][i+3][j-3] + 0.01573f * A[t%2][i+3][j-2] + 0.01574f * A[t%2][i+3][j-1] + 0.01575f * A[t%2][i+3][j] + 0.01576f * A[t%2][i+3][j+1] + 0.01577f * A[t%2][i+3][j+2] + 0.01578f * A[t%2][i+3][j+3]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
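/*
 * A reading of the tile constants in the two generated variants above (an
 * explanatory sketch, not part of the generated code): each fused time step
 * (__side0Len) widens the overlap by BENCH_RAD = 3 cells per side, and the
 * overlapped width __side2LenOl is held at the 256-thread block named in
 * box2d3r-256-2-128, which is where the interior widths 244 and 250 come from.
 */
#include <assert.h>

static unsigned interior_width(unsigned block_width, unsigned halo, unsigned fused_steps)
{
    /* inverts __side2LenOl = __side2Len + 2 * __halo2 * __side0Len */
    return block_width - 2u * halo * fused_steps;
}

static void check_tile_widths(void)
{
    assert(interior_width(256u, 3u, 2u) == 244u);  /* kernel0_2: __side0Len = 2 */
    assert(interior_width(256u, 3u, 1u) == 250u);  /* kernel0_1: __side0Len = 1 */
}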
d2b6e551b511e4d41a9c4473bb1e8c7f3f054587.hip
// !!! This is a file automatically generated by hipify!!!
/*! \file launcher.cu
    \brief Controller code that launches Hamiltonian generation and Lanczos diagonalization
*/
#include "lattice.h"
#include <cstdlib>
#include "hip/hip_runtime.h"
#include <fstream>
#include <iostream>
#include "lanczos.h"
//#include"hamiltonian.h"

int main()
{
    for(int i = 0; i < 1; i++)
    {
        int** Bond;
        //cout<<i<<" "<<endl;
        int how_many;
        /*if (i == 1) { how_many = 5; }*/

        ifstream fin;
        fin.open("data.dat");
        fin >> how_many ;

        Bond = (int**)malloc(how_many*sizeof(int*));
        d_hamiltonian* hamil_lancz = (d_hamiltonian*)malloc(how_many*sizeof(d_hamiltonian));
        parameters* data = (parameters*)malloc(how_many*sizeof(parameters));
        double** groundstates = (double**)malloc(how_many*sizeof(double*));
        double** eigenvalues = (double**)malloc(how_many*sizeof(double*));
        if (data == NULL)
        {
            cerr<<"Malloc of parameter container failed!"<<endl;
            return 1;
        }
        int* num_Elem = (int*)malloc(how_many*sizeof(int));

        //hipSetDevice(1);
        int device = 1; //i%2;

        for(int i = 0; i < how_many; i++)
        {
            fin >> data[i].nsite >> data[i].Sz >> data[i].J1 >> data[i].J2 >> data[i].modelType >> data[i].dimension;
            switch (data[i].dimension)
            {
            case 1:
                Bond[i] = (int*)malloc(2*data[i].nsite*sizeof(int));
                for( int j = 0; j < data[i].nsite; j++ ){
                    Bond[i][j] = j;
                    Bond[i][j+ data[i].nsite] = (j+1)%data[i].nsite;
                }
                break;
            case 2:
                Bond[i] = (int*)malloc(3*data[i].nsite*sizeof(int));
                Fill_Bonds_16B(Bond[i]);
                break;
            }
            eigenvalues[i] = (double*)malloc(3*sizeof(double));
        }

        /*hipEvent_t start, stop;
        hipEventCreate(&start);
        hipEventCreate(&stop);*/
        //float time;
        //hipEventRecord(start,0);
        ConstructSparseMatrix(how_many, Bond, hamil_lancz, data, num_Elem, device);
        /*hipEventRecord(stop,0);
        hipEventSynchronize(stop);
        hipEventElapsedTime(&time, start, stop);
        cout<<"Time to construct Hamiltonians: "<<time<<endl;
        hipEventRecord(start,0); */

        lanczos(how_many, num_Elem, hamil_lancz, groundstates, eigenvalues, 200, 3, 1e-12);

        /* hipEventRecord(stop,0);
        hipEventSynchronize(stop);
        hipEventElapsedTime(&time, start, stop);
        cout<<"Time to perform Lanczos: "<<time<<endl;*/

        for(int j = 0; j<how_many; j++)
        {
            hipFree(hamil_lancz[j].rows);
            hipFree(hamil_lancz[j].cols);
            hipFree(hamil_lancz[j].vals);
            hipFree(groundstates[j]);
        }
        //hipEventDestroy(start);
        //hipEventDestroy(stop);
        free(data);
        free(Bond);
        free(hamil_lancz);
        free(num_Elem);
        free(groundstates);
    }
    return 0;
}
d2b6e551b511e4d41a9c4473bb1e8c7f3f054587.cu
/*! \file launcher.cu
    \brief Controller code that launches Hamiltonian generation and Lanczos diagonalization
*/
#include "lattice.h"
#include <cstdlib>
#include "cuda.h"
#include <fstream>
#include <iostream>
#include "lanczos.h"
//#include"hamiltonian.h"

int main()
{
    for(int i = 0; i < 1; i++)
    {
        int** Bond;
        //cout<<i<<" "<<endl;
        int how_many;
        /*if (i == 1) { how_many = 5; }*/

        ifstream fin;
        fin.open("data.dat");
        fin >> how_many ;

        Bond = (int**)malloc(how_many*sizeof(int*));
        d_hamiltonian* hamil_lancz = (d_hamiltonian*)malloc(how_many*sizeof(d_hamiltonian));
        parameters* data = (parameters*)malloc(how_many*sizeof(parameters));
        double** groundstates = (double**)malloc(how_many*sizeof(double*));
        double** eigenvalues = (double**)malloc(how_many*sizeof(double*));
        if (data == NULL)
        {
            cerr<<"Malloc of parameter container failed!"<<endl;
            return 1;
        }
        int* num_Elem = (int*)malloc(how_many*sizeof(int));

        //cudaSetDevice(1);
        int device = 1; //i%2;

        for(int i = 0; i < how_many; i++)
        {
            fin >> data[i].nsite >> data[i].Sz >> data[i].J1 >> data[i].J2 >> data[i].modelType >> data[i].dimension;
            switch (data[i].dimension)
            {
            case 1:
                Bond[i] = (int*)malloc(2*data[i].nsite*sizeof(int));
                for( int j = 0; j < data[i].nsite; j++ ){
                    Bond[i][j] = j;
                    Bond[i][j+ data[i].nsite] = (j+1)%data[i].nsite;
                }
                break;
            case 2:
                Bond[i] = (int*)malloc(3*data[i].nsite*sizeof(int));
                Fill_Bonds_16B(Bond[i]);
                break;
            }
            eigenvalues[i] = (double*)malloc(3*sizeof(double));
        }

        /*cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);*/
        //float time;
        //cudaEventRecord(start,0);
        ConstructSparseMatrix(how_many, Bond, hamil_lancz, data, num_Elem, device);
        /*cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);
        cout<<"Time to construct Hamiltonians: "<<time<<endl;
        cudaEventRecord(start,0); */

        lanczos(how_many, num_Elem, hamil_lancz, groundstates, eigenvalues, 200, 3, 1e-12);

        /* cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);
        cout<<"Time to perform Lanczos: "<<time<<endl;*/

        for(int j = 0; j<how_many; j++)
        {
            cudaFree(hamil_lancz[j].rows);
            cudaFree(hamil_lancz[j].cols);
            cudaFree(hamil_lancz[j].vals);
            cudaFree(groundstates[j]);
        }
        //cudaEventDestroy(start);
        //cudaEventDestroy(stop);
        free(data);
        free(Bond);
        free(hamil_lancz);
        free(num_Elem);
        free(groundstates);
    }
    return 0;
}
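/*
 * A small illustration of the 1D bond layout built in the launcher above: the
 * dimension == 1 branch stores two contiguous rows of length nsite, the site
 * indices first and then each site's right neighbour with periodic
 * wrap-around. The nsite value below is made up for the example; the real
 * values are read from data.dat.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const int nsite = 4;                        /* hypothetical chain length */
    int* bond = (int*)malloc(2 * nsite * sizeof(int));
    for (int j = 0; j < nsite; j++)
    {
        bond[j] = j;                            /* bond j starts at site j        */
        bond[j + nsite] = (j + 1) % nsite;      /* and ends at site (j+1) % nsite */
    }
    for (int j = 0; j < nsite; j++)
        printf("bond %d: %d -- %d\n", j, bond[j], bond[j + nsite]);  /* 0--1, 1--2, 2--3, 3--0 */
    free(bond);
    return 0;
}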
dcd8717febf999b9bec7c471fb28cb10a3f290cc.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"

// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"

#include "cudaCommon.h"

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    // At least 2 arguments expected
    // Input and result
    if((nlhs != 1) || (nrhs != 2)) {
        mexErrMsgTxt("Form: slab reference = GPU_getslab(gputag, which to get).");
    }

    CHECK_CUDA_ERROR("entering GPU_getslab");

    MGArray m;
    MGA_accessMatlabArrays(prhs, 0, 0, &m);

    int x = (int)*mxGetPr(prhs[1]);

    int sub[6];
    int i;
    MGArray slab = m;

    for(i = 0; i < m.nGPUs; i++) {
        calcPartitionExtent(&m, i, &sub[0]);
        // number of bytes per slab
        int64_t slabsize = sub[3]*sub[4]*sub[5] * sizeof(double);
        // round up to make a pleasantly CUDA-aligned amount
        int64_t slabpitch = slabsize / 256;
        slabpitch += (256*slabpitch < slabsize);
        slabpitch *= 256;
        slabpitch /= sizeof(double);

        slab.devicePtr[i] += x*slabpitch;
        slab.numSlabs = -x;
    }

    MGA_returnOneArray(plhs, &slab);

    return;
}
dcd8717febf999b9bec7c471fb28cb10a3f290cc.cu
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"

// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"

#include "cudaCommon.h"

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    // At least 2 arguments expected
    // Input and result
    if((nlhs != 1) || (nrhs != 2)) {
        mexErrMsgTxt("Form: slab reference = GPU_getslab(gputag, which to get).");
    }

    CHECK_CUDA_ERROR("entering GPU_getslab");

    MGArray m;
    MGA_accessMatlabArrays(prhs, 0, 0, &m);

    int x = (int)*mxGetPr(prhs[1]);

    int sub[6];
    int i;
    MGArray slab = m;

    for(i = 0; i < m.nGPUs; i++) {
        calcPartitionExtent(&m, i, &sub[0]);
        // number of bytes per slab
        int64_t slabsize = sub[3]*sub[4]*sub[5] * sizeof(double);
        // round up to make a pleasantly CUDA-aligned amount
        int64_t slabpitch = slabsize / 256;
        slabpitch += (256*slabpitch < slabsize);
        slabpitch *= 256;
        slabpitch /= sizeof(double);

        slab.devicePtr[i] += x*slabpitch;
        slab.numSlabs = -x;
    }

    MGA_returnOneArray(plhs, &slab);

    return;
}
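/*
 * The pitch arithmetic in GPU_getslab above rounds each slab up to a 256-byte
 * boundary and then converts the pitch to a count of doubles before offsetting
 * devicePtr. A minimal standalone sketch of the same computation; the element
 * count used here is illustrative, the real one is sub[3]*sub[4]*sub[5] from
 * the partition extent.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t nElems    = 1000;                             /* hypothetical sub[3]*sub[4]*sub[5]   */
    int64_t slabsize  = nElems * (int64_t)sizeof(double); /* 8000 bytes                          */
    int64_t slabpitch = slabsize / 256;
    slabpitch += (256 * slabpitch < slabsize);            /* round up to the next 256-byte line  */
    slabpitch *= 256;                                     /* 8192 bytes                          */
    slabpitch /= sizeof(double);                          /* 1024 doubles between adjacent slabs */
    printf("pitch = %lld doubles\n", (long long)slabpitch);
    return 0;
}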
0d43fb10c18b944a626d008eac67f9f7574f589a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * FileName: RayTracer_Kernel.cu * * Programmer: Jiayin Cao */ //the sum for scan int* g_ScanSum[2]; //some helper functions __device__ void d_normalize( float4* v ) { float s = v->x * v->x + v->y * v->y + v->z * v->z; s = sqrt(s); v->x /= s; v->y /= s; v->z /= s; } //cross product __device__ float4 d_cross( const float4& v1 , const float4& v2 ) { float4 r; r.x = v1.y * v2.z - v1.z * v2.y; r.y = v1.z * v2.x - v1.x * v2.z; r.z = v1.x * v2.y - v1.y * v2.x; r.w = 0.0f; return r; } //clamp the value __device__ float d_clamp( const float v ) { if( v > 1.0f ) return 1.0f; if( v < 0.0f ) return 0.0f; return v; } //clamp the float4 __device__ float4 d_saturate( const float4& v ) { return make_float4( d_clamp( v.x ) , d_clamp( v.y ) , d_clamp( v.z ) , d_clamp( v.w ) ); } //dot product __device__ float d_dot( const float4& v1 , const float4& v2 ) { return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z ; } //the length of the vector __device__ float d_length( const float4& v ) { return sqrt( v.x * v.x + v.y * v.y + v.z * v.z ); } //define some useful operators for float4 __device__ float4 operator+ ( const float4& v1 , const float4& v2 ) { return make_float4( v1.x + v2.x , v1.y + v2.y , v1.z + v2.z , v1.w + v2.w ); } __device__ float4 operator- ( const float4& v1 , const float4& v2 ) { return make_float4( v1.x - v2.x , v1.y - v2.y , v1.z - v2.z , v1.w - v2.w ); } __device__ float4 operator* ( const float4& v , const float d ) { return make_float4( v.x * d , v.y * d , v.z * d , v.w * d ); } __device__ float4 operator* ( const float d , const float4& v ) { return make_float4( v.x * d , v.y * d , v.z * d , v.w * d ); } __device__ float4 operator* ( const float4& v1 , const float4& v2 ) { return make_float4( v1.x * v2.x , v1.y * v2.y , v1.z * v2.z , v1.w * v2.w ); } __device__ float4 operator+= ( float4& v1 , const float4& v2 ) { v1 = v1 + v2; return v1; } __device__ float2 operator * ( const float d , const float2& v ) { return make_float2( d * v.x , d * v.y ); } __device__ float2 operator + ( const float2& v1 , const float2& v2 ) { return make_float2( v1.x + v2.x , v1.y + v2.y ); } __device__ float2 operator - ( const float2& v1 , const float2& v2 ) { return make_float2( v1.x - v2.x , v1.y - v2.y ); } __device__ float2 floor( const float2& v ) { int x = (int) v.x ; int y = (int) v.y ; return make_float2( x , y ); } //reflect direction __device__ float4 d_reflect( const float4& dir , const float4& normal ) { float dotProduct = ( -2.0f ) * d_dot( dir , normal ); float4 r = dir + dotProduct * normal; return make_float4( r.x , r.y , r.z , 0.0f ); } //refraction direction __device__ float4 d_refract( const float4& dir , float4 normal , float rate ) { float4 r; if( d_dot( dir , normal ) > 0 ) { normal = -1.0f * normal; rate = 1.0f / rate; } float cos = -1.0f * d_dot( dir , normal ); float t = 1 - rate * rate * ( 1 - cos * cos ); if( t < 0 ) { r = d_reflect( dir , normal ); }else { float cos2 = sqrt( t ); r = rate * dir + ( rate * cos - cos2 ) * normal ; } return r; } //check if the ray intersects with bounding box __device__ float4 kernelIntersectBoundingBox( float4& ori , float4& dir , float4& min , float4& max , float length ) { //the result float4 result = make_float4( 0.0f , 9999999.0f , 0.0f , 0.0f ); //limit the maxium value if( length > 0 ) result.y = length; //the variables float t1 , t2; if( fabs( dir.x ) < 0.0000001f ) { if( ori.x > max.x || ori.x < min.x ) return result; }else { t1 = ( max.x - ori.x ) / dir.x; t2 = 
( min.x - ori.x ) / dir.x; if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; } //clamp if( t1 > result.x ) result.x = t1; if( t2 < result.y ) result.y = t2; if( result.x > result.y ) return result; } if( fabs( dir.y ) < 0.0000001f ) { if( ori.y > max.y || ori.y < min.y ) return result; }else { t1 = ( max.y - ori.y ) / dir.y; t2 = ( min.y - ori.y ) / dir.y; if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; } //clamp if( t1 > result.x ) result.x = t1; if( t2 < result.y ) result.y = t2; if( result.x > result.y ) return result; } if( fabs( dir.y ) < 0.0000001f ) { if( ori.z > max.z || ori.z < min.z ) return result; }else { t1 = ( max.z - ori.z ) / dir.z; t2 = ( min.z - ori.z ) / dir.z; if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; } //clamp if( t1 > result.x ) result.x = t1; if( t2 < result.y ) result.y = t2; if( result.x > result.y ) return result; } //enable the intersected point result.z = 1.0f; return result; } //check if the ray intersects with a plane __device__ float4 kernelIntersectPlane( const float4& v1 , const float4& v2 , const float4& v3 , const float4& ori , const float4& dir ) { //w : >= 0 ( intersected point enable ) , < 0 ( disable ) float4 result = make_float4( 0.0f , 0.0f , 0.0f , 0.0f ); //get the normal of the plane float4 normal = d_cross( v2 - v1 , v3 - v1 ); //get the factor float t = d_dot( normal , ori - v1 ) / d_dot( normal , dir ); //set the result result = ori - t * dir; if( t <= 0.0f ) result.w = -t; else result.w = -1; return result; } //check if the ray intersects with a triangle __device__ float4 kernelIntersectTriangle( const float4& v1 , const float4& v2 , const float4& v3 , const float4& ori , const float4& dir ) { //the result float4 result = kernelIntersectPlane( v1 , v2 , v3 , ori , dir ); if( result.w < 0 ) return result; //get the factor float4 d1 = d_cross( result - v2 , v1 - v2 ); float4 d2 = d_cross( result - v3 , v2 - v3 ); float4 d3 = d_cross( result - v1 , v3 - v1 ); float f1 = d_dot( d1 , d2 ); float f2 = d_dot( d2 , d3 ); if( !( f1 >= -0.000000000000001f && f2 >= -0.000000000000001f ) ) result.w = -1.0f; return result; } //check if the current point is in the bounding box __device__ int kernelPointInBoundingBox( const float4& p , const float4& min , const float4& max ) { float threshold = 0.00001f; if( p.x < min.x - threshold || p.y < min.y - threshold || p.z < min.z - threshold || p.x > max.x + threshold || p.y > max.y + threshold || p.z > max.z + threshold ) return false; return true; } //do interplotation __device__ float4 kernelInterploted( const float4& v1 , const float4& v2 , const float4& v3 , const float4& intersected ) { //get the vectors float4 e1 = intersected - v1; float4 e2 = intersected - v2; float4 e3 = intersected - v3; //compute the areas float4 area; area.x = d_length( d_cross( e2 , e3 ) ); area.y = d_length( d_cross( e3 , e1 ) ); area.z = d_length( d_cross( e1 , e2 ) ); float d = 1.0f / ( area.x + area.y + area.z ); return area * d; } //clear and initialize buffer __global__ void kernelInitBuffer( float4* buffer , int* markedBuffer , int pixelNum ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= pixelNum ) return; buffer[tid] = make_float4( 0.0f , 0.0f , 0.0f , 0.0f ); markedBuffer[tid] = tid; } //generate primary ray intersected result __global__ void kernelGenerateIntersectedPoint( float4* rayOri , float4* rayDir , float4* vertexBuffer , int rayNum , int* index , float4* result ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( 
tid >= rayNum ) return; //Load the vertex int triId = index[tid]; //get the vertex int id = 3 * triId; float4 v1 = vertexBuffer[id]; float4 v2 = vertexBuffer[id+1]; float4 v3 = vertexBuffer[id+2]; //ray ori and dir float4 ori = rayOri[tid]; float4 dir = rayDir[tid]; //get the intersected result result[tid] = kernelIntersectPlane( v1 , v2 , v3 , ori , dir ); result[tid].w = triId; } //Generate primary rays __global__ void kernelGeneratePrimaryRays( float4 viewInfo , float* invViewMatrix , float4* rayOri , float4* rayDir ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= (int)viewInfo.x * (int)viewInfo.y ) return; // get the pixel coorindate first uint2 coord; coord.x = tid % (int) viewInfo.x; coord.y = tid / (int)viewInfo.x; // compute the vector of the ray in screen space float2 v; v.x = ( ( ( 2.0f * coord.x ) / viewInfo.x ) - 1.0f ) / viewInfo.z; v.y = -1.0f * ( ( ( 2.0f * coord.y ) / viewInfo.y ) - 1.0f ) / viewInfo.w; //copy the original point of the rays rayOri[tid] = make_float4( invViewMatrix[12] , invViewMatrix[13] , invViewMatrix[14] , tid ); //compute the direction of the ray float4 dir; dir.x = ( v.x * invViewMatrix[0] + v.y * invViewMatrix[4] + invViewMatrix[8] ); dir.y = ( v.x * invViewMatrix[1] + v.y * invViewMatrix[5] + invViewMatrix[9] ); dir.z = ( v.x * invViewMatrix[2] + v.y * invViewMatrix[6] + invViewMatrix[10] ); dir.w = 0.0f; d_normalize( &dir ); rayDir[tid] = make_float4( dir.x , dir.y , dir.z , 1.0f ); } //traverse the ray through kd-tree __device__ float4 kernelTraverseRay( float4* kdTree , int* indexMap , int* offsetBuffer , float4* vertexBuffer , float4& rayOri , float4& rayDir , float length ) { //the intersected result float4 result = make_float4( 0.0f , 0.0f , 0.0f , -1.0f ); //tree node information float4 header; float4 splitInfo; //the bounding box float4 minBB = kdTree[2]; float4 maxBB = kdTree[3]; //check if the ray intersects with the current bounding box of the root result = kernelIntersectBoundingBox( rayOri , rayDir , minBB , maxBB , length ); //if the ray doesn't cross the kd-tree , just return if( result.z < 0.5f ) { result = make_float4( 0.0f , 0.0f , 0.0f , -1.0f ); return result; } //current traversing node int currentNodeIndex = 0; //the mask to mark the traversed node unsigned int mask = 0; //current traverse depth int currentTraverseDepth = 0; //current inPonit when traversing the node float4 inPoint = rayOri + result.x * rayDir ; while( currentTraverseDepth >= 0 ) { //traverse the current node do { //the current node offset int currentNodeOffset = currentNodeIndex * 4; //get the current node information header = kdTree[ currentNodeOffset ]; splitInfo = kdTree[currentNodeOffset + 1 ]; //check if it's a leaf node if( splitInfo.x < 0 ) break; //get the split axis int splitAxis = (int) splitInfo.x; //get the pointer of the inPoint float sPos = 0.0f; if( splitAxis == 0 ) sPos = inPoint.x; else if( splitAxis == 1 ) sPos = inPoint.y; else if( splitAxis == 2 ) sPos = inPoint.z; //update the virtual stack and traverse the node if( splitInfo.y > sPos ) currentNodeIndex = (int)header.y; else currentNodeIndex = (int)header.z; //increase the current traverse depth currentTraverseDepth++; }while( true ); //get the offset and triangle number int triOffset = offsetBuffer[currentNodeIndex]; int triNumber = (int)header.w; //min value float minFactor = 9999999.0f; if( length > 0 ) minFactor = length; //triangle index int oriTriIndex = -1; //the bounding box minBB = kdTree[currentNodeIndex*4+2]; maxBB = 
kdTree[currentNodeIndex*4+3]; //intersect with the current triangles for( int i = 0 ; i < triNumber ; i++ ) { //get the triangles int triIndex = indexMap[triOffset+i]; //get the vertex float4 v1 = vertexBuffer[3*triIndex]; float4 v2 = vertexBuffer[3*triIndex+1]; float4 v3 = vertexBuffer[3*triIndex+2]; //get the intersected point result = kernelIntersectTriangle( v1 , v2 , v3 , rayOri , rayDir ); //limit the factor if( result.w > 0.0f && result.w < minFactor ) { if( kernelPointInBoundingBox( result , minBB , maxBB ) ) { minFactor = result.w; oriTriIndex = triIndex; if( length > 0 ) break; } } } if( oriTriIndex >= 0 ) { result = rayOri + minFactor * rayDir; result.w = (float)oriTriIndex; return result; } //back track here while( currentTraverseDepth >= 0 ) { if( currentTraverseDepth == 0 ) return make_float4( 0 , 0 , 0 , -1.0f ); //get the current mask if( mask & ( 0x00000001 << currentTraverseDepth ) ) { //update the mask mask &= ~(0x00000001 << currentTraverseDepth ); //decrease the current depth; currentTraverseDepth--; //get to the father node currentNodeIndex = (int)kdTree[ 4 * currentNodeIndex ].x; //continue to next level continue; } //check the other node int otherNode = currentNodeIndex + 1; if( currentNodeIndex % 2 == 0 ) otherNode -= 2; //get the bounding box of the other node int otherNodeOffset = 4 * otherNode; minBB = kdTree[ otherNodeOffset + 2 ]; maxBB = kdTree[ otherNodeOffset + 3 ]; //get the intersected result float4 bi = kernelIntersectBoundingBox( rayOri , rayDir , minBB , maxBB , length ); if( bi.z > 0.5f ) { //update the current traverse node currentNodeIndex = otherNode; //update the inPoint inPoint = rayOri + bi.x * rayDir ; //update the mask mask |= 0x00000001 << currentTraverseDepth; break; }else { //update the mask mask &= ~( 0x00000001 << currentTraverseDepth ); //decrease current depth currentTraverseDepth--; //get to the father node currentNodeIndex = (int) kdTree[ 4 * currentNodeIndex ].x; } } } result.w = -1.0f; return result; } //get the interseced point __global__ void kernelGetIntersectedPoint( float4* rayOri , float4* rayDir , float4* kdTree , int* indexMap , int* offsetBuffer , float4* vertexBuffer , int rayNumber , float4* result ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= rayNumber ) return; //get the triangle result[tid] = kernelTraverseRay( kdTree , indexMap , offsetBuffer , vertexBuffer , rayOri[tid] , rayDir[tid] , -1.0f ); } //do pixel shader here __global__ void kernelPixelShader( float4* intersected , float4* vertexBuffer , float4* normalBuffer , float2* texCoordinateBuffer , float4* kdTree , int* indexMap , int* offsetIndexBuffer, float4* lightBuffer , int* attributeBuffer , float4* materialBuffer , int* textureOffset , float4* customTexture , int pixelNum , float4* rayDir , int* offsetBuffer , float4* destNormalBuffer , float4* imageBuffer ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= pixelNum ) return; //get the triangle index int triIndex = (int)intersected[tid].w; int triOffset = 3 * triIndex; float4 color = make_float4( 0.0f , 0.0f , 0.0f , 0.0f ); //load the density of the pixel if( triIndex < 0 ) return; //get the material index int matIndex = attributeBuffer[triIndex]; //the material buffer float4 ambient = materialBuffer[ 4 * matIndex ]; float4 diffuse = materialBuffer[ 4 * matIndex + 1 ]; float4 specular = materialBuffer[ 4 * matIndex + 2 ]; float4 matprop = materialBuffer[ 4 * matIndex + 3 ]; //load the vertex 
float4 v1 = vertexBuffer[ triOffset ]; float4 v2 = vertexBuffer[ triOffset + 1 ]; float4 v3 = vertexBuffer[ triOffset + 2 ]; //get the interploted float4 interploted = kernelInterploted( v1 , v2 , v3 , intersected[tid] ); //get the normal float4 n1 = normalBuffer[ triOffset ]; float4 n2 = normalBuffer[ triOffset + 1 ]; float4 n3 = normalBuffer[ triOffset + 2 ]; float4 normal = n1 * interploted.x + n2 * interploted.y + n3 * interploted.z; d_normalize( &normal ); //update the normal buffer destNormalBuffer[tid] = normal; destNormalBuffer[tid].w = matIndex; //the density for the pixel float density = rayDir[tid].w; if( matprop.x > -0.5f ) { //load the texture coordinate float2 t1 = texCoordinateBuffer[ triOffset ]; float2 t2 = texCoordinateBuffer[ triOffset + 1 ]; float2 t3 = texCoordinateBuffer[ triOffset + 2 ]; float2 texCoord = interploted.x * t1 + interploted.y * t2 + interploted.z * t3; texCoord = texCoord - floor( texCoord ); if( texCoord.x < 0.0f ) texCoord.x += 1.0f; if( texCoord.y < 0.0f ) texCoord.y += 1.0f; //load the texture float4* imgData = customTexture + textureOffset[(int)matprop.x]; int x = imgData[0].y * texCoord.x ; int y = imgData[0].z * texCoord.y ; int texOffset = y * imgData[0].y + x + 1; diffuse = diffuse * (*(imgData + texOffset)) ; } //initialize the image buffer color = ambient; //shade the pixels for( int i = 0 ; i < 2 ; i++ ) { if( lightBuffer[i].w < 0.01f ) continue; //the light direction float4 lightDir = intersected[tid] - lightBuffer[i]; //check if the point is in the shadow float shadowLen = 0.98f * d_length(lightDir); d_normalize( &lightDir ); //the dot product float dotProduct = d_dot( lightDir , normal ); if( dotProduct > 0.0f ) continue; { float4 shadowFactor = kernelTraverseRay( kdTree , indexMap , offsetIndexBuffer , vertexBuffer , lightBuffer[i] , lightDir , shadowLen ); if( shadowFactor.w >= 0.0f ) continue; } //the light density float lightDensity = d_clamp( -1.0f * dotProduct ) * lightBuffer[i].w; //load the density of current pixel color += diffuse * lightDensity ; //add specular if possible if( specular.w > 0 ) { //reflect direction float4 reflectDir = d_reflect( lightDir , normal ); d_normalize( &reflectDir ); //get the dot product float d = d_clamp(-d_dot( reflectDir , rayDir[tid] )); if( d > 0 ) color += pow( d , specular.w ) * specular; } } int offset = offsetBuffer[tid]; imageBuffer[offset] = d_saturate( imageBuffer[offset] + d_saturate( color * density ) ); } //generate next level rays __global__ void kernelGenerateNextLevelRays( float4* materialInfo , float4* intersected , float4* backNormalBuffer , float4* rayOri , float4* rayDir , int rayNumber , float4* destRayOri , float4* destRayDir , int* markedBuffer ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= rayNumber ) return; //set marked buffer zero markedBuffer[tid] = 0; //load the intersected point float4 intersectedPoint = intersected[tid]; //get the intersected triangle index int triIndex = (int)intersectedPoint.w; if( triIndex < 0 ) return; //load the normal float4 normal = backNormalBuffer[tid]; //get the material index int matIndex = (int)normal.w; //get the material float4 matInfo = materialInfo[4*matIndex+3]; //load the ray direction float4 ori = rayOri[tid]; float4 dir = rayDir[tid]; //if there is reflection , mark result as true if( matInfo.y > 0 ) { float4 reflectDir = d_reflect( dir , normal ); d_normalize( &reflectDir ); reflectDir.w = dir.w * matInfo.y; destRayDir[tid] = reflectDir; destRayOri[tid] = intersectedPoint + 
reflectDir * 0.1f; destRayOri[tid].w = ori.w; markedBuffer[tid] = 1; }else if( matInfo.z > 0 ) { float4 refractDir = d_refract( dir , normal , 1.0f / matInfo.w ); d_normalize( &refractDir ); refractDir.w = dir.w * matInfo.z; destRayDir[tid] = refractDir; destRayOri[tid] = intersectedPoint + refractDir * 0.02f; destRayOri[tid].w = ori.w; markedBuffer[tid] = 1; } } //copy new rays __global__ void kernelCopyNewRays( float4* srcRayOri , float4* srcRayDir , int* scanResult , int rayNumber , float4* destRayOri , float4* destRayDir , int* offsets ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= rayNumber ) return; //load the offset int offset = scanResult[tid]; if( offset != scanResult[tid+1] ) { //set the result destRayOri[offset] = srcRayOri[tid]; destRayDir[offset] = srcRayDir[tid]; offsets[offset] = (int)srcRayOri[tid].w; } } //Do scan on GPU __global__ void kernelScan( int* data , int number , int oBlockRes , int* blockRes ) { //the shared memory __shared__ int sharedMem[512]; //get the thread id int ltid = threadIdx.x; int gtid = ltid + blockDim.x * blockIdx.x; //the block sum int blocksum = 0; //zero the rest of the memory if( 2 * gtid >= number ) { data[ 2 * gtid ] = 0; data[ 2 * gtid + 1 ] = 0; }else if( 2 * gtid == number - 1 ) data[ 2 * gtid + 1 ] = 0; //Load the data into the shared memory sharedMem[2*ltid] = data[2*gtid]; sharedMem[2*ltid+1] = data[2*gtid+1]; //the offset int offset = 1; for( int d = 256 ; d > 1 ; d >>= 1 ) { //sync the threads in a group __syncthreads(); if( ltid < d ) { int ai = offset * ( 2 * ltid + 1 ) - 1; int bi = ai + offset; sharedMem[bi] += sharedMem[ai]; } offset *= 2; } //the block sum blocksum = sharedMem[511] + sharedMem[255]; //clear the last element if( ltid == 0 ) { sharedMem[511] = sharedMem[255]; sharedMem[255] = 0; } for( int d = 2 ; d < 512 ; d *= 2 ) { __syncthreads(); offset >>= 1; if( ltid < d ) { int ai = offset * ( 2 * ltid + 1 ) - 1 ; int bi = ai + offset ; int t = sharedMem[ai]; sharedMem[ai] = sharedMem[bi]; sharedMem[bi] += t; } } __syncthreads(); data[ 2 * gtid ] = sharedMem[ 2 * ltid ]; data[ 2 * gtid + 1 ] = sharedMem[ 2 * ltid + 1 ]; //Output Block Result if( oBlockRes > 0 ) { if( ltid == 0 ) { //copy the result blockRes[blockIdx.x] = blocksum; } } } //Add the block result to the segmented scan result __global__ void kernelUniformAdd( int* data , int* blockResult ) { //get the thread id int ltid = threadIdx.x; int gtid = ltid + blockDim.x * blockIdx.x; //add the result data[gtid] += blockResult[gtid/512]; } //clear the noise of the image __global__ void kernelClearNoise( float4* imgData , int width , int height , float4* targetData ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= width * height ) return; //threshold float threshold = 0.4f; //the difference int difference = 0; //current index int currentIndex = tid; int leftIndex = tid - 1; int rightIndex = tid + 1; int upIndex = tid - width ; int downIndex = tid + width ; //the coordinate int i = tid % width; int j = tid / width; //current color float4 color = imgData[currentIndex]; float4 sum = make_float4( 0 , 0 , 0 , 0 ); if( i > 0 ) { if( d_length( color - imgData[leftIndex] ) > threshold ) difference++; sum += imgData[leftIndex]; } if( i < width - 1 ) { if( d_length( color - imgData[rightIndex] ) > threshold ) difference++; sum += imgData[rightIndex]; } if( j > 0 ) { if( d_length( color - imgData[upIndex] ) > threshold ) difference++; sum += imgData[upIndex]; } if( j 
< height - 1 ) { if( d_length( color - imgData[downIndex] ) > threshold ) difference++; sum += imgData[downIndex]; } if( difference >= 2 ) color = sum * 0.25f; targetData[tid] = color; } //////////////////////////////////////////////////////////////////////////////////////////////////// //initialize buffer extern "C" void cudaInitBuffer( float4* buffer , int* markedBuffer , int pixelNum ) { //the block number int threadNum = 256; int blockNum = ( pixelNum + threadNum - 1 ) / threadNum; //call the kenrel hipLaunchKernelGGL(( kernelInitBuffer), dim3(blockNum),dim3(threadNum), 0, 0, buffer , markedBuffer , pixelNum ); } //generate primary ray intersected result extern "C" void cudaGenerateIntersectedPoint( float4* rayOri , float4* rayDir , float4* vertexBuffer , int rayNum , int* index , float4* result ) { //the block number int threadNum = 256; int blockNum = ( rayNum + threadNum - 1 ) / threadNum; //call the kernel hipLaunchKernelGGL(( kernelGenerateIntersectedPoint), dim3(blockNum) , dim3(threadNum), 0, 0, rayOri , rayDir , vertexBuffer , rayNum , index , result ); } //Generate primary rays extern "C" void cudaGeneratePrimaryRays( float4 viewInfo , float* invViewMatrix , float4* rayOri , float4* rayDir ) { //get the number of data int rayNum = (int)( viewInfo.x * viewInfo.y ); //the block number int threadNum = 256; int blockNum = ( rayNum + threadNum - 1 ) / threadNum; //call the kernel hipLaunchKernelGGL(( kernelGeneratePrimaryRays), dim3(blockNum) , dim3(threadNum), 0, 0, viewInfo , invViewMatrix , rayOri , rayDir ); } //get intersected point extern "C" void cudaGetIntersectedPoint( float4* rayOri , float4* rayDir , float4* kdTree , int* indexMap , int* offsetBuffer , float4* vertexBuffer , int rayNumber , float4* result ) { //the block and thread number int threadNum = 256; int blockNum = ( rayNumber + threadNum - 1 ) / threadNum ; //call the kernel hipLaunchKernelGGL(( kernelGetIntersectedPoint), dim3(blockNum) , dim3(threadNum), 0, 0, rayOri , rayDir , kdTree , indexMap , offsetBuffer , vertexBuffer , rayNumber , result ); } //do pixel shader extern "C" void cudaPixelShader( float4* interseced , float4* vertexBuffer , float4* normalBuffer , float2* texCoordinateBuffer , float4* kdTree , int* indexMap , int* offsetIndexBuffer , float4* lightBuffer , int* attributeBuffer , float4* materialBuffer , int* textureOffset , float4* customTexture , int pixelNum , float4* rayDir , int* offsetBuffer , float4* destNormalBuffer , float4* imageBuffer ) { //the block and thread number int threadNum = 256; int blockNum = ( pixelNum + threadNum - 1 ) / threadNum ; //call the kernel hipLaunchKernelGGL(( kernelPixelShader), dim3(blockNum) , dim3(threadNum), 0, 0, interseced , vertexBuffer , normalBuffer , texCoordinateBuffer , kdTree , indexMap , offsetIndexBuffer , lightBuffer , attributeBuffer , materialBuffer , textureOffset , customTexture , pixelNum , rayDir , offsetBuffer , destNormalBuffer , imageBuffer ); } //generate next level rays extern "C" void cudaGenerateNextLevelRays( float4* materialInfo , float4* intersected , float4* backNormalBuffer , float4* rayOri , float4* rayDir , int rayNumber , float4* destRayOri , float4* destRayDir , int* markedBuffer ) { //the block and thread number int threadNum = 256; int blockNum = ( rayNumber + threadNum - 1 ) / threadNum ; //call the kernel hipLaunchKernelGGL(( kernelGenerateNextLevelRays), dim3(blockNum) , dim3(threadNum), 0, 0, materialInfo , intersected , backNormalBuffer , rayOri , rayDir , rayNumber , destRayOri , destRayDir , markedBuffer ); } 
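//NOTE: g_ScanSum is declared at the top of this file but never allocated anywhere in it.
//The helper below is only a sketch of how those level buffers could be sized for cudaScan:
//each level stores one partial sum per 512-element block of the level beneath it, padded up
//to a whole block because kernelScan zero-fills past the element count. The function name is
//not part of the original project, and two levels are enough while maxNum stays below 512^3.
extern "C" void cudaAllocScanSum( int maxNum )
{
	//number of 512-element blocks produced when scanning maxNum elements
	int blocks0 = ( maxNum + 511 ) / 512;
	//pad to a whole block so the recursive pass can scan the buffer in place
	int size0 = ( ( blocks0 + 511 ) / 512 ) * 512;
	//block sums of the level 0 buffer, padded the same way
	int blocks1 = ( size0 + 511 ) / 512;
	int size1 = ( ( blocks1 + 511 ) / 512 ) * 512;
	hipMalloc( (void**)&g_ScanSum[0] , size0 * sizeof( int ) );
	hipMalloc( (void**)&g_ScanSum[1] , size1 * sizeof( int ) );
}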
//do scan on gpu extern "C" void cudaScan( int* data , int num , int level ) { /* //allocate the number of data int* cpuData = new int[num]; //pass the data from gpu to cpu hipMemcpy( cpuData , data , sizeof( int ) * ( num - 1 ) , hipMemcpyDeviceToHost ); int last = 0; for( int i = 0 ; i < num ; i++ ) { int oldLast = last; last += cpuData[i]; cpuData[i] = oldLast; } //pass the data back from cpu to gpu hipMemcpy( data , cpuData , sizeof( int ) * num , hipMemcpyHostToDevice ); //delete the data delete[] cpuData;*/ //the dimension of the kernel dim3 threads( 256 ); dim3 blocks( ( num + 511 ) / 512 ); //call the kernel hipLaunchKernelGGL(( kernelScan), dim3(blocks) , dim3(threads), 0, 0, data , num , 1 , g_ScanSum[level] ); //scan the block Result if( num <= 262144 ) hipLaunchKernelGGL(( kernelScan), dim3(1) , dim3(threads), 0, 0, g_ScanSum[level] , blocks.x , -1 , data ); else cudaScan( g_ScanSum[level] , blocks.x , level + 1 ); //add the offset threads.x = 512; hipLaunchKernelGGL(( kernelUniformAdd), dim3(blocks) , dim3(threads) , 0, 0, data , g_ScanSum[level] ); } //copy new rays extern "C" void cudaCopyNewRays( float4* srcRayOri , float4* srcRayDir , int* scanResult , int rayNumber , float4* destRayOri , float4* destRayDir , int* offsets ) { //the block and thread number int threadNum = 256; int blockNum = ( rayNumber + threadNum - 1 ) / threadNum ; //call the kernel hipLaunchKernelGGL(( kernelCopyNewRays), dim3(blockNum) , dim3(threadNum), 0, 0, srcRayOri , srcRayDir , scanResult , rayNumber , destRayOri , destRayDir , offsets ); } //clear the noise of the image extern "C" void cudaClearNoise( float4* imgData , int width , int height , float4* targetData ) { //the block and thread number int threadNum = 256; int blockNum = ( width * height + 255 ) / 256; //call the kernel hipLaunchKernelGGL(( kernelClearNoise), dim3(blockNum) , dim3(threadNum), 0, 0, imgData , width , height , targetData ); }
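//NOTE: the host loop that chains these entry points per bounce is not part of this file.
//The sketch below covers only the compaction step implied by cudaGenerateNextLevelRays,
//cudaScan and cudaCopyNewRays: scan the 0/1 marks, read the surviving ray count back, then
//gather the new rays. The helper and all buffer names are placeholders, not code from the
//original project.
extern "C" int cudaCompactRays( float4* srcRayOri , float4* srcRayDir , int* markedBuffer ,
								int rayNumber , float4* destRayOri , float4* destRayDir , int* offsets )
{
	if( rayNumber <= 0 )
		return 0;
	//remember the last mark before the exclusive scan overwrites it
	int lastMark = 0;
	hipMemcpy( &lastMark , markedBuffer + rayNumber - 1 , sizeof( int ) , hipMemcpyDeviceToHost );
	//exclusive scan of the marks in place ( level 0 of the block-sum hierarchy )
	cudaScan( markedBuffer , rayNumber , 0 );
	//the total count is the last exclusive prefix plus the last mark
	int lastPrefix = 0;
	hipMemcpy( &lastPrefix , markedBuffer + rayNumber - 1 , sizeof( int ) , hipMemcpyDeviceToHost );
	//compact the surviving rays to the front of the destination buffers
	cudaCopyNewRays( srcRayOri , srcRayDir , markedBuffer , rayNumber , destRayOri , destRayDir , offsets );
	return lastPrefix + lastMark;
}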
0d43fb10c18b944a626d008eac67f9f7574f589a.cu
/* * FileName: RayTracer_Kernel.cu * * Programmer: Jiayin Cao */ //the sum for scan int* g_ScanSum[2]; //some helper functions __device__ void d_normalize( float4* v ) { float s = v->x * v->x + v->y * v->y + v->z * v->z; s = sqrt(s); v->x /= s; v->y /= s; v->z /= s; } //cross product __device__ float4 d_cross( const float4& v1 , const float4& v2 ) { float4 r; r.x = v1.y * v2.z - v1.z * v2.y; r.y = v1.z * v2.x - v1.x * v2.z; r.z = v1.x * v2.y - v1.y * v2.x; r.w = 0.0f; return r; } //clamp the value __device__ float d_clamp( const float v ) { if( v > 1.0f ) return 1.0f; if( v < 0.0f ) return 0.0f; return v; } //clamp the float4 __device__ float4 d_saturate( const float4& v ) { return make_float4( d_clamp( v.x ) , d_clamp( v.y ) , d_clamp( v.z ) , d_clamp( v.w ) ); } //dot product __device__ float d_dot( const float4& v1 , const float4& v2 ) { return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z ; } //the length of the vector __device__ float d_length( const float4& v ) { return sqrt( v.x * v.x + v.y * v.y + v.z * v.z ); } //define some useful operators for float4 __device__ float4 operator+ ( const float4& v1 , const float4& v2 ) { return make_float4( v1.x + v2.x , v1.y + v2.y , v1.z + v2.z , v1.w + v2.w ); } __device__ float4 operator- ( const float4& v1 , const float4& v2 ) { return make_float4( v1.x - v2.x , v1.y - v2.y , v1.z - v2.z , v1.w - v2.w ); } __device__ float4 operator* ( const float4& v , const float d ) { return make_float4( v.x * d , v.y * d , v.z * d , v.w * d ); } __device__ float4 operator* ( const float d , const float4& v ) { return make_float4( v.x * d , v.y * d , v.z * d , v.w * d ); } __device__ float4 operator* ( const float4& v1 , const float4& v2 ) { return make_float4( v1.x * v2.x , v1.y * v2.y , v1.z * v2.z , v1.w * v2.w ); } __device__ float4 operator+= ( float4& v1 , const float4& v2 ) { v1 = v1 + v2; return v1; } __device__ float2 operator * ( const float d , const float2& v ) { return make_float2( d * v.x , d * v.y ); } __device__ float2 operator + ( const float2& v1 , const float2& v2 ) { return make_float2( v1.x + v2.x , v1.y + v2.y ); } __device__ float2 operator - ( const float2& v1 , const float2& v2 ) { return make_float2( v1.x - v2.x , v1.y - v2.y ); } __device__ float2 floor( const float2& v ) { int x = (int) v.x ; int y = (int) v.y ; return make_float2( x , y ); } //reflect direction __device__ float4 d_reflect( const float4& dir , const float4& normal ) { float dotProduct = ( -2.0f ) * d_dot( dir , normal ); float4 r = dir + dotProduct * normal; return make_float4( r.x , r.y , r.z , 0.0f ); } //refraction direction __device__ float4 d_refract( const float4& dir , float4 normal , float rate ) { float4 r; if( d_dot( dir , normal ) > 0 ) { normal = -1.0f * normal; rate = 1.0f / rate; } float cos = -1.0f * d_dot( dir , normal ); float t = 1 - rate * rate * ( 1 - cos * cos ); if( t < 0 ) { r = d_reflect( dir , normal ); }else { float cos2 = sqrt( t ); r = rate * dir + ( rate * cos - cos2 ) * normal ; } return r; } //check if the ray intersects with bounding box __device__ float4 kernelIntersectBoundingBox( float4& ori , float4& dir , float4& min , float4& max , float length ) { //the result float4 result = make_float4( 0.0f , 9999999.0f , 0.0f , 0.0f ); //limit the maxium value if( length > 0 ) result.y = length; //the variables float t1 , t2; if( fabs( dir.x ) < 0.0000001f ) { if( ori.x > max.x || ori.x < min.x ) return result; }else { t1 = ( max.x - ori.x ) / dir.x; t2 = ( min.x - ori.x ) / dir.x; if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; } //clamp if( 
t1 > result.x ) result.x = t1; if( t2 < result.y ) result.y = t2; if( result.x > result.y ) return result; } if( fabs( dir.y ) < 0.0000001f ) { if( ori.y > max.y || ori.y < min.y ) return result; }else { t1 = ( max.y - ori.y ) / dir.y; t2 = ( min.y - ori.y ) / dir.y; if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; } //clamp if( t1 > result.x ) result.x = t1; if( t2 < result.y ) result.y = t2; if( result.x > result.y ) return result; } if( fabs( dir.y ) < 0.0000001f ) { if( ori.z > max.z || ori.z < min.z ) return result; }else { t1 = ( max.z - ori.z ) / dir.z; t2 = ( min.z - ori.z ) / dir.z; if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; } //clamp if( t1 > result.x ) result.x = t1; if( t2 < result.y ) result.y = t2; if( result.x > result.y ) return result; } //enable the intersected point result.z = 1.0f; return result; } //check if the ray intersects with a plane __device__ float4 kernelIntersectPlane( const float4& v1 , const float4& v2 , const float4& v3 , const float4& ori , const float4& dir ) { //w : >= 0 ( intersected point enable ) , < 0 ( disable ) float4 result = make_float4( 0.0f , 0.0f , 0.0f , 0.0f ); //get the normal of the plane float4 normal = d_cross( v2 - v1 , v3 - v1 ); //get the factor float t = d_dot( normal , ori - v1 ) / d_dot( normal , dir ); //set the result result = ori - t * dir; if( t <= 0.0f ) result.w = -t; else result.w = -1; return result; } //check if the ray intersects with a triangle __device__ float4 kernelIntersectTriangle( const float4& v1 , const float4& v2 , const float4& v3 , const float4& ori , const float4& dir ) { //the result float4 result = kernelIntersectPlane( v1 , v2 , v3 , ori , dir ); if( result.w < 0 ) return result; //get the factor float4 d1 = d_cross( result - v2 , v1 - v2 ); float4 d2 = d_cross( result - v3 , v2 - v3 ); float4 d3 = d_cross( result - v1 , v3 - v1 ); float f1 = d_dot( d1 , d2 ); float f2 = d_dot( d2 , d3 ); if( !( f1 >= -0.000000000000001f && f2 >= -0.000000000000001f ) ) result.w = -1.0f; return result; } //check if the current point is in the bounding box __device__ int kernelPointInBoundingBox( const float4& p , const float4& min , const float4& max ) { float threshold = 0.00001f; if( p.x < min.x - threshold || p.y < min.y - threshold || p.z < min.z - threshold || p.x > max.x + threshold || p.y > max.y + threshold || p.z > max.z + threshold ) return false; return true; } //do interplotation __device__ float4 kernelInterploted( const float4& v1 , const float4& v2 , const float4& v3 , const float4& intersected ) { //get the vectors float4 e1 = intersected - v1; float4 e2 = intersected - v2; float4 e3 = intersected - v3; //compute the areas float4 area; area.x = d_length( d_cross( e2 , e3 ) ); area.y = d_length( d_cross( e3 , e1 ) ); area.z = d_length( d_cross( e1 , e2 ) ); float d = 1.0f / ( area.x + area.y + area.z ); return area * d; } //clear and initialize buffer __global__ void kernelInitBuffer( float4* buffer , int* markedBuffer , int pixelNum ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= pixelNum ) return; buffer[tid] = make_float4( 0.0f , 0.0f , 0.0f , 0.0f ); markedBuffer[tid] = tid; } //generate primary ray intersected result __global__ void kernelGenerateIntersectedPoint( float4* rayOri , float4* rayDir , float4* vertexBuffer , int rayNum , int* index , float4* result ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= rayNum ) return; //Load the vertex int triId = index[tid]; //get the vertex int 
id = 3 * triId; float4 v1 = vertexBuffer[id]; float4 v2 = vertexBuffer[id+1]; float4 v3 = vertexBuffer[id+2]; //ray ori and dir float4 ori = rayOri[tid]; float4 dir = rayDir[tid]; //get the intersected result result[tid] = kernelIntersectPlane( v1 , v2 , v3 , ori , dir ); result[tid].w = triId; } //Generate primary rays __global__ void kernelGeneratePrimaryRays( float4 viewInfo , float* invViewMatrix , float4* rayOri , float4* rayDir ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= (int)viewInfo.x * (int)viewInfo.y ) return; // get the pixel coorindate first uint2 coord; coord.x = tid % (int) viewInfo.x; coord.y = tid / (int)viewInfo.x; // compute the vector of the ray in screen space float2 v; v.x = ( ( ( 2.0f * coord.x ) / viewInfo.x ) - 1.0f ) / viewInfo.z; v.y = -1.0f * ( ( ( 2.0f * coord.y ) / viewInfo.y ) - 1.0f ) / viewInfo.w; //copy the original point of the rays rayOri[tid] = make_float4( invViewMatrix[12] , invViewMatrix[13] , invViewMatrix[14] , tid ); //compute the direction of the ray float4 dir; dir.x = ( v.x * invViewMatrix[0] + v.y * invViewMatrix[4] + invViewMatrix[8] ); dir.y = ( v.x * invViewMatrix[1] + v.y * invViewMatrix[5] + invViewMatrix[9] ); dir.z = ( v.x * invViewMatrix[2] + v.y * invViewMatrix[6] + invViewMatrix[10] ); dir.w = 0.0f; d_normalize( &dir ); rayDir[tid] = make_float4( dir.x , dir.y , dir.z , 1.0f ); } //traverse the ray through kd-tree __device__ float4 kernelTraverseRay( float4* kdTree , int* indexMap , int* offsetBuffer , float4* vertexBuffer , float4& rayOri , float4& rayDir , float length ) { //the intersected result float4 result = make_float4( 0.0f , 0.0f , 0.0f , -1.0f ); //tree node information float4 header; float4 splitInfo; //the bounding box float4 minBB = kdTree[2]; float4 maxBB = kdTree[3]; //check if the ray intersects with the current bounding box of the root result = kernelIntersectBoundingBox( rayOri , rayDir , minBB , maxBB , length ); //if the ray doesn't cross the kd-tree , just return if( result.z < 0.5f ) { result = make_float4( 0.0f , 0.0f , 0.0f , -1.0f ); return result; } //current traversing node int currentNodeIndex = 0; //the mask to mark the traversed node unsigned int mask = 0; //current traverse depth int currentTraverseDepth = 0; //current inPonit when traversing the node float4 inPoint = rayOri + result.x * rayDir ; while( currentTraverseDepth >= 0 ) { //traverse the current node do { //the current node offset int currentNodeOffset = currentNodeIndex * 4; //get the current node information header = kdTree[ currentNodeOffset ]; splitInfo = kdTree[currentNodeOffset + 1 ]; //check if it's a leaf node if( splitInfo.x < 0 ) break; //get the split axis int splitAxis = (int) splitInfo.x; //get the pointer of the inPoint float sPos = 0.0f; if( splitAxis == 0 ) sPos = inPoint.x; else if( splitAxis == 1 ) sPos = inPoint.y; else if( splitAxis == 2 ) sPos = inPoint.z; //update the virtual stack and traverse the node if( splitInfo.y > sPos ) currentNodeIndex = (int)header.y; else currentNodeIndex = (int)header.z; //increase the current traverse depth currentTraverseDepth++; }while( true ); //get the offset and triangle number int triOffset = offsetBuffer[currentNodeIndex]; int triNumber = (int)header.w; //min value float minFactor = 9999999.0f; if( length > 0 ) minFactor = length; //triangle index int oriTriIndex = -1; //the bounding box minBB = kdTree[currentNodeIndex*4+2]; maxBB = kdTree[currentNodeIndex*4+3]; //intersect with the current triangles for( int i = 0 ; i < 
triNumber ; i++ ) { //get the triangles int triIndex = indexMap[triOffset+i]; //get the vertex float4 v1 = vertexBuffer[3*triIndex]; float4 v2 = vertexBuffer[3*triIndex+1]; float4 v3 = vertexBuffer[3*triIndex+2]; //get the intersected point result = kernelIntersectTriangle( v1 , v2 , v3 , rayOri , rayDir ); //limit the factor if( result.w > 0.0f && result.w < minFactor ) { if( kernelPointInBoundingBox( result , minBB , maxBB ) ) { minFactor = result.w; oriTriIndex = triIndex; if( length > 0 ) break; } } } if( oriTriIndex >= 0 ) { result = rayOri + minFactor * rayDir; result.w = (float)oriTriIndex; return result; } //back track here while( currentTraverseDepth >= 0 ) { if( currentTraverseDepth == 0 ) return make_float4( 0 , 0 , 0 , -1.0f ); //get the current mask if( mask & ( 0x00000001 << currentTraverseDepth ) ) { //update the mask mask &= ~(0x00000001 << currentTraverseDepth ); //decrease the current depth; currentTraverseDepth--; //get to the father node currentNodeIndex = (int)kdTree[ 4 * currentNodeIndex ].x; //continue to next level continue; } //check the other node int otherNode = currentNodeIndex + 1; if( currentNodeIndex % 2 == 0 ) otherNode -= 2; //get the bounding box of the other node int otherNodeOffset = 4 * otherNode; minBB = kdTree[ otherNodeOffset + 2 ]; maxBB = kdTree[ otherNodeOffset + 3 ]; //get the intersected result float4 bi = kernelIntersectBoundingBox( rayOri , rayDir , minBB , maxBB , length ); if( bi.z > 0.5f ) { //update the current traverse node currentNodeIndex = otherNode; //update the inPoint inPoint = rayOri + bi.x * rayDir ; //update the mask mask |= 0x00000001 << currentTraverseDepth; break; }else { //update the mask mask &= ~( 0x00000001 << currentTraverseDepth ); //decrease current depth currentTraverseDepth--; //get to the father node currentNodeIndex = (int) kdTree[ 4 * currentNodeIndex ].x; } } } result.w = -1.0f; return result; } //get the interseced point __global__ void kernelGetIntersectedPoint( float4* rayOri , float4* rayDir , float4* kdTree , int* indexMap , int* offsetBuffer , float4* vertexBuffer , int rayNumber , float4* result ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= rayNumber ) return; //get the triangle result[tid] = kernelTraverseRay( kdTree , indexMap , offsetBuffer , vertexBuffer , rayOri[tid] , rayDir[tid] , -1.0f ); } //do pixel shader here __global__ void kernelPixelShader( float4* intersected , float4* vertexBuffer , float4* normalBuffer , float2* texCoordinateBuffer , float4* kdTree , int* indexMap , int* offsetIndexBuffer, float4* lightBuffer , int* attributeBuffer , float4* materialBuffer , int* textureOffset , float4* customTexture , int pixelNum , float4* rayDir , int* offsetBuffer , float4* destNormalBuffer , float4* imageBuffer ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= pixelNum ) return; //get the triangle index int triIndex = (int)intersected[tid].w; int triOffset = 3 * triIndex; float4 color = make_float4( 0.0f , 0.0f , 0.0f , 0.0f ); //load the density of the pixel if( triIndex < 0 ) return; //get the material index int matIndex = attributeBuffer[triIndex]; //the material buffer float4 ambient = materialBuffer[ 4 * matIndex ]; float4 diffuse = materialBuffer[ 4 * matIndex + 1 ]; float4 specular = materialBuffer[ 4 * matIndex + 2 ]; float4 matprop = materialBuffer[ 4 * matIndex + 3 ]; //load the vertex float4 v1 = vertexBuffer[ triOffset ]; float4 v2 = vertexBuffer[ triOffset + 1 ]; float4 v3 = 
vertexBuffer[ triOffset + 2 ]; //get the interploted float4 interploted = kernelInterploted( v1 , v2 , v3 , intersected[tid] ); //get the normal float4 n1 = normalBuffer[ triOffset ]; float4 n2 = normalBuffer[ triOffset + 1 ]; float4 n3 = normalBuffer[ triOffset + 2 ]; float4 normal = n1 * interploted.x + n2 * interploted.y + n3 * interploted.z; d_normalize( &normal ); //update the normal buffer destNormalBuffer[tid] = normal; destNormalBuffer[tid].w = matIndex; //the density for the pixel float density = rayDir[tid].w; if( matprop.x > -0.5f ) { //load the texture coordinate float2 t1 = texCoordinateBuffer[ triOffset ]; float2 t2 = texCoordinateBuffer[ triOffset + 1 ]; float2 t3 = texCoordinateBuffer[ triOffset + 2 ]; float2 texCoord = interploted.x * t1 + interploted.y * t2 + interploted.z * t3; texCoord = texCoord - floor( texCoord ); if( texCoord.x < 0.0f ) texCoord.x += 1.0f; if( texCoord.y < 0.0f ) texCoord.y += 1.0f; //load the texture float4* imgData = customTexture + textureOffset[(int)matprop.x]; int x = imgData[0].y * texCoord.x ; int y = imgData[0].z * texCoord.y ; int texOffset = y * imgData[0].y + x + 1; diffuse = diffuse * (*(imgData + texOffset)) ; } //initialize the image buffer color = ambient; //shade the pixels for( int i = 0 ; i < 2 ; i++ ) { if( lightBuffer[i].w < 0.01f ) continue; //the light direction float4 lightDir = intersected[tid] - lightBuffer[i]; //check if the point is in the shadow float shadowLen = 0.98f * d_length(lightDir); d_normalize( &lightDir ); //the dot product float dotProduct = d_dot( lightDir , normal ); if( dotProduct > 0.0f ) continue; { float4 shadowFactor = kernelTraverseRay( kdTree , indexMap , offsetIndexBuffer , vertexBuffer , lightBuffer[i] , lightDir , shadowLen ); if( shadowFactor.w >= 0.0f ) continue; } //the light density float lightDensity = d_clamp( -1.0f * dotProduct ) * lightBuffer[i].w; //load the density of current pixel color += diffuse * lightDensity ; //add specular if possible if( specular.w > 0 ) { //reflect direction float4 reflectDir = d_reflect( lightDir , normal ); d_normalize( &reflectDir ); //get the dot product float d = d_clamp(-d_dot( reflectDir , rayDir[tid] )); if( d > 0 ) color += pow( d , specular.w ) * specular; } } int offset = offsetBuffer[tid]; imageBuffer[offset] = d_saturate( imageBuffer[offset] + d_saturate( color * density ) ); } //generate next level rays __global__ void kernelGenerateNextLevelRays( float4* materialInfo , float4* intersected , float4* backNormalBuffer , float4* rayOri , float4* rayDir , int rayNumber , float4* destRayOri , float4* destRayDir , int* markedBuffer ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= rayNumber ) return; //set marked buffer zero markedBuffer[tid] = 0; //load the intersected point float4 intersectedPoint = intersected[tid]; //get the intersected triangle index int triIndex = (int)intersectedPoint.w; if( triIndex < 0 ) return; //load the normal float4 normal = backNormalBuffer[tid]; //get the material index int matIndex = (int)normal.w; //get the material float4 matInfo = materialInfo[4*matIndex+3]; //load the ray direction float4 ori = rayOri[tid]; float4 dir = rayDir[tid]; //if there is reflection , mark result as true if( matInfo.y > 0 ) { float4 reflectDir = d_reflect( dir , normal ); d_normalize( &reflectDir ); reflectDir.w = dir.w * matInfo.y; destRayDir[tid] = reflectDir; destRayOri[tid] = intersectedPoint + reflectDir * 0.1f; destRayOri[tid].w = ori.w; markedBuffer[tid] = 1; }else if( matInfo.z > 0 
) { float4 refractDir = d_refract( dir , normal , 1.0f / matInfo.w ); d_normalize( &refractDir ); refractDir.w = dir.w * matInfo.z; destRayDir[tid] = refractDir; destRayOri[tid] = intersectedPoint + refractDir * 0.02f; destRayOri[tid].w = ori.w; markedBuffer[tid] = 1; } } //copy new rays __global__ void kernelCopyNewRays( float4* srcRayOri , float4* srcRayDir , int* scanResult , int rayNumber , float4* destRayOri , float4* destRayDir , int* offsets ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= rayNumber ) return; //load the offset int offset = scanResult[tid]; if( offset != scanResult[tid+1] ) { //set the result destRayOri[offset] = srcRayOri[tid]; destRayDir[offset] = srcRayDir[tid]; offsets[offset] = (int)srcRayOri[tid].w; } } //Do scan on GPU __global__ void kernelScan( int* data , int number , int oBlockRes , int* blockRes ) { //the shared memory __shared__ int sharedMem[512]; //get the thread id int ltid = threadIdx.x; int gtid = ltid + blockDim.x * blockIdx.x; //the block sum int blocksum = 0; //zero the rest of the memory if( 2 * gtid >= number ) { data[ 2 * gtid ] = 0; data[ 2 * gtid + 1 ] = 0; }else if( 2 * gtid == number - 1 ) data[ 2 * gtid + 1 ] = 0; //Load the data into the shared memory sharedMem[2*ltid] = data[2*gtid]; sharedMem[2*ltid+1] = data[2*gtid+1]; //the offset int offset = 1; for( int d = 256 ; d > 1 ; d >>= 1 ) { //sync the threads in a group __syncthreads(); if( ltid < d ) { int ai = offset * ( 2 * ltid + 1 ) - 1; int bi = ai + offset; sharedMem[bi] += sharedMem[ai]; } offset *= 2; } //the block sum blocksum = sharedMem[511] + sharedMem[255]; //clear the last element if( ltid == 0 ) { sharedMem[511] = sharedMem[255]; sharedMem[255] = 0; } for( int d = 2 ; d < 512 ; d *= 2 ) { __syncthreads(); offset >>= 1; if( ltid < d ) { int ai = offset * ( 2 * ltid + 1 ) - 1 ; int bi = ai + offset ; int t = sharedMem[ai]; sharedMem[ai] = sharedMem[bi]; sharedMem[bi] += t; } } __syncthreads(); data[ 2 * gtid ] = sharedMem[ 2 * ltid ]; data[ 2 * gtid + 1 ] = sharedMem[ 2 * ltid + 1 ]; //Output Block Result if( oBlockRes > 0 ) { if( ltid == 0 ) { //copy the result blockRes[blockIdx.x] = blocksum; } } } //Add the block result to the segmented scan result __global__ void kernelUniformAdd( int* data , int* blockResult ) { //get the thread id int ltid = threadIdx.x; int gtid = ltid + blockDim.x * blockIdx.x; //add the result data[gtid] += blockResult[gtid/512]; } //clear the noise of the image __global__ void kernelClearNoise( float4* imgData , int width , int height , float4* targetData ) { //get the thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; //limit the thread id if( tid >= width * height ) return; //threshold float threshold = 0.4f; //the difference int difference = 0; //current index int currentIndex = tid; int leftIndex = tid - 1; int rightIndex = tid + 1; int upIndex = tid - width ; int downIndex = tid + width ; //the coordinate int i = tid % width; int j = tid / width; //current color float4 color = imgData[currentIndex]; float4 sum = make_float4( 0 , 0 , 0 , 0 ); if( i > 0 ) { if( d_length( color - imgData[leftIndex] ) > threshold ) difference++; sum += imgData[leftIndex]; } if( i < width - 1 ) { if( d_length( color - imgData[rightIndex] ) > threshold ) difference++; sum += imgData[rightIndex]; } if( j > 0 ) { if( d_length( color - imgData[upIndex] ) > threshold ) difference++; sum += imgData[upIndex]; } if( j < height - 1 ) { if( d_length( color - imgData[downIndex] ) > threshold ) difference++; sum 
+= imgData[downIndex]; } if( difference >= 2 ) color = sum * 0.25f; targetData[tid] = color; } //////////////////////////////////////////////////////////////////////////////////////////////////// //initialize buffer extern "C" void cudaInitBuffer( float4* buffer , int* markedBuffer , int pixelNum ) { //the block number int threadNum = 256; int blockNum = ( pixelNum + threadNum - 1 ) / threadNum; //call the kenrel kernelInitBuffer<<<blockNum,threadNum>>>( buffer , markedBuffer , pixelNum ); } //generate primary ray intersected result extern "C" void cudaGenerateIntersectedPoint( float4* rayOri , float4* rayDir , float4* vertexBuffer , int rayNum , int* index , float4* result ) { //the block number int threadNum = 256; int blockNum = ( rayNum + threadNum - 1 ) / threadNum; //call the kernel kernelGenerateIntersectedPoint<<<blockNum , threadNum>>>( rayOri , rayDir , vertexBuffer , rayNum , index , result ); } //Generate primary rays extern "C" void cudaGeneratePrimaryRays( float4 viewInfo , float* invViewMatrix , float4* rayOri , float4* rayDir ) { //get the number of data int rayNum = (int)( viewInfo.x * viewInfo.y ); //the block number int threadNum = 256; int blockNum = ( rayNum + threadNum - 1 ) / threadNum; //call the kernel kernelGeneratePrimaryRays<<<blockNum , threadNum>>>( viewInfo , invViewMatrix , rayOri , rayDir ); } //get intersected point extern "C" void cudaGetIntersectedPoint( float4* rayOri , float4* rayDir , float4* kdTree , int* indexMap , int* offsetBuffer , float4* vertexBuffer , int rayNumber , float4* result ) { //the block and thread number int threadNum = 256; int blockNum = ( rayNumber + threadNum - 1 ) / threadNum ; //call the kernel kernelGetIntersectedPoint<<<blockNum , threadNum>>>( rayOri , rayDir , kdTree , indexMap , offsetBuffer , vertexBuffer , rayNumber , result ); } //do pixel shader extern "C" void cudaPixelShader( float4* interseced , float4* vertexBuffer , float4* normalBuffer , float2* texCoordinateBuffer , float4* kdTree , int* indexMap , int* offsetIndexBuffer , float4* lightBuffer , int* attributeBuffer , float4* materialBuffer , int* textureOffset , float4* customTexture , int pixelNum , float4* rayDir , int* offsetBuffer , float4* destNormalBuffer , float4* imageBuffer ) { //the block and thread number int threadNum = 256; int blockNum = ( pixelNum + threadNum - 1 ) / threadNum ; //call the kernel kernelPixelShader<<<blockNum , threadNum>>>( interseced , vertexBuffer , normalBuffer , texCoordinateBuffer , kdTree , indexMap , offsetIndexBuffer , lightBuffer , attributeBuffer , materialBuffer , textureOffset , customTexture , pixelNum , rayDir , offsetBuffer , destNormalBuffer , imageBuffer ); } //generate next level rays extern "C" void cudaGenerateNextLevelRays( float4* materialInfo , float4* intersected , float4* backNormalBuffer , float4* rayOri , float4* rayDir , int rayNumber , float4* destRayOri , float4* destRayDir , int* markedBuffer ) { //the block and thread number int threadNum = 256; int blockNum = ( rayNumber + threadNum - 1 ) / threadNum ; //call the kernel kernelGenerateNextLevelRays<<<blockNum , threadNum>>>( materialInfo , intersected , backNormalBuffer , rayOri , rayDir , rayNumber , destRayOri , destRayDir , markedBuffer ); } //do scan on gpu extern "C" void cudaScan( int* data , int num , int level ) { /* //allocate the number of data int* cpuData = new int[num]; //pass the data from gpu to cpu cudaMemcpy( cpuData , data , sizeof( int ) * ( num - 1 ) , cudaMemcpyDeviceToHost ); int last = 0; for( int i = 0 ; i < num ; i++ ) { 
int oldLast = last; last += cpuData[i]; cpuData[i] = oldLast; } //pass the data back from cpu to gpu cudaMemcpy( data , cpuData , sizeof( int ) * num , cudaMemcpyHostToDevice ); //delete the data delete[] cpuData;*/ //the dimension of the kernel dim3 threads( 256 ); dim3 blocks( ( num + 511 ) / 512 ); //call the kernel kernelScan<<<blocks , threads>>>( data , num , 1 , g_ScanSum[level] ); //scan the block Result if( num <= 262144 ) kernelScan<<<1 , threads>>>( g_ScanSum[level] , blocks.x , -1 , data ); else cudaScan( g_ScanSum[level] , blocks.x , level + 1 ); //add the offset threads.x = 512; kernelUniformAdd<<< blocks , threads >>> ( data , g_ScanSum[level] ); } //copy new rays extern "C" void cudaCopyNewRays( float4* srcRayOri , float4* srcRayDir , int* scanResult , int rayNumber , float4* destRayOri , float4* destRayDir , int* offsets ) { //the block and thread number int threadNum = 256; int blockNum = ( rayNumber + threadNum - 1 ) / threadNum ; //call the kernel kernelCopyNewRays<<<blockNum , threadNum>>>( srcRayOri , srcRayDir , scanResult , rayNumber , destRayOri , destRayDir , offsets ); } //clear the noise of the image extern "C" void cudaClearNoise( float4* imgData , int width , int height , float4* targetData ) { //the block and thread number int threadNum = 256; int blockNum = ( width * height + 255 ) / 256; //call the kernel kernelClearNoise<<<blockNum , threadNum>>>( imgData , width , height , targetData ); }
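//NOTE: the flat kdTree buffer is read as four float4 entries per node inside
//kernelTraverseRay. The struct below is only a reading aid inferred from that indexing;
//it is not a type used by the original project.
struct KdTreeNodeView
{
	float4 header;		// x : parent node index , y : left child , z : right child , w : triangle count
	float4 splitInfo;	// x : split axis ( 0 / 1 / 2 , negative marks a leaf ) , y : split position
	float4 minBB;		// bounding box minimum of the node
	float4 maxBB;		// bounding box maximum of the node
};
//so node i occupies kdTree[ 4 * i ] .. kdTree[ 4 * i + 3 ] , offsetBuffer[ i ] gives the first
//slot of its triangle list in indexMap , and header.w triangles follow from that slot.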
c7f1d9706927ebad36e5627637997ac7a1484b53.hip
// !!! This is a file automatically generated by hipify!!! #include "MdiscrKernels.cu.h" #include "HelpersHost.cu.h" #include <thrust/scan.h> #include <thrust/execution_policy.h> #include <hip/hip_runtime.h> __global__ void copyKernel(int* in_array, int* out_array, unsigned int d_size ) { const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x; if(gid < d_size) { out_array[gid] = in_array[gid]; } } int main(int argc, char** argv) { if (argc != 2) { printf("The program takes <num_elems> as argument!\n"); return EXIT_FAILURE; } const unsigned int num_elems = strtoul(argv[1], NULL, 10); typedef Mod4 DISCR; // Allocate memory. typename DISCR::InType* h_in = (typename DISCR::InType*) malloc(num_elems * sizeof(typename DISCR::InType)); typename DISCR::InType* h_out = (typename DISCR::InType*) malloc(num_elems * sizeof(typename DISCR::InType)); { // Initialize array. std::srand(time(NULL)); for(unsigned int i = 0; i < num_elems; i++) { h_in[i] = std::rand() % 20; } } printIntArray(num_elems, "h_in", h_in); int *d_in, *d_out; { // Device allocation. hipMalloc((void**)&d_in, num_elems * sizeof(int)); hipMalloc((void**)&d_out, num_elems * sizeof(int)); // Copy host memory to device. hipMemcpy(d_in, h_in, num_elems * sizeof(int), hipMemcpyHostToDevice); hipDeviceSynchronize(); } // Kernels etc. unsigned int block_size = getBlockSize(num_elems); unsigned int num_blocks = getNumBlocks(num_elems, block_size); int *classes, *indices; typename DISCR::TupleType *columns, *scan_results; typename DISCR::TupleType reduction, offsets; // Allocate memory for the intermediate results. hipMalloc((void**)&classes, num_elems*sizeof(int)); hipMalloc((void**)&indices, num_elems*sizeof(int)); hipMalloc((void**)&columns, num_elems*sizeof(typename DISCR::TupleType)); hipMalloc((void**)&scan_results, num_elems*sizeof(typename DISCR::TupleType)); hipLaunchKernelGGL(( discrKernel<DISCR>), dim3(num_blocks), dim3(block_size), 0, 0, d_in, classes, num_elems); hipDeviceSynchronize(); hipLaunchKernelGGL(( tupleKernel<typename DISCR::TupleType>), dim3(num_blocks), dim3(block_size), 0, 0, classes, columns, num_elems); hipDeviceSynchronize(); thrust::inclusive_scan(thrust::device, columns, columns + num_elems, scan_results); hipDeviceSynchronize(); //thrust::inclusive_scan(thrust::device, d_in, d_in + num_elems, d_out); hipMemcpy(&reduction, &scan_results[num_elems-1], sizeof(typename DISCR::TupleType), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // "Exclusive scan" of the reduction tuple to produce the offsets. unsigned int tmp = 0; for(int k = 0; k < DISCR::TupleType::cardinal; k++) { offsets[k] = tmp; tmp += reduction[k]; } hipLaunchKernelGGL(( indicesKernel<typename DISCR::TupleType>), dim3(num_blocks), dim3(block_size), 0, 0, classes, scan_results, offsets, indices, num_elems); hipDeviceSynchronize(); hipLaunchKernelGGL(( permuteKernel<typename DISCR::InType>), dim3(num_blocks), dim3(block_size), 0, 0, d_in, indices, d_out, num_elems); hipDeviceSynchronize(); hipMemcpy(h_out, d_out, num_elems * sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipFree(classes); hipFree(indices); hipFree(columns); hipFree(scan_results); hipFree(d_in); hipFree(d_out); printIntArray(num_elems, "h_out", h_out); // Validate? free(h_in); free(h_out); return EXIT_SUCCESS; }
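// The "Validate?" note above is left open in this test driver. The check below is a
// host-side sketch only; it assumes DISCR::InType is int, that Mod4 (declared in
// HelpersHost.cu.h, not shown here) classifies a value as value % 4, and that the kernels
// perform a stable partition by class, which is what the per-class scan plus offsets
// compute. The function name is illustrative and is not called from main above.
bool validateMod4Partition( const int* h_in , const int* h_out , unsigned int n ) {
    unsigned int pos = 0;
    bool ok = true;
    // walk the classes in ascending order; elements of each class must appear in h_out
    // in their original relative order
    for (int c = 0; c < 4; c++) {
        for (unsigned int i = 0; i < n; i++) {
            if (h_in[i] % 4 == c) {
                if (pos >= n || h_out[pos] != h_in[i]) ok = false;
                pos++;
            }
        }
    }
    return ok && (pos == n);
}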
c7f1d9706927ebad36e5627637997ac7a1484b53.cu
#include "MdiscrKernels.cu.h" #include "HelpersHost.cu.h" #include <thrust/scan.h> #include <thrust/execution_policy.h> #include <cuda_runtime.h> __global__ void copyKernel(int* in_array, int* out_array, unsigned int d_size ) { const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x; if(gid < d_size) { out_array[gid] = in_array[gid]; } } int main(int argc, char** argv) { if (argc != 2) { printf("The program takes <num_elems> as argument!\n"); return EXIT_FAILURE; } const unsigned int num_elems = strtoul(argv[1], NULL, 10); typedef Mod4 DISCR; // Allocate memory. typename DISCR::InType* h_in = (typename DISCR::InType*) malloc(num_elems * sizeof(typename DISCR::InType)); typename DISCR::InType* h_out = (typename DISCR::InType*) malloc(num_elems * sizeof(typename DISCR::InType)); { // Initialize array. std::srand(time(NULL)); for(unsigned int i = 0; i < num_elems; i++) { h_in[i] = std::rand() % 20; } } printIntArray(num_elems, "h_in", h_in); int *d_in, *d_out; { // Device allocation. cudaMalloc((void**)&d_in, num_elems * sizeof(int)); cudaMalloc((void**)&d_out, num_elems * sizeof(int)); // Copy host memory to device. cudaMemcpy(d_in, h_in, num_elems * sizeof(int), cudaMemcpyHostToDevice); cudaThreadSynchronize(); } // Kernels etc. unsigned int block_size = getBlockSize(num_elems); unsigned int num_blocks = getNumBlocks(num_elems, block_size); int *classes, *indices; typename DISCR::TupleType *columns, *scan_results; typename DISCR::TupleType reduction, offsets; // Allocate memory for the intermediate results. cudaMalloc((void**)&classes, num_elems*sizeof(int)); cudaMalloc((void**)&indices, num_elems*sizeof(int)); cudaMalloc((void**)&columns, num_elems*sizeof(typename DISCR::TupleType)); cudaMalloc((void**)&scan_results, num_elems*sizeof(typename DISCR::TupleType)); discrKernel<DISCR><<<num_blocks, block_size>>>(d_in, classes, num_elems); cudaThreadSynchronize(); tupleKernel<typename DISCR::TupleType><<<num_blocks, block_size>>> (classes, columns, num_elems); cudaThreadSynchronize(); thrust::inclusive_scan(thrust::device, columns, columns + num_elems, scan_results); cudaThreadSynchronize(); //thrust::inclusive_scan(thrust::device, d_in, d_in + num_elems, d_out); cudaMemcpy(&reduction, &scan_results[num_elems-1], sizeof(typename DISCR::TupleType), cudaMemcpyDeviceToHost); cudaThreadSynchronize(); // "Exclusive scan" of the reduction tuple to produce the offsets. unsigned int tmp = 0; for(int k = 0; k < DISCR::TupleType::cardinal; k++) { offsets[k] = tmp; tmp += reduction[k]; } indicesKernel<typename DISCR::TupleType><<<num_blocks, block_size>>> (classes, scan_results, offsets, indices, num_elems); cudaThreadSynchronize(); permuteKernel<typename DISCR::InType><<<num_blocks, block_size>>> (d_in, indices, d_out, num_elems); cudaThreadSynchronize(); cudaMemcpy(h_out, d_out, num_elems * sizeof(int), cudaMemcpyDeviceToHost); cudaThreadSynchronize(); cudaFree(classes); cudaFree(indices); cudaFree(columns); cudaFree(scan_results); cudaFree(d_in); cudaFree(d_out); printIntArray(num_elems, "h_out", h_out); // Validate? free(h_in); free(h_out); return EXIT_SUCCESS; }
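// Worked example of the reduction-to-offsets loop in main above: if the inclusive scan of
// the per-class columns ends with the totals reduction = (3, 5, 2, 6) for classes 0..3, the
// loop yields offsets = (0, 3, 8, 10). Class k therefore starts writing at the sum of the
// counts of all smaller classes, and indicesKernel (defined in MdiscrKernels.cu.h, not
// shown here) presumably adds each element's within-class rank to offsets[k] to obtain its
// final slot before permuteKernel scatters the data.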
sigmoid_cross_entropy_loss_op.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "caffe2/core/context_gpu.h" #include "sigmoid_cross_entropy_loss_op.h" namespace caffe2 { namespace { __global__ void ElementwiseMaxKernel(const int n, float* data, const float a) { CUDA_1D_KERNEL_LOOP(index, n) { data[index] = (data[index] > a) ? data[index] : a; } } __global__ void SigmoidCrossEntropyLossKernel( const int n, const float* logits, const int* targets, float* losses, float* counts) { CUDA_1D_KERNEL_LOOP(index, n) { if (targets[index] == -1) { losses[index] = 0.; counts[index] = 0.; } else { losses[index] = -1. * logits[index] * (targets[index] - (logits[index] >= 0)) + logf( 1 + expf(logits[index] - 2 * logits[index] * (logits[index] >= 0))); counts[index] = 1.; } } } __global__ void SigmoidCrossEntropyLossGradientKernel( const int n, const float* logits, const int* targets, float* d_logits, float* counts) { CUDA_1D_KERNEL_LOOP(index, n) { if (targets[index] == -1) { d_logits[index] = 0.; counts[index] = 0.; } else { d_logits[index] = 1. / (1. + expf(-logits[index])) - targets[index]; counts[index] = 1.; } } } } // namespace template <> bool SigmoidCrossEntropyLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& T = Input(1); CAFFE_ENFORCE( X.size() == T.size(), "Logit and target must have the same size", "(", X.size(), " vs. 
", T.size(), ")"); auto* avg_loss = Output(0, vector<int64_t>(), at::dtype<float>()); counts_.ResizeLike(X); losses_.ResizeLike(X); ReinitializeTensor(&normalizer_, vector<int64_t>(), at::dtype<float>().device(CUDA)); hipLaunchKernelGGL(( SigmoidCrossEntropyLossKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), T.data<int>(), losses_.mutable_data<float>(), counts_.mutable_data<float>()); float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); if (normalize_) { float* normalizer_data = normalizer_.mutable_data<float>(); math::Sum<float, CUDAContext>( counts_.size(), counts_.data<float>(), normalizer_data, &context_); // Prevent division by zero is all counts are zero hipLaunchKernelGGL(( ElementwiseMaxKernel), dim3(CAFFE_GET_BLOCKS(normalizer_.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), normalizer_.size(), normalizer_data, 1e-5); math::Div<float, CUDAContext>( 1, avg_loss_data, normalizer_data, avg_loss_data, &context_); } math::Scale<float, float, CUDAContext>( 1, scale_, avg_loss_data, avg_loss_data, &context_); return true; } template <> bool SigmoidCrossEntropyLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& T = Input(1); auto& d_avg_loss = Input(2); auto* dX = Output(0); dX->ResizeLike(X); counts_.ResizeLike(X); ReinitializeTensor(&normalizer_, vector<int64_t>(), at::dtype<float>().device(CUDA)); hipLaunchKernelGGL(( SigmoidCrossEntropyLossGradientKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), T.data<int>(), dX->mutable_data<float>(), counts_.mutable_data<float>()); if (normalize_) { float* normalizer_data = normalizer_.mutable_data<float>(); math::Sum<float, CUDAContext>( counts_.size(), counts_.data<float>(), normalizer_data, &context_); // Prevent division by zero is all counts are zero hipLaunchKernelGGL(( ElementwiseMaxKernel), dim3(CAFFE_GET_BLOCKS(normalizer_.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), normalizer_.size(), normalizer_data, 1e-5); math::Div<float, CUDAContext>( 1, d_avg_loss.data<float>(), normalizer_data, normalizer_data, &context_); math::Scale<float, float, CUDAContext>( 1, scale_, normalizer_data, normalizer_data, &context_); math::Scale<float, float, CUDAContext>( dX->size(), normalizer_data, dX->data<float>(), dX->mutable_data<float>(), &context_); } else { math::Scale<float, float, CUDAContext>( dX->size(), scale_, dX->data<float>(), dX->mutable_data<float>(), &context_); math::Scale<float, float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } return true; } REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyLoss, SigmoidCrossEntropyLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyLossGradient, SigmoidCrossEntropyLossGradientOp<float, CUDAContext>); } // namespace caffe2
sigmoid_cross_entropy_loss_op.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "caffe2/core/context_gpu.h" #include "sigmoid_cross_entropy_loss_op.h" namespace caffe2 { namespace { __global__ void ElementwiseMaxKernel(const int n, float* data, const float a) { CUDA_1D_KERNEL_LOOP(index, n) { data[index] = (data[index] > a) ? data[index] : a; } } __global__ void SigmoidCrossEntropyLossKernel( const int n, const float* logits, const int* targets, float* losses, float* counts) { CUDA_1D_KERNEL_LOOP(index, n) { if (targets[index] == -1) { losses[index] = 0.; counts[index] = 0.; } else { losses[index] = -1. * logits[index] * (targets[index] - (logits[index] >= 0)) + logf( 1 + expf(logits[index] - 2 * logits[index] * (logits[index] >= 0))); counts[index] = 1.; } } } __global__ void SigmoidCrossEntropyLossGradientKernel( const int n, const float* logits, const int* targets, float* d_logits, float* counts) { CUDA_1D_KERNEL_LOOP(index, n) { if (targets[index] == -1) { d_logits[index] = 0.; counts[index] = 0.; } else { d_logits[index] = 1. / (1. + expf(-logits[index])) - targets[index]; counts[index] = 1.; } } } } // namespace template <> bool SigmoidCrossEntropyLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& T = Input(1); CAFFE_ENFORCE( X.size() == T.size(), "Logit and target must have the same size", "(", X.size(), " vs. 
", T.size(), ")"); auto* avg_loss = Output(0, vector<int64_t>(), at::dtype<float>()); counts_.ResizeLike(X); losses_.ResizeLike(X); ReinitializeTensor(&normalizer_, vector<int64_t>(), at::dtype<float>().device(CUDA)); SigmoidCrossEntropyLossKernel<<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), T.data<int>(), losses_.mutable_data<float>(), counts_.mutable_data<float>()); float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); if (normalize_) { float* normalizer_data = normalizer_.mutable_data<float>(); math::Sum<float, CUDAContext>( counts_.size(), counts_.data<float>(), normalizer_data, &context_); // Prevent division by zero is all counts are zero ElementwiseMaxKernel<<< CAFFE_GET_BLOCKS(normalizer_.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(normalizer_.size(), normalizer_data, 1e-5); math::Div<float, CUDAContext>( 1, avg_loss_data, normalizer_data, avg_loss_data, &context_); } math::Scale<float, float, CUDAContext>( 1, scale_, avg_loss_data, avg_loss_data, &context_); return true; } template <> bool SigmoidCrossEntropyLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& T = Input(1); auto& d_avg_loss = Input(2); auto* dX = Output(0); dX->ResizeLike(X); counts_.ResizeLike(X); ReinitializeTensor(&normalizer_, vector<int64_t>(), at::dtype<float>().device(CUDA)); SigmoidCrossEntropyLossGradientKernel<<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), T.data<int>(), dX->mutable_data<float>(), counts_.mutable_data<float>()); if (normalize_) { float* normalizer_data = normalizer_.mutable_data<float>(); math::Sum<float, CUDAContext>( counts_.size(), counts_.data<float>(), normalizer_data, &context_); // Prevent division by zero is all counts are zero ElementwiseMaxKernel<<< CAFFE_GET_BLOCKS(normalizer_.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(normalizer_.size(), normalizer_data, 1e-5); math::Div<float, CUDAContext>( 1, d_avg_loss.data<float>(), normalizer_data, normalizer_data, &context_); math::Scale<float, float, CUDAContext>( 1, scale_, normalizer_data, normalizer_data, &context_); math::Scale<float, float, CUDAContext>( dX->size(), normalizer_data, dX->data<float>(), dX->mutable_data<float>(), &context_); } else { math::Scale<float, float, CUDAContext>( dX->size(), scale_, dX->data<float>(), dX->mutable_data<float>(), &context_); math::Scale<float, float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } return true; } REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyLoss, SigmoidCrossEntropyLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyLossGradient, SigmoidCrossEntropyLossGradientOp<float, CUDAContext>); } // namespace caffe2
7126120e2516df62e885862472c8dac76667891c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _COPY_KERNEL_H_ #define _COPY_KERNEL_H_ #include <stdio.h> // Keeping this bank conflict ctuff for reference later /* #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) \ cutilBankChecker((reinterpret_cast<float *>(&As[0][0])), (block_size * i + j)) #define BS(i, j) \ cutilBankChecker((reinterpret_cast<float *>(&Bs[0][0])), (block_size * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif */ template <int block_size, typename size_type> __device__ void bp_aos_to_aos_test(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { // TODO: remove this later //c_sz = 8; // Actual thread id size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); size_type dst_base = 0; size_type lt = c_sz / fid_count; size_type loop_term = elem_count/lt; size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; size_type src_base = 0; for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ src_base = t_id*c_sz; dst_base = t_id*c_sz; #pragma unroll for (size_type i = 0; i < lt; ++i){ d_B[dst_base + 0 + i*fid_count] = d_A[src_base + 0 + i*fid_count]; d_B[dst_base + 1 + i*fid_count] = d_A[src_base + 1 + i*fid_count]; d_B[dst_base + 2 + i*fid_count] = d_A[src_base + 2 + i*fid_count]; d_B[dst_base + 3 + i*fid_count] = d_A[src_base + 3 + i*fid_count]; } } } template <int block_size, typename size_type> __device__ void bp_soa_to_soa_test(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); size_type dst_base0 = 0; size_type dst_base1 = elem_count; size_type dst_base2 = elem_count*2; size_type dst_base3 = elem_count*3; size_type src_base0 = 0; size_type src_base1 = elem_count; size_type src_base2 = elem_count*2; size_type src_base3 = elem_count*3; size_type loop_term = (elem_count*fid_count)/c_sz; size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; size_type lt = c_sz / fid_count; for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ #pragma unroll for (size_type i = 0; i < lt; ++i){ d_B[dst_base0 + t_id*lt + i] = d_A[src_base0 + t_id*lt + i]; d_B[dst_base1 + t_id*lt + i] = d_A[src_base1 + t_id*lt + i]; d_B[dst_base2 + t_id*lt + i] = d_A[src_base2 + t_id*lt + i]; d_B[dst_base3 + t_id*lt + i] = d_A[src_base3 + t_id*lt + i]; } } } /* h_0, h_1, h_2, h_3: base pointers for the host data. d_dst: pointer to device memory. elem_size: unused artefact, will be removed. elem_count: the number of elements per field. fid_count: the number of fields. c_sz: the total number of elements (e.g. 8 means 2 elements per 4 fields) to be copied per thread. */ template <int block_size, typename size_type> __device__ void bp_soa_to_aos(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { // Actual thread id size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); // Destination is array of struct layout. // Memory is assumed to be contiguous on device. 
size_type dst_base = 0; size_type src_base0 = 0; size_type src_base1 = elem_count; size_type src_base2 = elem_count*2; size_type src_base3 = elem_count*3; // loop_term: terminating condition for the outer loop, // equal to the total number of elements divided by the number of elements copied per thread. size_type loop_term = (elem_count*fid_count)/c_sz; // inc: increments outer loop by the total number of threads that are available. size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; // lt: inner loop terminating condition. size_type lt = c_sz / fid_count; // Iterate over all available threads, t_id is the logical thread id for indexing into the arrays. //#pragma unroll for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ // Each thread copies a total of c_sz elements, and d_dst is assumed contiguous and // will be AoS layout, so the logical t_id times c_sz results in the correct base // for the iteration of the inner loop. dst_base = t_id*c_sz; // Since lt is the number of full fid_count-sized elements being copied, // t_id*lt gives the correct base index for each field in the host subarrays. // This kernel is hard coded to handle an instance with 4 fields. // Some of the arithmetic could probably be removed from this loop, // but that's not likely to be the limiting factor for performance. #pragma unroll for (size_type i = 0; i < lt; ++i){ d_B[dst_base + 0 + i*fid_count] = d_A[src_base0 + t_id*lt + i]; d_B[dst_base + 1 + i*fid_count] = d_A[src_base1 + t_id*lt + i]; d_B[dst_base + 2 + i*fid_count] = d_A[src_base2 + t_id*lt + i]; d_B[dst_base + 3 + i*fid_count] = d_A[src_base3 + t_id*lt + i]; } } } /* Same as above, except for going from array of struct on cpu to struct of array on GPU. */ template <int block_size, typename size_type> __device__ void bp_aos_to_soa(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { // TODO: remove this later //c_sz = 8; // Actual thread id size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); size_type dst_base0 = 0; size_type dst_base1 = elem_count; size_type dst_base2 = elem_count*2; size_type dst_base3 = elem_count*3; size_type lt = c_sz / fid_count; size_type loop_term = elem_count/lt; size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; size_type src_base = 0; for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ src_base = t_id*c_sz; #pragma unroll for (size_type i = 0; i < lt; ++i){ d_B[dst_base0 + t_id*lt + i] = d_A[src_base + 0 + i*fid_count]; d_B[dst_base1 + t_id*lt + i] = d_A[src_base + 1 + i*fid_count]; d_B[dst_base2 + t_id*lt + i] = d_A[src_base + 2 + i*fid_count]; d_B[dst_base3 + t_id*lt + i] = d_A[src_base + 3 + i*fid_count]; } } } template <int block_size, typename size_type> __device__ void bp_aos_to_soa_single(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { // TODO: remove this later //c_sz = 8; // Actual thread id size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); size_type dst_base0 = 0; size_type dst_base1 = elem_count; size_type dst_base2 = elem_count*2; size_type dst_base3 = elem_count*3; size_type lt = c_sz / fid_count; size_type loop_term = elem_count/lt; size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; size_type src_base = 0; for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ src_base = t_id*c_sz; #pragma unroll for (size_type i = 0; i < lt; 
++i){ d_B[dst_base0 + t_id*lt + i] = d_A[src_base + 0 + i*fid_count]; d_B[dst_base1 + t_id*lt + i] = d_A[src_base + 1 + i*fid_count]; d_B[dst_base2 + t_id*lt + i] = d_A[src_base + 2 + i*fid_count]; d_B[dst_base3 + t_id*lt + i] = d_A[src_base + 3 + i*fid_count]; } } } template <int block_size, typename size_type> __device__ void bp_soa_to_aos_single(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; size_type of = (real_tid % fid_count); size_type offset = of*elem_count; size_type loop_term = elem_count*fid_count; for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ d_B[t_id] = d_A[offset + t_id/4]; } //d_B[dst_base + 0 + i*fid_count] = d_A[src_base0 + t_id*lt + i]; } // C wrappers around our template kernel extern "C" __global__ void bp_soa_to_aos_single(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_soa_to_aos_single<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } extern "C" __global__ void bp_aos_to_soa_single(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_aos_to_soa_single<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } extern "C" __global__ void bp_soa_to_aos(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_soa_to_aos<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } extern "C" __global__ void bp_aos_to_soa(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_aos_to_soa<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } extern "C" __global__ void bp_aos_to_aos_test(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_aos_to_aos_test<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } extern "C" __global__ void bp_soa_to_soa_test(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_soa_to_soa_test<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } #endif // #ifndef _COPY_KERNEL_H_
7126120e2516df62e885862472c8dac76667891c.cu
#ifndef _COPY_KERNEL_H_ #define _COPY_KERNEL_H_ #include <stdio.h> // Keeping this bank conflict ctuff for reference later /* #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) \ cutilBankChecker((reinterpret_cast<float *>(&As[0][0])), (block_size * i + j)) #define BS(i, j) \ cutilBankChecker((reinterpret_cast<float *>(&Bs[0][0])), (block_size * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif */ template <int block_size, typename size_type> __device__ void bp_aos_to_aos_test(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { // TODO: remove this later //c_sz = 8; // Actual thread id size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); size_type dst_base = 0; size_type lt = c_sz / fid_count; size_type loop_term = elem_count/lt; size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; size_type src_base = 0; for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ src_base = t_id*c_sz; dst_base = t_id*c_sz; #pragma unroll for (size_type i = 0; i < lt; ++i){ d_B[dst_base + 0 + i*fid_count] = d_A[src_base + 0 + i*fid_count]; d_B[dst_base + 1 + i*fid_count] = d_A[src_base + 1 + i*fid_count]; d_B[dst_base + 2 + i*fid_count] = d_A[src_base + 2 + i*fid_count]; d_B[dst_base + 3 + i*fid_count] = d_A[src_base + 3 + i*fid_count]; } } } template <int block_size, typename size_type> __device__ void bp_soa_to_soa_test(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); size_type dst_base0 = 0; size_type dst_base1 = elem_count; size_type dst_base2 = elem_count*2; size_type dst_base3 = elem_count*3; size_type src_base0 = 0; size_type src_base1 = elem_count; size_type src_base2 = elem_count*2; size_type src_base3 = elem_count*3; size_type loop_term = (elem_count*fid_count)/c_sz; size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; size_type lt = c_sz / fid_count; for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ #pragma unroll for (size_type i = 0; i < lt; ++i){ d_B[dst_base0 + t_id*lt + i] = d_A[src_base0 + t_id*lt + i]; d_B[dst_base1 + t_id*lt + i] = d_A[src_base1 + t_id*lt + i]; d_B[dst_base2 + t_id*lt + i] = d_A[src_base2 + t_id*lt + i]; d_B[dst_base3 + t_id*lt + i] = d_A[src_base3 + t_id*lt + i]; } } } /* h_0, h_1, h_2, h_3: base pointers for the host data. d_dst: pointer to device memory. elem_size: unused artefact, will be removed. elem_count: the number of elements per field. fid_count: the number of fields. c_sz: the total number of elements (e.g. 8 means 2 elements per 4 fields) to be copied per thread. */ template <int block_size, typename size_type> __device__ void bp_soa_to_aos(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { // Actual thread id size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); // Destination is array of struct layout. // Memory is assumed to be contiguous on device. size_type dst_base = 0; size_type src_base0 = 0; size_type src_base1 = elem_count; size_type src_base2 = elem_count*2; size_type src_base3 = elem_count*3; // loop_term: terminating condition for the outer loop, // equal to the total number of elements divided by the number of elements copied per thread. 
size_type loop_term = (elem_count*fid_count)/c_sz; // inc: increments outer loop by the total number of threads that are available. size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; // lt: inner loop terminating condition. size_type lt = c_sz / fid_count; // Iterate over all available threads, t_id is the logical thread id for indexing into the arrays. //#pragma unroll for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ // Each thread copies a total of c_sz elements, and d_dst is assumed contiguous and // will be AoS layout, so the logical t_id times c_sz results in the correct base // for the iteration of the inner loop. dst_base = t_id*c_sz; // Since lt is the number of full fid_count-sized elements being copied, // t_id*lt gives the correct base index for each field in the host subarrays. // This kernel is hard coded to handle an instance with 4 fields. // Some of the arithmetic could probably be removed from this loop, // but that's not likely to be the limiting factor for performance. #pragma unroll for (size_type i = 0; i < lt; ++i){ d_B[dst_base + 0 + i*fid_count] = d_A[src_base0 + t_id*lt + i]; d_B[dst_base + 1 + i*fid_count] = d_A[src_base1 + t_id*lt + i]; d_B[dst_base + 2 + i*fid_count] = d_A[src_base2 + t_id*lt + i]; d_B[dst_base + 3 + i*fid_count] = d_A[src_base3 + t_id*lt + i]; } } } /* Same as above, except for going from array of struct on cpu to struct of array on GPU. */ template <int block_size, typename size_type> __device__ void bp_aos_to_soa(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { // TODO: remove this later //c_sz = 8; // Actual thread id size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); size_type dst_base0 = 0; size_type dst_base1 = elem_count; size_type dst_base2 = elem_count*2; size_type dst_base3 = elem_count*3; size_type lt = c_sz / fid_count; size_type loop_term = elem_count/lt; size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; size_type src_base = 0; for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ src_base = t_id*c_sz; #pragma unroll for (size_type i = 0; i < lt; ++i){ d_B[dst_base0 + t_id*lt + i] = d_A[src_base + 0 + i*fid_count]; d_B[dst_base1 + t_id*lt + i] = d_A[src_base + 1 + i*fid_count]; d_B[dst_base2 + t_id*lt + i] = d_A[src_base + 2 + i*fid_count]; d_B[dst_base3 + t_id*lt + i] = d_A[src_base + 3 + i*fid_count]; } } } template <int block_size, typename size_type> __device__ void bp_aos_to_soa_single(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { // TODO: remove this later //c_sz = 8; // Actual thread id size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); size_type dst_base0 = 0; size_type dst_base1 = elem_count; size_type dst_base2 = elem_count*2; size_type dst_base3 = elem_count*3; size_type lt = c_sz / fid_count; size_type loop_term = elem_count/lt; size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; size_type src_base = 0; for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ src_base = t_id*c_sz; #pragma unroll for (size_type i = 0; i < lt; ++i){ d_B[dst_base0 + t_id*lt + i] = d_A[src_base + 0 + i*fid_count]; d_B[dst_base1 + t_id*lt + i] = d_A[src_base + 1 + i*fid_count]; d_B[dst_base2 + t_id*lt + i] = d_A[src_base + 2 + i*fid_count]; d_B[dst_base3 + t_id*lt + i] = d_A[src_base + 3 + i*fid_count]; } } } template <int block_size, typename 
size_type> __device__ void bp_soa_to_aos_single(float *d_A, float *d_B, size_type elem_size, size_type elem_count, int fid_count, int c_sz) { size_type real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x); size_type inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y; size_type of = (real_tid % fid_count); size_type offset = of*elem_count; size_type loop_term = elem_count*fid_count; for (size_type t_id = real_tid; t_id < loop_term; t_id += inc){ d_B[t_id] = d_A[offset + t_id/4]; } //d_B[dst_base + 0 + i*fid_count] = d_A[src_base0 + t_id*lt + i]; } // C wrappers around our template kernel extern "C" __global__ void bp_soa_to_aos_single(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_soa_to_aos_single<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } extern "C" __global__ void bp_aos_to_soa_single(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_aos_to_soa_single<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } extern "C" __global__ void bp_soa_to_aos(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_soa_to_aos<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } extern "C" __global__ void bp_aos_to_soa(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_aos_to_soa<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } extern "C" __global__ void bp_aos_to_aos_test(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_aos_to_aos_test<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } extern "C" __global__ void bp_soa_to_soa_test(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz) { bp_soa_to_soa_test<32, int>(d_A, d_B, e_size, e_count, fid_count, c_sz); } #endif // #ifndef _COPY_KERNEL_H_
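The extern "C" wrappers at the end of the file are ordinary __global__ entry points, so a host launch only has to supply the layout parameters documented above (elem_count per field, fid_count fields, c_sz elements copied per thread) and a grid that the grid-stride loops can work with. The sketch below is one hypothetical way to drive bp_soa_to_aos when linked against this file; the sizes, grid shape and error handling are assumptions for the example, not taken from the original project.

#include <cuda_runtime.h>

// Defined in the file above; declared here so the sketch links against it.
extern "C" __global__ void bp_soa_to_aos(float *d_A, float *d_B, int e_size, int e_count, int fid_count, int c_sz);

int run_soa_to_aos_example(void) {
  const int fid_count = 4;              // the kernel is hard coded for 4 fields
  const int elem_count = 1 << 20;       // elements per field (example value)
  const int c_sz = 8;                   // 2 elements of each field per thread
  const size_t bytes = (size_t)elem_count * fid_count * sizeof(float);
  float *d_src = 0, *d_dst = 0;
  if (cudaMalloc(&d_src, bytes) != cudaSuccess) return 1;
  if (cudaMalloc(&d_dst, bytes) != cudaSuccess) { cudaFree(d_src); return 1; }
  // ... fill d_src with SoA data: 4 contiguous sub-arrays of elem_count floats ...
  dim3 block(256), grid(1024);          // the grid-stride loop covers any remainder
  bp_soa_to_aos<<<grid, block>>>(d_src, d_dst, /*e_size (unused)*/ 0, elem_count, fid_count, c_sz);
  cudaError_t err = cudaDeviceSynchronize();
  cudaFree(d_src); cudaFree(d_dst);
  return err == cudaSuccess ? 0 : 1;
}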
a30a057c16bb33c4b8bd593997c1e2c36c5e1edd.hip
// !!! This is a file automatically generated by hipify!!! /*********************************** *********************************** CUDA PART *********************************** **********************************/ #define CUDA_COMPILE //to enable cuda types in gpucard.h #include "gpucard.h" #undef CUDA_COMPILE #include "terminal.h" #include "reduction.h" #include <memory.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <math.h> #include <iostream> #include <time.h> #define FLOATIZE_X 2 static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "CUDA fail: %s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit(1); } } #define CHK( err ) (HandleError( err, __FILE__, __LINE__ )) //Print GPU properties //inputs: // prop: pointer to structure containing device properties // dev: device number void printDeviceProperties(hipDeviceProp_t * prop, int dev){ CHK(hipGetDeviceProperties(prop, dev)); printf("\nGPU properties \n====================\n"); printf("Version number: %d.%d\n", prop->major, prop->minor); printf("Name: %s\n", prop->name); printf("Total global memory: %lu\n", prop->totalGlobalMem); printf("Total shared memory per block: %lu\n", prop->sharedMemPerBlock); printf("Total registers per block: %d\n", prop->regsPerBlock); printf("Warp size: %d\n", prop->warpSize); printf("Maximum memory pitch: %lu\n", prop->memPitch); printf("Maximum threads per block: %d\n", prop->maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, prop->maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, prop->maxGridSize[i]); printf("Clock rate: %d\n", prop->clockRate); printf("Total constant memory: %lu\n", prop->totalConstMem); printf("Texture alignment: %lu\n", prop->textureAlignment); printf("Concurrent copy and execution: %s\n", (prop->deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", prop->multiProcessorCount); printf("Kernel execution timeout: %s\n\n", (prop->kernelExecTimeoutEnabled ? "Yes" : "No")); } //Initialize instance of GPUCARD //Input: // gc: instance of GPUCARD to initialize // set: settings void gpuCardInit (GPUCARD *gc, SETTINGS *set) { //print out gpu device properties gc->devProp = (hipDeviceProp_t *)malloc(sizeof(hipDeviceProp_t)); printDeviceProperties(gc->devProp, 0); gc->calibrating=false; gc->calibrated=false; gc->calibok=0; int nchan=gc->nchan=1+(set->channel_mask==3); if ((nchan==2) and (FLOATIZE_X%2==1)) { printf ("Need FLOATIZE_X even for two channels\n"); exit(1); } if(!(OPS_PER_THREAD>0) || !((OPS_PER_THREAD & (OPS_PER_THREAD-1)) == 0)){ printf("Need OPS_PER_THREAD to be a power of 2.\n"); exit(1); } printf ("\n\nInitializing GPU\n"); printf ("====================\n"); printf ("Allocating GPU buffers\n"); int nStreams=set->cuda_streams; int nCards=(set->card_mask==3) + 1; gc->fftsize=set->fft_size; uint32_t bufsize=gc->bufsize=set->fft_size*nchan; uint32_t transform_size=gc->transform_size=(set->fft_size/2+1); float nunyq=set->sample_rate/2; float dnu=nunyq/(set->fft_size/2+1); gc->tot_pssize=0; gc->ncuts=set->n_cuts; for (int i=0; i<gc->ncuts; i++) { printf ("Cutout %i:\n",i); gc->fftavg[i]=set->fft_avg[i]; // first sort reflections etc. 
float numin, numax; numin=set->nu_min[i]; numax=set->nu_max[i]; while (fabs(numin)>nunyq) numin-=set->sample_rate; while (fabs(numax)>nunyq) numax-=set->sample_rate; numin=abs(numin); numax=abs(numax); if (numax<numin) { float t=numin; numin=numax; numax=t; } printf (" Frequencies %f - %f Mhz appear as %f - %f \n",set->nu_min[i]/1e6, set->nu_max[i]/1e6, numin/1e6, numax/1e6); int imin=int(numin/dnu); if (imin==0) imin=1; int imax=int(numax/dnu)+1; gc->pssize1[i]=(imax-imin)/set->fft_avg[i]; gc->ndxofs[i]=imin; if ((imax-imin)%set->fft_avg[i]>0) gc->pssize1[i]+=1; imax=imin+gc->pssize1[i]*set->fft_avg[i]; numin=imin*dnu; numax=imax*dnu; set->nu_min[i]=numin; set->nu_max[i]=numax; set->pssize[i]=gc->pssize1[i]; if (nchan==2){ if (nCards==1) gc->pssize[i]=gc->pssize1[i]*4; // for two channels and two crosses else gc->pssize[i]=gc->pssize1[i]*16; // for 4 channels and 6*2 crosses } else { gc->pssize[i]=gc->pssize1[i]*nCards; // just one power spectrum } gc->tot_pssize+=gc->pssize[i]; printf (" Actual freq range: %f - %f MHz (edges!)\n",numin/1e6, numax/1e6); printf (" # PS offset, #PS bins: %i %i\n",gc->ndxofs[i],gc->pssize1[i]); } CHK(hipHostMalloc(&gc->outps, gc->tot_pssize*sizeof(float), hipHostMallocDefault)); //allocating GPU buffers gc->cbuf=(int8_t***)malloc(nStreams*sizeof(int8_t**)); gc->cfbuf=(hipfftReal**)malloc(nStreams*sizeof(hipfftReal*)); gc->cfft=(hipfftComplex**)malloc(nStreams*sizeof(hipfftComplex*)); gc->coutps=(float**)malloc(nStreams*sizeof(float*)); for (int i=0;i<nStreams;i++) { gc->cbuf[i]=(int8_t**)malloc(nCards*sizeof(int8_t*)); for(int j=0; j<nCards; j++) CHK(hipMalloc(&(gc->cbuf[i][j]),bufsize)); CHK(hipMalloc(&gc->cfbuf[i], bufsize*nCards*sizeof(hipfftReal))); CHK(hipMalloc(&gc->cfft[i],transform_size*nchan*nCards*sizeof(hipfftComplex))); CHK(hipMalloc(&gc->coutps[i],gc->tot_pssize*sizeof(float))); CHK(hipMalloc(&gc->cmeasured_delay,nStreams*sizeof(int))); } printf ("Setting up CUFFT\n"); // int status=hipfftPlanMany(&gc->plan, 1, (int*)&(set->fft_size), NULL, 0, 0, // NULL, 2*transform_size,1, HIPFFT_R2C, nchan); int status=hipfftPlan1d(&gc->plan, set->fft_size, HIPFFT_R2C, nchan); if (status!=HIPFFT_SUCCESS) { printf ("Plan failed:"); if (status==HIPFFT_ALLOC_FAILED) printf("HIPFFT_ALLOC_FAILED"); if (status==HIPFFT_INVALID_VALUE) printf ("HIPFFT_INVALID_VALUE"); if (status==HIPFFT_INTERNAL_ERROR) printf ("HIPFFT_INTERNAL_ERROR"); if (status==HIPFFT_SETUP_FAILED) printf ("HIPFFT_SETUP_FAILED"); if (status==HIPFFT_INVALID_SIZE) printf ("HIPFFT_INVALID_SIZE"); printf("\n"); exit(1); } status=hipfftPlan1d(&gc->iplan, set->fft_size, HIPFFT_C2R, 1); // inverse transform always for one channel only if (status!=HIPFFT_SUCCESS) { printf ("Plan failed:"); if (status==HIPFFT_ALLOC_FAILED) printf("HIPFFT_ALLOC_FAILED"); if (status==HIPFFT_INVALID_VALUE) printf ("HIPFFT_INVALID_VALUE"); if (status==HIPFFT_INTERNAL_ERROR) printf ("HIPFFT_INTERNAL_ERROR"); if (status==HIPFFT_SETUP_FAILED) printf ("HIPFFT_SETUP_FAILED"); if (status==HIPFFT_INVALID_SIZE) printf ("HIPFFT_INVALID_SIZE"); printf("\n"); exit(1); } printf ("Setting up CUDA streams & events\n"); gc->nstreams = set->cuda_streams; gc->threads=set->cuda_threads; if (gc->nstreams<1) { printf ("Cannot really work with less than one stream.\n"); exit(1); } gc->streams=(hipStream_t*)malloc(gc->nstreams*sizeof(hipStream_t)); gc->eStart=(hipEvent_t*)malloc(gc->nstreams*sizeof(hipEvent_t)); gc->eDoneCopy=(hipEvent_t*)malloc(gc->nstreams*sizeof(hipEvent_t)); gc->eDoneFloatize=(hipEvent_t*)malloc(gc->nstreams*sizeof(hipEvent_t)); 
gc->eDoneFFT=(hipEvent_t*)malloc(gc->nstreams*sizeof(hipEvent_t)); gc->eDonePost=(hipEvent_t*)malloc(gc->nstreams*sizeof(hipEvent_t)); gc->eDoneCalib=(hipEvent_t*)malloc(gc->nstreams*sizeof(hipEvent_t)); gc->eDoneStream=(hipEvent_t*)malloc(gc->nstreams*sizeof(hipEvent_t)); for (int i=0;i<gc->nstreams;i++) { //create stream CHK(hipStreamCreate(&gc->streams[i])); //create events for stream CHK(hipEventCreate(&gc->eStart[i])); CHK(hipEventCreate(&gc->eDoneCopy[i])); CHK(hipEventCreate(&gc->eDoneFloatize[i])); CHK(hipEventCreate(&gc->eDoneFFT[i])); CHK(hipEventCreate(&gc->eDonePost[i])); CHK(hipEventCreate(&gc->eDoneCalib[i])); CHK(hipEventCreate(&gc->eDoneStream[i])); } gc->fstream = 0; //oldest running stream gc->bstream = -1; //newest stream (will become 0 when we actually start with first real stream) gc->active_streams = 0; //number of streams currently running printf ("GPU ready.\n"); } //Convert bytes to floats, 1 channel version //Inputs: // sample: array of bytes // fsample: array of floats to put output in __global__ void floatize_1chan(int8_t* sample, hipfftReal* fsample) { int i = FLOATIZE_X*(blockDim.x * blockIdx.x + threadIdx.x); for (int j=0; j<FLOATIZE_X; j++) fsample[i+j]=float(sample[i+j]); } //Convert bytes to floats, 2 channel version //Inputs: // sample: array of bytes with the 2 channels interleaved // fsample1: array of floats to put converted bytes from channel 1 in // fsample2: array of floats to put converted bytes from channel 2 in __global__ void floatize_2chan(int8_t* sample, hipfftReal* fsample1, hipfftReal* fsample2) { int i = FLOATIZE_X*(blockDim.x * blockIdx.x + threadIdx.x); for (int j=0; j<FLOATIZE_X/2; j++) { fsample1[i/2+j]=float(sample[i+2*j]); fsample2[i/2+j]=float(sample[i+2*j+1]); } } //Print the elapsed time between 2 cuda events void printDt (hipEvent_t cstart, hipEvent_t cstop, float * total, TWRITER * t) { float gpu_time; CHK(hipEventElapsedTime(&gpu_time, cstart, cstop)); tprintfn (t, 0, " %3.2fms ", gpu_time); *total +=gpu_time; } void printTiming(GPUCARD *gc, int i, TWRITER * t) { float totalTime = 0; tprintfn (t, 0, "GPU timing (copy/floatize/fft/post/calib): "); printDt (gc->eStart[i], gc->eDoneCopy[i], &totalTime, t); totalTime=0; printDt (gc->eDoneCopy[i], gc->eDoneFloatize[i], &totalTime, t); printDt (gc->eDoneFloatize[i], gc->eDoneFFT[i], &totalTime, t); printDt (gc->eDoneFFT[i], gc->eDonePost[i], &totalTime, t); printDt (gc->eDonePost[i], gc->eDoneCalib[i], &totalTime, t); tprintfn (t,1,""); tprintfn (t, 1, "GPU timing cumpute total: %3.2f ", totalTime); } void printLiveStat(SETTINGS *set, GPUCARD *gc, int8_t **buf, TWRITER *twr) { int nCards=(set->card_mask==3) + 1; if (set->print_meanvar) { // now find some statistic over subsamples of samples uint32_t bs=gc->bufsize; uint32_t step=gc->bufsize/(32768); float NSub=bs/step; // number of subsamples to take float m1=0.,m2=0.,v1=0.,v2=0.; float m3=0.,m4=0.,v3=0.,v4=0.; for (int i=0; i<bs; i+=step) { // take them in steps of step float n=buf[0][i]; m1+=n; v1+=n*n; n=buf[0][i+1]; m2+=n; v2+=n*n; if (nCards==2) { n=buf[1][i]; m3+=n; v3+=n*n; n=buf[1][i+1]; m4+=n; v4+=n*n; } } m1/=NSub; v1=sqrt(v1/NSub-m1*m1); //mean and variance m2/=NSub; v2=sqrt(v2/NSub-m2*m2); tprintfn (twr,1,"CH1 mean/rms: %f %f CH2 mean/rms: %f %f ",m1,v1,m2,v2); if (nCards==2) { m3/=NSub; v3=sqrt(v3/NSub-m3*m3); //mean and variance m4/=NSub; v4=sqrt(v4/NSub-m4*m4); tprintfn (twr,1,"CH3 mean/rms: %f %f CH4 mean/rms: %f %f ",m3,v3,m4,v4); } } if (set->check_CH2) { // heuristic to see if CH2 is OK. 
float mean_card1=0; float mean_card2=0; int count=0; float numin=set->nu_min[0]; float nustep=(set->nu_max[0]-set->nu_min[0])/(gc->pssize1[0]); int ofs2=gc->pssize1[0]; int ofs4=3*gc->pssize1[0]; for (int j=0; j<gc->pssize1[0];j++) { // check just cut 0 float f=numin+nustep*j; if ((f>1560e6-1100e6) && (f<1640e6-1100e6)) { count++; mean_card1+=gc->outps[ofs2+j]; if (nCards==2) mean_card2+=gc->outps[ofs4+j]; } } if (count>0) { mean_card1/=(count*1e11)*(set->fft_avg[0]/8192); int ok=0; if (nCards==1) { ok=mean_card1<1; tprintfn (twr,0,"CH2 check : %f : ",mean_card1); } else { mean_card2/=(count*1e11)*(set->fft_avg[0]/8192);; ok=((mean_card1<1) && (mean_card2<1)); tprintfn (twr,0,"CH2 check : %f / %f : ",mean_card1, mean_card2); } if (ok) tprintfn(twr,1, " OK "); else tprintfn(twr,1, " NOT OK !!!"); } } if (set->print_maxp) { // find max power in each cutout in each channel. int of1=0; // CH1 auto for (int i=0; i<gc->ncuts; i++) { int of2=of1+gc->pssize1[i]; //CH2 auto int of3=of1+2*gc->pssize1[i]; // CH3 auto int of4=of1+3*gc->pssize1[i]; // CH4 auto float ch1p=0, ch2p=0, ch3p=0, ch4p=0; int ch1i=0, ch2i=0, ch3i=0, ch4i=0; for (int j=0; j<gc->pssize1[i];j++) { if (gc->outps[of1+j] > ch1p) {ch1p=gc->outps[of1+j]; ch1i=j;} if (gc->outps[of2+j] > ch2p) {ch2p=gc->outps[of2+j]; ch2i=j;} if (nCards==2) { if (gc->outps[of3+j] > ch3p) {ch3p=gc->outps[of3+j]; ch3i=j;} if (gc->outps[of4+j] > ch4p) {ch4p=gc->outps[of4+j]; ch4i=j;} } } of1+=gc->pssize[i]; // next cutout float numin=set->nu_min[i]; float nustep=(set->nu_max[i]-set->nu_min[i])/(gc->pssize1[i]); float ch1f=(numin+nustep*(0.5+ch1i))/1e6; float ch2f=(numin+nustep*(0.5+ch2i))/1e6; tprintfn (twr,1,"Peak pow (cutout %i): CH1 %f at %f MHz; CH2 %f at %f MHz ", i,log(ch1p),ch1f,log(ch2p),ch2f); if (nCards==2) { float ch3f=(numin+nustep*(0.5+ch3i))/1e6; float ch4f=(numin+nustep*(0.5+ch4i))/1e6; tprintfn (twr,1,"Peak pow (cutout %i): CH3 %f at %f MHz; CH4 %f at %f MHz ", i,log(ch3p),ch3f,log(ch4p),ch4f); } } } if (set->measure_delay || gc->calibrating) { float delayms=float(gc->last_measured_delay)*1.0/(set->sample_rate)*1e3; tprintfn (twr,1, "Calibrating: %i/%i Last measured delay: %i samples = %f ms. 
", gc->ndelays, NUM_DELAYS, gc->last_measured_delay, delayms); } else { if (gc->calibrated) tprintfn (twr,1, "DCal: OK: %i/%i Val %f +- %fms Applied delay: %iB+%iS %iB+%iS", gc->calibok, NUM_DELAYS, gc->calibmean_ms, gc->calibrms_ms, set->bufdelay[0], set->delay[0], set->bufdelay[1], set->delay[1]); else tprintfn (twr,1, "DCal: Failed: %i/%i Applied delay: %iB+%iS %iB+%iS ", gc->calibok, NUM_DELAYS, set->bufdelay[0], set->delay[0], set->bufdelay[1], set->delay[1]); } } //process calibration data and stops calibrationc process void processCalibration(GPUCARD *gc, SETTINGS *set) { gc->calibrating=false; long int mean=0; long int var=0; int numok=0; const int OK=1500000; // 1.4 ms for (int i=0; i<NUM_DELAYS; i++) { if (abs(gc->delays[i])<OK) { mean+=gc->delays[i]; var+=gc->delays[i]*gc->delays[i]; numok++; } } gc->calibok=numok; if (numok>NUM_DELAYS/2) { gc->calibrated=true; mean/=numok; var/=numok; gc->calibmean=mean; if (gc->calibmean>0) { set->delay[0]+=gc->calibmean; } else if (gc->calibmean<0) { set->delay[1]+= (-gc->calibmean); } int mindel=::min(set->delay[0],set->delay[1]); set->delay[0]-=mindel; set->delay[1]-=mindel; gc->calibrms = int(sqrt(var-mean*mean)); gc->calibmean_ms= gc->calibmean*1.0/(set->sample_rate)*1e3; gc->calibrms_ms= gc->calibrms*1.0/(set->sample_rate)*1e3; } else gc->calibrated=false; } void startCalib(GPUCARD *gc) { gc->calibrating=true; gc->ndelays=0; } //Process one data packet from the digitizer //Input: // gc: graphics card // buf: data from digitizer // pbuf: old data from digitizer (to implement delay) // wr: writer to write out power spectra and outliers to files // set: settings int gpuProcessBuffer(GPUCARD *gc, int8_t **buf_one, int8_t **buf_two, WRITER *wr, TWRITER *twr, SETTINGS *set) { //streamed version //Check if other streams are finished and proccess the finished ones in order (i.e. 
print output to file) CHK(hipGetLastError()); int nCards=(set->card_mask==3) + 1; int8_t* buf[2]; int8_t* pbuf[2]; buf[0]=buf_one[set->bufdelay[0]]; pbuf[0]=buf_one[set->bufdelay[0]+1]; buf[1]=buf_two[set->bufdelay[1]]; pbuf[1]=buf_two[set->bufdelay[1]+1]; while(gc->active_streams > 0){ // printf ("S:%i ", hipEventQuery(gc->eStart[gc->fstream])==hipSuccess); // printf ("%i ", hipEventQuery(gc->eDoneCopy[gc->fstream])==hipSuccess); // printf ("%i ", hipEventQuery(gc->eDoneFloatize[gc->fstream])==hipSuccess); // printf ("%i ", hipEventQuery(gc->eDoneFFT[gc->fstream])==hipSuccess); // printf ("%i [%i]\n ", hipEventQuery(gc->eDonePost[gc->fstream])==hipSuccess, gc->fstream); if(hipEventQuery(gc->eDoneStream[gc->fstream])==hipSuccess){ int fstream=gc->fstream; if (!gc->calib[fstream]) { hipMemcpy(gc->outps,gc->coutps[fstream], gc->tot_pssize*sizeof(float), hipMemcpyDeviceToHost); } else { hipMemcpy(&gc->last_measured_delay,&(gc->cmeasured_delay[fstream]), sizeof(int), hipMemcpyDeviceToHost); gc->delays[gc->ndelays]=gc->last_measured_delay; gc->ndelays++; if (gc->ndelays==NUM_DELAYS) processCalibration(gc,set); } if (gc->active_streams==1) { printTiming(gc,fstream,twr); printLiveStat(set,gc,buf,twr); writerAccumulatePS(wr,gc->outps, twr,set); } else writerAccumulatePS(wr,gc->outps, NULL,set); // accumulate, but without talking gc->fstream = (++gc->fstream)%(gc->nstreams); gc->active_streams--; } else break; } if(gc->active_streams == gc->nstreams){ //if no empty streams return false; } gc->active_streams++; int csi = gc->bstream = (++gc->bstream)%(gc->nstreams); //add new stream hipStream_t cs= gc->streams[gc->bstream]; hipEventRecord(gc->eStart[csi], cs); //memory copy for(int i=0; i<nCards; i++) { if (set->delay[i]==0) hipMemcpyAsync(gc->cbuf[csi][i], buf[i], gc->bufsize , hipMemcpyHostToDevice,cs); else { if (set->delay[i]>gc->fftsize) {printf ("Pathological delay.\n"); exit(1);} unsigned ofs=set->delay[i]*gc->nchan; hipMemcpyAsync(&gc->cbuf[csi][i][ofs], buf[i], gc->bufsize-ofs , hipMemcpyHostToDevice,cs); if (pbuf[i]) hipMemcpyAsync(gc->cbuf[csi][i], &pbuf[i][gc->bufsize-ofs], ofs , hipMemcpyHostToDevice,cs); } } //floatize hipEventRecord(gc->eDoneCopy[csi], cs); int threadsPerBlock = gc->threads; int blocksPerGrid = gc->bufsize / threadsPerBlock/FLOATIZE_X; if (gc->nchan==1) hipLaunchKernelGGL(( floatize_1chan), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, cs, gc->cbuf[csi][0],gc->cfbuf[csi]); else for(int i=0; i<nCards; i++) hipLaunchKernelGGL(( floatize_2chan), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, cs, gc->cbuf[csi][i],&(gc->cfbuf[csi][gc->fftsize*2*i]),&(gc->cfbuf[csi][gc->fftsize*(2*i+1)])); hipEventRecord(gc->eDoneFloatize[csi], cs); //perform fft int status = hipfftSetStream(gc->plan, cs); if(status !=HIPFFT_SUCCESS) { printf("CUFFSTETSTREAM failed\n"); exit(1); } for(int i=0; i<nCards;i++){ status=hipfftExecR2C(gc->plan, &(gc->cfbuf[csi][gc->bufsize*i]), &(gc->cfft[csi][2*i*gc->transform_size])); if (status!=HIPFFT_SUCCESS) { printf("CUFFT FAILED\n"); exit(1); } } hipEventRecord(gc->eDoneFFT[csi], cs); if (!set->measure_delay & !gc->calibrating) { gc->calib[csi]=false; //compute spectra if (gc->nchan==1) { int psofs=0; for (int i=0; i<gc->ncuts; i++) { hipLaunchKernelGGL(( ps_reduce), dim3(gc->pssize[i]), dim3(1024), 0, cs, gc->cfft[csi], &(gc->coutps[csi][psofs]), gc->ndxofs[i], gc->fftavg[i]); psofs+=gc->pssize[i]; } } else if(gc->nchan==2){ // note we need to take into account the tricky N/2+1 FFT size while we do N/2 binning // pssize+2 = transformsize+1 int psofs=0; 
for (int i=0; i<gc->ncuts; i++) { for(int j=0; j<nCards; j++){ hipLaunchKernelGGL(( ps_reduce), dim3(gc->pssize1[i]), dim3(1024), 0, cs, &gc->cfft[csi][2*j*gc->transform_size], &(gc->coutps[csi][psofs]), gc->ndxofs[i], gc->fftavg[i]); psofs+=gc->pssize1[i]; hipLaunchKernelGGL(( ps_reduce), dim3(gc->pssize1[i]), dim3(1024), 0, cs, &gc->cfft[csi][(2*j+1)*gc->transform_size], &(gc->coutps[csi][psofs]), gc->ndxofs[i], gc->fftavg[i]); psofs+=gc->pssize1[i]; } //cross spectra for(int j = 0; j<nCards*2; j++) for(int k = j+1; k < nCards*2 ; k++){ //NEED TO CHECK THAT PARAMETERS ARE ALL CORRECT FOR TWO CARDS AND FOR ONE CARD.... hipLaunchKernelGGL(( ps_X_reduce), dim3(gc->pssize1[i]), dim3(1024), 0, cs, &gc->cfft[csi][j*gc->transform_size], &gc->cfft[csi][k*gc->transform_size], &(gc->coutps[csi][psofs]), &(gc->coutps[csi][psofs+gc->pssize1[i]]), gc->ndxofs[i], gc->fftavg[i]); psofs+=2*gc->pssize1[i]; } } } else{ printf("Can only handle 1 or 2 channels\n"); exit(1); } hipEventRecord(gc->eDonePost[csi], cs); hipEventRecord(gc->eDoneCalib[csi], cs); } if (set->measure_delay || gc->calibrating) { hipEventRecord(gc->eDonePost[csi], cs); gc->calib[csi]=true; int blocksPerGrid = gc->transform_size / threadsPerBlock; hipLaunchKernelGGL(( C12_Cross) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, cs , &(gc->cfft[csi][0]), &(gc->cfft[csi][gc->transform_size]), &(gc->cfft[csi][2*gc->transform_size]), &(gc->cfft[csi][3*gc->transform_size])); int status = hipfftSetStream(gc->iplan, cs); if(status !=HIPFFT_SUCCESS) { printf("CUFFTSETSTREAM failed\n"); exit(1); } status=hipfftExecC2R(gc->iplan, &(gc->cfft[csi][0]), &(gc->cfbuf[csi][0]) ); if (status!=HIPFFT_SUCCESS) { printf("CUFFT FAILED\n"); exit(1); } blocksPerGrid = threadsPerBlock; int mult=gc->fftsize/blocksPerGrid/threadsPerBlock; hipLaunchKernelGGL(( C12_FindMax_Part1), dim3(blocksPerGrid),dim3(threadsPerBlock),0,cs, &(gc->cfbuf[csi][0]), mult,&(gc->cfbuf[csi][gc->fftsize]),(int*)gc->cbuf[csi][0]); hipLaunchKernelGGL(( C12_FindMax_Part2), dim3(1),dim3(1),0,cs, blocksPerGrid, gc->fftsize, &(gc->cfbuf[csi][gc->fftsize]),(int*)gc->cbuf[csi][0], &gc->cmeasured_delay[csi]); hipEventRecord(gc->eDoneCalib[csi], cs); } // this is outside so that event gets processed. hipEventRecord(gc->eDoneStream[csi], cs); return true; }
a30a057c16bb33c4b8bd593997c1e2c36c5e1edd.cu
/*********************************** *********************************** CUDA PART *********************************** **********************************/ #define CUDA_COMPILE //to enable cuda types in gpucard.h #include "gpucard.h" #undef CUDA_COMPILE #include "terminal.h" #include "reduction.h" #include <memory.h> #include <cuda.h> #include <cuda_runtime.h> #include <cufft.h> #include <cuda_profiler_api.h> #include <stdio.h> #include <math.h> #include <iostream> #include <time.h> #define FLOATIZE_X 2 static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "CUDA fail: %s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit(1); } } #define CHK( err ) (HandleError( err, __FILE__, __LINE__ )) //Print GPU properties //inputs: // prop: pointer to structure containing device properties // dev: device number void printDeviceProperties(cudaDeviceProp * prop, int dev){ CHK(cudaGetDeviceProperties(prop, dev)); printf("\nGPU properties \n====================\n"); printf("Version number: %d.%d\n", prop->major, prop->minor); printf("Name: %s\n", prop->name); printf("Total global memory: %lu\n", prop->totalGlobalMem); printf("Total shared memory per block: %lu\n", prop->sharedMemPerBlock); printf("Total registers per block: %d\n", prop->regsPerBlock); printf("Warp size: %d\n", prop->warpSize); printf("Maximum memory pitch: %lu\n", prop->memPitch); printf("Maximum threads per block: %d\n", prop->maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, prop->maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, prop->maxGridSize[i]); printf("Clock rate: %d\n", prop->clockRate); printf("Total constant memory: %lu\n", prop->totalConstMem); printf("Texture alignment: %lu\n", prop->textureAlignment); printf("Concurrent copy and execution: %s\n", (prop->deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", prop->multiProcessorCount); printf("Kernel execution timeout: %s\n\n", (prop->kernelExecTimeoutEnabled ? "Yes" : "No")); } //Initialize instance of GPUCARD //Input: // gc: instance of GPUCARD to initialize // set: settings void gpuCardInit (GPUCARD *gc, SETTINGS *set) { //print out gpu device properties gc->devProp = (cudaDeviceProp *)malloc(sizeof(cudaDeviceProp)); printDeviceProperties(gc->devProp, 0); gc->calibrating=false; gc->calibrated=false; gc->calibok=0; int nchan=gc->nchan=1+(set->channel_mask==3); if ((nchan==2) and (FLOATIZE_X%2==1)) { printf ("Need FLOATIZE_X even for two channels\n"); exit(1); } if(!(OPS_PER_THREAD>0) || !((OPS_PER_THREAD & (OPS_PER_THREAD-1)) == 0)){ printf("Need OPS_PER_THREAD to be a power of 2.\n"); exit(1); } printf ("\n\nInitializing GPU\n"); printf ("====================\n"); printf ("Allocating GPU buffers\n"); int nStreams=set->cuda_streams; int nCards=(set->card_mask==3) + 1; gc->fftsize=set->fft_size; uint32_t bufsize=gc->bufsize=set->fft_size*nchan; uint32_t transform_size=gc->transform_size=(set->fft_size/2+1); float nunyq=set->sample_rate/2; float dnu=nunyq/(set->fft_size/2+1); gc->tot_pssize=0; gc->ncuts=set->n_cuts; for (int i=0; i<gc->ncuts; i++) { printf ("Cutout %i:\n",i); gc->fftavg[i]=set->fft_avg[i]; // first sort reflections etc. 
float numin, numax; numin=set->nu_min[i]; numax=set->nu_max[i]; while (fabs(numin)>nunyq) numin-=set->sample_rate; while (fabs(numax)>nunyq) numax-=set->sample_rate; numin=abs(numin); numax=abs(numax); if (numax<numin) { float t=numin; numin=numax; numax=t; } printf (" Frequencies %f - %f Mhz appear as %f - %f \n",set->nu_min[i]/1e6, set->nu_max[i]/1e6, numin/1e6, numax/1e6); int imin=int(numin/dnu); if (imin==0) imin=1; int imax=int(numax/dnu)+1; gc->pssize1[i]=(imax-imin)/set->fft_avg[i]; gc->ndxofs[i]=imin; if ((imax-imin)%set->fft_avg[i]>0) gc->pssize1[i]+=1; imax=imin+gc->pssize1[i]*set->fft_avg[i]; numin=imin*dnu; numax=imax*dnu; set->nu_min[i]=numin; set->nu_max[i]=numax; set->pssize[i]=gc->pssize1[i]; if (nchan==2){ if (nCards==1) gc->pssize[i]=gc->pssize1[i]*4; // for two channels and two crosses else gc->pssize[i]=gc->pssize1[i]*16; // for 4 channels and 6*2 crosses } else { gc->pssize[i]=gc->pssize1[i]*nCards; // just one power spectrum } gc->tot_pssize+=gc->pssize[i]; printf (" Actual freq range: %f - %f MHz (edges!)\n",numin/1e6, numax/1e6); printf (" # PS offset, #PS bins: %i %i\n",gc->ndxofs[i],gc->pssize1[i]); } CHK(cudaHostAlloc(&gc->outps, gc->tot_pssize*sizeof(float), cudaHostAllocDefault)); //allocating GPU buffers gc->cbuf=(int8_t***)malloc(nStreams*sizeof(int8_t**)); gc->cfbuf=(cufftReal**)malloc(nStreams*sizeof(cufftReal*)); gc->cfft=(cufftComplex**)malloc(nStreams*sizeof(cufftComplex*)); gc->coutps=(float**)malloc(nStreams*sizeof(float*)); for (int i=0;i<nStreams;i++) { gc->cbuf[i]=(int8_t**)malloc(nCards*sizeof(int8_t*)); for(int j=0; j<nCards; j++) CHK(cudaMalloc(&(gc->cbuf[i][j]),bufsize)); CHK(cudaMalloc(&gc->cfbuf[i], bufsize*nCards*sizeof(cufftReal))); CHK(cudaMalloc(&gc->cfft[i],transform_size*nchan*nCards*sizeof(cufftComplex))); CHK(cudaMalloc(&gc->coutps[i],gc->tot_pssize*sizeof(float))); CHK(cudaMalloc(&gc->cmeasured_delay,nStreams*sizeof(int))); } printf ("Setting up CUFFT\n"); // int status=cufftPlanMany(&gc->plan, 1, (int*)&(set->fft_size), NULL, 0, 0, // NULL, 2*transform_size,1, CUFFT_R2C, nchan); int status=cufftPlan1d(&gc->plan, set->fft_size, CUFFT_R2C, nchan); if (status!=CUFFT_SUCCESS) { printf ("Plan failed:"); if (status==CUFFT_ALLOC_FAILED) printf("CUFFT_ALLOC_FAILED"); if (status==CUFFT_INVALID_VALUE) printf ("CUFFT_INVALID_VALUE"); if (status==CUFFT_INTERNAL_ERROR) printf ("CUFFT_INTERNAL_ERROR"); if (status==CUFFT_SETUP_FAILED) printf ("CUFFT_SETUP_FAILED"); if (status==CUFFT_INVALID_SIZE) printf ("CUFFT_INVALID_SIZE"); printf("\n"); exit(1); } status=cufftPlan1d(&gc->iplan, set->fft_size, CUFFT_C2R, 1); // inverse transform always for one channel only if (status!=CUFFT_SUCCESS) { printf ("Plan failed:"); if (status==CUFFT_ALLOC_FAILED) printf("CUFFT_ALLOC_FAILED"); if (status==CUFFT_INVALID_VALUE) printf ("CUFFT_INVALID_VALUE"); if (status==CUFFT_INTERNAL_ERROR) printf ("CUFFT_INTERNAL_ERROR"); if (status==CUFFT_SETUP_FAILED) printf ("CUFFT_SETUP_FAILED"); if (status==CUFFT_INVALID_SIZE) printf ("CUFFT_INVALID_SIZE"); printf("\n"); exit(1); } printf ("Setting up CUDA streams & events\n"); gc->nstreams = set->cuda_streams; gc->threads=set->cuda_threads; if (gc->nstreams<1) { printf ("Cannot really work with less than one stream.\n"); exit(1); } gc->streams=(cudaStream_t*)malloc(gc->nstreams*sizeof(cudaStream_t)); gc->eStart=(cudaEvent_t*)malloc(gc->nstreams*sizeof(cudaEvent_t)); gc->eDoneCopy=(cudaEvent_t*)malloc(gc->nstreams*sizeof(cudaEvent_t)); gc->eDoneFloatize=(cudaEvent_t*)malloc(gc->nstreams*sizeof(cudaEvent_t)); 
gc->eDoneFFT=(cudaEvent_t*)malloc(gc->nstreams*sizeof(cudaEvent_t)); gc->eDonePost=(cudaEvent_t*)malloc(gc->nstreams*sizeof(cudaEvent_t)); gc->eDoneCalib=(cudaEvent_t*)malloc(gc->nstreams*sizeof(cudaEvent_t)); gc->eDoneStream=(cudaEvent_t*)malloc(gc->nstreams*sizeof(cudaEvent_t)); for (int i=0;i<gc->nstreams;i++) { //create stream CHK(cudaStreamCreate(&gc->streams[i])); //create events for stream CHK(cudaEventCreate(&gc->eStart[i])); CHK(cudaEventCreate(&gc->eDoneCopy[i])); CHK(cudaEventCreate(&gc->eDoneFloatize[i])); CHK(cudaEventCreate(&gc->eDoneFFT[i])); CHK(cudaEventCreate(&gc->eDonePost[i])); CHK(cudaEventCreate(&gc->eDoneCalib[i])); CHK(cudaEventCreate(&gc->eDoneStream[i])); } gc->fstream = 0; //oldest running stream gc->bstream = -1; //newest stream (will become 0 when we actually start with first real stream) gc->active_streams = 0; //number of streams currently running printf ("GPU ready.\n"); } //Convert bytes to floats, 1 channel version //Inputs: // sample: array of bytes // fsample: array of floats to put output in __global__ void floatize_1chan(int8_t* sample, cufftReal* fsample) { int i = FLOATIZE_X*(blockDim.x * blockIdx.x + threadIdx.x); for (int j=0; j<FLOATIZE_X; j++) fsample[i+j]=float(sample[i+j]); } //Convert bytes to floats, 2 channel version //Inputs: // sample: array of bytes with the 2 channels interleaved // fsample1: array of floats to put converted bytes from channel 1 in // fsample2: array of floats to put converted bytes from channel 2 in __global__ void floatize_2chan(int8_t* sample, cufftReal* fsample1, cufftReal* fsample2) { int i = FLOATIZE_X*(blockDim.x * blockIdx.x + threadIdx.x); for (int j=0; j<FLOATIZE_X/2; j++) { fsample1[i/2+j]=float(sample[i+2*j]); fsample2[i/2+j]=float(sample[i+2*j+1]); } } //Print the elapsed time between 2 cuda events void printDt (cudaEvent_t cstart, cudaEvent_t cstop, float * total, TWRITER * t) { float gpu_time; CHK(cudaEventElapsedTime(&gpu_time, cstart, cstop)); tprintfn (t, 0, " %3.2fms ", gpu_time); *total +=gpu_time; } void printTiming(GPUCARD *gc, int i, TWRITER * t) { float totalTime = 0; tprintfn (t, 0, "GPU timing (copy/floatize/fft/post/calib): "); printDt (gc->eStart[i], gc->eDoneCopy[i], &totalTime, t); totalTime=0; printDt (gc->eDoneCopy[i], gc->eDoneFloatize[i], &totalTime, t); printDt (gc->eDoneFloatize[i], gc->eDoneFFT[i], &totalTime, t); printDt (gc->eDoneFFT[i], gc->eDonePost[i], &totalTime, t); printDt (gc->eDonePost[i], gc->eDoneCalib[i], &totalTime, t); tprintfn (t,1,""); tprintfn (t, 1, "GPU timing cumpute total: %3.2f ", totalTime); } void printLiveStat(SETTINGS *set, GPUCARD *gc, int8_t **buf, TWRITER *twr) { int nCards=(set->card_mask==3) + 1; if (set->print_meanvar) { // now find some statistic over subsamples of samples uint32_t bs=gc->bufsize; uint32_t step=gc->bufsize/(32768); float NSub=bs/step; // number of subsamples to take float m1=0.,m2=0.,v1=0.,v2=0.; float m3=0.,m4=0.,v3=0.,v4=0.; for (int i=0; i<bs; i+=step) { // take them in steps of step float n=buf[0][i]; m1+=n; v1+=n*n; n=buf[0][i+1]; m2+=n; v2+=n*n; if (nCards==2) { n=buf[1][i]; m3+=n; v3+=n*n; n=buf[1][i+1]; m4+=n; v4+=n*n; } } m1/=NSub; v1=sqrt(v1/NSub-m1*m1); //mean and variance m2/=NSub; v2=sqrt(v2/NSub-m2*m2); tprintfn (twr,1,"CH1 mean/rms: %f %f CH2 mean/rms: %f %f ",m1,v1,m2,v2); if (nCards==2) { m3/=NSub; v3=sqrt(v3/NSub-m3*m3); //mean and variance m4/=NSub; v4=sqrt(v4/NSub-m4*m4); tprintfn (twr,1,"CH3 mean/rms: %f %f CH4 mean/rms: %f %f ",m3,v3,m4,v4); } } if (set->check_CH2) { // heuristic to see if CH2 is OK. 
float mean_card1=0; float mean_card2=0; int count=0; float numin=set->nu_min[0]; float nustep=(set->nu_max[0]-set->nu_min[0])/(gc->pssize1[0]); int ofs2=gc->pssize1[0]; int ofs4=3*gc->pssize1[0]; for (int j=0; j<gc->pssize1[0];j++) { // check just cut 0 float f=numin+nustep*j; if ((f>1560e6-1100e6) && (f<1640e6-1100e6)) { count++; mean_card1+=gc->outps[ofs2+j]; if (nCards==2) mean_card2+=gc->outps[ofs4+j]; } } if (count>0) { mean_card1/=(count*1e11)*(set->fft_avg[0]/8192); int ok=0; if (nCards==1) { ok=mean_card1<1; tprintfn (twr,0,"CH2 check : %f : ",mean_card1); } else { mean_card2/=(count*1e11)*(set->fft_avg[0]/8192);; ok=((mean_card1<1) && (mean_card2<1)); tprintfn (twr,0,"CH2 check : %f / %f : ",mean_card1, mean_card2); } if (ok) tprintfn(twr,1, " OK "); else tprintfn(twr,1, " NOT OK !!!"); } } if (set->print_maxp) { // find max power in each cutout in each channel. int of1=0; // CH1 auto for (int i=0; i<gc->ncuts; i++) { int of2=of1+gc->pssize1[i]; //CH2 auto int of3=of1+2*gc->pssize1[i]; // CH3 auto int of4=of1+3*gc->pssize1[i]; // CH4 auto float ch1p=0, ch2p=0, ch3p=0, ch4p=0; int ch1i=0, ch2i=0, ch3i=0, ch4i=0; for (int j=0; j<gc->pssize1[i];j++) { if (gc->outps[of1+j] > ch1p) {ch1p=gc->outps[of1+j]; ch1i=j;} if (gc->outps[of2+j] > ch2p) {ch2p=gc->outps[of2+j]; ch2i=j;} if (nCards==2) { if (gc->outps[of3+j] > ch3p) {ch3p=gc->outps[of3+j]; ch3i=j;} if (gc->outps[of4+j] > ch4p) {ch4p=gc->outps[of4+j]; ch4i=j;} } } of1+=gc->pssize[i]; // next cutout float numin=set->nu_min[i]; float nustep=(set->nu_max[i]-set->nu_min[i])/(gc->pssize1[i]); float ch1f=(numin+nustep*(0.5+ch1i))/1e6; float ch2f=(numin+nustep*(0.5+ch2i))/1e6; tprintfn (twr,1,"Peak pow (cutout %i): CH1 %f at %f MHz; CH2 %f at %f MHz ", i,log(ch1p),ch1f,log(ch2p),ch2f); if (nCards==2) { float ch3f=(numin+nustep*(0.5+ch3i))/1e6; float ch4f=(numin+nustep*(0.5+ch4i))/1e6; tprintfn (twr,1,"Peak pow (cutout %i): CH3 %f at %f MHz; CH4 %f at %f MHz ", i,log(ch3p),ch3f,log(ch4p),ch4f); } } } if (set->measure_delay || gc->calibrating) { float delayms=float(gc->last_measured_delay)*1.0/(set->sample_rate)*1e3; tprintfn (twr,1, "Calibrating: %i/%i Last measured delay: %i samples = %f ms. 
", gc->ndelays, NUM_DELAYS, gc->last_measured_delay, delayms); } else { if (gc->calibrated) tprintfn (twr,1, "DCal: OK: %i/%i Val %f +- %fms Applied delay: %iB+%iS %iB+%iS", gc->calibok, NUM_DELAYS, gc->calibmean_ms, gc->calibrms_ms, set->bufdelay[0], set->delay[0], set->bufdelay[1], set->delay[1]); else tprintfn (twr,1, "DCal: Failed: %i/%i Applied delay: %iB+%iS %iB+%iS ", gc->calibok, NUM_DELAYS, set->bufdelay[0], set->delay[0], set->bufdelay[1], set->delay[1]); } } //process calibration data and stops calibrationc process void processCalibration(GPUCARD *gc, SETTINGS *set) { gc->calibrating=false; long int mean=0; long int var=0; int numok=0; const int OK=1500000; // 1.4 ms for (int i=0; i<NUM_DELAYS; i++) { if (abs(gc->delays[i])<OK) { mean+=gc->delays[i]; var+=gc->delays[i]*gc->delays[i]; numok++; } } gc->calibok=numok; if (numok>NUM_DELAYS/2) { gc->calibrated=true; mean/=numok; var/=numok; gc->calibmean=mean; if (gc->calibmean>0) { set->delay[0]+=gc->calibmean; } else if (gc->calibmean<0) { set->delay[1]+= (-gc->calibmean); } int mindel=std::min(set->delay[0],set->delay[1]); set->delay[0]-=mindel; set->delay[1]-=mindel; gc->calibrms = int(sqrt(var-mean*mean)); gc->calibmean_ms= gc->calibmean*1.0/(set->sample_rate)*1e3; gc->calibrms_ms= gc->calibrms*1.0/(set->sample_rate)*1e3; } else gc->calibrated=false; } void startCalib(GPUCARD *gc) { gc->calibrating=true; gc->ndelays=0; } //Process one data packet from the digitizer //Input: // gc: graphics card // buf: data from digitizer // pbuf: old data from digitizer (to implement delay) // wr: writer to write out power spectra and outliers to files // set: settings int gpuProcessBuffer(GPUCARD *gc, int8_t **buf_one, int8_t **buf_two, WRITER *wr, TWRITER *twr, SETTINGS *set) { //streamed version //Check if other streams are finished and proccess the finished ones in order (i.e. 
print output to file) CHK(cudaGetLastError()); int nCards=(set->card_mask==3) + 1; int8_t* buf[2]; int8_t* pbuf[2]; buf[0]=buf_one[set->bufdelay[0]]; pbuf[0]=buf_one[set->bufdelay[0]+1]; buf[1]=buf_two[set->bufdelay[1]]; pbuf[1]=buf_two[set->bufdelay[1]+1]; while(gc->active_streams > 0){ // printf ("S:%i ", cudaEventQuery(gc->eStart[gc->fstream])==cudaSuccess); // printf ("%i ", cudaEventQuery(gc->eDoneCopy[gc->fstream])==cudaSuccess); // printf ("%i ", cudaEventQuery(gc->eDoneFloatize[gc->fstream])==cudaSuccess); // printf ("%i ", cudaEventQuery(gc->eDoneFFT[gc->fstream])==cudaSuccess); // printf ("%i [%i]\n ", cudaEventQuery(gc->eDonePost[gc->fstream])==cudaSuccess, gc->fstream); if(cudaEventQuery(gc->eDoneStream[gc->fstream])==cudaSuccess){ int fstream=gc->fstream; if (!gc->calib[fstream]) { cudaMemcpy(gc->outps,gc->coutps[fstream], gc->tot_pssize*sizeof(float), cudaMemcpyDeviceToHost); } else { cudaMemcpy(&gc->last_measured_delay,&(gc->cmeasured_delay[fstream]), sizeof(int), cudaMemcpyDeviceToHost); gc->delays[gc->ndelays]=gc->last_measured_delay; gc->ndelays++; if (gc->ndelays==NUM_DELAYS) processCalibration(gc,set); } if (gc->active_streams==1) { printTiming(gc,fstream,twr); printLiveStat(set,gc,buf,twr); writerAccumulatePS(wr,gc->outps, twr,set); } else writerAccumulatePS(wr,gc->outps, NULL,set); // accumulate, but without talking gc->fstream = (++gc->fstream)%(gc->nstreams); gc->active_streams--; } else break; } if(gc->active_streams == gc->nstreams){ //if no empty streams return false; } gc->active_streams++; int csi = gc->bstream = (++gc->bstream)%(gc->nstreams); //add new stream cudaStream_t cs= gc->streams[gc->bstream]; cudaEventRecord(gc->eStart[csi], cs); //memory copy for(int i=0; i<nCards; i++) { if (set->delay[i]==0) cudaMemcpyAsync(gc->cbuf[csi][i], buf[i], gc->bufsize , cudaMemcpyHostToDevice,cs); else { if (set->delay[i]>gc->fftsize) {printf ("Pathological delay.\n"); exit(1);} unsigned ofs=set->delay[i]*gc->nchan; cudaMemcpyAsync(&gc->cbuf[csi][i][ofs], buf[i], gc->bufsize-ofs , cudaMemcpyHostToDevice,cs); if (pbuf[i]) cudaMemcpyAsync(gc->cbuf[csi][i], &pbuf[i][gc->bufsize-ofs], ofs , cudaMemcpyHostToDevice,cs); } } //floatize cudaEventRecord(gc->eDoneCopy[csi], cs); int threadsPerBlock = gc->threads; int blocksPerGrid = gc->bufsize / threadsPerBlock/FLOATIZE_X; if (gc->nchan==1) floatize_1chan<<<blocksPerGrid, threadsPerBlock, 0, cs>>>(gc->cbuf[csi][0],gc->cfbuf[csi]); else for(int i=0; i<nCards; i++) floatize_2chan<<<blocksPerGrid, threadsPerBlock, 0, cs>>> (gc->cbuf[csi][i],&(gc->cfbuf[csi][gc->fftsize*2*i]),&(gc->cfbuf[csi][gc->fftsize*(2*i+1)])); cudaEventRecord(gc->eDoneFloatize[csi], cs); //perform fft int status = cufftSetStream(gc->plan, cs); if(status !=CUFFT_SUCCESS) { printf("CUFFSTETSTREAM failed\n"); exit(1); } for(int i=0; i<nCards;i++){ status=cufftExecR2C(gc->plan, &(gc->cfbuf[csi][gc->bufsize*i]), &(gc->cfft[csi][2*i*gc->transform_size])); if (status!=CUFFT_SUCCESS) { printf("CUFFT FAILED\n"); exit(1); } } cudaEventRecord(gc->eDoneFFT[csi], cs); if (!set->measure_delay & !gc->calibrating) { gc->calib[csi]=false; //compute spectra if (gc->nchan==1) { int psofs=0; for (int i=0; i<gc->ncuts; i++) { ps_reduce<<<gc->pssize[i], 1024, 0, cs>>> (gc->cfft[csi], &(gc->coutps[csi][psofs]), gc->ndxofs[i], gc->fftavg[i]); psofs+=gc->pssize[i]; } } else if(gc->nchan==2){ // note we need to take into account the tricky N/2+1 FFT size while we do N/2 binning // pssize+2 = transformsize+1 int psofs=0; for (int i=0; i<gc->ncuts; i++) { for(int j=0; j<nCards; j++){ 
ps_reduce<<<gc->pssize1[i], 1024, 0, cs>>> (&gc->cfft[csi][2*j*gc->transform_size], &(gc->coutps[csi][psofs]), gc->ndxofs[i], gc->fftavg[i]); psofs+=gc->pssize1[i]; ps_reduce<<<gc->pssize1[i], 1024, 0, cs>>> (&gc->cfft[csi][(2*j+1)*gc->transform_size], &(gc->coutps[csi][psofs]), gc->ndxofs[i], gc->fftavg[i]); psofs+=gc->pssize1[i]; } //cross spectra for(int j = 0; j<nCards*2; j++) for(int k = j+1; k < nCards*2 ; k++){ //NEED TO CHECK THAT PARAMETERS ARE ALL CORRECT FOR TWO CARDS AND FOR ONE CARD.... ps_X_reduce<<<gc->pssize1[i], 1024, 0, cs>>> (&gc->cfft[csi][j*gc->transform_size], &gc->cfft[csi][k*gc->transform_size], &(gc->coutps[csi][psofs]), &(gc->coutps[csi][psofs+gc->pssize1[i]]), gc->ndxofs[i], gc->fftavg[i]); psofs+=2*gc->pssize1[i]; } } } else{ printf("Can only handle 1 or 2 channels\n"); exit(1); } cudaEventRecord(gc->eDonePost[csi], cs); cudaEventRecord(gc->eDoneCalib[csi], cs); } if (set->measure_delay || gc->calibrating) { cudaEventRecord(gc->eDonePost[csi], cs); gc->calib[csi]=true; int blocksPerGrid = gc->transform_size / threadsPerBlock; C12_Cross <<<blocksPerGrid, threadsPerBlock, 0, cs >>> (&(gc->cfft[csi][0]), &(gc->cfft[csi][gc->transform_size]), &(gc->cfft[csi][2*gc->transform_size]), &(gc->cfft[csi][3*gc->transform_size])); int status = cufftSetStream(gc->iplan, cs); if(status !=CUFFT_SUCCESS) { printf("CUFFTSETSTREAM failed\n"); exit(1); } status=cufftExecC2R(gc->iplan, &(gc->cfft[csi][0]), &(gc->cfbuf[csi][0]) ); if (status!=CUFFT_SUCCESS) { printf("CUFFT FAILED\n"); exit(1); } blocksPerGrid = threadsPerBlock; int mult=gc->fftsize/blocksPerGrid/threadsPerBlock; C12_FindMax_Part1<<<blocksPerGrid,threadsPerBlock,0,cs>>> (&(gc->cfbuf[csi][0]), mult,&(gc->cfbuf[csi][gc->fftsize]),(int*)gc->cbuf[csi][0]); C12_FindMax_Part2<<<1,1,0,cs>>>(blocksPerGrid, gc->fftsize, &(gc->cfbuf[csi][gc->fftsize]),(int*)gc->cbuf[csi][0], &gc->cmeasured_delay[csi]); cudaEventRecord(gc->eDoneCalib[csi], cs); } // this is outside so that event gets processed. cudaEventRecord(gc->eDoneStream[csi], cs); return true; }
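gpuProcessBuffer above overlaps host-to-device copies, FFTs and reductions by cycling through a small ring of CUDA streams, and only harvests a stream once cudaEventQuery on its final event reports completion. The fragment below is a stripped-down sketch of just that ring pattern; the names, the fixed NSTREAMS and the placeholder per-buffer workload are assumptions for illustration, not a drop-in replacement for the code above.

#include <cuda_runtime.h>

#define NSTREAMS 4   // stands in for set->cuda_streams; fixed here for the sketch

static cudaStream_t streams[NSTREAMS];
static cudaEvent_t  done[NSTREAMS];
static int first = 0, last = -1, active = 0;

void ring_init(void) {
  for (int i = 0; i < NSTREAMS; i++) {
    cudaStreamCreate(&streams[i]);
    cudaEventCreate(&done[i]);
  }
}

// Returns false when every stream is still busy, like the early return in gpuProcessBuffer.
bool ring_submit(const char *hostbuf, char *devbuf, size_t n) {
  // Harvest finished streams in submission order without blocking the host.
  while (active > 0 && cudaEventQuery(done[first]) == cudaSuccess) {
    // results of streams[first] are safe to copy back / accumulate here
    first = (first + 1) % NSTREAMS;
    active--;
  }
  if (active == NSTREAMS) return false;   // ring full: caller retries this buffer later
  active++;
  last = (last + 1) % NSTREAMS;
  cudaMemcpyAsync(devbuf, hostbuf, n, cudaMemcpyHostToDevice, streams[last]);
  // ... kernels for this buffer would be enqueued on streams[last] here ...
  cudaEventRecord(done[last], streams[last]);
  return true;
}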
fdc416cb5f111d0c246afca9f35d6d2c8a05afa0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/

//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
__global__ void dm_reduce(double *dm, unsigned int bit, double *dm0, unsigned int state, unsigned int no_qubits) {
    const int addr = blockIdx.x*blockDim.x + threadIdx.x;

    if(addr >= (1 << (2*no_qubits))) return;

    const int low_mask = (1 << (2*bit))-1;   //0000011111
    const int high_mask = (~low_mask) << 2;  //1110000000

    if(((addr >> (2*bit)) & 0x3) == state*0x3) {
        dm0[ (addr & low_mask) | ((addr & high_mask) >> 2) ] = dm[addr];
    }
}
fdc416cb5f111d0c246afca9f35d6d2c8a05afa0.cu
#include "includes.h" /*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/ /*(c) 2016 Brian Tarasinski*/ /*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/ //kernel to transform to pauli basis (up, x, y, down) //to be run on a complete complex density matrix, once for each bit //this operation is its own inverse (can also be used in opposite direction) __global__ void dm_reduce(double *dm, unsigned int bit, double *dm0, unsigned int state, unsigned int no_qubits) { const int addr = blockIdx.x*blockDim.x + threadIdx.x; if(addr >= (1<< (2*no_qubits))) return; const int low_mask = (1 << (2*bit))-1; //0000011111 const int high_mask = (~low_mask) << 2; //1110000000 if(((addr >> (2*bit)) & 0x3) == state*0x3) { dm0[ (addr & low_mask) | ((addr & high_mask) >> 2) ] = dm[addr]; } }
4ab35b00fdf48ef344436c9030bae06a4747813f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file 3D Optical flow using NVIDIA CUDA * @author Institute for Photon Science and Synchrotron Radiation, Karlsruhe Institute of Technology * * @date 2015-2018 * @version 0.5.0 * * * @section LICENSE * * This program is copyrighted by the author and Institute for Photon Science and Synchrotron Radiation, * Karlsruhe Institute of Technology, Karlsruhe, Germany; * * The current implemetation contains the following licenses: * * 1. TinyXml package: * Original code (2.0 and earlier )copyright (c) 2000-2006 Lee Thomason (www.grinninglizard.com). <www.sourceforge.net/projects/tinyxml>. * See src/utils/tinyxml.h for details. * */ /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <assert.h> //#include <helper_cuda.h> #include <device_launch_parameters.h> #include <iostream> #include <cstdio> #define __HIPCC__ #include <hip/device_functions.h> #include <math_functions.h> #include "src/data_types/data_structs.h" using namespace std; #define MAX_KERNEL_LENGTH 51 __constant__ DataSize4 container_size; //////////////////////////////////////////////////////////////////////////////// // Convolution kernel storage //////////////////////////////////////////////////////////////////////////////// __constant__ float c_Kernel[MAX_KERNEL_LENGTH]; //extern "C" void setConvolutionKernel(float *h_Kernel, unsigned int kernel_length) //{ // hipMemcpyToSymbol(c_Kernel, h_Kernel, kernel_length * sizeof(float)); //} //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_BLOCKDIM_Z 4 #define ROWS_RESULT_STEPS 4 #define ROWS_HALO_STEPS 1 extern "C" __global__ void convolutionRowsKernel( float *d_Dst, const float *d_Src, int imageW, int imageH, int imageD, int pitch, int kernel_radius ) { /* dim3 global_id(blockDim.x * blockIdx.x * ROWS_RESULT_STEPS + threadIdx.x, blockDim.y * blockIdx.y + threadIdx.y); if (global_id.x > imageW) return;*/ // Handle to thread block group __shared__ float s_Data[ROWS_BLOCKDIM_Z][ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * ROWS_BLOCKDIM_Z + threadIdx.z; d_Src += (baseZ*imageH + baseY) * pitch + baseX; d_Dst += (baseZ*imageH + baseY) * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { // Original NVIDIA version //s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; // Modified version to allow for arbitrary image width size_t global_x = blockIdx.x * ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X + ROWS_BLOCKDIM_X*(i-ROWS_HALO_STEPS) + threadIdx.x; s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (global_x < imageW) ? 
d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; // Testing /* size_t global_x = blockIdx.x * ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X + ROWS_BLOCKDIM_X*(i-5) + threadIdx.x; s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (global_x < imageW) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; if (global_x > imageW) std::printf("Bx:%u tx:%u iter:%d global.x: %lu data:%f \n", blockIdx.x, threadIdx.x, i, static_cast<unsigned long>(global_x), s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X]); */ } //Compute and store results // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // // -------------------------------------------------------- // __syncthreads(); // -------------------------------------------------------- // // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { size_t global_x = blockIdx.x * ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X + ROWS_BLOCKDIM_X*(i-ROWS_HALO_STEPS) + threadIdx.x; if (global_x >= imageW) return; float sum = 0; #pragma unroll for (int j = -kernel_radius; j <= kernel_radius; j++) { sum += c_Kernel[kernel_radius - j] * s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 4 #define COLUMNS_BLOCKDIM_Y 16 #define COLUMNS_BLOCKDIM_Z 4 #define COLUMNS_RESULT_STEPS 4 #define COLUMNS_HALO_STEPS 1 extern "C" __global__ void convolutionColumnsKernel( float *d_Dst, const float *d_Src, int imageW, int imageH, int imageD, int pitch, int kernel_radius ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y][COLUMNS_BLOCKDIM_Z]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * COLUMNS_BLOCKDIM_Z + threadIdx.z; d_Src += (baseZ*imageH + baseY) * pitch + baseX; d_Dst += (baseZ*imageH + baseY) * pitch + baseX; //Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { // Original NVIDIA version //s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; // Modified version to allow for arbitrary image height size_t global_y = blockIdx.y * COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y + COLUMNS_BLOCKDIM_Y*(i-COLUMNS_HALO_STEPS) + threadIdx.y; s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y][threadIdx.z]= (global_y < imageH) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y][threadIdx.z] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y][threadIdx.z] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // // -------------------------------------------------------- // __syncthreads(); // -------------------------------------------------------- // // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { size_t global_y = blockIdx.y * COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y + COLUMNS_BLOCKDIM_Y*(i-COLUMNS_HALO_STEPS) + threadIdx.y; //std::printf("By:%u ty:%u iter:%d global.y: %lu\n", blockIdx.y, threadIdx.y, i, static_cast<unsigned long>(global_y)); if (global_y >= imageH) return; float sum = 0; #pragma unroll for (int j = -kernel_radius; j <= kernel_radius; j++) { sum += c_Kernel[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j][threadIdx.z]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; //std::printf("out: %d \n", i * COLUMNS_BLOCKDIM_Y * pitch); } } //////////////////////////////////////////////////////////////////////////////// // Slices convolution filter //////////////////////////////////////////////////////////////////////////////// #define SLICES_BLOCKDIM_X 4 #define SLICES_BLOCKDIM_Y 4 #define SLICES_BLOCKDIM_Z 16 #define SLICES_RESULT_STEPS 4 #define SLICES_HALO_STEPS 1 extern "C" __global__ void convolutionSlicesKernel( float *d_Dst, const float *d_Src, int imageW, int imageH, int imageD, int pitch, int kernel_radius ) { __shared__ float s_Data[SLICES_BLOCKDIM_X][SLICES_BLOCKDIM_Y][(SLICES_RESULT_STEPS + 2 * SLICES_HALO_STEPS) * SLICES_BLOCKDIM_Z]; //Offset to the upper halo edge const int baseX = blockIdx.x * SLICES_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * SLICES_BLOCKDIM_Y + threadIdx.y; const int baseZ = (blockIdx.z * SLICES_RESULT_STEPS - SLICES_HALO_STEPS) * SLICES_BLOCKDIM_Z + threadIdx.z; d_Src += (baseZ*imageH + baseY) * pitch + baseX; d_Dst += (baseZ*imageH + baseY) * pitch + baseX; //Main data #pragma unroll for (int i = SLICES_HALO_STEPS; i < SLICES_HALO_STEPS + SLICES_RESULT_STEPS; i++) { // Original NVIDIA version //s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; // Modified version to allow for arbitrary image height size_t global_z = blockIdx.z * SLICES_RESULT_STEPS * SLICES_BLOCKDIM_Z + SLICES_BLOCKDIM_Z*(i-SLICES_HALO_STEPS) + threadIdx.z; s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * SLICES_BLOCKDIM_Z]= (global_z < imageD) ? d_Src[i * SLICES_BLOCKDIM_Z * pitch*imageH] : 0; } //Front halo #pragma unroll for (int i = 0; i < SLICES_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * SLICES_BLOCKDIM_Z] = (baseZ >= -i * SLICES_BLOCKDIM_Z) ? d_Src[i * SLICES_BLOCKDIM_Z * pitch*imageH] : 0; } //Back halo #pragma unroll for (int i = SLICES_HALO_STEPS + SLICES_RESULT_STEPS; i < SLICES_HALO_STEPS + SLICES_RESULT_STEPS + SLICES_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * SLICES_BLOCKDIM_Z] = (imageD - baseZ > i * SLICES_BLOCKDIM_Z) ? 
d_Src[i * SLICES_BLOCKDIM_Z * pitch*imageH] : 0; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // // -------------------------------------------------------- // __syncthreads(); // -------------------------------------------------------- // // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // #pragma unroll for (int i = SLICES_HALO_STEPS; i < SLICES_HALO_STEPS + SLICES_RESULT_STEPS; i++) { size_t global_z = blockIdx.z * SLICES_RESULT_STEPS * SLICES_BLOCKDIM_Z + SLICES_BLOCKDIM_Z*(i-SLICES_HALO_STEPS) + threadIdx.z; //std::printf("By:%u ty:%u iter:%d global.y: %lu\n", blockIdx.y, threadIdx.y, i, static_cast<unsigned long>(global_y)); if (global_z >= imageD) return; float sum = 0; #pragma unroll for (int j = -kernel_radius; j <= kernel_radius; j++) { sum += c_Kernel[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * SLICES_BLOCKDIM_Z + j]; } d_Dst[i * SLICES_BLOCKDIM_Z * pitch*imageH] = sum; //std::printf("out: %d \n", i * COLUMNS_BLOCKDIM_Y * pitch); } }
4ab35b00fdf48ef344436c9030bae06a4747813f.cu
/** * @file 3D Optical flow using NVIDIA CUDA * @author Institute for Photon Science and Synchrotron Radiation, Karlsruhe Institute of Technology * * @date 2015-2018 * @version 0.5.0 * * * @section LICENSE * * This program is copyrighted by the author and Institute for Photon Science and Synchrotron Radiation, * Karlsruhe Institute of Technology, Karlsruhe, Germany; * * The current implemetation contains the following licenses: * * 1. TinyXml package: * Original code (2.0 and earlier )copyright (c) 2000-2006 Lee Thomason (www.grinninglizard.com). <www.sourceforge.net/projects/tinyxml>. * See src/utils/tinyxml.h for details. * */ /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <assert.h> //#include <helper_cuda.h> #include <device_launch_parameters.h> #include <iostream> #include <cstdio> #define __CUDACC__ #include <device_functions.h> #include <math_functions.h> #include "src/data_types/data_structs.h" using namespace std; #define MAX_KERNEL_LENGTH 51 __constant__ DataSize4 container_size; //////////////////////////////////////////////////////////////////////////////// // Convolution kernel storage //////////////////////////////////////////////////////////////////////////////// __constant__ float c_Kernel[MAX_KERNEL_LENGTH]; //extern "C" void setConvolutionKernel(float *h_Kernel, unsigned int kernel_length) //{ // cudaMemcpyToSymbol(c_Kernel, h_Kernel, kernel_length * sizeof(float)); //} //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_BLOCKDIM_Z 4 #define ROWS_RESULT_STEPS 4 #define ROWS_HALO_STEPS 1 extern "C" __global__ void convolutionRowsKernel( float *d_Dst, const float *d_Src, int imageW, int imageH, int imageD, int pitch, int kernel_radius ) { /* dim3 global_id(blockDim.x * blockIdx.x * ROWS_RESULT_STEPS + threadIdx.x, blockDim.y * blockIdx.y + threadIdx.y); if (global_id.x > imageW) return;*/ // Handle to thread block group __shared__ float s_Data[ROWS_BLOCKDIM_Z][ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * ROWS_BLOCKDIM_Z + threadIdx.z; d_Src += (baseZ*imageH + baseY) * pitch + baseX; d_Dst += (baseZ*imageH + baseY) * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { // Original NVIDIA version //s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; // Modified version to allow for arbitrary image width size_t global_x = blockIdx.x * ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X + ROWS_BLOCKDIM_X*(i-ROWS_HALO_STEPS) + threadIdx.x; s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (global_x < imageW) ? 
d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; // Testing /* size_t global_x = blockIdx.x * ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X + ROWS_BLOCKDIM_X*(i-5) + threadIdx.x; s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (global_x < imageW) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; if (global_x > imageW) std::printf("Bx:%u tx:%u iter:%d global.x: %lu data:%f \n", blockIdx.x, threadIdx.x, i, static_cast<unsigned long>(global_x), s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X]); */ } //Compute and store results // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // // -------------------------------------------------------- // __syncthreads(); // -------------------------------------------------------- // // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { size_t global_x = blockIdx.x * ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X + ROWS_BLOCKDIM_X*(i-ROWS_HALO_STEPS) + threadIdx.x; if (global_x >= imageW) return; float sum = 0; #pragma unroll for (int j = -kernel_radius; j <= kernel_radius; j++) { sum += c_Kernel[kernel_radius - j] * s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 4 #define COLUMNS_BLOCKDIM_Y 16 #define COLUMNS_BLOCKDIM_Z 4 #define COLUMNS_RESULT_STEPS 4 #define COLUMNS_HALO_STEPS 1 extern "C" __global__ void convolutionColumnsKernel( float *d_Dst, const float *d_Src, int imageW, int imageH, int imageD, int pitch, int kernel_radius ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y][COLUMNS_BLOCKDIM_Z]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * COLUMNS_BLOCKDIM_Z + threadIdx.z; d_Src += (baseZ*imageH + baseY) * pitch + baseX; d_Dst += (baseZ*imageH + baseY) * pitch + baseX; //Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { // Original NVIDIA version //s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; // Modified version to allow for arbitrary image height size_t global_y = blockIdx.y * COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y + COLUMNS_BLOCKDIM_Y*(i-COLUMNS_HALO_STEPS) + threadIdx.y; s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y][threadIdx.z]= (global_y < imageH) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y][threadIdx.z] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y][threadIdx.z] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // // -------------------------------------------------------- // __syncthreads(); // -------------------------------------------------------- // // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { size_t global_y = blockIdx.y * COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y + COLUMNS_BLOCKDIM_Y*(i-COLUMNS_HALO_STEPS) + threadIdx.y; //std::printf("By:%u ty:%u iter:%d global.y: %lu\n", blockIdx.y, threadIdx.y, i, static_cast<unsigned long>(global_y)); if (global_y >= imageH) return; float sum = 0; #pragma unroll for (int j = -kernel_radius; j <= kernel_radius; j++) { sum += c_Kernel[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j][threadIdx.z]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; //std::printf("out: %d \n", i * COLUMNS_BLOCKDIM_Y * pitch); } } //////////////////////////////////////////////////////////////////////////////// // Slices convolution filter //////////////////////////////////////////////////////////////////////////////// #define SLICES_BLOCKDIM_X 4 #define SLICES_BLOCKDIM_Y 4 #define SLICES_BLOCKDIM_Z 16 #define SLICES_RESULT_STEPS 4 #define SLICES_HALO_STEPS 1 extern "C" __global__ void convolutionSlicesKernel( float *d_Dst, const float *d_Src, int imageW, int imageH, int imageD, int pitch, int kernel_radius ) { __shared__ float s_Data[SLICES_BLOCKDIM_X][SLICES_BLOCKDIM_Y][(SLICES_RESULT_STEPS + 2 * SLICES_HALO_STEPS) * SLICES_BLOCKDIM_Z]; //Offset to the upper halo edge const int baseX = blockIdx.x * SLICES_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * SLICES_BLOCKDIM_Y + threadIdx.y; const int baseZ = (blockIdx.z * SLICES_RESULT_STEPS - SLICES_HALO_STEPS) * SLICES_BLOCKDIM_Z + threadIdx.z; d_Src += (baseZ*imageH + baseY) * pitch + baseX; d_Dst += (baseZ*imageH + baseY) * pitch + baseX; //Main data #pragma unroll for (int i = SLICES_HALO_STEPS; i < SLICES_HALO_STEPS + SLICES_RESULT_STEPS; i++) { // Original NVIDIA version //s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; // Modified version to allow for arbitrary image height size_t global_z = blockIdx.z * SLICES_RESULT_STEPS * SLICES_BLOCKDIM_Z + SLICES_BLOCKDIM_Z*(i-SLICES_HALO_STEPS) + threadIdx.z; s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * SLICES_BLOCKDIM_Z]= (global_z < imageD) ? d_Src[i * SLICES_BLOCKDIM_Z * pitch*imageH] : 0; } //Front halo #pragma unroll for (int i = 0; i < SLICES_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * SLICES_BLOCKDIM_Z] = (baseZ >= -i * SLICES_BLOCKDIM_Z) ? d_Src[i * SLICES_BLOCKDIM_Z * pitch*imageH] : 0; } //Back halo #pragma unroll for (int i = SLICES_HALO_STEPS + SLICES_RESULT_STEPS; i < SLICES_HALO_STEPS + SLICES_RESULT_STEPS + SLICES_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * SLICES_BLOCKDIM_Z] = (imageD - baseZ > i * SLICES_BLOCKDIM_Z) ? 
d_Src[i * SLICES_BLOCKDIM_Z * pitch*imageH] : 0; } // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // // -------------------------------------------------------- // __syncthreads(); // -------------------------------------------------------- // // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // #pragma unroll for (int i = SLICES_HALO_STEPS; i < SLICES_HALO_STEPS + SLICES_RESULT_STEPS; i++) { size_t global_z = blockIdx.z * SLICES_RESULT_STEPS * SLICES_BLOCKDIM_Z + SLICES_BLOCKDIM_Z*(i-SLICES_HALO_STEPS) + threadIdx.z; //std::printf("By:%u ty:%u iter:%d global.y: %lu\n", blockIdx.y, threadIdx.y, i, static_cast<unsigned long>(global_y)); if (global_z >= imageD) return; float sum = 0; #pragma unroll for (int j = -kernel_radius; j <= kernel_radius; j++) { sum += c_Kernel[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * SLICES_BLOCKDIM_Z + j]; } d_Dst[i * SLICES_BLOCKDIM_Z * pitch*imageH] = sum; //std::printf("out: %d \n", i * COLUMNS_BLOCKDIM_Y * pitch); } }
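The host wrapper that fills c_Kernel is commented out in the file above, so the setup below is a hedged sketch rather than the project's actual driver: it uploads a normalized Gaussian into the constant-memory array with cudaMemcpyToSymbol and launches the row pass with the block geometry the kernel expects. The image dimensions, kernel radius, and wrapper name are illustrative; referencing c_Kernel from a second translation unit requires relocatable device code, which appending the sketch to the file above avoids. Only the width has an in-kernel bounds check, so height and depth are kept multiples of the block dimensions.

#include <cuda_runtime.h>
#include <vector>
#include <cmath>

extern __constant__ float c_Kernel[51];
extern "C" __global__ void convolutionRowsKernel(float *d_Dst, const float *d_Src,
                                                 int imageW, int imageH, int imageD,
                                                 int pitch, int kernel_radius);

static void setConvolutionKernel(const float *h_kernel, unsigned int length) {
    cudaMemcpyToSymbol(c_Kernel, h_kernel, length * sizeof(float));
}

int main() {
    const int W = 250, H = 200, D = 100, radius = 5;   // W deliberately not a block multiple

    // Normalized Gaussian taps, length 2*radius+1 (must stay <= MAX_KERNEL_LENGTH).
    std::vector<float> taps(2 * radius + 1);
    float sum = 0.f;
    for (int i = -radius; i <= radius; ++i) {
        taps[i + radius] = std::exp(-0.5f * i * i);
        sum += taps[i + radius];
    }
    for (float &t : taps) t /= sum;
    setConvolutionKernel(taps.data(), (unsigned int)taps.size());

    float *d_src, *d_dst;
    cudaMalloc(&d_src, (size_t)W * H * D * sizeof(float));
    cudaMalloc(&d_dst, (size_t)W * H * D * sizeof(float));
    cudaMemset(d_src, 0, (size_t)W * H * D * sizeof(float));

    // These mirror ROWS_BLOCKDIM_X/Y/Z and ROWS_RESULT_STEPS defined above.
    constexpr int BDX = 16, BDY = 4, BDZ = 4, RESULT_STEPS = 4;
    dim3 threads(BDX, BDY, BDZ);
    dim3 blocks((W + RESULT_STEPS * BDX - 1) / (RESULT_STEPS * BDX),
                (H + BDY - 1) / BDY,
                (D + BDZ - 1) / BDZ);
    convolutionRowsKernel<<<blocks, threads>>>(d_dst, d_src, W, H, D, W /*pitch in elements*/, radius);
    cudaDeviceSynchronize();

    cudaFree(d_src);
    cudaFree(d_dst);
    return 0;
}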
e2baca6a839454a95c06bf6bbab51799df02f7dd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void cube(float * d_out, float * d_in) {
    int idx = threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f*f*f;
}

int main(int argc, char ** argv) {
    const int ARRAY_SIZE = 96;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // generate the input array on the host
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    // declare GPU memory pointers
    float * d_in;
    float * d_out;

    // allocate GPU memory
    hipMalloc((void**) &d_in, ARRAY_BYTES);
    hipMalloc((void**) &d_out, ARRAY_BYTES);

    // transfer the array to the GPU
    hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);

    // launch the kernel
    hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);

    // copy back the result array to the CPU
    hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);

    // print out the resulting array
    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    hipFree(d_in);
    hipFree(d_out);

    return 0;
}
e2baca6a839454a95c06bf6bbab51799df02f7dd.cu
#include <stdio.h>

__global__ void cube(float * d_out, float * d_in) {
    int idx = threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f*f*f;
}

int main(int argc, char ** argv) {
    const int ARRAY_SIZE = 96;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // generate the input array on the host
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    // declare GPU memory pointers
    float * d_in;
    float * d_out;

    // allocate GPU memory
    cudaMalloc((void**) &d_in, ARRAY_BYTES);
    cudaMalloc((void**) &d_out, ARRAY_BYTES);

    // transfer the array to the GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // launch the kernel
    cube<<<1, ARRAY_SIZE>>>(d_out, d_in);

    // copy back the result array to the CPU
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // print out the resulting array
    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    cudaFree(d_in);
    cudaFree(d_out);

    return 0;
}
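Neither version of this sample checks return codes. The variant below is a hedged sketch of the same program with a minimal error-checking macro wrapped around every runtime call and a cudaGetLastError() after the launch; the CHECK macro name is illustrative and not part of the original.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK(call)                                                        \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,             \
                    cudaGetErrorString(err_));                             \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

__global__ void cube(float* d_out, float* d_in) {
    int idx = threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f * f * f;
}

int main() {
    const int ARRAY_SIZE  = 96;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    float h_in[ARRAY_SIZE], h_out[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) h_in[i] = float(i);

    float *d_in, *d_out;
    CHECK(cudaMalloc(&d_in, ARRAY_BYTES));
    CHECK(cudaMalloc(&d_out, ARRAY_BYTES));
    CHECK(cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice));

    cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
    CHECK(cudaGetLastError());   // catches launch-configuration errors
    CHECK(cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost));

    for (int i = 0; i < ARRAY_SIZE; i++)
        printf("%f%c", h_out[i], ((i % 4) != 3) ? '\t' : '\n');

    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_out));
    return 0;
}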
49abe97ff3c2d6953b73bacb987ce13d70032b2e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <limits> #include <rmm/thrust_rmm_allocator.h> #include <graph.hpp> #include <thrust/for_each.h> #include <thrust/random.h> #include <utilities/error.hpp> #include <raft/lap/lap.cuh> //#define TIMING #ifdef TIMING #include <utilities/high_res_timer.hpp> #endif namespace cugraph { namespace detail { template <typename index_t, typename weight_t> weight_t hungarian(raft::handle_t const &handle, index_t num_rows, index_t num_cols, weight_t const *d_original_cost, index_t *d_assignment, hipStream_t stream) { // // TODO: Can Date/Nagi implementation in raft handle rectangular matrices? // CUGRAPH_EXPECTS(num_rows == num_cols, "Current implementation only supports square matrices"); rmm::device_vector<index_t> col_assignments_v(num_rows); // Create an instance of LinearAssignmentProblem using problem size, number of subproblems raft::lap::LinearAssignmentProblem<index_t, weight_t> lpx(handle, num_rows, 1); // Solve LAP(s) for given cost matrix lpx.solve(d_original_cost, d_assignment, col_assignments_v.data().get()); return lpx.getPrimalObjectiveValue(0); } template <typename vertex_t, typename edge_t, typename weight_t> weight_t hungarian_sparse(raft::handle_t const &handle, GraphCOOView<vertex_t, edge_t, weight_t> const &graph, vertex_t num_workers, vertex_t const *workers, vertex_t *assignment, hipStream_t stream) { CUGRAPH_EXPECTS(assignment != nullptr, "Invalid API parameter: assignment pointer is NULL"); CUGRAPH_EXPECTS(graph.edge_data != nullptr, "Invalid API parameter: graph must have edge data (costs)"); #ifdef TIMING HighResTimer hr_timer; hr_timer.start("prep"); #endif // // Translate sparse matrix into dense bipartite matrix. // rows are the workers, columns are the tasks // vertex_t num_rows = num_workers; vertex_t num_cols = graph.number_of_vertices - num_rows; vertex_t matrix_dimension = ::max(num_rows, num_cols); rmm::device_vector<weight_t> cost_v(matrix_dimension * matrix_dimension); rmm::device_vector<vertex_t> tasks_v(num_cols); rmm::device_vector<vertex_t> temp_tasks_v(graph.number_of_vertices); rmm::device_vector<vertex_t> temp_workers_v(graph.number_of_vertices); weight_t *d_cost = cost_v.data().get(); vertex_t *d_tasks = tasks_v.data().get(); vertex_t *d_temp_tasks = temp_tasks_v.data().get(); vertex_t *d_temp_workers = temp_workers_v.data().get(); vertex_t *d_src_indices = graph.src_indices; vertex_t *d_dst_indices = graph.dst_indices; weight_t *d_edge_data = graph.edge_data; // // Renumber vertices internally. 
Workers will become // rows, tasks will become columns // thrust::sequence(rmm::exec_policy(stream)->on(stream), temp_tasks_v.begin(), temp_tasks_v.end()); thrust::for_each(rmm::exec_policy(stream)->on(stream), workers, workers + num_workers, [d_temp_tasks] __device__(vertex_t v) { d_temp_tasks[v] = -1; }); auto temp_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream), temp_tasks_v.begin(), temp_tasks_v.end(), d_tasks, [] __device__(vertex_t v) { return v >= 0; }); vertex_t size = thrust::distance(d_tasks, temp_end); tasks_v.resize(size); // // Now we'll assign costs into the dense array // thrust::fill(rmm::exec_policy(stream)->on(stream), temp_workers_v.begin(), temp_workers_v.end(), vertex_t{-1}); thrust::fill( rmm::exec_policy(stream)->on(stream), temp_tasks_v.begin(), temp_tasks_v.end(), vertex_t{-1}); thrust::fill(rmm::exec_policy(stream)->on(stream), cost_v.begin(), cost_v.end(), weight_t{0}); thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_rows), [d_temp_workers, workers] __device__(vertex_t v) { d_temp_workers[workers[v]] = v; }); thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_cols), [d_temp_tasks, d_tasks] __device__(vertex_t v) { d_temp_tasks[d_tasks[v]] = v; }); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [d_temp_workers, d_temp_tasks, d_cost, matrix_dimension, d_src_indices, d_dst_indices, d_edge_data] __device__(edge_t loc) { vertex_t src = d_temp_workers[d_src_indices[loc]]; vertex_t dst = d_temp_tasks[d_dst_indices[loc]]; if ((src >= 0) && (dst >= 0)) { d_cost[src * matrix_dimension + dst] = d_edge_data[loc]; } }); #ifdef TIMING hr_timer.stop(); hr_timer.start("hungarian"); #endif // // temp_assignment_v will hold the assignment in the dense // bipartite matrix numbering // rmm::device_vector<vertex_t> temp_assignment_v(matrix_dimension); vertex_t *d_temp_assignment = temp_assignment_v.data().get(); weight_t min_cost = detail::hungarian( handle, matrix_dimension, matrix_dimension, d_cost, d_temp_assignment, stream); #ifdef TIMING hr_timer.stop(); hr_timer.start("translate"); #endif // // Translate the assignment back to the original vertex ids // thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_rows), [d_tasks, d_temp_assignment, assignment] __device__(vertex_t id) { assignment[id] = d_tasks[d_temp_assignment[id]]; }); #ifdef TIMING hr_timer.stop(); hr_timer.display(std::cout); #endif return min_cost; } } // namespace detail template <typename vertex_t, typename edge_t, typename weight_t> weight_t hungarian(raft::handle_t const &handle, GraphCOOView<vertex_t, edge_t, weight_t> const &graph, vertex_t num_workers, vertex_t const *workers, vertex_t *assignment) { hipStream_t stream{0}; return detail::hungarian_sparse(handle, graph, num_workers, workers, assignment, stream); } template int32_t hungarian<int32_t, int32_t, int32_t>( raft::handle_t const &, GraphCOOView<int32_t, int32_t, int32_t> const &, int32_t, int32_t const *, int32_t *); template float hungarian<int32_t, int32_t, float>(raft::handle_t const &, GraphCOOView<int32_t, int32_t, float> const &, int32_t, int32_t const *, int32_t *); template double hungarian<int32_t, int32_t, double>(raft::handle_t const 
&, GraphCOOView<int32_t, int32_t, double> const &, int32_t, int32_t const *, int32_t *); namespace dense { template <typename index_t, typename weight_t> weight_t hungarian(raft::handle_t const &handle, weight_t const *costs, index_t num_rows, index_t num_cols, index_t *assignment) { hipStream_t stream{0}; return detail::hungarian(handle, num_rows, num_cols, costs, assignment, stream); } template int32_t hungarian<int32_t, int32_t>( raft::handle_t const &, int32_t const *, int32_t, int32_t, int32_t *); template float hungarian<int32_t, float>( raft::handle_t const &, float const *, int32_t, int32_t, int32_t *); template double hungarian<int32_t, double>( raft::handle_t const &, double const *, int32_t, int32_t, int32_t *); } // namespace dense } // namespace cugraph
49abe97ff3c2d6953b73bacb987ce13d70032b2e.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <limits> #include <rmm/thrust_rmm_allocator.h> #include <graph.hpp> #include <thrust/for_each.h> #include <thrust/random.h> #include <utilities/error.hpp> #include <raft/lap/lap.cuh> //#define TIMING #ifdef TIMING #include <utilities/high_res_timer.hpp> #endif namespace cugraph { namespace detail { template <typename index_t, typename weight_t> weight_t hungarian(raft::handle_t const &handle, index_t num_rows, index_t num_cols, weight_t const *d_original_cost, index_t *d_assignment, cudaStream_t stream) { // // TODO: Can Date/Nagi implementation in raft handle rectangular matrices? // CUGRAPH_EXPECTS(num_rows == num_cols, "Current implementation only supports square matrices"); rmm::device_vector<index_t> col_assignments_v(num_rows); // Create an instance of LinearAssignmentProblem using problem size, number of subproblems raft::lap::LinearAssignmentProblem<index_t, weight_t> lpx(handle, num_rows, 1); // Solve LAP(s) for given cost matrix lpx.solve(d_original_cost, d_assignment, col_assignments_v.data().get()); return lpx.getPrimalObjectiveValue(0); } template <typename vertex_t, typename edge_t, typename weight_t> weight_t hungarian_sparse(raft::handle_t const &handle, GraphCOOView<vertex_t, edge_t, weight_t> const &graph, vertex_t num_workers, vertex_t const *workers, vertex_t *assignment, cudaStream_t stream) { CUGRAPH_EXPECTS(assignment != nullptr, "Invalid API parameter: assignment pointer is NULL"); CUGRAPH_EXPECTS(graph.edge_data != nullptr, "Invalid API parameter: graph must have edge data (costs)"); #ifdef TIMING HighResTimer hr_timer; hr_timer.start("prep"); #endif // // Translate sparse matrix into dense bipartite matrix. // rows are the workers, columns are the tasks // vertex_t num_rows = num_workers; vertex_t num_cols = graph.number_of_vertices - num_rows; vertex_t matrix_dimension = std::max(num_rows, num_cols); rmm::device_vector<weight_t> cost_v(matrix_dimension * matrix_dimension); rmm::device_vector<vertex_t> tasks_v(num_cols); rmm::device_vector<vertex_t> temp_tasks_v(graph.number_of_vertices); rmm::device_vector<vertex_t> temp_workers_v(graph.number_of_vertices); weight_t *d_cost = cost_v.data().get(); vertex_t *d_tasks = tasks_v.data().get(); vertex_t *d_temp_tasks = temp_tasks_v.data().get(); vertex_t *d_temp_workers = temp_workers_v.data().get(); vertex_t *d_src_indices = graph.src_indices; vertex_t *d_dst_indices = graph.dst_indices; weight_t *d_edge_data = graph.edge_data; // // Renumber vertices internally. 
Workers will become // rows, tasks will become columns // thrust::sequence(rmm::exec_policy(stream)->on(stream), temp_tasks_v.begin(), temp_tasks_v.end()); thrust::for_each(rmm::exec_policy(stream)->on(stream), workers, workers + num_workers, [d_temp_tasks] __device__(vertex_t v) { d_temp_tasks[v] = -1; }); auto temp_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream), temp_tasks_v.begin(), temp_tasks_v.end(), d_tasks, [] __device__(vertex_t v) { return v >= 0; }); vertex_t size = thrust::distance(d_tasks, temp_end); tasks_v.resize(size); // // Now we'll assign costs into the dense array // thrust::fill(rmm::exec_policy(stream)->on(stream), temp_workers_v.begin(), temp_workers_v.end(), vertex_t{-1}); thrust::fill( rmm::exec_policy(stream)->on(stream), temp_tasks_v.begin(), temp_tasks_v.end(), vertex_t{-1}); thrust::fill(rmm::exec_policy(stream)->on(stream), cost_v.begin(), cost_v.end(), weight_t{0}); thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_rows), [d_temp_workers, workers] __device__(vertex_t v) { d_temp_workers[workers[v]] = v; }); thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_cols), [d_temp_tasks, d_tasks] __device__(vertex_t v) { d_temp_tasks[d_tasks[v]] = v; }); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [d_temp_workers, d_temp_tasks, d_cost, matrix_dimension, d_src_indices, d_dst_indices, d_edge_data] __device__(edge_t loc) { vertex_t src = d_temp_workers[d_src_indices[loc]]; vertex_t dst = d_temp_tasks[d_dst_indices[loc]]; if ((src >= 0) && (dst >= 0)) { d_cost[src * matrix_dimension + dst] = d_edge_data[loc]; } }); #ifdef TIMING hr_timer.stop(); hr_timer.start("hungarian"); #endif // // temp_assignment_v will hold the assignment in the dense // bipartite matrix numbering // rmm::device_vector<vertex_t> temp_assignment_v(matrix_dimension); vertex_t *d_temp_assignment = temp_assignment_v.data().get(); weight_t min_cost = detail::hungarian( handle, matrix_dimension, matrix_dimension, d_cost, d_temp_assignment, stream); #ifdef TIMING hr_timer.stop(); hr_timer.start("translate"); #endif // // Translate the assignment back to the original vertex ids // thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(num_rows), [d_tasks, d_temp_assignment, assignment] __device__(vertex_t id) { assignment[id] = d_tasks[d_temp_assignment[id]]; }); #ifdef TIMING hr_timer.stop(); hr_timer.display(std::cout); #endif return min_cost; } } // namespace detail template <typename vertex_t, typename edge_t, typename weight_t> weight_t hungarian(raft::handle_t const &handle, GraphCOOView<vertex_t, edge_t, weight_t> const &graph, vertex_t num_workers, vertex_t const *workers, vertex_t *assignment) { cudaStream_t stream{0}; return detail::hungarian_sparse(handle, graph, num_workers, workers, assignment, stream); } template int32_t hungarian<int32_t, int32_t, int32_t>( raft::handle_t const &, GraphCOOView<int32_t, int32_t, int32_t> const &, int32_t, int32_t const *, int32_t *); template float hungarian<int32_t, int32_t, float>(raft::handle_t const &, GraphCOOView<int32_t, int32_t, float> const &, int32_t, int32_t const *, int32_t *); template double hungarian<int32_t, int32_t, double>(raft::handle_t const 
&, GraphCOOView<int32_t, int32_t, double> const &, int32_t, int32_t const *, int32_t *); namespace dense { template <typename index_t, typename weight_t> weight_t hungarian(raft::handle_t const &handle, weight_t const *costs, index_t num_rows, index_t num_cols, index_t *assignment) { cudaStream_t stream{0}; return detail::hungarian(handle, num_rows, num_cols, costs, assignment, stream); } template int32_t hungarian<int32_t, int32_t>( raft::handle_t const &, int32_t const *, int32_t, int32_t, int32_t *); template float hungarian<int32_t, float>( raft::handle_t const &, float const *, int32_t, int32_t, int32_t *); template double hungarian<int32_t, double>( raft::handle_t const &, double const *, int32_t, int32_t, int32_t *); } // namespace dense } // namespace cugraph
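A hedged usage sketch for the dense entry point defined above follows. The 3x3 row-major cost matrix is illustrative, the forward declaration simply mirrors the template signature in this file, and the include paths assume a 2020-era RAPIDS build (raft::handle_t from raft/handle.hpp, rmm::device_vector from rmm/thrust_rmm_allocator.h, matching the includes used above); in a real project one would include the official cugraph header and link against this translation unit.

#include <raft/handle.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <iostream>
#include <vector>

namespace cugraph {
namespace dense {
template <typename index_t, typename weight_t>
weight_t hungarian(raft::handle_t const &handle, weight_t const *costs,
                   index_t num_rows, index_t num_cols, index_t *assignment);
}  // namespace dense
}  // namespace cugraph

int main() {
    raft::handle_t handle;

    // Row-major cost matrix: worker i doing task j costs h_costs[i * 3 + j].
    std::vector<float> h_costs{4.f, 1.f, 3.f,
                               2.f, 0.f, 5.f,
                               3.f, 2.f, 2.f};

    rmm::device_vector<float>   d_costs(h_costs.begin(), h_costs.end());
    rmm::device_vector<int32_t> d_assignment(3);

    float total = cugraph::dense::hungarian<int32_t, float>(
        handle, d_costs.data().get(), 3, 3, d_assignment.data().get());

    std::cout << "minimum total cost: " << total << std::endl;
    return 0;
}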
602f9851497affef7dac03f714dfeaa2e1f143ac.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * conway.cu
 *
 * https://github.com/rbxb/ReefCA
 */

#include "conway.cuh"
#include "cudahelpers.cuh"

using namespace ReefCA;

template<int width, int height, int depth, typename T>
__global__ void ReefCA::conway_transition(T* buf_r, T* buf_w) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < width * height) {
        int x = i % width;
        int y = i / width;
        unsigned char count = 0;
        count += buf_r[get_rel<width, height, depth>(x, y, 1, 1)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, 1, 0)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, 1, -1)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, 0, 1)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, 0, -1)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, -1, 1)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, -1, 0)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, -1, -1)] & 1;
        if (count == 3) buf_w[i * depth] = 255;
        else if (count != 2) buf_w[i * depth] = 0;
        else buf_w[i * depth] = buf_r[i * depth];
    }
}
602f9851497affef7dac03f714dfeaa2e1f143ac.cu
/*
 * conway.cu
 *
 * https://github.com/rbxb/ReefCA
 */

#include "conway.cuh"
#include "cudahelpers.cuh"

using namespace ReefCA;

template<int width, int height, int depth, typename T>
__global__ void ReefCA::conway_transition(T* buf_r, T* buf_w) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < width * height) {
        int x = i % width;
        int y = i / width;
        unsigned char count = 0;
        count += buf_r[get_rel<width, height, depth>(x, y, 1, 1)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, 1, 0)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, 1, -1)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, 0, 1)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, 0, -1)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, -1, 1)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, -1, 0)] & 1;
        count += buf_r[get_rel<width, height, depth>(x, y, -1, -1)] & 1;
        if (count == 3) buf_w[i * depth] = 255;
        else if (count != 2) buf_w[i * depth] = 0;
        else buf_w[i * depth] = buf_r[i * depth];
    }
}
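A double-buffered driver for the transition kernel could look like the sketch below. It assumes conway.cuh declares the ReefCA::conway_transition template (the qualified definition above requires such a declaration) and that T is an unsigned char cell buffer; the grid covers one thread per cell, and the read/write buffers are swapped after every step. Buffer names, dimensions, and the step count are illustrative.

#include <cuda_runtime.h>
#include <utility>
#include "conway.cuh"

int main() {
    constexpr int WIDTH = 512, HEIGHT = 512, DEPTH = 1;
    const size_t bytes = (size_t)WIDTH * HEIGHT * DEPTH;

    unsigned char *d_a, *d_b;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMemset(d_a, 0, bytes);   // a real driver would seed a random pattern here

    const int threads = 256;
    const int blocks  = (WIDTH * HEIGHT + threads - 1) / threads;

    for (int step = 0; step < 100; ++step) {
        ReefCA::conway_transition<WIDTH, HEIGHT, DEPTH, unsigned char>
            <<<blocks, threads>>>(d_a, d_b);
        std::swap(d_a, d_b);     // ping-pong: the last write becomes the next read
    }
    cudaDeviceSynchronize();

    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}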
a5def303d8d7188937646debb2a9bf66653cb153.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA //-*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// // // This file contains the implementation of reduction with KMPC interface. // //===----------------------------------------------------------------------===// #include <stdio.h> #include <complex.h> #include "omptarget-nvptx.h" // cannot implement atomic_start and atomic_end for GPU. Report runtime error EXTERN void __kmpc_atomic_start() { printf("__kmpc_atomic_start not supported\n"); asm("trap;"); return; } EXTERN void __kmpc_atomic_end() { printf("__kmpc_atomic_end not supported\n"); asm("trap;"); return; } //may eventually remove this EXTERN int32_t __gpu_block_reduce() { int tid = GetLogicalThreadIdInBlock(); int nt = GetNumberOfOmpThreads(tid, isSPMDMode(), isRuntimeUninitialized()); if (nt != blockDim.x) return 0; unsigned tnum = __ACTIVEMASK(); if (tnum != (~0x0)) { // assume swapSize is 32 return 0; } return 1; } EXTERN int32_t __kmpc_reduce_gpu(kmp_Indent *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, void *reduce_array_size, kmp_ReductFctPtr *reductFct, kmp_CriticalName *lck) { int threadId = GetLogicalThreadIdInBlock(); omptarget_nvptx_TaskDescr *currTaskDescr = getMyTopTaskDescriptor(threadId); int numthread; if (currTaskDescr->IsParallelConstruct()) { numthread = GetNumberOfOmpThreads(threadId, isSPMDMode(), isRuntimeUninitialized()); } else { numthread = GetNumberOfOmpTeams(); } if (numthread == 1) return 1; else if (!__gpu_block_reduce()) return 2; else { if (threadIdx.x == 0) return 1; else return 0; } } EXTERN int32_t __kmpc_reduce_combined(kmp_Indent *loc) { if (threadIdx.x == 0) { return 2; } else { return 0; } } EXTERN int32_t __kmpc_reduce_simd(kmp_Indent *loc) { if (threadIdx.x % 32 == 0) { return 1; } else { return 0; } } /** to be removed EXTERN int32_t __kmpc_reduce41(kmp_Indent *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, void *reduce_array_size, kmp_ReductFctPtr *reductFct, kmp_CriticalName *lck) { return __kmpc_reduce_gpu(loc, global_tid, num_vars, reduce_size, reduce_data, reduce_array_size, reductFct, lck); } */ EXTERN void __kmpc_nvptx_end_reduce(int32_t global_tid) {} EXTERN void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {} // implement different data type or operations with atomicCAS #define omptarget_nvptx_add(x, y) ((x) + (y)) #define omptarget_nvptx_sub(x, y) ((x) - (y)) #define omptarget_nvptx_sub_rev(y, x) ((x) - (y)) #define omptarget_nvptx_mul(x, y) ((x) * (y)) #define omptarget_nvptx_div(x, y) ((x) / (y)) #define omptarget_nvptx_div_rev(y, x) ((x) / (y)) #define omptarget_nvptx_min(x, y) ((x) > (y) ? (y) : (x)) #define omptarget_nvptx_max(x, y) ((x) < (y) ? 
(y) : (x)) #define omptarget_nvptx_andb(x, y) ((x) & (y)) #define omptarget_nvptx_orb(x, y) ((x) | (y)) #define omptarget_nvptx_xor(x, y) ((x) ^ (y)) #define omptarget_nvptx_shl(x, y) ((x) << (y)) #define omptarget_nvptx_shr(x, y) ((x) >> (y)) #define omptarget_nvptx_andl(x, y) ((x) && (y)) #define omptarget_nvptx_orl(x, y) ((x) || (y)) #define omptarget_nvptx_eqv(x, y) ((x) == (y)) #define omptarget_nvptx_neqv(x, y) ((x) != (y)) INLINE __device__ float atomicCAS(float *_addr, float _compare, float _val) { int *addr = (int *)_addr; int compare = __float_as_int(_compare); int val = __float_as_int(_val); return __int_as_float(atomicCAS(addr, compare, val)); } INLINE __device__ double atomicCAS(double *_addr, double _compare, double _val) { unsigned long long int *addr = (unsigned long long int *)_addr; unsigned long long int compare = __double_as_longlong(_compare); unsigned long long int val = __double_as_longlong(_val); return __longlong_as_double(atomicCAS(addr, compare, val)); } INLINE __device__ long long int atomicCAS(long long int *_addr, long long int _compare, long long int _val) { unsigned long long int *addr = (unsigned long long int *)_addr; unsigned long long int compare = (unsigned long long int)(_compare); unsigned long long int val = (unsigned long long int)(_val); return (long long int)(atomicCAS(addr, compare, val)); } INLINE __device__ int64_t atomicCAS(int64_t *_addr, int64_t _compare, int64_t _val) { unsigned long long int *addr = (unsigned long long int *)_addr; unsigned long long int compare = (unsigned long long int)(_compare); unsigned long long int val = (unsigned long long int)(_val); return (int64_t)(atomicCAS(addr, compare, val)); } INLINE __device__ uint64_t atomicCAS(uint64_t *_addr, uint64_t _compare, uint64_t _val) { unsigned long long int *addr = (unsigned long long int *)_addr; unsigned long long int compare = (unsigned long long int)(_compare); unsigned long long int val = (unsigned long long int)(_val); return (uint64_t)(atomicCAS(addr, compare, val)); } INLINE __device__ float complex atomicCAS(float complex *_addr, float complex _compare, float complex _val) { double *addr = (double *)_addr; double compare = (double)(_compare); double val = (double)(_val); return (float complex)(atomicCAS(addr, compare, val)); } #define ATOMIC_GENOP_NATIVE(_name, _dtype, _op, _cudaop) \ EXTERN void __kmpc_atomic_##_name##_##_op(kmp_Indent *id_ref, int32_t gtid, \ _dtype *lhs, _dtype rhs) { \ PRINT(LD_LOOP, "Reduction: thead %d\n", gtid); \ atomic##_cudaop(lhs, rhs); \ } \ \ EXTERN _dtype __kmpc_atomic_##_name##_##_op##_cpt( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs, int flag) { \ _dtype old = atomic##_cudaop(lhs, rhs); \ if (flag) { \ return omptarget_nvptx_##_op(old, rhs); \ } else { \ return old; \ } \ } // for types that are supported directly by atomicCAS #define ATOMIC_GENOP_DIRECT(_name, _dtype, _op) \ EXTERN void __kmpc_atomic_##_name##_##_op(kmp_Indent *id_ref, int32_t gtid, \ _dtype *lhs, _dtype rhs) { \ PRINT(LD_LOOP, "Reduction: thead %d\n", gtid); \ _dtype *temp_lhs = lhs; \ _dtype oldvalue = *temp_lhs; \ _dtype saved; \ _dtype newvalue; \ do { \ saved = oldvalue; \ newvalue = (_dtype)omptarget_nvptx_##_op(saved, rhs); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ } \ \ EXTERN _dtype __kmpc_atomic_##_name##_##_op##_cpt( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs, int flag) { \ _dtype *temp_lhs = lhs; \ _dtype oldvalue = *temp_lhs; \ _dtype saved; \ _dtype newvalue; \ do { \ saved = 
oldvalue; \ newvalue = (_dtype)omptarget_nvptx_##_op(saved, rhs); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ if (flag) \ return newvalue; \ else \ return oldvalue; \ } #define ATOMIC_GENOP_DIRECT_REV(_name, _dtype, _op) \ EXTERN void __kmpc_atomic_##_name##_##_op##_rev( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs) { \ _dtype *temp_lhs = lhs; \ _dtype oldvalue = *temp_lhs; \ _dtype saved; \ _dtype newvalue; \ do { \ saved = oldvalue; \ newvalue = (_dtype)omptarget_nvptx_##_op(rhs, saved); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ } \ \ EXTERN _dtype __kmpc_atomic_##_name##_##_op##_cpt##_rev( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs, int flag) { \ _dtype *temp_lhs = lhs; \ _dtype oldvalue = *temp_lhs; \ _dtype saved; \ _dtype newvalue; \ do { \ saved = oldvalue; \ newvalue = (_dtype)omptarget_nvptx_##_op(rhs, saved); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ if (flag) \ return newvalue; \ else \ return oldvalue; \ } INLINE __device__ void dc_add(double complex *lhs, double complex rhs) { double *ptrl = (double *)lhs; double *ptrr = (double *)&rhs; ptrl[0] += ptrr[0]; ptrl[1] += ptrr[1]; } INLINE __device__ void dc_sub(double complex *lhs, double complex rhs) { double *ptrl = (double *)lhs; double *ptrr = (double *)&rhs; ptrl[0] -= ptrr[0]; ptrl[1] -= ptrr[1]; } INLINE __device__ void dc_mul(double complex *lhs, double complex rhs) { double *ptrl = (double *)lhs; double *ptrr = (double *)&rhs; double r1 = ptrl[0], r2 = ptrr[0]; double i1 = ptrl[1], i2 = ptrr[1]; ptrl[0] = r1 * r2 - i1 * i2; ptrl[1] = r1 * i2 + r2 * i1; } INLINE __device__ void dc_div(double complex *lhs, double complex rhs) { double *ptrl = (double *)lhs; double *ptrr = (double *)&rhs; double r1 = ptrl[0], r2 = ptrr[0]; double i1 = ptrl[1], i2 = ptrr[1]; ptrl[0] = (r1 * r2 + i1 * i2) / (r2 * r2 + i2 * i2); ptrl[1] = (i1 * r2 - r1 * i2) / (r2 * r2 + i2 * i2); } #define ATOMIC_GENOP_DC(_op) \ EXTERN void __kmpc_atomic_cmplx8_##_op(kmp_Indent *id_ref, int32_t gtid, \ double _Complex *lhs, \ double _Complex rhs) { \ printf("Double complex atomic operation not supported\n"); \ asm("trap;"); \ return; \ } \ EXTERN double _Complex __kmpc_atomic_cmplx8_##_op##_cpt(kmp_Indent *id_ref, \ int32_t gtid, \ double _Complex *lhs, \ double _Complex rhs, \ int flag) { \ printf("Double complex atomic operation not supported\n"); \ asm("trap;"); \ return rhs; \ } \ EXTERN double _Complex __gpu_warpBlockRedu_cmplx8_##_op( \ double _Complex rhs) { \ __shared__ double _Complex lhs; \ if (threadIdx.x == 0) \ lhs = rhs; \ __syncthreads(); \ for (int i = 1; i < blockDim.x; i++) { \ if (threadIdx.x == i) { \ dc_##_op(&lhs, rhs); \ } \ __syncthreads(); \ } \ return lhs; \ } #define ATOMIC_GENOP_DC_REV(_op) \ EXTERN void __kmpc_atomic_cmplx8_##_op##_rev(kmp_Indent *id_ref, int32_t gtid, \ double _Complex *lhs, \ double _Complex rhs) { \ printf("Double complex atomic operation not supported\n"); \ asm("trap;"); \ return; \ } \ EXTERN double _Complex __kmpc_atomic_cmplx8_##_op##_cpt_rev( \ kmp_Indent *id_ref, \ int32_t gtid, \ double _Complex *lhs, \ double _Complex rhs, \ int flag) { \ printf("Double complex atomic operation not supported\n"); \ asm("trap;"); \ return rhs; \ } ATOMIC_GENOP_DC(add); ATOMIC_GENOP_DC(sub); ATOMIC_GENOP_DC(mul); ATOMIC_GENOP_DC(div); ATOMIC_GENOP_DC_REV(sub) ATOMIC_GENOP_DC_REV(div) INLINE __device__ uint64_t fc_add(float r1, float i1, float r2, float i2) { uint64_t 
result; float *rr = (float *)&result; float *ri = rr + 1; *rr = r1 + r2; *ri = i1 + i2; return result; } INLINE __device__ uint64_t fc_sub(float r1, float i1, float r2, float i2) { uint64_t result; float *rr = (float *)&result; float *ri = rr + 1; *rr = r1 - r2; *ri = i1 - i2; return result; } INLINE __device__ uint64_t fc_mul(float r1, float i1, float r2, float i2) { uint64_t result; float *rr = (float *)&result; float *ri = rr + 1; *rr = r1 * r2 - i1 * i2; *ri = r1 * i2 + r2 * i1; return result; } INLINE __device__ uint64_t fc_div(float r1, float i1, float r2, float i2) { uint64_t result; float *rr = (float *)&result; float *ri = rr + 1; *rr = (r1 * r2 + i1 * i2) / (r2 * r2 + i2 * i2); *ri = (i1 * r2 - r1 * i2) / (r2 * r2 + i2 * i2); return result; } #define ATOMIC_GENOP_FC(_op) \ EXTERN void __kmpc_atomic_cmplx4_##_op(kmp_Indent *id_ref, int32_t gtid, \ float complex *lhs, \ float complex rhs) { \ uint64_t *temp_lhs = (uint64_t *)lhs; \ uint64_t oldvalue = *temp_lhs; \ uint64_t saved; \ float *pr1 = (float *)&rhs; \ float *pi1 = pr1 + 1; \ float r1 = *pr1; \ float i1 = *pi1; \ uint64_t newvalue; \ do { \ saved = oldvalue; \ float *pr2 = (float *)&saved; \ float *pi2 = pr2 + 1; \ newvalue = fc_##_op(*pr2, *pi2, r1, i1); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ } \ \ EXTERN void __kmpc_atomic_cmplx4_##_op##_cpt( \ kmp_Indent *id_ref, int32_t gtid, float complex *lhs, float complex rhs, \ float complex *outp, int flag) { \ uint64_t *temp_lhs = (uint64_t *)lhs; \ uint64_t oldvalue = *temp_lhs; \ uint64_t saved; \ float *pr1 = (float *)&rhs; \ float *pi1 = pr1 + 1; \ float r1 = *pr1; \ float i1 = *pi1; \ uint64_t newvalue; \ do { \ saved = oldvalue; \ float *pr2 = (float *)&saved; \ float *pi2 = pr2 + 1; \ newvalue = fc_##_op(*pr2, *pi2, r1, i1); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ if (flag) { \ float complex *temp = (float complex *)&newvalue; \ *outp = *temp; \ } else { \ float complex *temp = (float complex *)&saved; \ *outp = *temp; \ } \ } #define ATOMIC_GENOP_FC_REV(_op) \ EXTERN void __kmpc_atomic_cmplx4_##_op##_rev( \ kmp_Indent *id_ref, int32_t gtid, float complex *lhs, \ float complex rhs) { \ uint64_t *temp_lhs = (uint64_t *)lhs; \ uint64_t oldvalue = *temp_lhs; \ uint64_t saved; \ float *pr1 = (float *)&rhs; \ float *pi1 = pr1 + 1; \ float r1 = *pr1; \ float i1 = *pi1; \ uint64_t newvalue; \ do { \ saved = oldvalue; \ float *pr2 = (float *)&saved; \ float *pi2 = pr2 + 1; \ newvalue = fc_##_op(r1, i1, *pr2, *pi2); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ } \ \ EXTERN void __kmpc_atomic_cmplx4_##_op##_cpt##_rev( \ kmp_Indent *id_ref, int32_t gtid, float complex *lhs, float complex rhs, \ float complex *outp, int flag) { \ uint64_t *temp_lhs = (uint64_t *)lhs; \ uint64_t oldvalue = *temp_lhs; \ uint64_t saved; \ float *pr1 = (float *)&rhs; \ float *pi1 = pr1 + 1; \ float r1 = *pr1; \ float i1 = *pi1; \ uint64_t newvalue; \ do { \ saved = oldvalue; \ float *pr2 = (float *)&saved; \ float *pi2 = pr2 + 1; \ newvalue = fc_##_op(r1, i1, *pr2, *pi2); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ if (flag) { \ float complex *temp = (float complex *)&newvalue; \ *outp = *temp; \ } else { \ float complex *temp = (float complex *)&saved; \ *outp = *temp; \ } \ } ATOMIC_GENOP_FC(add); ATOMIC_GENOP_FC(sub); ATOMIC_GENOP_FC_REV(sub); ATOMIC_GENOP_FC(mul); ATOMIC_GENOP_FC(div); ATOMIC_GENOP_FC_REV(div); // for int and uint 
#define ATOMIC_GENOP_ALL_MIXED(_name, _dirname, _tname, _optype) \ _dirname(_tname, _optype, add, Add); \ _dirname(_tname, _optype, sub, Sub); \ _name##_REV(_tname, _optype, sub); \ _name(_tname, _optype, mul); \ _name(_tname, _optype, div); \ _name##_REV(_tname, _optype, div); \ _dirname(_tname, _optype, min, Min); \ _dirname(_tname, _optype, max, Max); \ _dirname(_tname, _optype, andb, And); \ _dirname(_tname, _optype, orb, Or); \ _dirname(_tname, _optype, xor, Xor); \ _name(_tname, _optype, shl); \ _name(_tname, _optype, shr); \ _name(_tname, _optype, andl); \ _name(_tname, _optype, orl); \ _name(_tname, _optype, eqv); \ _name(_tname, _optype, neqv); #define ATOMIC_GENOP_ALL(_name, _tname, _optype) \ _name(_tname, _optype, add); \ _name(_tname, _optype, sub); \ _name##_REV(_tname, _optype, sub); \ _name(_tname, _optype, mul); \ _name(_tname, _optype, div); \ _name##_REV(_tname, _optype, div); \ _name(_tname, _optype, min); \ _name(_tname, _optype, max); \ _name(_tname, _optype, andb); \ _name(_tname, _optype, orb); \ _name(_tname, _optype, xor); \ _name(_tname, _optype, shl); \ _name(_tname, _optype, shr); \ _name(_tname, _optype, andl); \ _name(_tname, _optype, orl); \ _name(_tname, _optype, eqv); \ _name(_tname, _optype, neqv); #define ATOMIC_GENOP_FLOAT(_name, _tname, _optype) \ _name(_tname, _optype, add); \ _name(_tname, _optype, sub); \ _name##_REV(_tname, _optype, sub); \ _name(_tname, _optype, mul); \ _name(_tname, _optype, div); \ _name##_REV(_tname, _optype, div); \ _name(_tname, _optype, min); \ _name(_tname, _optype, max); ATOMIC_GENOP_ALL_MIXED(ATOMIC_GENOP_DIRECT, ATOMIC_GENOP_NATIVE, fixed4, int32_t); ATOMIC_GENOP_ALL_MIXED(ATOMIC_GENOP_DIRECT, ATOMIC_GENOP_NATIVE, fixed4u, uint32_t); ATOMIC_GENOP_ALL(ATOMIC_GENOP_DIRECT, fixed8, int64_t); ATOMIC_GENOP_ALL(ATOMIC_GENOP_DIRECT, fixed8u, uint64_t); ATOMIC_GENOP_FLOAT(ATOMIC_GENOP_DIRECT, float4, float); ATOMIC_GENOP_FLOAT(ATOMIC_GENOP_DIRECT, float8, double); // // data type of size not 32 nor 64 // typedef enum { omptarget_nvptx_inc, omptarget_nvptx_dec, omptarget_nvptx_add, omptarget_nvptx_sub, omptarget_nvptx_sub_rev, omptarget_nvptx_mul, omptarget_nvptx_div, omptarget_nvptx_div_rev, omptarget_nvptx_min, omptarget_nvptx_max, omptarget_nvptx_rd, omptarget_nvptx_wr, omptarget_nvptx_swp, omptarget_nvptx_andb, omptarget_nvptx_orb, omptarget_nvptx_xor, omptarget_nvptx_andl, omptarget_nvptx_orl, omptarget_nvptx_eqv, omptarget_nvptx_neqv, omptarget_nvptx_shl, omptarget_nvptx_shl_rev, omptarget_nvptx_shr, omptarget_nvptx_shr_rev, } omptarget_nvptx_BINOP_t; template <typename OpType, // type of the operation performed omptarget_nvptx_BINOP_t binop // enum describing the operation > INLINE __device__ OpType Compute(OpType a, OpType b) // a is old value, b is new value { OpType res = 0; if (binop == omptarget_nvptx_inc) res = a + b; if (binop == omptarget_nvptx_dec) res = a - b; if (binop == omptarget_nvptx_add) res = a + b; if (binop == omptarget_nvptx_sub) res = a - b; if (binop == omptarget_nvptx_sub_rev) res = b - a; if (binop == omptarget_nvptx_mul) res = a * b; if (binop == omptarget_nvptx_div) res = a / b; if (binop == omptarget_nvptx_div_rev) res = b / a; if (binop == omptarget_nvptx_min) res = a < b ? a : b; if (binop == omptarget_nvptx_max) res = a > b ? 
a : b; if (binop == omptarget_nvptx_rd) res = a; // read if (binop == omptarget_nvptx_wr) res = b; // write and swap are the same if (binop == omptarget_nvptx_swp) res = b; // write and swap are the same if (binop == omptarget_nvptx_andb) res = a & b; if (binop == omptarget_nvptx_orb) res = a | b; if (binop == omptarget_nvptx_xor) res = a ^ b; if (binop == omptarget_nvptx_andl) res = a && b; if (binop == omptarget_nvptx_orl) res = a || b; if (binop == omptarget_nvptx_eqv) res = a == b; if (binop == omptarget_nvptx_neqv) res = a != b; if (binop == omptarget_nvptx_shl) res = a << b; if (binop == omptarget_nvptx_shl_rev) res = b << a; if (binop == omptarget_nvptx_shr) res = a >> b; if (binop == omptarget_nvptx_shr_rev) res = b >> a; return res; } /* specialize the template to avoid the switch at runtime */ template <> INLINE __device__ float Compute<float, omptarget_nvptx_add>(float a, float b) { return a + b; } template <> INLINE __device__ float Compute<float, omptarget_nvptx_sub>(float a, float b) { return a - b; } template <> INLINE __device__ float Compute<float, omptarget_nvptx_mul>(float a, float b) { return a * b; } template <> INLINE __device__ float Compute<float, omptarget_nvptx_div>(float a, float b) { return a / b; } template <> INLINE __device__ float Compute<float, omptarget_nvptx_min>(float a, float b) { return a < b ? a : b; } template <> INLINE __device__ float Compute<float, omptarget_nvptx_max>(float a, float b) { return a > b ? a : b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_add>(double a, double b) { return a + b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_sub>(double a, double b) { return a - b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_mul>(double a, double b) { return a * b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_div>(double a, double b) { return a / b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_min>(double a, double b) { return a < b ? a : b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_max>(double a, double b) { return a > b ? 
a : b; } //////////////////////////////////////////////////////////////////////////////// // common atomic slicing functions (modifying only a part of a word) //////////////////////////////////////////////////////////////////////////////// template <typename MemType, // type of the underlying atomic memory operation typename OpType // type of the operation performed > INLINE __device__ void ComputeAtomic_PrepareSlice( OpType *addr, // original address MemType **memAddrPtr, // truncated address to MemType boundary MemType *memBitShiftRightPtr, // bits to shift to move val to rightmost position MemType *memValMaskInPlacePtr) // mask of val in proper position { // compute the mask that corresponds to the natural alignment of memType // int -> 0x3; long long -> 0x7 unsigned long memAddrMask = sizeof(MemType) - 1; // compute the addr of the atomic variable truncated to alignment of memType *memAddrPtr = (MemType *)((unsigned long)addr & ~memAddrMask); // compute the number of bit shift to move the target atomic value in // the rightmost position unsigned long byteOffsetInMem = (unsigned long)addr & memAddrMask; // assumes little-endian unsigned long byteShiftRight = byteOffsetInMem; *memBitShiftRightPtr = (MemType)(byteShiftRight << 3); // 3: byte to bits // mask to isolate target atomic value located in rightmost position MemType memValMask = ((MemType)1 << (sizeof(OpType) << 3)) - 1; // mask to isolate target atomic value located in place *memValMaskInPlacePtr = memValMask << *memBitShiftRightPtr; } template <typename MemType, // type of the underlying atomic memory operation typename OpType, // type of the operation performed omptarget_nvptx_BINOP_t binop // enum describing the operation > INLINE __device__ MemType ComputeAtomic_ComputeSlice( MemType oldMemVal, // old value OpType val, // value to compute with MemType memBitShiftRight, // bits to shift to move val to rightmost position MemType memValMaskInPlace // mask of val in proper position ) { OpType oldValtmp; OpType newValtmp; // select target atomic val MemType oldMemVal_targetVal = oldMemVal & memValMaskInPlace; MemType oldMemVal_otherVal = oldMemVal & ~memValMaskInPlace; // shift target atomic val to rightmost place: this is the old value // type conversion?? oldValtmp = (OpType)(oldMemVal_targetVal >> memBitShiftRight); // perform op newValtmp = Compute<OpType, binop>(oldValtmp, val); // insert new value in old world mem // type conversion?? 
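// Worked example of the slicing (little-endian, as PrepareSlice above assumes):
// for an int8_t atomic living at byte offset 1 of its aligned 32-bit word,
// PrepareSlice yields memBitShiftRight = 8 and memValMaskInPlace = 0x0000FF00.
// The old byte is recovered as (oldMemVal & 0xFF00) >> 8, combined with val,
// and below the result is shifted back and re-masked so that only this single
// byte of the word changes before the enclosing atomicCAS retry loop re-checks it.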
MemType newMemVal_targetVal = ((MemType)newValtmp) << memBitShiftRight; newMemVal_targetVal &= memValMaskInPlace; MemType newMemVal = oldMemVal_otherVal | newMemVal_targetVal; return newMemVal; } #define ATOMIC_GENOP_PARTIAL(_name, _dtype, _op, _memType) \ EXTERN void __kmpc_atomic_##_name##_##_op(kmp_Indent *id_ref, int32_t gtid, \ _dtype *lhs, _dtype rhs) { \ _memType *memAddr; \ _memType memBitShiftRightPtr; \ _memType memValMaskInPlacePtr; \ ComputeAtomic_PrepareSlice<_memType, _dtype>( \ lhs, &memAddr, &memBitShiftRightPtr, &memValMaskInPlacePtr); \ _memType oldMemVal, newMemVal; \ oldMemVal = *memAddr; \ _memType savedMemVal; \ do { \ savedMemVal = oldMemVal; \ newMemVal = \ ComputeAtomic_ComputeSlice<_memType, _dtype, omptarget_nvptx_##_op>( \ oldMemVal, rhs, memBitShiftRightPtr, memValMaskInPlacePtr); \ oldMemVal = atomicCAS(memAddr, savedMemVal, newMemVal); \ } while (savedMemVal != oldMemVal); \ } \ \ EXTERN _dtype __kmpc_atomic_##_name##_##_op##_cpt( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs, int flag) { \ _memType *memAddr; \ _memType memBitShiftRightPtr; \ _memType memValMaskInPlacePtr; \ ComputeAtomic_PrepareSlice<_memType, _dtype>( \ lhs, &memAddr, &memBitShiftRightPtr, &memValMaskInPlacePtr); \ _memType oldMemVal, newMemVal; \ oldMemVal = *memAddr; \ _memType savedMemVal; \ do { \ savedMemVal = oldMemVal; \ newMemVal = \ ComputeAtomic_ComputeSlice<_memType, _dtype, omptarget_nvptx_##_op>( \ oldMemVal, rhs, memBitShiftRightPtr, memValMaskInPlacePtr); \ oldMemVal = atomicCAS(memAddr, savedMemVal, newMemVal); \ } while (savedMemVal != oldMemVal); \ if (flag) \ return (_dtype)((newMemVal & memValMaskInPlacePtr) >> \ memBitShiftRightPtr); \ else \ return (_dtype)((oldMemVal & memValMaskInPlacePtr) >> \ memBitShiftRightPtr); \ } #define ATOMIC_GENOP_PARTIAL_REV(_name, _dtype, _op, _memType) \ EXTERN void __kmpc_atomic_##_name##_##_op##_rev( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs) { \ _memType *memAddr; \ _memType memBitShiftRightPtr; \ _memType memValMaskInPlacePtr; \ ComputeAtomic_PrepareSlice<_memType, _dtype>( \ lhs, &memAddr, &memBitShiftRightPtr, &memValMaskInPlacePtr); \ _memType oldMemVal, newMemVal; \ oldMemVal = *memAddr; \ _memType savedMemVal; \ do { \ savedMemVal = oldMemVal; \ newMemVal = \ ComputeAtomic_ComputeSlice<_memType, _dtype, omptarget_nvptx_##_op>( \ oldMemVal, rhs, memBitShiftRightPtr, memValMaskInPlacePtr); \ oldMemVal = atomicCAS(memAddr, savedMemVal, newMemVal); \ } while (savedMemVal != oldMemVal); \ } \ \ EXTERN _dtype __kmpc_atomic_##_name##_##_op##_cpt_rev( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs, int flag) { \ _memType *memAddr; \ _memType memBitShiftRightPtr; \ _memType memValMaskInPlacePtr; \ ComputeAtomic_PrepareSlice<_memType, _dtype>( \ lhs, &memAddr, &memBitShiftRightPtr, &memValMaskInPlacePtr); \ _memType oldMemVal, newMemVal; \ oldMemVal = *memAddr; \ _memType savedMemVal; \ do { \ savedMemVal = oldMemVal; \ newMemVal = \ ComputeAtomic_ComputeSlice<_memType, _dtype, omptarget_nvptx_##_op>( \ oldMemVal, rhs, memBitShiftRightPtr, memValMaskInPlacePtr); \ oldMemVal = atomicCAS(memAddr, savedMemVal, newMemVal); \ } while (savedMemVal != oldMemVal); \ if (flag) \ return (_dtype)((newMemVal & memValMaskInPlacePtr) >> \ memBitShiftRightPtr); \ else \ return (_dtype)((oldMemVal & memValMaskInPlacePtr) >> \ memBitShiftRightPtr); \ } #define ATOMIC_GENOP_ALL4(_name, _tname, _optype, _memtype) \ _name(_tname, _optype, add, _memtype); \ _name(_tname, _optype, sub, _memtype); \ 
_name##_REV(_tname, _optype, sub_rev, _memtype); \ _name(_tname, _optype, mul, _memtype); \ _name(_tname, _optype, div, _memtype); \ _name##_REV(_tname, _optype, div_rev, _memtype); \ _name(_tname, _optype, min, _memtype); \ _name(_tname, _optype, max, _memtype); \ _name(_tname, _optype, andb, _memtype); \ _name(_tname, _optype, orb, _memtype); \ _name(_tname, _optype, xor, _memtype); \ _name(_tname, _optype, andl, _memtype); \ _name(_tname, _optype, orl, _memtype); \ _name(_tname, _optype, eqv, _memtype); \ _name(_tname, _optype, neqv, _memtype); \ _name(_tname, _optype, shl, _memtype); \ _name(_tname, _optype, shr, _memtype); ATOMIC_GENOP_ALL4(ATOMIC_GENOP_PARTIAL, fixed1, int8_t, int32_t); ATOMIC_GENOP_ALL4(ATOMIC_GENOP_PARTIAL, fixed1u, uint8_t, int32_t); ATOMIC_GENOP_ALL4(ATOMIC_GENOP_PARTIAL, fixed2u, uint16_t, int32_t); ATOMIC_GENOP_ALL4(ATOMIC_GENOP_PARTIAL, fixed2, int16_t, int32_t); EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) { return __SHFL_DOWN_SYNC(0xFFFFFFFF, val, delta, size); } EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) { int lo, hi; asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(val)); hi = __SHFL_DOWN_SYNC(0xFFFFFFFF, hi, delta, size); lo = __SHFL_DOWN_SYNC(0xFFFFFFFF, lo, delta, size); asm volatile("mov.b64 %0, {%1,%2};" : "=l"(val) : "r"(lo), "r"(hi)); return val; } template <typename T, omptarget_nvptx_BINOP_t binop> __inline__ __device__ T reduInitVal() { switch (binop) { case omptarget_nvptx_mul: case omptarget_nvptx_div: case omptarget_nvptx_div_rev: case omptarget_nvptx_andl: case omptarget_nvptx_andb: return (T)1; default: return (T)0; } } #define MYGSIZE 32 static INLINE void gpu_regular_warp_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) { for (uint32_t mask = WARPSIZE/2; mask > 0; mask /= 2) { shflFct(reduce_data, /*LaneId - not used= */0, /*Offset = */mask, /*AlgoVersion=*/0); } } static INLINE void gpu_irregular_warp_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct, uint32_t size, uint32_t tid) { uint32_t curr_size; uint32_t mask; curr_size = size; mask = curr_size/2; while (mask>0) { shflFct(reduce_data, /*LaneId = */tid, /*Offset=*/mask, /*AlgoVersion=*/1); curr_size = (curr_size+1)/2; mask = curr_size/2; } } static INLINE uint32_t gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) { uint32_t lanemask_lt; uint32_t lanemask_gt; uint32_t size, remote_id, physical_lane_id; physical_lane_id = GetThreadIdInBlock() % WARPSIZE; asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask_lt)); uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true); uint32_t logical_lane_id = __popc(Liveness & lanemask_lt) * 2; asm("mov.u32 %0, %%lanemask_gt;" : "=r"(lanemask_gt)); do { Liveness = __BALLOT_SYNC(0xFFFFFFFF, true); remote_id = __ffs(Liveness & lanemask_gt); size = __popc(Liveness); logical_lane_id /= 2; shflFct(reduce_data, /*LaneId =*/logical_lane_id, /*Offset=*/remote_id-1-physical_lane_id, /*AlgoVersion=*/2); } while (logical_lane_id % 2 == 0 && size > 1); return (logical_lane_id == 0); } // // runtime support for array reduction // #define ARRAYATOMIC_GENOP(_name, _dtype, _op) \ EXTERN void __array_atomic_##_name##_##_op( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype *rhs, int64_t n) { \ PRINT(LD_LOOP, "Reduction: thead %d\n", gtid); \ for (int i = 0; i < n / sizeof(_dtype); i++) { \ __kmpc_atomic_##_name##_##_op(id_ref, gtid, lhs + i, rhs[i]); \ } \ } \ EXTERN void __gpu_array_warpBlockRedu_##_name##_##_op(_dtype *ldata, \ int64_t n) { 
\ for (int i = 0; i < n / sizeof(_dtype); i++) { \ ldata[i] = __gpu_warpBlockRedu_##_name##_##_op(ldata[i]); \ } \ } #define ARRAY_GEN_ALLOP_INTEGER(_name, _tname, _optype) \ _name(_tname, _optype, add); \ _name(_tname, _optype, sub); \ _name(_tname, _optype, mul); \ _name(_tname, _optype, div); \ _name(_tname, _optype, min); \ _name(_tname, _optype, max); \ _name(_tname, _optype, andb); \ _name(_tname, _optype, orb); \ _name(_tname, _optype, xor); \ _name(_tname, _optype, shl); \ _name(_tname, _optype, shr); \ _name(_tname, _optype, andl); \ _name(_tname, _optype, orl); \ _name(_tname, _optype, eqv); \ _name(_tname, _optype, neqv); #define ARRAY_GEN_ALLOP_FLOAT(_name, _tname, _optype) \ _name(_tname, _optype, add); \ _name(_tname, _optype, sub); \ _name(_tname, _optype, mul); \ _name(_tname, _optype, div); \ _name(_tname, _optype, min); \ _name(_tname, _optype, max); EXTERN int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true); if (Liveness == 0xffffffff) { gpu_regular_warp_reduce(reduce_data, shflFct); return GetThreadIdInBlock() % WARPSIZE == 0; // Result on lane 0 of the simd warp. } else { return gpu_irregular_simd_reduce(reduce_data, shflFct); // Result on the first active lane. } } #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 INLINE int32_t nvptx_parallel_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, bool isSPMDExecutionMode, bool isRuntimeUninitialized = false) { /* * This reduce function handles reduction within a team. It handles * parallel regions in both L1 and L2 parallelism levels. It also * supports Generic, SPMD, and NoOMP modes. * * 1. Reduce within a warp. * 2. Warp master copies value to warp 0 via shared memory. * 3. Warp 0 reduces to a single value. * 4. The reduced value is available in the thread that returns 1. */ uint32_t BlockThreadId = GetLogicalThreadIdInBlock(); uint32_t NumThreads = GetNumberOfOmpThreads(BlockThreadId, isSPMDExecutionMode, isRuntimeUninitialized); uint32_t WarpsNeeded = (NumThreads+WARPSIZE-1)/WARPSIZE; uint32_t WarpId = BlockThreadId/WARPSIZE; // Volta execution model: // For the Generic execution mode a parallel region either has 1 thread and beyond that, // always a multiple of 32. // For the SPMD execution mode we may have any number of threads. if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1)) gpu_regular_warp_reduce(reduce_data, shflFct); else if (NumThreads > 1) // Only SPMD execution mode comes thru this case. gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/NumThreads % WARPSIZE, /*LaneId=*/GetThreadIdInBlock() % WARPSIZE); // else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2 parallel region may enter here; return early. // return gpu_irregular_simd_reduce(reduce_data, shflFct); // When we have more than [warpsize] number of threads // a block reduction is performed here. // // Only L1 parallel region can enter this if condition. if (NumThreads > WARPSIZE) { // Gather all the reduced values from each warp // to the first warp. 
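// Sketch of the inter-warp step (cpyFct is the compiler-generated
// InterWarpCopy function): each warp's reduced reduce_data is typically
// parked in a shared-memory transfer slot, and after a block-level sync the
// first WarpsNeeded lanes of warp 0 read the slots back. Warp 0 then reruns
// the irregular shuffle reduction over those WarpsNeeded partial values, so
// the team-wide result ends up with the thread whose BlockThreadId is 0.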
cpyFct(reduce_data, WarpsNeeded); if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, BlockThreadId); return BlockThreadId == 0; // } else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) { // return BlockThreadId == 0; } return BlockThreadId == 0; // Get the OMP thread Id. This is different from BlockThreadId in the case of // an L2 parallel region. // return GetOmpThreadId(BlockThreadId, isSPMDExecutionMode, isRuntimeUninitialized) == 0; } #else INLINE int32_t nvptx_parallel_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, bool isSPMDExecutionMode, bool isRuntimeUninitialized = false) { /* * This reduce function handles reduction within a team. It handles * parallel regions in both L1 and L2 parallelism levels. It also * supports Generic, SPMD, and NoOMP modes. * * 1. Reduce within a warp. * 2. Warp master copies value to warp 0 via shared memory. * 3. Warp 0 reduces to a single value. * 4. The reduced value is available in the thread that returns 1. */ uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true); if (Liveness == 0xffffffff) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/__popc(Liveness), /*LaneId=*/GetThreadIdInBlock() % WARPSIZE); else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2 parallel region may enter here; return early. return gpu_irregular_simd_reduce(reduce_data, shflFct); uint32_t BlockThreadId = GetLogicalThreadIdInBlock(); uint32_t NumThreads = GetNumberOfOmpThreads(BlockThreadId, isSPMDExecutionMode, isRuntimeUninitialized); // When we have more than [warpsize] number of threads // a block reduction is performed here. // // Only L1 parallel region can enter this if condition. if (NumThreads > WARPSIZE) { uint32_t WarpsNeeded = (NumThreads+WARPSIZE-1)/WARPSIZE; // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = BlockThreadId/WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, BlockThreadId); return BlockThreadId == 0; } else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) { return BlockThreadId == 0; } // Get the OMP thread Id. This is different from BlockThreadId in the case of // an L2 parallel region. 
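// In an L2 (nested) parallel region the OpenMP thread id no longer matches
// BlockThreadId, so this remaining path picks the reducing thread via
// GetOmpThreadId below instead of comparing BlockThreadId against 0.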
return GetOmpThreadId(BlockThreadId, isSPMDExecutionMode, isRuntimeUninitialized) == 0; } #endif // __CUDA_ARCH__ >= 700 EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, /*isSPMDExecutionMode=*/isSPMDMode()); } EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, /*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true); } EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, /*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true); } #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 INLINE int32_t nvptx_teams_reduce_nowait( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct, bool isSPMDExecutionMode, bool isRuntimeUninitialized = false) { uint32_t ThreadId = GetLogicalThreadIdInBlock(); // In non-generic mode all workers participate in the teams reduction. // In generic mode only the team master participates in the teams // reduction because the workers are waiting for parallel work. uint32_t NumThreads = isSPMDExecutionMode ? GetNumberOfOmpThreads(ThreadId, /*isSPMDExecutionMode=*/true, isRuntimeUninitialized) : /*Master thread only*/ 1; uint32_t TeamId = GetBlockIdInKernel(); uint32_t NumTeams = GetNumberOfBlocksInKernel(); __shared__ volatile bool IsLastTeam; // Team masters of all teams write to the scratchpad. if (ThreadId == 0) { unsigned int *timestamp = GetTeamsReductionTimestamp(); char *scratchpad = GetTeamsReductionScratchpad(); scratchFct(reduce_data, scratchpad, TeamId, NumTeams); __threadfence(); // atomicInc increments 'timestamp' and has a range [0, NumTeams-1]. // It resets 'timestamp' back to 0 once the last team increments // this counter. unsigned val = atomicInc(timestamp, NumTeams-1); IsLastTeam = val == NumTeams - 1; } // We have to wait on L1 barrier because in GENERIC mode the workers // are waiting on barrier 0 for work. // // If we guard this barrier as follows it leads to deadlock, probably // because of a compiler bug: if (!IsGenericMode()) __syncthreads(); uint16_t SyncWarps = (NumThreads+WARPSIZE-1)/WARPSIZE; named_sync(L1_BARRIER, SyncWarps*WARPSIZE); // If this team is not the last, quit. if (/* Volatile read by all threads */ !IsLastTeam) return 0; // // Last team processing. // // Threads in excess of #teams do not participate in reduction of the // scratchpad values. uint32_t ActiveThreads = NumThreads; if (NumTeams < NumThreads) { ActiveThreads = (NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1); } if (ThreadId >= ActiveThreads) return 0; // Load from scratchpad and reduce. 
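// Sketch of the scratchpad protocol: every team master stored its partial
// result in slot TeamId of the global scratchpad (scratchFct above), and the
// atomicInc on the timestamp elected the last team to arrive as the one that
// combines them. Within that team, thread i loads slot i first, then folds in
// slots i + ActiveThreads, i + 2*ActiveThreads, ... via the strided loop
// below, leaving ActiveThreads partial values for the warp-level reduction.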
char *scratchpad = GetTeamsReductionScratchpad(); ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/0); for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads) ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/1); uint32_t WarpsNeeded = (ActiveThreads+WARPSIZE-1)/WARPSIZE; uint32_t WarpId = ThreadId/WARPSIZE; // Reduce across warps to the warp master. if ((ActiveThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1)) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else if (ActiveThreads > 1) // Partial warp but contiguous lanes // Only SPMD execution mode comes thru this case. gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/ActiveThreads % WARPSIZE, /*LaneId=*/ThreadId % WARPSIZE); // When we have more than [warpsize] number of threads // a block reduction is performed here. if (ActiveThreads > WARPSIZE) { // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId); } return ThreadId == 0; } #else INLINE int32_t nvptx_teams_reduce_nowait( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct, bool isSPMDExecutionMode, bool isRuntimeUninitialized = false) { uint32_t ThreadId = GetLogicalThreadIdInBlock(); // In non-generic mode all workers participate in the teams reduction. // In generic mode only the team master participates in the teams // reduction because the workers are waiting for parallel work. uint32_t NumThreads = isSPMDExecutionMode ? GetNumberOfOmpThreads(ThreadId, /*isSPMDExecutionMode=*/true, isRuntimeUninitialized) : /*Master thread only*/ 1; uint32_t TeamId = GetBlockIdInKernel(); uint32_t NumTeams = GetNumberOfBlocksInKernel(); __shared__ volatile bool IsLastTeam; // Team masters of all teams write to the scratchpad. if (ThreadId == 0) { unsigned int *timestamp = GetTeamsReductionTimestamp(); char *scratchpad = GetTeamsReductionScratchpad(); scratchFct(reduce_data, scratchpad, TeamId, NumTeams); __threadfence(); // atomicInc increments 'timestamp' and has a range [0, NumTeams-1]. // It resets 'timestamp' back to 0 once the last team increments // this counter. unsigned val = atomicInc(timestamp, NumTeams-1); IsLastTeam = val == NumTeams - 1; } // We have to wait on L1 barrier because in GENERIC mode the workers // are waiting on barrier 0 for work. // // If we guard this barrier as follows it leads to deadlock, probably // because of a compiler bug: if (!IsGenericMode()) __syncthreads(); uint16_t SyncWarps = (NumThreads+WARPSIZE-1)/WARPSIZE; named_sync(L1_BARRIER, SyncWarps*WARPSIZE); // If this team is not the last, quit. if (/* Volatile read by all threads */ !IsLastTeam) return 0; // // Last team processing. // // Threads in excess of #teams do not participate in reduction of the // scratchpad values. if (ThreadId >= NumTeams) return 0; // Load from scratchpad and reduce. char *scratchpad = GetTeamsReductionScratchpad(); ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/0); for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads) ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/1); // Reduce across warps to the warp master. 
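// The ballot below yields one bit per lane that is still active. A value of
// 0xffffffff means the whole warp survived the earlier early-outs, so the
// regular power-of-two shuffle reduction applies; otherwise only the low
// __popc(Liveness) contiguous lanes remain (threads at or beyond NumTeams
// already returned), and the irregular variant sized by that count is used.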
uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true); if (Liveness == 0xffffffff) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else // Partial warp but contiguous lanes gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/__popc(Liveness), /*LaneId=*/ThreadId % WARPSIZE); // When we have more than [warpsize] number of threads // a block reduction is performed here. uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads; if (ActiveThreads > WARPSIZE) { uint32_t WarpsNeeded = (ActiveThreads+WARPSIZE-1)/WARPSIZE; // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = ThreadId/WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId); } return ThreadId == 0; } #endif // __CUDA_ARCH__ >= 700 EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, /*isSPMDExecutionMode=*/isSPMDMode()); } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, /*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true); } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, /*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true); }
a5def303d8d7188937646debb2a9bf66653cb153.cu
//===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA //-*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// // // This file contains the implementation of reduction with KMPC interface. // //===----------------------------------------------------------------------===// #include <stdio.h> #include <complex.h> #include "omptarget-nvptx.h" // cannot implement atomic_start and atomic_end for GPU. Report runtime error EXTERN void __kmpc_atomic_start() { printf("__kmpc_atomic_start not supported\n"); asm("trap;"); return; } EXTERN void __kmpc_atomic_end() { printf("__kmpc_atomic_end not supported\n"); asm("trap;"); return; } //may eventually remove this EXTERN int32_t __gpu_block_reduce() { int tid = GetLogicalThreadIdInBlock(); int nt = GetNumberOfOmpThreads(tid, isSPMDMode(), isRuntimeUninitialized()); if (nt != blockDim.x) return 0; unsigned tnum = __ACTIVEMASK(); if (tnum != (~0x0)) { // assume swapSize is 32 return 0; } return 1; } EXTERN int32_t __kmpc_reduce_gpu(kmp_Indent *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, void *reduce_array_size, kmp_ReductFctPtr *reductFct, kmp_CriticalName *lck) { int threadId = GetLogicalThreadIdInBlock(); omptarget_nvptx_TaskDescr *currTaskDescr = getMyTopTaskDescriptor(threadId); int numthread; if (currTaskDescr->IsParallelConstruct()) { numthread = GetNumberOfOmpThreads(threadId, isSPMDMode(), isRuntimeUninitialized()); } else { numthread = GetNumberOfOmpTeams(); } if (numthread == 1) return 1; else if (!__gpu_block_reduce()) return 2; else { if (threadIdx.x == 0) return 1; else return 0; } } EXTERN int32_t __kmpc_reduce_combined(kmp_Indent *loc) { if (threadIdx.x == 0) { return 2; } else { return 0; } } EXTERN int32_t __kmpc_reduce_simd(kmp_Indent *loc) { if (threadIdx.x % 32 == 0) { return 1; } else { return 0; } } /** to be removed EXTERN int32_t __kmpc_reduce41(kmp_Indent *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, void *reduce_array_size, kmp_ReductFctPtr *reductFct, kmp_CriticalName *lck) { return __kmpc_reduce_gpu(loc, global_tid, num_vars, reduce_size, reduce_data, reduce_array_size, reductFct, lck); } */ EXTERN void __kmpc_nvptx_end_reduce(int32_t global_tid) {} EXTERN void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {} // implement different data type or operations with atomicCAS #define omptarget_nvptx_add(x, y) ((x) + (y)) #define omptarget_nvptx_sub(x, y) ((x) - (y)) #define omptarget_nvptx_sub_rev(y, x) ((x) - (y)) #define omptarget_nvptx_mul(x, y) ((x) * (y)) #define omptarget_nvptx_div(x, y) ((x) / (y)) #define omptarget_nvptx_div_rev(y, x) ((x) / (y)) #define omptarget_nvptx_min(x, y) ((x) > (y) ? (y) : (x)) #define omptarget_nvptx_max(x, y) ((x) < (y) ? 
(y) : (x)) #define omptarget_nvptx_andb(x, y) ((x) & (y)) #define omptarget_nvptx_orb(x, y) ((x) | (y)) #define omptarget_nvptx_xor(x, y) ((x) ^ (y)) #define omptarget_nvptx_shl(x, y) ((x) << (y)) #define omptarget_nvptx_shr(x, y) ((x) >> (y)) #define omptarget_nvptx_andl(x, y) ((x) && (y)) #define omptarget_nvptx_orl(x, y) ((x) || (y)) #define omptarget_nvptx_eqv(x, y) ((x) == (y)) #define omptarget_nvptx_neqv(x, y) ((x) != (y)) INLINE __device__ float atomicCAS(float *_addr, float _compare, float _val) { int *addr = (int *)_addr; int compare = __float_as_int(_compare); int val = __float_as_int(_val); return __int_as_float(atomicCAS(addr, compare, val)); } INLINE __device__ double atomicCAS(double *_addr, double _compare, double _val) { unsigned long long int *addr = (unsigned long long int *)_addr; unsigned long long int compare = __double_as_longlong(_compare); unsigned long long int val = __double_as_longlong(_val); return __longlong_as_double(atomicCAS(addr, compare, val)); } INLINE __device__ long long int atomicCAS(long long int *_addr, long long int _compare, long long int _val) { unsigned long long int *addr = (unsigned long long int *)_addr; unsigned long long int compare = (unsigned long long int)(_compare); unsigned long long int val = (unsigned long long int)(_val); return (long long int)(atomicCAS(addr, compare, val)); } INLINE __device__ int64_t atomicCAS(int64_t *_addr, int64_t _compare, int64_t _val) { unsigned long long int *addr = (unsigned long long int *)_addr; unsigned long long int compare = (unsigned long long int)(_compare); unsigned long long int val = (unsigned long long int)(_val); return (int64_t)(atomicCAS(addr, compare, val)); } INLINE __device__ uint64_t atomicCAS(uint64_t *_addr, uint64_t _compare, uint64_t _val) { unsigned long long int *addr = (unsigned long long int *)_addr; unsigned long long int compare = (unsigned long long int)(_compare); unsigned long long int val = (unsigned long long int)(_val); return (uint64_t)(atomicCAS(addr, compare, val)); } INLINE __device__ float complex atomicCAS(float complex *_addr, float complex _compare, float complex _val) { double *addr = (double *)_addr; double compare = (double)(_compare); double val = (double)(_val); return (float complex)(atomicCAS(addr, compare, val)); } #define ATOMIC_GENOP_NATIVE(_name, _dtype, _op, _cudaop) \ EXTERN void __kmpc_atomic_##_name##_##_op(kmp_Indent *id_ref, int32_t gtid, \ _dtype *lhs, _dtype rhs) { \ PRINT(LD_LOOP, "Reduction: thead %d\n", gtid); \ atomic##_cudaop(lhs, rhs); \ } \ \ EXTERN _dtype __kmpc_atomic_##_name##_##_op##_cpt( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs, int flag) { \ _dtype old = atomic##_cudaop(lhs, rhs); \ if (flag) { \ return omptarget_nvptx_##_op(old, rhs); \ } else { \ return old; \ } \ } // for types that are supported directly by atomicCAS #define ATOMIC_GENOP_DIRECT(_name, _dtype, _op) \ EXTERN void __kmpc_atomic_##_name##_##_op(kmp_Indent *id_ref, int32_t gtid, \ _dtype *lhs, _dtype rhs) { \ PRINT(LD_LOOP, "Reduction: thead %d\n", gtid); \ _dtype *temp_lhs = lhs; \ _dtype oldvalue = *temp_lhs; \ _dtype saved; \ _dtype newvalue; \ do { \ saved = oldvalue; \ newvalue = (_dtype)omptarget_nvptx_##_op(saved, rhs); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ } \ \ EXTERN _dtype __kmpc_atomic_##_name##_##_op##_cpt( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs, int flag) { \ _dtype *temp_lhs = lhs; \ _dtype oldvalue = *temp_lhs; \ _dtype saved; \ _dtype newvalue; \ do { \ saved = 
oldvalue; \ newvalue = (_dtype)omptarget_nvptx_##_op(saved, rhs); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ if (flag) \ return newvalue; \ else \ return oldvalue; \ } #define ATOMIC_GENOP_DIRECT_REV(_name, _dtype, _op) \ EXTERN void __kmpc_atomic_##_name##_##_op##_rev( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs) { \ _dtype *temp_lhs = lhs; \ _dtype oldvalue = *temp_lhs; \ _dtype saved; \ _dtype newvalue; \ do { \ saved = oldvalue; \ newvalue = (_dtype)omptarget_nvptx_##_op(rhs, saved); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ } \ \ EXTERN _dtype __kmpc_atomic_##_name##_##_op##_cpt##_rev( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs, int flag) { \ _dtype *temp_lhs = lhs; \ _dtype oldvalue = *temp_lhs; \ _dtype saved; \ _dtype newvalue; \ do { \ saved = oldvalue; \ newvalue = (_dtype)omptarget_nvptx_##_op(rhs, saved); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ if (flag) \ return newvalue; \ else \ return oldvalue; \ } INLINE __device__ void dc_add(double complex *lhs, double complex rhs) { double *ptrl = (double *)lhs; double *ptrr = (double *)&rhs; ptrl[0] += ptrr[0]; ptrl[1] += ptrr[1]; } INLINE __device__ void dc_sub(double complex *lhs, double complex rhs) { double *ptrl = (double *)lhs; double *ptrr = (double *)&rhs; ptrl[0] -= ptrr[0]; ptrl[1] -= ptrr[1]; } INLINE __device__ void dc_mul(double complex *lhs, double complex rhs) { double *ptrl = (double *)lhs; double *ptrr = (double *)&rhs; double r1 = ptrl[0], r2 = ptrr[0]; double i1 = ptrl[1], i2 = ptrr[1]; ptrl[0] = r1 * r2 - i1 * i2; ptrl[1] = r1 * i2 + r2 * i1; } INLINE __device__ void dc_div(double complex *lhs, double complex rhs) { double *ptrl = (double *)lhs; double *ptrr = (double *)&rhs; double r1 = ptrl[0], r2 = ptrr[0]; double i1 = ptrl[1], i2 = ptrr[1]; ptrl[0] = (r1 * r2 + i1 * i2) / (r2 * r2 + i2 * i2); ptrl[1] = (i1 * r2 - r1 * i2) / (r2 * r2 + i2 * i2); } #define ATOMIC_GENOP_DC(_op) \ EXTERN void __kmpc_atomic_cmplx8_##_op(kmp_Indent *id_ref, int32_t gtid, \ double _Complex *lhs, \ double _Complex rhs) { \ printf("Double complex atomic operation not supported\n"); \ asm("trap;"); \ return; \ } \ EXTERN double _Complex __kmpc_atomic_cmplx8_##_op##_cpt(kmp_Indent *id_ref, \ int32_t gtid, \ double _Complex *lhs, \ double _Complex rhs, \ int flag) { \ printf("Double complex atomic operation not supported\n"); \ asm("trap;"); \ return rhs; \ } \ EXTERN double _Complex __gpu_warpBlockRedu_cmplx8_##_op( \ double _Complex rhs) { \ __shared__ double _Complex lhs; \ if (threadIdx.x == 0) \ lhs = rhs; \ __syncthreads(); \ for (int i = 1; i < blockDim.x; i++) { \ if (threadIdx.x == i) { \ dc_##_op(&lhs, rhs); \ } \ __syncthreads(); \ } \ return lhs; \ } #define ATOMIC_GENOP_DC_REV(_op) \ EXTERN void __kmpc_atomic_cmplx8_##_op##_rev(kmp_Indent *id_ref, int32_t gtid, \ double _Complex *lhs, \ double _Complex rhs) { \ printf("Double complex atomic operation not supported\n"); \ asm("trap;"); \ return; \ } \ EXTERN double _Complex __kmpc_atomic_cmplx8_##_op##_cpt_rev( \ kmp_Indent *id_ref, \ int32_t gtid, \ double _Complex *lhs, \ double _Complex rhs, \ int flag) { \ printf("Double complex atomic operation not supported\n"); \ asm("trap;"); \ return rhs; \ } ATOMIC_GENOP_DC(add); ATOMIC_GENOP_DC(sub); ATOMIC_GENOP_DC(mul); ATOMIC_GENOP_DC(div); ATOMIC_GENOP_DC_REV(sub) ATOMIC_GENOP_DC_REV(div) INLINE __device__ uint64_t fc_add(float r1, float i1, float r2, float i2) { uint64_t 
result; float *rr = (float *)&result; float *ri = rr + 1; *rr = r1 + r2; *ri = i1 + i2; return result; } INLINE __device__ uint64_t fc_sub(float r1, float i1, float r2, float i2) { uint64_t result; float *rr = (float *)&result; float *ri = rr + 1; *rr = r1 - r2; *ri = i1 - i2; return result; } INLINE __device__ uint64_t fc_mul(float r1, float i1, float r2, float i2) { uint64_t result; float *rr = (float *)&result; float *ri = rr + 1; *rr = r1 * r2 - i1 * i2; *ri = r1 * i2 + r2 * i1; return result; } INLINE __device__ uint64_t fc_div(float r1, float i1, float r2, float i2) { uint64_t result; float *rr = (float *)&result; float *ri = rr + 1; *rr = (r1 * r2 + i1 * i2) / (r2 * r2 + i2 * i2); *ri = (i1 * r2 - r1 * i2) / (r2 * r2 + i2 * i2); return result; } #define ATOMIC_GENOP_FC(_op) \ EXTERN void __kmpc_atomic_cmplx4_##_op(kmp_Indent *id_ref, int32_t gtid, \ float complex *lhs, \ float complex rhs) { \ uint64_t *temp_lhs = (uint64_t *)lhs; \ uint64_t oldvalue = *temp_lhs; \ uint64_t saved; \ float *pr1 = (float *)&rhs; \ float *pi1 = pr1 + 1; \ float r1 = *pr1; \ float i1 = *pi1; \ uint64_t newvalue; \ do { \ saved = oldvalue; \ float *pr2 = (float *)&saved; \ float *pi2 = pr2 + 1; \ newvalue = fc_##_op(*pr2, *pi2, r1, i1); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ } \ \ EXTERN void __kmpc_atomic_cmplx4_##_op##_cpt( \ kmp_Indent *id_ref, int32_t gtid, float complex *lhs, float complex rhs, \ float complex *outp, int flag) { \ uint64_t *temp_lhs = (uint64_t *)lhs; \ uint64_t oldvalue = *temp_lhs; \ uint64_t saved; \ float *pr1 = (float *)&rhs; \ float *pi1 = pr1 + 1; \ float r1 = *pr1; \ float i1 = *pi1; \ uint64_t newvalue; \ do { \ saved = oldvalue; \ float *pr2 = (float *)&saved; \ float *pi2 = pr2 + 1; \ newvalue = fc_##_op(*pr2, *pi2, r1, i1); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ if (flag) { \ float complex *temp = (float complex *)&newvalue; \ *outp = *temp; \ } else { \ float complex *temp = (float complex *)&saved; \ *outp = *temp; \ } \ } #define ATOMIC_GENOP_FC_REV(_op) \ EXTERN void __kmpc_atomic_cmplx4_##_op##_rev( \ kmp_Indent *id_ref, int32_t gtid, float complex *lhs, \ float complex rhs) { \ uint64_t *temp_lhs = (uint64_t *)lhs; \ uint64_t oldvalue = *temp_lhs; \ uint64_t saved; \ float *pr1 = (float *)&rhs; \ float *pi1 = pr1 + 1; \ float r1 = *pr1; \ float i1 = *pi1; \ uint64_t newvalue; \ do { \ saved = oldvalue; \ float *pr2 = (float *)&saved; \ float *pi2 = pr2 + 1; \ newvalue = fc_##_op(r1, i1, *pr2, *pi2); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ } \ \ EXTERN void __kmpc_atomic_cmplx4_##_op##_cpt##_rev( \ kmp_Indent *id_ref, int32_t gtid, float complex *lhs, float complex rhs, \ float complex *outp, int flag) { \ uint64_t *temp_lhs = (uint64_t *)lhs; \ uint64_t oldvalue = *temp_lhs; \ uint64_t saved; \ float *pr1 = (float *)&rhs; \ float *pi1 = pr1 + 1; \ float r1 = *pr1; \ float i1 = *pi1; \ uint64_t newvalue; \ do { \ saved = oldvalue; \ float *pr2 = (float *)&saved; \ float *pi2 = pr2 + 1; \ newvalue = fc_##_op(r1, i1, *pr2, *pi2); \ oldvalue = atomicCAS(temp_lhs, saved, newvalue); \ } while (saved != oldvalue); \ if (flag) { \ float complex *temp = (float complex *)&newvalue; \ *outp = *temp; \ } else { \ float complex *temp = (float complex *)&saved; \ *outp = *temp; \ } \ } ATOMIC_GENOP_FC(add); ATOMIC_GENOP_FC(sub); ATOMIC_GENOP_FC_REV(sub); ATOMIC_GENOP_FC(mul); ATOMIC_GENOP_FC(div); ATOMIC_GENOP_FC_REV(div); // for int and uint 
#define ATOMIC_GENOP_ALL_MIXED(_name, _dirname, _tname, _optype) \ _dirname(_tname, _optype, add, Add); \ _dirname(_tname, _optype, sub, Sub); \ _name##_REV(_tname, _optype, sub); \ _name(_tname, _optype, mul); \ _name(_tname, _optype, div); \ _name##_REV(_tname, _optype, div); \ _dirname(_tname, _optype, min, Min); \ _dirname(_tname, _optype, max, Max); \ _dirname(_tname, _optype, andb, And); \ _dirname(_tname, _optype, orb, Or); \ _dirname(_tname, _optype, xor, Xor); \ _name(_tname, _optype, shl); \ _name(_tname, _optype, shr); \ _name(_tname, _optype, andl); \ _name(_tname, _optype, orl); \ _name(_tname, _optype, eqv); \ _name(_tname, _optype, neqv); #define ATOMIC_GENOP_ALL(_name, _tname, _optype) \ _name(_tname, _optype, add); \ _name(_tname, _optype, sub); \ _name##_REV(_tname, _optype, sub); \ _name(_tname, _optype, mul); \ _name(_tname, _optype, div); \ _name##_REV(_tname, _optype, div); \ _name(_tname, _optype, min); \ _name(_tname, _optype, max); \ _name(_tname, _optype, andb); \ _name(_tname, _optype, orb); \ _name(_tname, _optype, xor); \ _name(_tname, _optype, shl); \ _name(_tname, _optype, shr); \ _name(_tname, _optype, andl); \ _name(_tname, _optype, orl); \ _name(_tname, _optype, eqv); \ _name(_tname, _optype, neqv); #define ATOMIC_GENOP_FLOAT(_name, _tname, _optype) \ _name(_tname, _optype, add); \ _name(_tname, _optype, sub); \ _name##_REV(_tname, _optype, sub); \ _name(_tname, _optype, mul); \ _name(_tname, _optype, div); \ _name##_REV(_tname, _optype, div); \ _name(_tname, _optype, min); \ _name(_tname, _optype, max); ATOMIC_GENOP_ALL_MIXED(ATOMIC_GENOP_DIRECT, ATOMIC_GENOP_NATIVE, fixed4, int32_t); ATOMIC_GENOP_ALL_MIXED(ATOMIC_GENOP_DIRECT, ATOMIC_GENOP_NATIVE, fixed4u, uint32_t); ATOMIC_GENOP_ALL(ATOMIC_GENOP_DIRECT, fixed8, int64_t); ATOMIC_GENOP_ALL(ATOMIC_GENOP_DIRECT, fixed8u, uint64_t); ATOMIC_GENOP_FLOAT(ATOMIC_GENOP_DIRECT, float4, float); ATOMIC_GENOP_FLOAT(ATOMIC_GENOP_DIRECT, float8, double); // // data type of size not 32 nor 64 // typedef enum { omptarget_nvptx_inc, omptarget_nvptx_dec, omptarget_nvptx_add, omptarget_nvptx_sub, omptarget_nvptx_sub_rev, omptarget_nvptx_mul, omptarget_nvptx_div, omptarget_nvptx_div_rev, omptarget_nvptx_min, omptarget_nvptx_max, omptarget_nvptx_rd, omptarget_nvptx_wr, omptarget_nvptx_swp, omptarget_nvptx_andb, omptarget_nvptx_orb, omptarget_nvptx_xor, omptarget_nvptx_andl, omptarget_nvptx_orl, omptarget_nvptx_eqv, omptarget_nvptx_neqv, omptarget_nvptx_shl, omptarget_nvptx_shl_rev, omptarget_nvptx_shr, omptarget_nvptx_shr_rev, } omptarget_nvptx_BINOP_t; template <typename OpType, // type of the operation performed omptarget_nvptx_BINOP_t binop // enum describing the operation > INLINE __device__ OpType Compute(OpType a, OpType b) // a is old value, b is new value { OpType res = 0; if (binop == omptarget_nvptx_inc) res = a + b; if (binop == omptarget_nvptx_dec) res = a - b; if (binop == omptarget_nvptx_add) res = a + b; if (binop == omptarget_nvptx_sub) res = a - b; if (binop == omptarget_nvptx_sub_rev) res = b - a; if (binop == omptarget_nvptx_mul) res = a * b; if (binop == omptarget_nvptx_div) res = a / b; if (binop == omptarget_nvptx_div_rev) res = b / a; if (binop == omptarget_nvptx_min) res = a < b ? a : b; if (binop == omptarget_nvptx_max) res = a > b ? 
a : b; if (binop == omptarget_nvptx_rd) res = a; // read if (binop == omptarget_nvptx_wr) res = b; // write and swap are the same if (binop == omptarget_nvptx_swp) res = b; // write and swap are the same if (binop == omptarget_nvptx_andb) res = a & b; if (binop == omptarget_nvptx_orb) res = a | b; if (binop == omptarget_nvptx_xor) res = a ^ b; if (binop == omptarget_nvptx_andl) res = a && b; if (binop == omptarget_nvptx_orl) res = a || b; if (binop == omptarget_nvptx_eqv) res = a == b; if (binop == omptarget_nvptx_neqv) res = a != b; if (binop == omptarget_nvptx_shl) res = a << b; if (binop == omptarget_nvptx_shl_rev) res = b << a; if (binop == omptarget_nvptx_shr) res = a >> b; if (binop == omptarget_nvptx_shr_rev) res = b >> a; return res; } /* specialize the template to avoid the switch at runtime */ template <> INLINE __device__ float Compute<float, omptarget_nvptx_add>(float a, float b) { return a + b; } template <> INLINE __device__ float Compute<float, omptarget_nvptx_sub>(float a, float b) { return a - b; } template <> INLINE __device__ float Compute<float, omptarget_nvptx_mul>(float a, float b) { return a * b; } template <> INLINE __device__ float Compute<float, omptarget_nvptx_div>(float a, float b) { return a / b; } template <> INLINE __device__ float Compute<float, omptarget_nvptx_min>(float a, float b) { return a < b ? a : b; } template <> INLINE __device__ float Compute<float, omptarget_nvptx_max>(float a, float b) { return a > b ? a : b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_add>(double a, double b) { return a + b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_sub>(double a, double b) { return a - b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_mul>(double a, double b) { return a * b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_div>(double a, double b) { return a / b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_min>(double a, double b) { return a < b ? a : b; } template <> INLINE __device__ double Compute<double, omptarget_nvptx_max>(double a, double b) { return a > b ? 
a : b; } //////////////////////////////////////////////////////////////////////////////// // common atomic slicing functions (modifying only a part of a word) //////////////////////////////////////////////////////////////////////////////// template <typename MemType, // type of the underlying atomic memory operation typename OpType // type of the operation performed > INLINE __device__ void ComputeAtomic_PrepareSlice( OpType *addr, // original address MemType **memAddrPtr, // truncated address to MemType boundary MemType *memBitShiftRightPtr, // bits to shift to move val to rightmost position MemType *memValMaskInPlacePtr) // mask of val in proper position { // compute the mask that corresponds to the natural alignment of memType // int -> 0x3; long long -> 0x7 unsigned long memAddrMask = sizeof(MemType) - 1; // compute the addr of the atomic variable truncated to alignment of memType *memAddrPtr = (MemType *)((unsigned long)addr & ~memAddrMask); // compute the number of bit shift to move the target atomic value in // the rightmost position unsigned long byteOffsetInMem = (unsigned long)addr & memAddrMask; // assumes little-endian unsigned long byteShiftRight = byteOffsetInMem; *memBitShiftRightPtr = (MemType)(byteShiftRight << 3); // 3: byte to bits // mask to isolate target atomic value located in rightmost position MemType memValMask = ((MemType)1 << (sizeof(OpType) << 3)) - 1; // mask to isolate target atomic value located in place *memValMaskInPlacePtr = memValMask << *memBitShiftRightPtr; } template <typename MemType, // type of the underlying atomic memory operation typename OpType, // type of the operation performed omptarget_nvptx_BINOP_t binop // enum describing the operation > INLINE __device__ MemType ComputeAtomic_ComputeSlice( MemType oldMemVal, // old value OpType val, // value to compute with MemType memBitShiftRight, // bits to shift to move val to rightmost position MemType memValMaskInPlace // mask of val in proper position ) { OpType oldValtmp; OpType newValtmp; // select target atomic val MemType oldMemVal_targetVal = oldMemVal & memValMaskInPlace; MemType oldMemVal_otherVal = oldMemVal & ~memValMaskInPlace; // shift target atomic val to rightmost place: this is the old value // type conversion?? oldValtmp = (OpType)(oldMemVal_targetVal >> memBitShiftRight); // perform op newValtmp = Compute<OpType, binop>(oldValtmp, val); // insert new value in old world mem // type conversion?? 
MemType newMemVal_targetVal = ((MemType)newValtmp) << memBitShiftRight; newMemVal_targetVal &= memValMaskInPlace; MemType newMemVal = oldMemVal_otherVal | newMemVal_targetVal; return newMemVal; } #define ATOMIC_GENOP_PARTIAL(_name, _dtype, _op, _memType) \ EXTERN void __kmpc_atomic_##_name##_##_op(kmp_Indent *id_ref, int32_t gtid, \ _dtype *lhs, _dtype rhs) { \ _memType *memAddr; \ _memType memBitShiftRightPtr; \ _memType memValMaskInPlacePtr; \ ComputeAtomic_PrepareSlice<_memType, _dtype>( \ lhs, &memAddr, &memBitShiftRightPtr, &memValMaskInPlacePtr); \ _memType oldMemVal, newMemVal; \ oldMemVal = *memAddr; \ _memType savedMemVal; \ do { \ savedMemVal = oldMemVal; \ newMemVal = \ ComputeAtomic_ComputeSlice<_memType, _dtype, omptarget_nvptx_##_op>( \ oldMemVal, rhs, memBitShiftRightPtr, memValMaskInPlacePtr); \ oldMemVal = atomicCAS(memAddr, savedMemVal, newMemVal); \ } while (savedMemVal != oldMemVal); \ } \ \ EXTERN _dtype __kmpc_atomic_##_name##_##_op##_cpt( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs, int flag) { \ _memType *memAddr; \ _memType memBitShiftRightPtr; \ _memType memValMaskInPlacePtr; \ ComputeAtomic_PrepareSlice<_memType, _dtype>( \ lhs, &memAddr, &memBitShiftRightPtr, &memValMaskInPlacePtr); \ _memType oldMemVal, newMemVal; \ oldMemVal = *memAddr; \ _memType savedMemVal; \ do { \ savedMemVal = oldMemVal; \ newMemVal = \ ComputeAtomic_ComputeSlice<_memType, _dtype, omptarget_nvptx_##_op>( \ oldMemVal, rhs, memBitShiftRightPtr, memValMaskInPlacePtr); \ oldMemVal = atomicCAS(memAddr, savedMemVal, newMemVal); \ } while (savedMemVal != oldMemVal); \ if (flag) \ return (_dtype)((newMemVal & memValMaskInPlacePtr) >> \ memBitShiftRightPtr); \ else \ return (_dtype)((oldMemVal & memValMaskInPlacePtr) >> \ memBitShiftRightPtr); \ } #define ATOMIC_GENOP_PARTIAL_REV(_name, _dtype, _op, _memType) \ EXTERN void __kmpc_atomic_##_name##_##_op##_rev( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs) { \ _memType *memAddr; \ _memType memBitShiftRightPtr; \ _memType memValMaskInPlacePtr; \ ComputeAtomic_PrepareSlice<_memType, _dtype>( \ lhs, &memAddr, &memBitShiftRightPtr, &memValMaskInPlacePtr); \ _memType oldMemVal, newMemVal; \ oldMemVal = *memAddr; \ _memType savedMemVal; \ do { \ savedMemVal = oldMemVal; \ newMemVal = \ ComputeAtomic_ComputeSlice<_memType, _dtype, omptarget_nvptx_##_op>( \ oldMemVal, rhs, memBitShiftRightPtr, memValMaskInPlacePtr); \ oldMemVal = atomicCAS(memAddr, savedMemVal, newMemVal); \ } while (savedMemVal != oldMemVal); \ } \ \ EXTERN _dtype __kmpc_atomic_##_name##_##_op##_cpt_rev( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype rhs, int flag) { \ _memType *memAddr; \ _memType memBitShiftRightPtr; \ _memType memValMaskInPlacePtr; \ ComputeAtomic_PrepareSlice<_memType, _dtype>( \ lhs, &memAddr, &memBitShiftRightPtr, &memValMaskInPlacePtr); \ _memType oldMemVal, newMemVal; \ oldMemVal = *memAddr; \ _memType savedMemVal; \ do { \ savedMemVal = oldMemVal; \ newMemVal = \ ComputeAtomic_ComputeSlice<_memType, _dtype, omptarget_nvptx_##_op>( \ oldMemVal, rhs, memBitShiftRightPtr, memValMaskInPlacePtr); \ oldMemVal = atomicCAS(memAddr, savedMemVal, newMemVal); \ } while (savedMemVal != oldMemVal); \ if (flag) \ return (_dtype)((newMemVal & memValMaskInPlacePtr) >> \ memBitShiftRightPtr); \ else \ return (_dtype)((oldMemVal & memValMaskInPlacePtr) >> \ memBitShiftRightPtr); \ } #define ATOMIC_GENOP_ALL4(_name, _tname, _optype, _memtype) \ _name(_tname, _optype, add, _memtype); \ _name(_tname, _optype, sub, _memtype); \ 
_name##_REV(_tname, _optype, sub_rev, _memtype); \ _name(_tname, _optype, mul, _memtype); \ _name(_tname, _optype, div, _memtype); \ _name##_REV(_tname, _optype, div_rev, _memtype); \ _name(_tname, _optype, min, _memtype); \ _name(_tname, _optype, max, _memtype); \ _name(_tname, _optype, andb, _memtype); \ _name(_tname, _optype, orb, _memtype); \ _name(_tname, _optype, xor, _memtype); \ _name(_tname, _optype, andl, _memtype); \ _name(_tname, _optype, orl, _memtype); \ _name(_tname, _optype, eqv, _memtype); \ _name(_tname, _optype, neqv, _memtype); \ _name(_tname, _optype, shl, _memtype); \ _name(_tname, _optype, shr, _memtype); ATOMIC_GENOP_ALL4(ATOMIC_GENOP_PARTIAL, fixed1, int8_t, int32_t); ATOMIC_GENOP_ALL4(ATOMIC_GENOP_PARTIAL, fixed1u, uint8_t, int32_t); ATOMIC_GENOP_ALL4(ATOMIC_GENOP_PARTIAL, fixed2u, uint16_t, int32_t); ATOMIC_GENOP_ALL4(ATOMIC_GENOP_PARTIAL, fixed2, int16_t, int32_t); EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) { return __SHFL_DOWN_SYNC(0xFFFFFFFF, val, delta, size); } EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) { int lo, hi; asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(val)); hi = __SHFL_DOWN_SYNC(0xFFFFFFFF, hi, delta, size); lo = __SHFL_DOWN_SYNC(0xFFFFFFFF, lo, delta, size); asm volatile("mov.b64 %0, {%1,%2};" : "=l"(val) : "r"(lo), "r"(hi)); return val; } template <typename T, omptarget_nvptx_BINOP_t binop> __inline__ __device__ T reduInitVal() { switch (binop) { case omptarget_nvptx_mul: case omptarget_nvptx_div: case omptarget_nvptx_div_rev: case omptarget_nvptx_andl: case omptarget_nvptx_andb: return (T)1; default: return (T)0; } } #define MYGSIZE 32 static INLINE void gpu_regular_warp_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) { for (uint32_t mask = WARPSIZE/2; mask > 0; mask /= 2) { shflFct(reduce_data, /*LaneId - not used= */0, /*Offset = */mask, /*AlgoVersion=*/0); } } static INLINE void gpu_irregular_warp_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct, uint32_t size, uint32_t tid) { uint32_t curr_size; uint32_t mask; curr_size = size; mask = curr_size/2; while (mask>0) { shflFct(reduce_data, /*LaneId = */tid, /*Offset=*/mask, /*AlgoVersion=*/1); curr_size = (curr_size+1)/2; mask = curr_size/2; } } static INLINE uint32_t gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) { uint32_t lanemask_lt; uint32_t lanemask_gt; uint32_t size, remote_id, physical_lane_id; physical_lane_id = GetThreadIdInBlock() % WARPSIZE; asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask_lt)); uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true); uint32_t logical_lane_id = __popc(Liveness & lanemask_lt) * 2; asm("mov.u32 %0, %%lanemask_gt;" : "=r"(lanemask_gt)); do { Liveness = __BALLOT_SYNC(0xFFFFFFFF, true); remote_id = __ffs(Liveness & lanemask_gt); size = __popc(Liveness); logical_lane_id /= 2; shflFct(reduce_data, /*LaneId =*/logical_lane_id, /*Offset=*/remote_id-1-physical_lane_id, /*AlgoVersion=*/2); } while (logical_lane_id % 2 == 0 && size > 1); return (logical_lane_id == 0); } // // runtime support for array reduction // #define ARRAYATOMIC_GENOP(_name, _dtype, _op) \ EXTERN void __array_atomic_##_name##_##_op( \ kmp_Indent *id_ref, int32_t gtid, _dtype *lhs, _dtype *rhs, int64_t n) { \ PRINT(LD_LOOP, "Reduction: thead %d\n", gtid); \ for (int i = 0; i < n / sizeof(_dtype); i++) { \ __kmpc_atomic_##_name##_##_op(id_ref, gtid, lhs + i, rhs[i]); \ } \ } \ EXTERN void __gpu_array_warpBlockRedu_##_name##_##_op(_dtype *ldata, \ int64_t n) { 
\ for (int i = 0; i < n / sizeof(_dtype); i++) { \ ldata[i] = __gpu_warpBlockRedu_##_name##_##_op(ldata[i]); \ } \ } #define ARRAY_GEN_ALLOP_INTEGER(_name, _tname, _optype) \ _name(_tname, _optype, add); \ _name(_tname, _optype, sub); \ _name(_tname, _optype, mul); \ _name(_tname, _optype, div); \ _name(_tname, _optype, min); \ _name(_tname, _optype, max); \ _name(_tname, _optype, andb); \ _name(_tname, _optype, orb); \ _name(_tname, _optype, xor); \ _name(_tname, _optype, shl); \ _name(_tname, _optype, shr); \ _name(_tname, _optype, andl); \ _name(_tname, _optype, orl); \ _name(_tname, _optype, eqv); \ _name(_tname, _optype, neqv); #define ARRAY_GEN_ALLOP_FLOAT(_name, _tname, _optype) \ _name(_tname, _optype, add); \ _name(_tname, _optype, sub); \ _name(_tname, _optype, mul); \ _name(_tname, _optype, div); \ _name(_tname, _optype, min); \ _name(_tname, _optype, max); EXTERN int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true); if (Liveness == 0xffffffff) { gpu_regular_warp_reduce(reduce_data, shflFct); return GetThreadIdInBlock() % WARPSIZE == 0; // Result on lane 0 of the simd warp. } else { return gpu_irregular_simd_reduce(reduce_data, shflFct); // Result on the first active lane. } } #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 INLINE int32_t nvptx_parallel_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, bool isSPMDExecutionMode, bool isRuntimeUninitialized = false) { /* * This reduce function handles reduction within a team. It handles * parallel regions in both L1 and L2 parallelism levels. It also * supports Generic, SPMD, and NoOMP modes. * * 1. Reduce within a warp. * 2. Warp master copies value to warp 0 via shared memory. * 3. Warp 0 reduces to a single value. * 4. The reduced value is available in the thread that returns 1. */ uint32_t BlockThreadId = GetLogicalThreadIdInBlock(); uint32_t NumThreads = GetNumberOfOmpThreads(BlockThreadId, isSPMDExecutionMode, isRuntimeUninitialized); uint32_t WarpsNeeded = (NumThreads+WARPSIZE-1)/WARPSIZE; uint32_t WarpId = BlockThreadId/WARPSIZE; // Volta execution model: // For the Generic execution mode a parallel region either has 1 thread and beyond that, // always a multiple of 32. // For the SPMD execution mode we may have any number of threads. if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1)) gpu_regular_warp_reduce(reduce_data, shflFct); else if (NumThreads > 1) // Only SPMD execution mode comes thru this case. gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/NumThreads % WARPSIZE, /*LaneId=*/GetThreadIdInBlock() % WARPSIZE); // else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2 parallel region may enter here; return early. // return gpu_irregular_simd_reduce(reduce_data, shflFct); // When we have more than [warpsize] number of threads // a block reduction is performed here. // // Only L1 parallel region can enter this if condition. if (NumThreads > WARPSIZE) { // Gather all the reduced values from each warp // to the first warp. 
cpyFct(reduce_data, WarpsNeeded); if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, BlockThreadId); return BlockThreadId == 0; // } else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) { // return BlockThreadId == 0; } return BlockThreadId == 0; // Get the OMP thread Id. This is different from BlockThreadId in the case of // an L2 parallel region. // return GetOmpThreadId(BlockThreadId, isSPMDExecutionMode, isRuntimeUninitialized) == 0; } #else INLINE int32_t nvptx_parallel_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, bool isSPMDExecutionMode, bool isRuntimeUninitialized = false) { /* * This reduce function handles reduction within a team. It handles * parallel regions in both L1 and L2 parallelism levels. It also * supports Generic, SPMD, and NoOMP modes. * * 1. Reduce within a warp. * 2. Warp master copies value to warp 0 via shared memory. * 3. Warp 0 reduces to a single value. * 4. The reduced value is available in the thread that returns 1. */ uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true); if (Liveness == 0xffffffff) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/__popc(Liveness), /*LaneId=*/GetThreadIdInBlock() % WARPSIZE); else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2 parallel region may enter here; return early. return gpu_irregular_simd_reduce(reduce_data, shflFct); uint32_t BlockThreadId = GetLogicalThreadIdInBlock(); uint32_t NumThreads = GetNumberOfOmpThreads(BlockThreadId, isSPMDExecutionMode, isRuntimeUninitialized); // When we have more than [warpsize] number of threads // a block reduction is performed here. // // Only L1 parallel region can enter this if condition. if (NumThreads > WARPSIZE) { uint32_t WarpsNeeded = (NumThreads+WARPSIZE-1)/WARPSIZE; // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = BlockThreadId/WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, BlockThreadId); return BlockThreadId == 0; } else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) { return BlockThreadId == 0; } // Get the OMP thread Id. This is different from BlockThreadId in the case of // an L2 parallel region. 
return GetOmpThreadId(BlockThreadId, isSPMDExecutionMode, isRuntimeUninitialized) == 0; } #endif // __CUDA_ARCH__ >= 700 EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, /*isSPMDExecutionMode=*/isSPMDMode()); } EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, /*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true); } EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, /*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true); } #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 INLINE int32_t nvptx_teams_reduce_nowait( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct, bool isSPMDExecutionMode, bool isRuntimeUninitialized = false) { uint32_t ThreadId = GetLogicalThreadIdInBlock(); // In non-generic mode all workers participate in the teams reduction. // In generic mode only the team master participates in the teams // reduction because the workers are waiting for parallel work. uint32_t NumThreads = isSPMDExecutionMode ? GetNumberOfOmpThreads(ThreadId, /*isSPMDExecutionMode=*/true, isRuntimeUninitialized) : /*Master thread only*/ 1; uint32_t TeamId = GetBlockIdInKernel(); uint32_t NumTeams = GetNumberOfBlocksInKernel(); __shared__ volatile bool IsLastTeam; // Team masters of all teams write to the scratchpad. if (ThreadId == 0) { unsigned int *timestamp = GetTeamsReductionTimestamp(); char *scratchpad = GetTeamsReductionScratchpad(); scratchFct(reduce_data, scratchpad, TeamId, NumTeams); __threadfence(); // atomicInc increments 'timestamp' and has a range [0, NumTeams-1]. // It resets 'timestamp' back to 0 once the last team increments // this counter. unsigned val = atomicInc(timestamp, NumTeams-1); IsLastTeam = val == NumTeams - 1; } // We have to wait on L1 barrier because in GENERIC mode the workers // are waiting on barrier 0 for work. // // If we guard this barrier as follows it leads to deadlock, probably // because of a compiler bug: if (!IsGenericMode()) __syncthreads(); uint16_t SyncWarps = (NumThreads+WARPSIZE-1)/WARPSIZE; named_sync(L1_BARRIER, SyncWarps*WARPSIZE); // If this team is not the last, quit. if (/* Volatile read by all threads */ !IsLastTeam) return 0; // // Last team processing. // // Threads in excess of #teams do not participate in reduction of the // scratchpad values. uint32_t ActiveThreads = NumThreads; if (NumTeams < NumThreads) { ActiveThreads = (NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1); } if (ThreadId >= ActiveThreads) return 0; // Load from scratchpad and reduce. 
char *scratchpad = GetTeamsReductionScratchpad(); ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/0); for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads) ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/1); uint32_t WarpsNeeded = (ActiveThreads+WARPSIZE-1)/WARPSIZE; uint32_t WarpId = ThreadId/WARPSIZE; // Reduce across warps to the warp master. if ((ActiveThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1)) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else if (ActiveThreads > 1) // Partial warp but contiguous lanes // Only SPMD execution mode comes thru this case. gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/ActiveThreads % WARPSIZE, /*LaneId=*/ThreadId % WARPSIZE); // When we have more than [warpsize] number of threads // a block reduction is performed here. if (ActiveThreads > WARPSIZE) { // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId); } return ThreadId == 0; } #else INLINE int32_t nvptx_teams_reduce_nowait( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct, bool isSPMDExecutionMode, bool isRuntimeUninitialized = false) { uint32_t ThreadId = GetLogicalThreadIdInBlock(); // In non-generic mode all workers participate in the teams reduction. // In generic mode only the team master participates in the teams // reduction because the workers are waiting for parallel work. uint32_t NumThreads = isSPMDExecutionMode ? GetNumberOfOmpThreads(ThreadId, /*isSPMDExecutionMode=*/true, isRuntimeUninitialized) : /*Master thread only*/ 1; uint32_t TeamId = GetBlockIdInKernel(); uint32_t NumTeams = GetNumberOfBlocksInKernel(); __shared__ volatile bool IsLastTeam; // Team masters of all teams write to the scratchpad. if (ThreadId == 0) { unsigned int *timestamp = GetTeamsReductionTimestamp(); char *scratchpad = GetTeamsReductionScratchpad(); scratchFct(reduce_data, scratchpad, TeamId, NumTeams); __threadfence(); // atomicInc increments 'timestamp' and has a range [0, NumTeams-1]. // It resets 'timestamp' back to 0 once the last team increments // this counter. unsigned val = atomicInc(timestamp, NumTeams-1); IsLastTeam = val == NumTeams - 1; } // We have to wait on L1 barrier because in GENERIC mode the workers // are waiting on barrier 0 for work. // // If we guard this barrier as follows it leads to deadlock, probably // because of a compiler bug: if (!IsGenericMode()) __syncthreads(); uint16_t SyncWarps = (NumThreads+WARPSIZE-1)/WARPSIZE; named_sync(L1_BARRIER, SyncWarps*WARPSIZE); // If this team is not the last, quit. if (/* Volatile read by all threads */ !IsLastTeam) return 0; // // Last team processing. // // Threads in excess of #teams do not participate in reduction of the // scratchpad values. if (ThreadId >= NumTeams) return 0; // Load from scratchpad and reduce. char *scratchpad = GetTeamsReductionScratchpad(); ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/0); for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads) ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/1); // Reduce across warps to the warp master. 
uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true); if (Liveness == 0xffffffff) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else // Partial warp but contiguous lanes gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/__popc(Liveness), /*LaneId=*/ThreadId % WARPSIZE); // When we have more than [warpsize] number of threads // a block reduction is performed here. uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads; if (ActiveThreads > WARPSIZE) { uint32_t WarpsNeeded = (ActiveThreads+WARPSIZE-1)/WARPSIZE; // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = ThreadId/WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId); } return ThreadId == 0; } #endif // __CUDA_ARCH__ >= 700 EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, /*isSPMDExecutionMode=*/isSPMDMode()); } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, /*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true); } EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) { return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, scratchFct, ldFct, /*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true); }
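// The parallel/teams reductions above all follow the same three-step shape described in
// their comments: (1) reduce within each warp using shuffles, (2) have each warp master
// stage its partial result in shared memory, (3) let warp 0 combine the staged values.
// Below is a minimal standalone CUDA sketch of that pattern for a float sum. The kernel
// and helper names (warpReduceSum, blockSumKernel) are illustrative only and are not part
// of the runtime above, and the sketch assumes blockDim.x is a multiple of 32.
#include <cuda_runtime.h>

__inline__ __device__ float warpReduceSum(float val) {
  for (int offset = warpSize / 2; offset > 0; offset /= 2)
    val += __shfl_down_sync(0xFFFFFFFFu, val, offset);   // step 1: in-warp shuffle tree
  return val;
}

__global__ void blockSumKernel(const float *in, float *out, int n) {
  __shared__ float staged[32];                 // one slot per warp (up to 1024 threads)
  int gid  = blockIdx.x * blockDim.x + threadIdx.x;
  int lane = threadIdx.x % warpSize;
  int warp = threadIdx.x / warpSize;

  float val = (gid < n) ? in[gid] : 0.0f;
  val = warpReduceSum(val);                    // step 1: reduce within each warp
  if (lane == 0) staged[warp] = val;           // step 2: warp master stages its result
  __syncthreads();

  if (warp == 0) {                             // step 3: warp 0 reduces the staged values
    int nWarps = blockDim.x / warpSize;
    val = (lane < nWarps) ? staged[lane] : 0.0f;
    val = warpReduceSum(val);
    if (lane == 0) atomicAdd(out, val);        // accumulate the per-block results
  }
}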
b08b5134a72028efc1937be4baf8983c5b268a8d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cutil.h> // Includes #include <stdio.h> // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> //NI DAQ #include "../include/ContAcq-IntClk.h" #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define ITERATIONS REPLACE_ITERATIONS // Variables bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit_int(unsigned*, int); void RandomInit_fp(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal1(unsigned *A, unsigned *B, int N) { int id = blockDim.x * blockIdx.x + threadIdx.x; int cta_id=blockDim.x * blockIdx.x; int offset=THREADS_PER_BLOCK/2; unsigned sum=0; if(id < N){ for(unsigned i=0; i<ITERATIONS; ++i){ A[id] = A[id] + B[id] + id; //for(unsigned j=0; j<ITERATIONS/4; ++j){ sum += A[id]; sum += A[id+1]; sum += A[id+2]; if(id>cta_id+offset){ A[id+5]=sum; A[id+6]=sum; A[id+7]=sum; A[id+8]=sum; A[id+9]=sum; } sum *= A[id+3]; sum *= A[id+4]; sum *= A[id+10]; if(id>cta_id+offset && id<cta_id+(offset+offset/2)){ sum += A[id+11]; A[id+12]=sum; sum += A[id+13]; A[id+14]=sum; sum += A[id+15]; } A[id] = sum+A[id]+B[id]; } } } __global__ void PowerKernalEmpty(unsigned* C, int N) { unsigned id = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation __syncthreads(); // Excessive Mod/Div Operations for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) { //Value1=(I1)+k; //Value2=(I2)+k; //Value3=(Value2)+k; //Value2=(Value1)+k; /* __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); */ } C[id]=id; __syncthreads(); } // Host code unsigned *h_A1, 
*h_A2, *h_A3;
unsigned *d_A1, *d_A2, *d_A3;

int main()
{
 printf("Power Microbenchmarks\n");
 int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;

 // Allocate input vectors h_A and h_B in host memory
 size_t size1 = N * sizeof(unsigned);
 h_A1 = (unsigned*)malloc(size1);
 if (h_A1 == 0) CleanupResources();
 h_A2 = (unsigned*)malloc(size1);
 if (h_A2 == 0) CleanupResources();

 dim3 dimGrid2(1,1);
 dim3 dimBlock2(1,1);

 // Initialize input vectors
 RandomInit_int(h_A1, N);
 RandomInit_int(h_A2, N);

 // Allocate vectors in device memory
 checkCudaErrors( hipMalloc((void**)&d_A1, size1) );
 checkCudaErrors( hipMalloc((void**)&d_A2, size1) );

 // Copy vectors from host memory to device memory
 checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) );
 checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) );

 dim3 dimGrid(NUM_OF_BLOCKS,1);
 dim3 dimBlock(THREADS_PER_BLOCK,1);

 CUT_SAFE_CALL(cutCreateTimer(&my_timer));
 TaskHandle taskhandle = LaunchDAQ();
 CUT_SAFE_CALL(cutStartTimer(my_timer));

 CUDA_SAFE_CALL( hipDeviceSynchronize() );
 //PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
 CUDA_SAFE_CALL( hipDeviceSynchronize() );
 printf("execution time = %f\n", cutGetTimerValue(my_timer));

 hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N);
 CUDA_SAFE_CALL( hipDeviceSynchronize() );
 printf("execution time = %f\n", cutGetTimerValue(my_timer));

 //PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
 CUDA_SAFE_CALL( hipDeviceSynchronize() );
 printf("execution time = %f\n", cutGetTimerValue(my_timer));

 getLastCudaError("kernel launch failure");
 CUDA_SAFE_CALL( hipDeviceSynchronize() );
 CUT_SAFE_CALL(cutStopTimer(my_timer));
 TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
 printf("execution time = %f\n", cutGetTimerValue(my_timer));
 CUT_SAFE_CALL(cutDeleteTimer(my_timer));

#ifdef _DEBUG
 checkCudaErrors( hipDeviceSynchronize() );
#endif

 // Copy result from device memory to host memory
 CleanupResources();
 return 0;
}

void CleanupResources(void)
{
 // Free device memory
 if (d_A1) hipFree(d_A1);
 if (d_A2) hipFree(d_A2);
 if (d_A3) hipFree(d_A3);

 // Free host memory
 if (h_A1) free(h_A1);
 if (h_A2) free(h_A2);
 if (h_A3) free(h_A3);
}

// Allocates an array with random unsigned integer entries.
void RandomInit_int(unsigned* data, int n)
{
 srand((unsigned)time(0)); // seed once; reseeding on every iteration yields identical values
 for (int i = 0; i < n; ++i){
  data[i] = (unsigned)rand(); // rand() / RAND_MAX is integer division and always produces 0
 }
}

// Allocates an array with random float entries in [0, 1].
void RandomInit_fp(float* data, int n)
{
 for (int i = 0; i < n; ++i){
  data[i] = rand() / (float)RAND_MAX; // divide in floating point so the values span [0, 1]
 }
}
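// main() above allocates N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2 elements but launches only
// NUM_OF_BLOCKS blocks of THREADS_PER_BLOCK threads, and PowerKernal1 reads as far as
// A[id+15]; the doubled allocation is presumably what keeps those offset accesses in
// bounds. A small host-side sketch of that bound check (check_benchmark_bounds is an
// illustrative name, not part of the file above; it reuses the two macros defined there):
#include <assert.h>

static void check_benchmark_bounds(void)
{
 const int launched = NUM_OF_BLOCKS * THREADS_PER_BLOCK;     // threads actually created
 const int elements = THREADS_PER_BLOCK * NUM_OF_BLOCKS * 2; // N as allocated in main()
 // The highest index a launched thread can touch is (launched - 1) + 15.
 assert((launched - 1) + 15 < elements);
}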
b08b5134a72028efc1937be4baf8983c5b268a8d.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> // Includes #include <stdio.h> // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> //NI DAQ #include "../include/ContAcq-IntClk.h" #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define ITERATIONS REPLACE_ITERATIONS // Variables bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit_int(unsigned*, int); void RandomInit_fp(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal1(unsigned *A, unsigned *B, int N) { int id = blockDim.x * blockIdx.x + threadIdx.x; int cta_id=blockDim.x * blockIdx.x; int offset=THREADS_PER_BLOCK/2; unsigned sum=0; if(id < N){ for(unsigned i=0; i<ITERATIONS; ++i){ A[id] = A[id] + B[id] + id; //for(unsigned j=0; j<ITERATIONS/4; ++j){ sum += A[id]; sum += A[id+1]; sum += A[id+2]; if(id>cta_id+offset){ A[id+5]=sum; A[id+6]=sum; A[id+7]=sum; A[id+8]=sum; A[id+9]=sum; } sum *= A[id+3]; sum *= A[id+4]; sum *= A[id+10]; if(id>cta_id+offset && id<cta_id+(offset+offset/2)){ sum += A[id+11]; A[id+12]=sum; sum += A[id+13]; A[id+14]=sum; sum += A[id+15]; } A[id] = sum+A[id]+B[id]; } } } __global__ void PowerKernalEmpty(unsigned* C, int N) { unsigned id = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation __syncthreads(); // Excessive Mod/Div Operations for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) { //Value1=(I1)+k; //Value2=(I2)+k; //Value3=(Value2)+k; //Value2=(Value1)+k; /* __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); */ } C[id]=id; __syncthreads(); } // Host code unsigned *h_A1, *h_A2, *h_A3; unsigned *d_A1, *d_A2, *d_A3; int main() { 
printf("Power Microbenchmarks\n"); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2; // Allocate input vectors h_A and h_B in host memory size_t size1 = N * sizeof(unsigned); h_A1 = (unsigned*)malloc(size1); if (h_A1 == 0) CleanupResources(); h_A2 = (unsigned*)malloc(size1); if (h_A2 == 0) CleanupResources(); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); // Initialize input vectors RandomInit_int(h_A1, N); RandomInit_int(h_A2, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A1, size1) ); checkCudaErrors( cudaMalloc((void**)&d_A2, size1) ); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); CUDA_SAFE_CALL( cudaThreadSynchronize() ); //PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); //PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); getLastCudaError("kernel launch failure"); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL(cutStopTimer(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A1) cudaFree(d_A1); if (d_A2) cudaFree(d_A2); if (d_A3) cudaFree(d_A3); // Free host memory if (h_A1) free(h_A1); if (h_A2) free(h_A2); if (h_A3) free(h_A3); } // Allocates an array with random float entries. void RandomInit_int(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } } void RandomInit_fp(float* data, int n) { for (int i = 0; i < n; ++i){ data[i] = rand() / RAND_MAX; } }
cb59729c385356f61c506de8dd48ba1e569c93ad.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math_functions.h>
#include "kernel.h"

// Function being optimized
// Levy 3-dimensional
__device__ float fitness_function(float x[])
{
    float res = 0;
    float y1 = 1 + (x[0] - 1) / 4;
    float yn = 1 + (x[NUM_OF_DIMENSIONS - 1] - 1) / 4;

    res += pow(sin(phi * y1), 2);

    for (int i = 0; i < NUM_OF_DIMENSIONS - 1; i++)
    {
        float y = 1 + (x[i] - 1) / 4;
        float yp = 1 + (x[i + 1] - 1) / 4;
        res += pow(y - 1, 2) * (1 + 10 * pow(sin(phi * yp), 2)) + pow(yn - 1, 2);
    }

    return res;
}

__global__ void kernelUpdateParticle(float *positions, float *velocities,
                                     float *pBests, float *gBest,
                                     float r1, float r2)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    if (i >= NUM_OF_PARTICLES * NUM_OF_DIMENSIONS)
        return;

    //float rp = getRandomClamped();
    //float rg = getRandomClamped();
    float rp = r1;
    float rg = r2;

    velocities[i] = OMEGA * velocities[i] + c1 * rp * (pBests[i] - positions[i])
            + c2 * rg * (gBest[i % NUM_OF_DIMENSIONS] - positions[i]);

    // Update particle position
    positions[i] += velocities[i];
}

__global__ void kernelUpdatePBest(float *positions, float *pBests, float* gBest)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    if (i >= NUM_OF_PARTICLES * NUM_OF_DIMENSIONS || i % NUM_OF_DIMENSIONS != 0)
        return;

    // Per-thread scratch copies of this particle's position and personal best
    float tempParticle1[NUM_OF_DIMENSIONS];
    float tempParticle2[NUM_OF_DIMENSIONS];

    for (int j = 0; j < NUM_OF_DIMENSIONS; j++)
    {
        tempParticle1[j] = positions[i + j];
        tempParticle2[j] = pBests[i + j];
    }

    if (fitness_function(tempParticle1) < fitness_function(tempParticle2))
    {
        for (int k = 0; k < NUM_OF_DIMENSIONS; k++)
            pBests[i + k] = positions[i + k];
    }
}

extern "C" void cuda_pso(float *positions, float *velocities,
                         float *pBests, float *gBest)
{
    int size = NUM_OF_PARTICLES * NUM_OF_DIMENSIONS;

    float *devPos;
    float *devVel;
    float *devPBest;
    float *devGBest;

    float temp[NUM_OF_DIMENSIONS];

    // Memory allocation
    hipMalloc((void**)&devPos, sizeof(float) * size);
    hipMalloc((void**)&devVel, sizeof(float) * size);
    hipMalloc((void**)&devPBest, sizeof(float) * size);
    hipMalloc((void**)&devGBest, sizeof(float) * NUM_OF_DIMENSIONS);

    // Thread & block number: enough threads to cover every particle dimension
    int threadsNum = 32;
    int blocksNum = (size + threadsNum - 1) / threadsNum;

    // Copy particle data from host to device
    hipMemcpy(devPos, positions, sizeof(float) * size, hipMemcpyHostToDevice);
    hipMemcpy(devVel, velocities, sizeof(float) * size, hipMemcpyHostToDevice);
    hipMemcpy(devPBest, pBests, sizeof(float) * size, hipMemcpyHostToDevice);
    hipMemcpy(devGBest, gBest, sizeof(float) * NUM_OF_DIMENSIONS, hipMemcpyHostToDevice);

    // PSO main loop
    for (int iter = 0; iter < MAX_ITER; iter++)
    {
        // Update position and velocity
        hipLaunchKernelGGL(( kernelUpdateParticle), dim3(blocksNum), dim3(threadsNum), 0, 0, devPos, devVel, devPBest, devGBest, getRandomClamped(), getRandomClamped());

        // Update pBest
        hipLaunchKernelGGL(( kernelUpdatePBest), dim3(blocksNum), dim3(threadsNum), 0, 0, devPos, devPBest, devGBest);

        // Update gBest on the host
        hipMemcpy(pBests, devPBest, sizeof(float) * NUM_OF_PARTICLES * NUM_OF_DIMENSIONS, hipMemcpyDeviceToHost);

        for (int i = 0; i < size; i += NUM_OF_DIMENSIONS)
        {
            for (int k = 0; k < NUM_OF_DIMENSIONS; k++)
                temp[k] = pBests[i + k];

            if (host_fitness_function(temp) < host_fitness_function(gBest))
            {
                for (int k = 0; k < NUM_OF_DIMENSIONS; k++)
                    gBest[k] = temp[k];
            }
        }

        hipMemcpy(devGBest, gBest, sizeof(float) * NUM_OF_DIMENSIONS, hipMemcpyHostToDevice);
    }

    // Retrieve particle data from device to host
    hipMemcpy(positions, devPos, sizeof(float) * size, hipMemcpyDeviceToHost);
    hipMemcpy(velocities, devVel, sizeof(float) * size, hipMemcpyDeviceToHost);
    hipMemcpy(pBests, devPBest, sizeof(float) * size, hipMemcpyDeviceToHost);
    hipMemcpy(gBest, devGBest, sizeof(float) * NUM_OF_DIMENSIONS, hipMemcpyDeviceToHost);

    // cleanup
    hipFree(devPos);
    hipFree(devVel);
    hipFree(devPBest);
    hipFree(devGBest);
}
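// cuda_pso above expects flat arrays of NUM_OF_PARTICLES*NUM_OF_DIMENSIONS floats, with
// gBest holding a single candidate of NUM_OF_DIMENSIONS values. Below is a minimal
// host-side driver sketch; it is illustrative only, works unchanged with either the HIP
// or the CUDA variant (it makes no runtime API calls itself), and assumes the constants
// and the getRandomClamped()/host_fitness_function() helpers from kernel.h, a search
// range of [-10, 10], and that host_fitness_function returns a float.
#include <cstdio>
#include <vector>

extern "C" void cuda_pso(float *positions, float *velocities, float *pBests, float *gBest);

int run_pso_once()
{
    const int size = NUM_OF_PARTICLES * NUM_OF_DIMENSIONS;
    std::vector<float> positions(size), velocities(size), pBests(size);
    std::vector<float> gBest(NUM_OF_DIMENSIONS);

    // Random start positions, zero velocities, pBest = initial position.
    for (int i = 0; i < size; i++) {
        positions[i]  = -10.0f + 20.0f * getRandomClamped();
        velocities[i] = 0.0f;
        pBests[i]     = positions[i];
    }
    for (int d = 0; d < NUM_OF_DIMENSIONS; d++)
        gBest[d] = pBests[d];              // seed gBest with the first particle

    cuda_pso(positions.data(), velocities.data(), pBests.data(), gBest.data());

    printf("best fitness found: %f\n", host_fitness_function(gBest.data()));
    return 0;
}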
cb59729c385356f61c506de8dd48ba1e569c93ad.cu
#include <cuda_runtime.h> #include <cuda.h> #include <math_functions.h> #include "kernel.h" __device__ float tempParticle1[NUM_OF_DIMENSIONS]; __device__ float tempParticle2[NUM_OF_DIMENSIONS]; // Fungsi yang dioptimasi // Levy 3-dimensional __device__ float fitness_function(float x[]) { float res = 0; float y1 = 1 + (x[0] - 1) / 4; float yn = 1 + (x[NUM_OF_DIMENSIONS - 1] - 1) / 4; res += pow(sin(phi * y1), 2); for (int i = 0; i < NUM_OF_DIMENSIONS - 1; i++) { float y = 1 + (x[i] - 1) / 4; float yp = 1 + (x[i + 1] - 1) / 4; res += pow(y - 1, 2) * (1 + 10 * pow(sin(phi * yp), 2)) + pow(yn - 1, 2); } return res; } __global__ void kernelUpdateParticle(float *positions, float *velocities, float *pBests, float *gBest, float r1, float r2) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= NUM_OF_PARTICLES * NUM_OF_DIMENSIONS) return; //float rp = getRandomClamped(); //float rg = getRandomClamped(); float rp = r1; float rg = r2; velocities[i] = OMEGA * velocities[i] + c1 * rp * (pBests[i] - positions[i]) + c2 * rg * (gBest[i % NUM_OF_DIMENSIONS] - positions[i]); // Update posisi particle positions[i] += velocities[i]; } __global__ void kernelUpdatePBest(float *positions, float *pBests, float* gBest) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= NUM_OF_PARTICLES * NUM_OF_DIMENSIONS || i % NUM_OF_DIMENSIONS != 0) return; for (int j = 0; j < NUM_OF_DIMENSIONS; j++) { tempParticle1[j] = positions[i + j]; tempParticle2[j] = pBests[i + j]; } if (fitness_function(tempParticle1) < fitness_function(tempParticle2)) { for (int k = 0; k < NUM_OF_DIMENSIONS; k++) pBests[i + k] = positions[i + k]; } } extern "C" void cuda_pso(float *positions, float *velocities, float *pBests, float *gBest) { int size = NUM_OF_PARTICLES * NUM_OF_DIMENSIONS; float *devPos; float *devVel; float *devPBest; float *devGBest; float temp[NUM_OF_DIMENSIONS]; // Memory allocation cudaMalloc((void**)&devPos, sizeof(float) * size); cudaMalloc((void**)&devVel, sizeof(float) * size); cudaMalloc((void**)&devPBest, sizeof(float) * size); cudaMalloc((void**)&devGBest, sizeof(float) * NUM_OF_DIMENSIONS); // Thread & Block number int threadsNum = 32; int blocksNum = NUM_OF_PARTICLES / threadsNum; // Copy particle datas from host to device cudaMemcpy(devPos, positions, sizeof(float) * size, cudaMemcpyHostToDevice); cudaMemcpy(devVel, velocities, sizeof(float) * size, cudaMemcpyHostToDevice); cudaMemcpy(devPBest, pBests, sizeof(float) * size, cudaMemcpyHostToDevice); cudaMemcpy(devGBest, gBest, sizeof(float) * NUM_OF_DIMENSIONS, cudaMemcpyHostToDevice); // PSO main function for (int iter = 0; iter < MAX_ITER; iter++) { // Update position and velocity kernelUpdateParticle<<<blocksNum, threadsNum>>>(devPos, devVel, devPBest, devGBest, getRandomClamped(), getRandomClamped()); // Update pBest kernelUpdatePBest<<<blocksNum, threadsNum>>>(devPos, devPBest, devGBest); // Update gBest cudaMemcpy(pBests, devPBest, sizeof(float) * NUM_OF_PARTICLES * NUM_OF_DIMENSIONS, cudaMemcpyDeviceToHost); for(int i = 0; i < size; i += NUM_OF_DIMENSIONS) { for(int k = 0; k < NUM_OF_DIMENSIONS; k++) temp[k] = pBests[i + k]; if (host_fitness_function(temp) < host_fitness_function(gBest)) { for (int k = 0; k < NUM_OF_DIMENSIONS; k++) gBest[k] = temp[k]; } } cudaMemcpy(devGBest, gBest, sizeof(float) * NUM_OF_DIMENSIONS, cudaMemcpyHostToDevice); } // Retrieve particle datas from device to host cudaMemcpy(positions, devPos, sizeof(float) * size, cudaMemcpyDeviceToHost); cudaMemcpy(velocities, devVel, sizeof(float) * size, cudaMemcpyDeviceToHost); 
cudaMemcpy(pBests, devPBest, sizeof(float) * size, cudaMemcpyDeviceToHost); cudaMemcpy(gBest, devGBest, sizeof(float) * NUM_OF_DIMENSIONS, cudaMemcpyDeviceToHost); // cleanup cudaFree(devPos); cudaFree(devVel); cudaFree(devPBest); cudaFree(devGBest); }
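// The Levy objective above attains its global minimum of 0 at x = (1, ..., 1): every
// y_i becomes 1, so sin(phi * y_i) = 0 and the (y - 1)^2 terms vanish (this assumes phi
// is defined as pi in kernel.h, which is what the formula needs). Below is a host-side
// mirror of fitness_function plus a tiny check of that minimum, for illustration only;
// the project's real host_fitness_function lives in kernel.h and may differ.
#include <cassert>
#include <cmath>

static float levy_host(const float x[], int dims, float pi)
{
    float res = 0.0f;
    float y1 = 1 + (x[0] - 1) / 4;
    float yn = 1 + (x[dims - 1] - 1) / 4;

    res += powf(sinf(pi * y1), 2);

    for (int i = 0; i < dims - 1; i++) {
        float y  = 1 + (x[i] - 1) / 4;
        float yp = 1 + (x[i + 1] - 1) / 4;
        res += powf(y - 1, 2) * (1 + 10 * powf(sinf(pi * yp), 2)) + powf(yn - 1, 2);
    }
    return res;
}

static void check_levy_minimum()
{
    float ones[NUM_OF_DIMENSIONS];
    for (int i = 0; i < NUM_OF_DIMENSIONS; i++) ones[i] = 1.0f;
    // At the all-ones point the objective should be (numerically) zero.
    assert(levy_host(ones, NUM_OF_DIMENSIONS, 3.14159265f) < 1e-6f);
}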
34a2a89b3aa4d99c3ac712f768c68fa205b63e27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::conv2d(const Tensor& input, int outChannels, int kernelH, int kernelW, int strideH, int strideW, int paddingH, int paddingW, ActiMode activation, int groups, bool use_bias, const Op* shared_op, Initializer* kernel_initializer, Initializer* bias_initializer, char const *name) { if (kernel_initializer == NULL) { int seed = std::rand(); kernel_initializer = new GlorotUniform(seed); } if (bias_initializer == NULL && use_bias) { bias_initializer = new ZeroInitializer(); } assert(input.numDim == 4); /*NCHW*/ Conv2D *conv = new Conv2D(*this, input, outChannels, kernelH, kernelW, strideH, strideW, paddingH, paddingW, groups, activation, use_bias, shared_op, kernel_initializer, bias_initializer, name); layers.push_back(conv); return conv->outputs[0]; } /* locals[0] = kernel locals[1] = bias */ Conv2D::Conv2D(FFModel& model, const Tensor& _input, int out_dim, int _kernel_h, int _kernel_w, int _stride_h, int _stride_w, int _padding_h, int _padding_w, int _groups, ActiMode _activation, bool _use_bias, const Op* shared_op, Initializer* _kernel_initializer, Initializer* _bias_initializer, const char* name) : Op(model, OP_CONV2D, shared_op, name, _input), in_channels(_input.adim[2]), out_channels(out_dim), kernel_h(_kernel_h), kernel_w(_kernel_w), stride_h(_stride_h), stride_w(_stride_w), padding_h(_padding_h), padding_w(_padding_w), groups(_groups), activation(_activation), use_bias(_use_bias), kernel_initializer(_kernel_initializer), bias_initializer(_bias_initializer) { assert(_input.numDim == 4); // Set output shape int input_w = inputs[0].adim[0]; int input_h = inputs[0].adim[1]; int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w; int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h; int output_c = out_channels; int output_n = inputs[0].adim[3]; numOutputs = 1; outputs[0].numDim = 4; outputs[0].adim[0] = output_w; outputs[0].adim[1] = output_h; outputs[0].adim[2] = output_c; outputs[0].adim[3] = output_n; weights[0].numDim = 4; weights[0].adim[0] = kernel_w; weights[0].adim[1] = kernel_h; // Require input channels is divisible by groups assert(in_channels % groups == 0); weights[0].adim[2] = in_channels / groups; weights[0].adim[3] = out_channels; numWeights = 1; if (use_bias) { weights[1].numDim = 1; weights[1].adim[0] = out_channels; numWeights = 2; } } void Conv2D::create_weights(FFModel& model) { // Retrive the task indexspace for the op task_is = (IndexSpaceT<4>)model.get_or_create_task_is(4, name); // TODO: temp work, will let users to pick either NCCL or PS #ifdef FF_USE_NCCL ParameterSyncType comm_type = ParameterSyncType::NCCL; #else ParameterSyncType comm_type = ParameterSyncType::PS; #endif // Create kernel { const int dims[4] = {out_channels, in_channels / groups, kernel_h, kernel_w}; weights[0] = model.create_conv_weight<4>(this, dims, DT_FLOAT, 
kernel_initializer, true/*create_grad*/, comm_type); } // Create bias tensor if (use_bias) { const int dims[1] = {out_channels}; weights[1] = model.create_conv_weight<1>(this, dims, DT_FLOAT, bias_initializer, true/*create_grad*/, comm_type); assert(numWeights == 2); } else { assert(numWeights == 1); } } void Conv2D::create_output_and_partition(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is); // Create output tensor int input_w = inputs[0].adim[0]; int input_h = inputs[0].adim[1]; int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w; int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h; int output_c = out_channels; int output_n = inputs[0].adim[3]; int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1; int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1; int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1; { const int dims[4] = {output_n, output_c, output_h, output_w}; outputs[0] = model.create_tensor<4>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Compute partition bound for input Rect<4> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); // Currently assume we didn't split across the channel dimension assert(num_par_c == 1); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { model.create_disjoint_partition( inputs[0], (IndexSpaceT<4>)task_is, input_lps[0], input_grad_lps[0]); } } cudnnConvolutionFwdAlgo_t selectConvolutionForwardAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t yDesc, void* y); cudnnConvolutionBwdFilterAlgo_t selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnFilterDescriptor_t dwDesc, void* dw); cudnnConvolutionBwdDataAlgo_t selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t dxDesc, void* dx); /* regions[0]: input regions[1]: output regions[2](I): filter regions[3](I): bias regions[4](O): filter_grad regions[5](O): input_grad */ __host__ OpMeta* Conv2D::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const Conv2D* conv = (Conv2D*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 4> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); // TensorAccessorR<float, 1> acc_bias( // regions[3], 
task->regions[3], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_kernel_grad( regions[3], task->regions[3], FID_DATA, ctx, runtime, false/*readOutput*/); //TensorAccessorW<float, 4> acc_input_grad( // regions[4], task->regions[4], FID_DATA, ctx, runtime, // false/*readOutput*/); Conv2DMeta* m = new Conv2DMeta(handle); m->relu = conv->activation == AC_MODE_RELU; m->use_bias = conv->use_bias; m->profiling = conv->profiling; m->trainableInputs[0] = conv->trainableInputs[0]; std::strcpy(m->op_name, conv->name); int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1; int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1; int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1; int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1; int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1; printf("init conv (input): n(%d) c(%d) h(%d) w(%d)\n", input_n, input_c, input_h, input_w); printf("init conv (output): n(%d) c(%d) h(%d) w(%d)\n", output_n, output_c, output_h, output_w); checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); // Require that input_c is divisible by conv->groups assert(input_c % conv->groups == 0); printf("filterDim: kernel(%d %d) c_in(%d), c_out(%d)\n", conv->kernel_h, conv->kernel_w, input_c / conv->groups, output_c); checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, output_c, input_c / conv->groups, conv->kernel_h, conv->kernel_w)); //printf("convDim: padding(%d %d) stride(%d %d)\n", conv->padding_h, conv->padding_w, conv->stride_h, conv->stride_w); int pad_h = ((output_h - 1) * conv->stride_h + conv->kernel_h - input_h + 1) / 2; int pad_w = ((output_w - 1) * conv->stride_w + conv->kernel_w - input_w + 1) / 2; if (pad_h != conv->padding_h) printf("Warning: changing conv_padding_h to satisfy output_h size\n"); if (pad_w != conv->padding_w) printf("Warning: changing conv_padding_w to satisfy output_w size\n"); checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc, pad_h,//conv->padding_h, pad_w,//conv->padding_w, conv->stride_h, conv->stride_w, 1/*upscale_x*/, 1/*upscale_y*/, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); if (conv->groups != 1) { checkCUDNN(cudnnSetConvolutionGroupCount(m->convDesc, conv->groups)); } // enable tensor core when possible if (m->handle.allowTensorOpMathConversion) { checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION)); } else { checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH)); } int n, c, h, w; checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc, m->inputTensor, m->filterDesc, &n, &c, &h, &w)); assert(n == output_n); assert(c == output_c); assert(h == output_h); assert(w == output_w); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); // select forward algorithm m->fwdAlgo = selectConvolutionForwardAlgorithm(m->handle.dnn, m->inputTensor, acc_input.ptr, m->filterDesc, acc_kernel.ptr, m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->outputTensor, acc_output.ptr); // select backward filter algorithm m->bwdFilterAlgo = 
selectConvolutionBackwardFilterAlgorithm( m->handle.dnn, m->inputTensor, acc_input.ptr, m->outputTensor, acc_output.ptr, m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->filterDesc, acc_kernel_grad.ptr); // select backward data algorithm m->bwdDataAlgo = selectConvolutionBackwardDataAlgorithm( m->handle.dnn, m->filterDesc, acc_kernel.ptr, m->outputTensor, acc_output.ptr, m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->inputTensor, (float*)acc_input.ptr); if (m->relu) { checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); } return m; } void Conv2D::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); ParallelConfig pc; std::string pcname = name; ff.config.find_parallel_config(4, pcname, pc); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { FFHandler handle = ff.handlers[pc.device_ids[idx++]]; #ifdef FF_USE_NCCL handle.ncclComm = pc.nccl_comms[idx-1]; #endif argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(CONV2D_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Conv2D)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); // launcher.add_region_requirement( // RegionRequirement(weights[1].part, 0/*projection id*/, // READ_ONLY, EXCLUSIVE, weights[1].region)); // launcher.add_field(3, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(3, FID_DATA); //launcher.add_region_requirement( // RegionRequirement(input_grad_lps[0], 0/*projection id*/, // WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); //launcher.add_field(4, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /*static*/ void Conv2D::forward_kernel(const Conv2DMeta* m, const float* input_ptr, float* output_ptr, const float* filter_ptr, const float* bias_ptr, hipStream_t stream) { checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); float alpha = 1.0f, beta = 0.0f; checkCUDNN(cudnnConvolutionForward(m->handle.dnn, &alpha, m->inputTensor, input_ptr, m->filterDesc, filter_ptr, m->convDesc, m->fwdAlgo, m->handle.workSpace, m->handle.workSpaceSize, &beta, m->outputTensor, output_ptr)); // use_bias == True if (bias_ptr != NULL) { checkCUDNN(cudnnAddTensor(m->handle.dnn, &alpha, m->biasTensor, bias_ptr, &alpha, m->outputTensor, output_ptr)); } if (m->relu) { checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc, &alpha, m->outputTensor, output_ptr, &beta, m->outputTensor, output_ptr)); } } /* regions[0](I): input regions[1](O): output regions[2](I): filter regions[3](I): bias */ __host__ void Conv2D::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, 
Runtime *runtime) { //Conv2D* conv = (Conv2D*) task->args; const Conv2DMeta* m = *((Conv2DMeta**) task->local_args); assert(regions.size() == (3 + int(m->use_bias))); assert(task->regions.size() == (3 + int(m->use_bias))); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 4> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); const float* acc_bias_ptr = NULL; if (m->use_bias) { TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); acc_bias_ptr = acc_bias.ptr; } //printf("fwdAlgo(%d), bwdFilterALgo(%d), bwdDataAlgo(%d)\n", (int)m->fwdAlgo,(int) m->bwdFilterAlgo,(int) m->bwdDataAlgo); hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } Conv2D::forward_kernel(m, acc_input.ptr, acc_output.ptr, acc_kernel.ptr, acc_bias_ptr, stream); if (m->profiling) { hipEventRecord(t_end, stream); checkCUDA(hipEventSynchronize(t_end)); //print_tensor<4, float>(acc_input.ptr, acc_input.rect, "[Conv2D:forward:input]"); //print_tensor<4, float>(acc_kernel.ptr, acc_kernel.rect, "[Conv2D:forward:kernel]"); //print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Conv2D:forward:bias]"); //print_tensor<4, float>(acc_output.ptr, acc_output.rect, "[Conv2D:forward:output]"); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("%s [Conv2D] forward time (CF) = %.2fms\n", m->op_name, elapsed); } } __host__ void Conv2D::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(CONV2D_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); if (use_bias) { launcher.add_region_requirement( RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); } runtime->execute_index_space(ctx, launcher); } /*static*/ void Conv2D::backward_kernel(const Conv2DMeta* m, const float* input_ptr, float* input_grad_ptr, const float* output_ptr, float* output_grad_ptr, const float* kernel_ptr, float* kernel_grad_ptr, float* bias_grad_ptr, hipStream_t stream) { checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); float alpha = 1.0f; //float beta = 0.0f; if (m->relu) { cudnnDataType_t dataType; int n, c, h, w, nStride, cStride, hStride, wStride; checkCUDNN(cudnnGetTensor4dDescriptor(m->outputTensor, &dataType, &n, &c, &h, &w, &nStride, &cStride, &hStride, &wStride)); 
hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(n*c*h*w)), dim3(CUDA_NUM_THREADS), 0, stream, output_grad_ptr, output_ptr, n*c*h*w); } // Compute filter gradiant // NOTE: we use alpha for kernel_grad to accumulate gradients checkCUDNN(cudnnConvolutionBackwardFilter(m->handle.dnn, &alpha, m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr, m->convDesc, m->bwdFilterAlgo, m->handle.workSpace, m->handle.workSpaceSize, &alpha, m->filterDesc, kernel_grad_ptr)); // Compute bias gradiant // NOTE: we use alpha for bias_grad to accumulate gradients if (bias_grad_ptr != NULL) { checkCUDNN(cudnnConvolutionBackwardBias(m->handle.dnn, &alpha, m->outputTensor, output_grad_ptr, &alpha, m->biasTensor, bias_grad_ptr)); } // Compute data gradiant // NOTE: we use alpha for input_grad to accumulate gradients if (input_grad_ptr != NULL) { checkCUDNN(cudnnConvolutionBackwardData(m->handle.dnn, &alpha, m->filterDesc, kernel_ptr, m->outputTensor, output_grad_ptr, m->convDesc, m->bwdDataAlgo, m->handle.workSpace, m->handle.workSpaceSize, &alpha, m->inputTensor, input_grad_ptr)); } } /* region(I): input region(I/O): input_grad (if trainableInputs[0]) region(I): output region(I/O): output_grad region(I): filter region(I/O): filter_grad region(I/O): bias_grad (if use_bias) */ __host__ void Conv2D::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //Conv2D* conv = (Conv2D*) task->args; const Conv2DMeta* m = *((Conv2DMeta**) task->local_args); assert(regions.size() == (5 + int(m->trainableInputs[0]) + int(m->use_bias))); assert(task->regions.size() == (5 + int(m->trainableInputs[0]) + int(m->use_bias))); size_t rid = 0; TensorAccessorR<float, 4> acc_input( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); rid ++; float* acc_input_grad_ptr = NULL; if (m->trainableInputs[0]) { TensorAccessorW<float, 4> acc_input_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); acc_input_grad_ptr = acc_input_grad.ptr; rid ++; } TensorAccessorR<float, 4> acc_output( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); rid ++; TensorAccessorW<float, 4> acc_output_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); rid ++; TensorAccessorR<float, 4> acc_kernel( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); rid ++; TensorAccessorW<float, 4> acc_kernel_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); rid ++; float* acc_bias_grad_ptr = NULL; if (m->use_bias) { TensorAccessorW<float, 1> acc_bias_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); acc_bias_grad_ptr = static_cast<float*>(acc_bias_grad.ptr); rid ++; } assert(rid == regions.size()); hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } Conv2D::backward_kernel(m, acc_input.ptr, acc_input_grad_ptr, acc_output.ptr, acc_output_grad.ptr, acc_kernel.ptr, acc_kernel_grad.ptr, acc_bias_grad_ptr, stream); if (m->profiling) { hipEventRecord(t_end, stream); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("%s [Conv2D] backward time = %.2fms\n", m->op_name, elapsed); //print_tensor<4, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Conv2D:backward:output_grad]"); //print_tensor<4, 
float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Conv2D:backward:kernel_grad]"); //print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Conv2D:backward:bias_grad]"); //print_tensor<4, float>(acc_input_grad.ptr, acc_input_grad.rect, "[Conv2D:backward:input_grad]"); } } __host__ void Conv2D::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(CONV2D_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); int rid = 0; // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(rid++, FID_DATA); // regions[1](I/O): input_grad if (trainableInputs[0]) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(rid++, FID_DATA); } // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(rid++, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(rid++, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(rid++, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(rid++, FID_DATA); if (use_bias) { // regions[6](I/O): bias_grad launcher.add_region_requirement( RegionRequirement(weights[1].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(rid++, FID_DATA); } FutureMap fm = runtime->execute_index_space(ctx, launcher); } #ifdef DEADCODE /* regions[0](I/O): filter regions[1](I): filter_grad regions[2](I/O): bias regions[3](I): bias_grad */ __host__ void Conv2D::update_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const Conv2D* conv = (Conv2D*) task->args; const AccessorRW<float, 1> acc_filter(regions[0], FID_DATA); const AccessorRO<float, 1> acc_filter_grad(regions[1], FID_DATA); const AccessorRW<float, 1> acc_bias(regions[2], FID_DATA); const AccessorRO<float, 1> acc_bias_grad(regions[3], FID_DATA); Rect<1> rect_filter, rect_filter_grad, rect_bias, rect_bias_grad; rect_filter = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_filter_grad = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_bias_grad = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); size_t filter_size = rect_filter.volume(); size_t bias_size = rect_bias.volume(); assert(filter_size == conv->in_channels * conv->out_channels * 
conv->kernel_w * conv->kernel_h); assert(bias_size == conv->out_channels); assert(filter_size * conv->num_replica == rect_filter_grad.volume()); assert(bias_size * conv->num_replica == rect_bias_grad.volume()); assert(acc_filter.accessor.is_dense_arbitrary(rect_filter)); assert(acc_filter_grad.accessor.is_dense_arbitrary(rect_filter_grad)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad)); float *filter_ptr = acc_filter.ptr(rect_filter.lo); const float *filter_grad_ptr = acc_filter_grad.ptr(rect_filter_grad.lo); float *bias_ptr = acc_bias.ptr(rect_bias.lo); const float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo); updateGAS(filter_ptr, filter_grad_ptr, filter_size, conv->num_replica, conv->learning_rate); updateGAS(bias_ptr, bias_grad_ptr, bias_size, conv->num_replica, conv->learning_rate); } __host__ void Conv2D::update(const FFModel& ff) { // Synchronize the learning rate learning_rate = ff.config.learningRate; assert(num_replica > 0); // Only aggregate parameters if more than one replica if (num_replica > 1) { Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; TaskLauncher launcher(CONV2D_UPD_TASK_ID, TaskArgument(this, sizeof(Conv2D))); launcher.add_region_requirement( RegionRequirement(locals[0].region, READ_WRITE, EXCLUSIVE, locals[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[0].region_grad, READ_ONLY, EXCLUSIVE, locals[0].region_grad)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[1].region, READ_WRITE, EXCLUSIVE, locals[1].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[1].region_grad, READ_ONLY, EXCLUSIVE, locals[1].region_grad)); launcher.add_field(3, FID_DATA); runtime->execute_task(ctx, launcher); } } #endif /* __host__ Parameter* Conv2D::get_parameter(int index) { if (index == 0) { return &weights[0]; } else if (index == 1) { return &weights[1]; } else { assert(0); return NULL; } }*/ __host__ void Conv2D::print_layer(const FFModel& ff) { printf("conv2d layer\n"); Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; #if 0 TaskLauncher launcher(CONV2D_PRINT_TASK_ID, TaskArgument(NULL, 0)); launcher.add_region_requirement( RegionRequirement(kernel.region, READ_ONLY, EXCLUSIVE, kernel.region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(bias.region, READ_ONLY, EXCLUSIVE, bias.region)); launcher.add_field(1, FID_DATA); Future fu = runtime->execute_task(ctx, launcher); fu.wait(); #else RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region); kernel_req.add_field(FID_DATA); InlineLauncher kernel_launcher(kernel_req); PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher); kernel_region.wait_until_valid(); /* RegionRequirement kernel_grad_req(kernel.region_grad, READ_WRITE, EXCLUSIVE, kernel.region_grad); kernel_grad_req.add_field(FID_DATA); InlineLauncher kernel_grad_launcher(kernel_grad_req); PhysicalRegion kernel_grad_region = runtime->map_region(ctx, kernel_grad_launcher); kernel_grad_region.wait_until_valid(); */ RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region); bias_req.add_field(FID_DATA); InlineLauncher bias_launcher(bias_req); PhysicalRegion bias_region = runtime->map_region(ctx, bias_launcher); bias_region.wait_until_valid(); /* RegionRequirement 
bias_grad_req(bias.region_grad, READ_WRITE, EXCLUSIVE, bias.region_grad); bias_grad_req.add_field(FID_DATA); InlineLauncher bias_grad_launcher(bias_grad_req); PhysicalRegion bias_grad_region = runtime->map_region(ctx, bias_grad_launcher); bias_grad_region.wait_until_valid(); */ TensorAccessorW<float, 4> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true); // const AccessorRW<float, 1> acc_kernel_grad(kernel_grad_region, FID_DATA); TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true); //const AccessorRW<float, 1> acc_bias_grad(bias_grad_region, FID_DATA); const float *kernel_ptr = acc_kernel.ptr; //float *kernel_grad_ptr = acc_kernel_grad.ptr; const float *bias_ptr = acc_bias.ptr; //float *bias_grad_ptr = acc_bias_grad.ptr; size_t kernel_size = acc_kernel.rect.volume(); int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1; int kernel_dim3 = acc_kernel.rect.hi[2] - acc_kernel.rect.lo[2] + 1; int kernel_dim4 = acc_kernel.rect.hi[3] - acc_kernel.rect.lo[3] + 1; //size_t kernel_grad_size = rect_kernel_grad.volume(); size_t bias_size = acc_bias.rect.volume(); //size_t bias_grad_size = rect_bias_grad.volume(); printf("kernel, %p, %zu, [%d, %d, %d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2, kernel_dim3, kernel_dim4); //printf("kernel_grad, %d\n", kernel_grad_size); printf("bias, %p, %zu\n", bias_ptr, bias_size); //printf("bias_grad, %d\n", bias_grad_size); for (int i = 0; i < bias_size; i++) { printf("%f ", bias_ptr[i]); } printf("\n"); /* for (int i = 0; i < bias_grad_size; i++) { printf("%f ", bias_grad_ptr); bias_grad_ptr ++; } printf("\n");*/ for (int i = 0; i < kernel_size; i++) { printf("%f ", kernel_ptr[i]); } printf("\n"); /* for (int i = 0; i < kernel_grad_size; i++) { printf("%f ", kernel_grad_ptr); kernel_grad_ptr ++; } printf("\n"); */ runtime->unmap_region(ctx, kernel_region); // runtime->unmap_region(ctx, kernel_grad_region); runtime->unmap_region(ctx, bias_region); // runtime->unmap_region(ctx, bias_grad_region); #endif } cudnnConvolutionFwdAlgo_t selectConvolutionForwardAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t yDesc, void* y) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx( handle, xDesc, x, wDesc, w, convDesc, yDesc, y, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("forwardAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } cudnnConvolutionBwdFilterAlgo_t selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnFilterDescriptor_t dwDesc, void* dw) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx( handle, xDesc, x, dyDesc, dy, convDesc, dwDesc, dw, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("bwdFilterAlgo(%d) time(%.2lf)\n", 
perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } cudnnConvolutionBwdDataAlgo_t selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t dxDesc, void* dx) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx( handle, wDesc, w, dyDesc, dy, convDesc, dxDesc, dx, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("bwdDataAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } Conv2DMeta::Conv2DMeta(FFHandler handler) : OpMeta(handler) { checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc)); checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); } bool Conv2D::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { Tensor sub_output, sub_input; if(!outputs[0].get_output_sub_tensor(pc, sub_output, OP_CONV2D)) return false; if(!inputs[0].get_input_sub_tensor(pc, sub_input, OP_CONV2D)) return false; int input_w = sub_input.adim[0]; int input_h = sub_input.adim[1]; int input_c = sub_input.adim[2]; int input_n = sub_input.adim[3]; int output_w = sub_output.adim[0]; int output_h = sub_output.adim[1]; int output_c = sub_output.adim[2]; int output_n = sub_output.adim[3]; int pad_h = ((output_h - 1) * stride_h + kernel_h - input_h + 1) / 2; int pad_w = ((output_w - 1) * stride_w + kernel_w - input_w + 1) / 2; Conv2DMeta* m = sim->conv2d_meta; m->relu = activation == AC_MODE_RELU; checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); // require input_c is divisible by groups assert(input_c % groups == 0); checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, output_c, input_c / groups, kernel_h, kernel_w)); checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc, pad_h, pad_w, stride_h, stride_w, 1/*dilationH*/, 1/*dilationW*/, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); checkCUDNN(cudnnSetConvolutionGroupCount(m->convDesc, groups)); if (m->handle.allowTensorOpMathConversion) { checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION)); } else { checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH)); } int n, c, h, w; checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc, m->inputTensor, m->filterDesc, &n, &c, &h, &w)); assert(n == output_n); assert(c == output_c); assert(h == output_h); assert(w == output_w); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); // allocate tensors in simulator sim->free_all(); float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert(input_ptr != NULL); float *output_ptr = 
(float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert(output_ptr != NULL); float* weight_ptr = (float*)sim->allocate((size_t)output_c * input_c * kernel_h * kernel_w / groups, DT_FLOAT); assert(weight_ptr != NULL); float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_ptr != NULL); // compute memory usage // Assume: // 1. all memory allocations use Simulator::allocate // 2. we call Simulator::free_all before measure an operator // Therefore, the memory usage of an operator is sim->offset cost_metrics.memory_requirement = (size_t)sim->offset; // select forward algorithm { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx( m->handle.dnn, m->inputTensor, input_ptr, m->filterDesc, weight_ptr, m->convDesc, m->outputTensor, output_ptr, reqAlgCnt, &cnt, perfResults, m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); cost_metrics.forward_time = perfResults[0].time; //for (int i = 0; i < cnt; i++) // printf("conv forward: algo(%d) time(%.4lf)\n", perfResults[i].algo, perfResults[i].time); } // select backward algorithm { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx( m->handle.dnn, m->inputTensor, input_ptr, m->outputTensor, output_ptr, m->convDesc, m->filterDesc, weight_ptr, reqAlgCnt, &cnt, perfResults, m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); cost_metrics.backward_time = perfResults[0].time; } if (trainableInputs[0]) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx( m->handle.dnn, m->filterDesc, weight_ptr, m->outputTensor, output_ptr, m->convDesc, m->inputTensor, input_ptr, reqAlgCnt, &cnt, perfResults, m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); cost_metrics.backward_time += perfResults[0].time; } printf("[Measure Conv2D] name(%s) input(%d %d %d %d) weight(%d %d %d %d) output(%d %d %d %d) stride(%d %d) padding(%d %d) forward_time(%.4lf) backward_time(%.4lf)\n", name, input_n, input_c, input_h, input_w, output_c, input_c / groups, kernel_h, kernel_w, output_n, output_c, output_h, output_w, stride_h, stride_w, padding_h, padding_w, cost_metrics.forward_time, cost_metrics.backward_time); return true; }
34a2a89b3aa4d99c3ac712f768c68fa205b63e27.cu
/* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::conv2d(const Tensor& input, int outChannels, int kernelH, int kernelW, int strideH, int strideW, int paddingH, int paddingW, ActiMode activation, int groups, bool use_bias, const Op* shared_op, Initializer* kernel_initializer, Initializer* bias_initializer, char const *name) { if (kernel_initializer == NULL) { int seed = std::rand(); kernel_initializer = new GlorotUniform(seed); } if (bias_initializer == NULL && use_bias) { bias_initializer = new ZeroInitializer(); } assert(input.numDim == 4); /*NCHW*/ Conv2D *conv = new Conv2D(*this, input, outChannels, kernelH, kernelW, strideH, strideW, paddingH, paddingW, groups, activation, use_bias, shared_op, kernel_initializer, bias_initializer, name); layers.push_back(conv); return conv->outputs[0]; } /* locals[0] = kernel locals[1] = bias */ Conv2D::Conv2D(FFModel& model, const Tensor& _input, int out_dim, int _kernel_h, int _kernel_w, int _stride_h, int _stride_w, int _padding_h, int _padding_w, int _groups, ActiMode _activation, bool _use_bias, const Op* shared_op, Initializer* _kernel_initializer, Initializer* _bias_initializer, const char* name) : Op(model, OP_CONV2D, shared_op, name, _input), in_channels(_input.adim[2]), out_channels(out_dim), kernel_h(_kernel_h), kernel_w(_kernel_w), stride_h(_stride_h), stride_w(_stride_w), padding_h(_padding_h), padding_w(_padding_w), groups(_groups), activation(_activation), use_bias(_use_bias), kernel_initializer(_kernel_initializer), bias_initializer(_bias_initializer) { assert(_input.numDim == 4); // Set output shape int input_w = inputs[0].adim[0]; int input_h = inputs[0].adim[1]; int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w; int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h; int output_c = out_channels; int output_n = inputs[0].adim[3]; numOutputs = 1; outputs[0].numDim = 4; outputs[0].adim[0] = output_w; outputs[0].adim[1] = output_h; outputs[0].adim[2] = output_c; outputs[0].adim[3] = output_n; weights[0].numDim = 4; weights[0].adim[0] = kernel_w; weights[0].adim[1] = kernel_h; // Require input channels is divisible by groups assert(in_channels % groups == 0); weights[0].adim[2] = in_channels / groups; weights[0].adim[3] = out_channels; numWeights = 1; if (use_bias) { weights[1].numDim = 1; weights[1].adim[0] = out_channels; numWeights = 2; } } void Conv2D::create_weights(FFModel& model) { // Retrive the task indexspace for the op task_is = (IndexSpaceT<4>)model.get_or_create_task_is(4, name); // TODO: temp work, will let users to pick either NCCL or PS #ifdef FF_USE_NCCL ParameterSyncType comm_type = ParameterSyncType::NCCL; #else ParameterSyncType comm_type = ParameterSyncType::PS; #endif // Create kernel { const int dims[4] = {out_channels, in_channels / groups, kernel_h, kernel_w}; weights[0] = model.create_conv_weight<4>(this, dims, DT_FLOAT, kernel_initializer, true/*create_grad*/, comm_type); } // Create bias tensor if (use_bias) { 
const int dims[1] = {out_channels}; weights[1] = model.create_conv_weight<1>(this, dims, DT_FLOAT, bias_initializer, true/*create_grad*/, comm_type); assert(numWeights == 2); } else { assert(numWeights == 1); } } void Conv2D::create_output_and_partition(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is); // Create output tensor int input_w = inputs[0].adim[0]; int input_h = inputs[0].adim[1]; int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w; int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h; int output_c = out_channels; int output_n = inputs[0].adim[3]; int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1; int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1; int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1; { const int dims[4] = {output_n, output_c, output_h, output_w}; outputs[0] = model.create_tensor<4>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Compute partition bound for input Rect<4> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); // Currently assume we didn't split across the channel dimension assert(num_par_c == 1); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { model.create_disjoint_partition( inputs[0], (IndexSpaceT<4>)task_is, input_lps[0], input_grad_lps[0]); } } cudnnConvolutionFwdAlgo_t selectConvolutionForwardAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t yDesc, void* y); cudnnConvolutionBwdFilterAlgo_t selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnFilterDescriptor_t dwDesc, void* dw); cudnnConvolutionBwdDataAlgo_t selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t dxDesc, void* dx); /* regions[0]: input regions[1]: output regions[2](I): filter regions[3](I): bias regions[4](O): filter_grad regions[5](O): input_grad */ __host__ OpMeta* Conv2D::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const Conv2D* conv = (Conv2D*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 4> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); // TensorAccessorR<float, 1> acc_bias( // regions[3], task->regions[3], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_kernel_grad( regions[3], 
task->regions[3], FID_DATA, ctx, runtime, false/*readOutput*/); //TensorAccessorW<float, 4> acc_input_grad( // regions[4], task->regions[4], FID_DATA, ctx, runtime, // false/*readOutput*/); Conv2DMeta* m = new Conv2DMeta(handle); m->relu = conv->activation == AC_MODE_RELU; m->use_bias = conv->use_bias; m->profiling = conv->profiling; m->trainableInputs[0] = conv->trainableInputs[0]; std::strcpy(m->op_name, conv->name); int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1; int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1; int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1; int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1; int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1; printf("init conv (input): n(%d) c(%d) h(%d) w(%d)\n", input_n, input_c, input_h, input_w); printf("init conv (output): n(%d) c(%d) h(%d) w(%d)\n", output_n, output_c, output_h, output_w); checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); // Require that input_c is divisible by conv->groups assert(input_c % conv->groups == 0); printf("filterDim: kernel(%d %d) c_in(%d), c_out(%d)\n", conv->kernel_h, conv->kernel_w, input_c / conv->groups, output_c); checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, output_c, input_c / conv->groups, conv->kernel_h, conv->kernel_w)); //printf("convDim: padding(%d %d) stride(%d %d)\n", conv->padding_h, conv->padding_w, conv->stride_h, conv->stride_w); int pad_h = ((output_h - 1) * conv->stride_h + conv->kernel_h - input_h + 1) / 2; int pad_w = ((output_w - 1) * conv->stride_w + conv->kernel_w - input_w + 1) / 2; if (pad_h != conv->padding_h) printf("Warning: changing conv_padding_h to satisfy output_h size\n"); if (pad_w != conv->padding_w) printf("Warning: changing conv_padding_w to satisfy output_w size\n"); checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc, pad_h,//conv->padding_h, pad_w,//conv->padding_w, conv->stride_h, conv->stride_w, 1/*upscale_x*/, 1/*upscale_y*/, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); if (conv->groups != 1) { checkCUDNN(cudnnSetConvolutionGroupCount(m->convDesc, conv->groups)); } // enable tensor core when possible if (m->handle.allowTensorOpMathConversion) { checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION)); } else { checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH)); } int n, c, h, w; checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc, m->inputTensor, m->filterDesc, &n, &c, &h, &w)); assert(n == output_n); assert(c == output_c); assert(h == output_h); assert(w == output_w); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); // select forward algorithm m->fwdAlgo = selectConvolutionForwardAlgorithm(m->handle.dnn, m->inputTensor, acc_input.ptr, m->filterDesc, acc_kernel.ptr, m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->outputTensor, acc_output.ptr); // select backward filter algorithm m->bwdFilterAlgo = selectConvolutionBackwardFilterAlgorithm( m->handle.dnn, m->inputTensor, acc_input.ptr, m->outputTensor, acc_output.ptr, 
m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->filterDesc, acc_kernel_grad.ptr); // select backward data algorithm m->bwdDataAlgo = selectConvolutionBackwardDataAlgorithm( m->handle.dnn, m->filterDesc, acc_kernel.ptr, m->outputTensor, acc_output.ptr, m->convDesc, m->handle.workSpace, m->handle.workSpaceSize, m->inputTensor, (float*)acc_input.ptr); if (m->relu) { checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); } return m; } void Conv2D::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); ParallelConfig pc; std::string pcname = name; ff.config.find_parallel_config(4, pcname, pc); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { FFHandler handle = ff.handlers[pc.device_ids[idx++]]; #ifdef FF_USE_NCCL handle.ncclComm = pc.nccl_comms[idx-1]; #endif argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(CONV2D_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Conv2D)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); // launcher.add_region_requirement( // RegionRequirement(weights[1].part, 0/*projection id*/, // READ_ONLY, EXCLUSIVE, weights[1].region)); // launcher.add_field(3, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(3, FID_DATA); //launcher.add_region_requirement( // RegionRequirement(input_grad_lps[0], 0/*projection id*/, // WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); //launcher.add_field(4, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /*static*/ void Conv2D::forward_kernel(const Conv2DMeta* m, const float* input_ptr, float* output_ptr, const float* filter_ptr, const float* bias_ptr, cudaStream_t stream) { checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); float alpha = 1.0f, beta = 0.0f; checkCUDNN(cudnnConvolutionForward(m->handle.dnn, &alpha, m->inputTensor, input_ptr, m->filterDesc, filter_ptr, m->convDesc, m->fwdAlgo, m->handle.workSpace, m->handle.workSpaceSize, &beta, m->outputTensor, output_ptr)); // use_bias == True if (bias_ptr != NULL) { checkCUDNN(cudnnAddTensor(m->handle.dnn, &alpha, m->biasTensor, bias_ptr, &alpha, m->outputTensor, output_ptr)); } if (m->relu) { checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc, &alpha, m->outputTensor, output_ptr, &beta, m->outputTensor, output_ptr)); } } /* regions[0](I): input regions[1](O): output regions[2](I): filter regions[3](I): bias */ __host__ void Conv2D::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //Conv2D* conv = (Conv2D*) task->args; const Conv2DMeta* m = *((Conv2DMeta**) task->local_args); 
assert(regions.size() == (3 + int(m->use_bias))); assert(task->regions.size() == (3 + int(m->use_bias))); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 4> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); const float* acc_bias_ptr = NULL; if (m->use_bias) { TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); acc_bias_ptr = acc_bias.ptr; } //printf("fwdAlgo(%d), bwdFilterALgo(%d), bwdDataAlgo(%d)\n", (int)m->fwdAlgo,(int) m->bwdFilterAlgo,(int) m->bwdDataAlgo); cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } Conv2D::forward_kernel(m, acc_input.ptr, acc_output.ptr, acc_kernel.ptr, acc_bias_ptr, stream); if (m->profiling) { cudaEventRecord(t_end, stream); checkCUDA(cudaEventSynchronize(t_end)); //print_tensor<4, float>(acc_input.ptr, acc_input.rect, "[Conv2D:forward:input]"); //print_tensor<4, float>(acc_kernel.ptr, acc_kernel.rect, "[Conv2D:forward:kernel]"); //print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Conv2D:forward:bias]"); //print_tensor<4, float>(acc_output.ptr, acc_output.rect, "[Conv2D:forward:output]"); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("%s [Conv2D] forward time (CF) = %.2fms\n", m->op_name, elapsed); } } __host__ void Conv2D::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(CONV2D_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); if (use_bias) { launcher.add_region_requirement( RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); } runtime->execute_index_space(ctx, launcher); } /*static*/ void Conv2D::backward_kernel(const Conv2DMeta* m, const float* input_ptr, float* input_grad_ptr, const float* output_ptr, float* output_grad_ptr, const float* kernel_ptr, float* kernel_grad_ptr, float* bias_grad_ptr, cudaStream_t stream) { checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); float alpha = 1.0f; //float beta = 0.0f; if (m->relu) { cudnnDataType_t dataType; int n, c, h, w, nStride, cStride, hStride, wStride; checkCUDNN(cudnnGetTensor4dDescriptor(m->outputTensor, &dataType, &n, &c, &h, &w, &nStride, &cStride, &hStride, &wStride)); reluBackward<<<GET_BLOCKS(n*c*h*w), CUDA_NUM_THREADS, 0, stream>>>(output_grad_ptr, output_ptr, n*c*h*w); } // 
Compute filter gradiant // NOTE: we use alpha for kernel_grad to accumulate gradients checkCUDNN(cudnnConvolutionBackwardFilter(m->handle.dnn, &alpha, m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr, m->convDesc, m->bwdFilterAlgo, m->handle.workSpace, m->handle.workSpaceSize, &alpha, m->filterDesc, kernel_grad_ptr)); // Compute bias gradiant // NOTE: we use alpha for bias_grad to accumulate gradients if (bias_grad_ptr != NULL) { checkCUDNN(cudnnConvolutionBackwardBias(m->handle.dnn, &alpha, m->outputTensor, output_grad_ptr, &alpha, m->biasTensor, bias_grad_ptr)); } // Compute data gradiant // NOTE: we use alpha for input_grad to accumulate gradients if (input_grad_ptr != NULL) { checkCUDNN(cudnnConvolutionBackwardData(m->handle.dnn, &alpha, m->filterDesc, kernel_ptr, m->outputTensor, output_grad_ptr, m->convDesc, m->bwdDataAlgo, m->handle.workSpace, m->handle.workSpaceSize, &alpha, m->inputTensor, input_grad_ptr)); } } /* region(I): input region(I/O): input_grad (if trainableInputs[0]) region(I): output region(I/O): output_grad region(I): filter region(I/O): filter_grad region(I/O): bias_grad (if use_bias) */ __host__ void Conv2D::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //Conv2D* conv = (Conv2D*) task->args; const Conv2DMeta* m = *((Conv2DMeta**) task->local_args); assert(regions.size() == (5 + int(m->trainableInputs[0]) + int(m->use_bias))); assert(task->regions.size() == (5 + int(m->trainableInputs[0]) + int(m->use_bias))); size_t rid = 0; TensorAccessorR<float, 4> acc_input( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); rid ++; float* acc_input_grad_ptr = NULL; if (m->trainableInputs[0]) { TensorAccessorW<float, 4> acc_input_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); acc_input_grad_ptr = acc_input_grad.ptr; rid ++; } TensorAccessorR<float, 4> acc_output( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); rid ++; TensorAccessorW<float, 4> acc_output_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); rid ++; TensorAccessorR<float, 4> acc_kernel( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); rid ++; TensorAccessorW<float, 4> acc_kernel_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); rid ++; float* acc_bias_grad_ptr = NULL; if (m->use_bias) { TensorAccessorW<float, 1> acc_bias_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); acc_bias_grad_ptr = static_cast<float*>(acc_bias_grad.ptr); rid ++; } assert(rid == regions.size()); cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } Conv2D::backward_kernel(m, acc_input.ptr, acc_input_grad_ptr, acc_output.ptr, acc_output_grad.ptr, acc_kernel.ptr, acc_kernel_grad.ptr, acc_bias_grad_ptr, stream); if (m->profiling) { cudaEventRecord(t_end, stream); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("%s [Conv2D] backward time = %.2fms\n", m->op_name, elapsed); //print_tensor<4, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Conv2D:backward:output_grad]"); //print_tensor<4, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Conv2D:backward:kernel_grad]"); //print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, 
"[Conv2D:backward:bias_grad]"); //print_tensor<4, float>(acc_input_grad.ptr, acc_input_grad.rect, "[Conv2D:backward:input_grad]"); } } __host__ void Conv2D::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(CONV2D_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); int rid = 0; // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(rid++, FID_DATA); // regions[1](I/O): input_grad if (trainableInputs[0]) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(rid++, FID_DATA); } // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(rid++, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(rid++, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(rid++, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(rid++, FID_DATA); if (use_bias) { // regions[6](I/O): bias_grad launcher.add_region_requirement( RegionRequirement(weights[1].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(rid++, FID_DATA); } FutureMap fm = runtime->execute_index_space(ctx, launcher); } #ifdef DEADCODE /* regions[0](I/O): filter regions[1](I): filter_grad regions[2](I/O): bias regions[3](I): bias_grad */ __host__ void Conv2D::update_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const Conv2D* conv = (Conv2D*) task->args; const AccessorRW<float, 1> acc_filter(regions[0], FID_DATA); const AccessorRO<float, 1> acc_filter_grad(regions[1], FID_DATA); const AccessorRW<float, 1> acc_bias(regions[2], FID_DATA); const AccessorRO<float, 1> acc_bias_grad(regions[3], FID_DATA); Rect<1> rect_filter, rect_filter_grad, rect_bias, rect_bias_grad; rect_filter = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_filter_grad = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_bias_grad = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); size_t filter_size = rect_filter.volume(); size_t bias_size = rect_bias.volume(); assert(filter_size == conv->in_channels * conv->out_channels * conv->kernel_w * conv->kernel_h); assert(bias_size == conv->out_channels); assert(filter_size * conv->num_replica == rect_filter_grad.volume()); 
assert(bias_size * conv->num_replica == rect_bias_grad.volume()); assert(acc_filter.accessor.is_dense_arbitrary(rect_filter)); assert(acc_filter_grad.accessor.is_dense_arbitrary(rect_filter_grad)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad)); float *filter_ptr = acc_filter.ptr(rect_filter.lo); const float *filter_grad_ptr = acc_filter_grad.ptr(rect_filter_grad.lo); float *bias_ptr = acc_bias.ptr(rect_bias.lo); const float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo); updateGAS(filter_ptr, filter_grad_ptr, filter_size, conv->num_replica, conv->learning_rate); updateGAS(bias_ptr, bias_grad_ptr, bias_size, conv->num_replica, conv->learning_rate); } __host__ void Conv2D::update(const FFModel& ff) { // Synchronize the learning rate learning_rate = ff.config.learningRate; assert(num_replica > 0); // Only aggregate parameters if more than one replica if (num_replica > 1) { Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; TaskLauncher launcher(CONV2D_UPD_TASK_ID, TaskArgument(this, sizeof(Conv2D))); launcher.add_region_requirement( RegionRequirement(locals[0].region, READ_WRITE, EXCLUSIVE, locals[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[0].region_grad, READ_ONLY, EXCLUSIVE, locals[0].region_grad)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[1].region, READ_WRITE, EXCLUSIVE, locals[1].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[1].region_grad, READ_ONLY, EXCLUSIVE, locals[1].region_grad)); launcher.add_field(3, FID_DATA); runtime->execute_task(ctx, launcher); } } #endif /* __host__ Parameter* Conv2D::get_parameter(int index) { if (index == 0) { return &weights[0]; } else if (index == 1) { return &weights[1]; } else { assert(0); return NULL; } }*/ __host__ void Conv2D::print_layer(const FFModel& ff) { printf("conv2d layer\n"); Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; #if 0 TaskLauncher launcher(CONV2D_PRINT_TASK_ID, TaskArgument(NULL, 0)); launcher.add_region_requirement( RegionRequirement(kernel.region, READ_ONLY, EXCLUSIVE, kernel.region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(bias.region, READ_ONLY, EXCLUSIVE, bias.region)); launcher.add_field(1, FID_DATA); Future fu = runtime->execute_task(ctx, launcher); fu.wait(); #else RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region); kernel_req.add_field(FID_DATA); InlineLauncher kernel_launcher(kernel_req); PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher); kernel_region.wait_until_valid(); /* RegionRequirement kernel_grad_req(kernel.region_grad, READ_WRITE, EXCLUSIVE, kernel.region_grad); kernel_grad_req.add_field(FID_DATA); InlineLauncher kernel_grad_launcher(kernel_grad_req); PhysicalRegion kernel_grad_region = runtime->map_region(ctx, kernel_grad_launcher); kernel_grad_region.wait_until_valid(); */ RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region); bias_req.add_field(FID_DATA); InlineLauncher bias_launcher(bias_req); PhysicalRegion bias_region = runtime->map_region(ctx, bias_launcher); bias_region.wait_until_valid(); /* RegionRequirement bias_grad_req(bias.region_grad, READ_WRITE, EXCLUSIVE, bias.region_grad); bias_grad_req.add_field(FID_DATA); InlineLauncher bias_grad_launcher(bias_grad_req); PhysicalRegion 
bias_grad_region = runtime->map_region(ctx, bias_grad_launcher); bias_grad_region.wait_until_valid(); */ TensorAccessorW<float, 4> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true); // const AccessorRW<float, 1> acc_kernel_grad(kernel_grad_region, FID_DATA); TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true); //const AccessorRW<float, 1> acc_bias_grad(bias_grad_region, FID_DATA); const float *kernel_ptr = acc_kernel.ptr; //float *kernel_grad_ptr = acc_kernel_grad.ptr; const float *bias_ptr = acc_bias.ptr; //float *bias_grad_ptr = acc_bias_grad.ptr; size_t kernel_size = acc_kernel.rect.volume(); int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1; int kernel_dim3 = acc_kernel.rect.hi[2] - acc_kernel.rect.lo[2] + 1; int kernel_dim4 = acc_kernel.rect.hi[3] - acc_kernel.rect.lo[3] + 1; //size_t kernel_grad_size = rect_kernel_grad.volume(); size_t bias_size = acc_bias.rect.volume(); //size_t bias_grad_size = rect_bias_grad.volume(); printf("kernel, %p, %zu, [%d, %d, %d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2, kernel_dim3, kernel_dim4); //printf("kernel_grad, %d\n", kernel_grad_size); printf("bias, %p, %zu\n", bias_ptr, bias_size); //printf("bias_grad, %d\n", bias_grad_size); for (int i = 0; i < bias_size; i++) { printf("%f ", bias_ptr[i]); } printf("\n"); /* for (int i = 0; i < bias_grad_size; i++) { printf("%f ", bias_grad_ptr); bias_grad_ptr ++; } printf("\n");*/ for (int i = 0; i < kernel_size; i++) { printf("%f ", kernel_ptr[i]); } printf("\n"); /* for (int i = 0; i < kernel_grad_size; i++) { printf("%f ", kernel_grad_ptr); kernel_grad_ptr ++; } printf("\n"); */ runtime->unmap_region(ctx, kernel_region); // runtime->unmap_region(ctx, kernel_grad_region); runtime->unmap_region(ctx, bias_region); // runtime->unmap_region(ctx, bias_grad_region); #endif } cudnnConvolutionFwdAlgo_t selectConvolutionForwardAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnFilterDescriptor_t wDesc, const void* w, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t yDesc, void* y) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx( handle, xDesc, x, wDesc, w, convDesc, yDesc, y, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("forwardAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } cudnnConvolutionBwdFilterAlgo_t selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const void* x, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnFilterDescriptor_t dwDesc, void* dw) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx( handle, xDesc, x, dyDesc, dy, convDesc, dwDesc, dw, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("bwdFilterAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } cudnnConvolutionBwdDataAlgo_t selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, const 
cudnnFilterDescriptor_t wDesc, const void* w, const cudnnTensorDescriptor_t dyDesc, const void* dy, const cudnnConvolutionDescriptor_t convDesc, void* workSpace, size_t workSpaceSize, const cudnnTensorDescriptor_t dxDesc, void* dx) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx( handle, wDesc, w, dyDesc, dy, convDesc, dxDesc, dx, reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); printf("bwdDataAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time); return perfResults[0].algo; } Conv2DMeta::Conv2DMeta(FFHandler handler) : OpMeta(handler) { checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc)); checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); } bool Conv2D::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { Tensor sub_output, sub_input; if(!outputs[0].get_output_sub_tensor(pc, sub_output, OP_CONV2D)) return false; if(!inputs[0].get_input_sub_tensor(pc, sub_input, OP_CONV2D)) return false; int input_w = sub_input.adim[0]; int input_h = sub_input.adim[1]; int input_c = sub_input.adim[2]; int input_n = sub_input.adim[3]; int output_w = sub_output.adim[0]; int output_h = sub_output.adim[1]; int output_c = sub_output.adim[2]; int output_n = sub_output.adim[3]; int pad_h = ((output_h - 1) * stride_h + kernel_h - input_h + 1) / 2; int pad_w = ((output_w - 1) * stride_w + kernel_w - input_w + 1) / 2; Conv2DMeta* m = sim->conv2d_meta; m->relu = activation == AC_MODE_RELU; checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); // require input_c is divisible by groups assert(input_c % groups == 0); checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, output_c, input_c / groups, kernel_h, kernel_w)); checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc, pad_h, pad_w, stride_h, stride_w, 1/*dilationH*/, 1/*dilationW*/, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); checkCUDNN(cudnnSetConvolutionGroupCount(m->convDesc, groups)); if (m->handle.allowTensorOpMathConversion) { checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION)); } else { checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH)); } int n, c, h, w; checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc, m->inputTensor, m->filterDesc, &n, &c, &h, &w)); assert(n == output_n); assert(c == output_c); assert(h == output_h); assert(w == output_w); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); // allocate tensors in simulator sim->free_all(); float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert(input_ptr != NULL); float *output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert(output_ptr != NULL); float* weight_ptr = (float*)sim->allocate((size_t)output_c * input_c * kernel_h * kernel_w / groups, DT_FLOAT); 
assert(weight_ptr != NULL); float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_ptr != NULL); // compute memory usage // Assume: // 1. all memory allocations use Simulator::allocate // 2. we call Simulator::free_all before measure an operator // Therefore, the memory usage of an operator is sim->offset cost_metrics.memory_requirement = (size_t)sim->offset; // select forward algorithm { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx( m->handle.dnn, m->inputTensor, input_ptr, m->filterDesc, weight_ptr, m->convDesc, m->outputTensor, output_ptr, reqAlgCnt, &cnt, perfResults, m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); cost_metrics.forward_time = perfResults[0].time; //for (int i = 0; i < cnt; i++) // printf("conv forward: algo(%d) time(%.4lf)\n", perfResults[i].algo, perfResults[i].time); } // select backward algorithm { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx( m->handle.dnn, m->inputTensor, input_ptr, m->outputTensor, output_ptr, m->convDesc, m->filterDesc, weight_ptr, reqAlgCnt, &cnt, perfResults, m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); cost_metrics.backward_time = perfResults[0].time; } if (trainableInputs[0]) { const int reqAlgCnt = 8; int cnt = 0; cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt]; checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx( m->handle.dnn, m->filterDesc, weight_ptr, m->outputTensor, output_ptr, m->convDesc, m->inputTensor, input_ptr, reqAlgCnt, &cnt, perfResults, m->handle.workSpace, m->handle.workSpaceSize)); assert(cnt > 0); checkCUDNN(perfResults[0].status); cost_metrics.backward_time += perfResults[0].time; } printf("[Measure Conv2D] name(%s) input(%d %d %d %d) weight(%d %d %d %d) output(%d %d %d %d) stride(%d %d) padding(%d %d) forward_time(%.4lf) backward_time(%.4lf)\n", name, input_n, input_c, input_h, input_w, output_c, input_c / groups, kernel_h, kernel_w, output_n, output_c, output_h, output_w, stride_h, stride_w, padding_h, padding_w, cost_metrics.forward_time, cost_metrics.backward_time); return true; }
053b72d1679a5e5293252ce649385573cb418ad9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>

#define N (33 * 1024)

__global__ void add(int *a, int *b, int *c){
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while (tid < N){
        c[tid] = a[tid] + b[tid];
        tid += blockDim.x * gridDim.x;
    }
}

int main(void){
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // allocate the memory on the GPU
    hipMalloc((void**)&dev_a, N * sizeof(int));
    hipMalloc((void**)&dev_b, N * sizeof(int));
    hipMalloc((void**)&dev_c, N * sizeof(int));

    // fill the arrays 'a' and 'b' on the CPU
    for (int i = 0; i < N; i++){
        a[i] = i;
        b[i] = i * i;
    }

    // copy the arrays 'a' and 'b' to the GPU
    hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);

    // launch kernel
    hipLaunchKernelGGL(add, dim3(128), dim3(128), 0, 0, dev_a, dev_b, dev_c);

    // copy the array 'c' back from the GPU to the CPU
    hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);

    // verify that the GPU did the work we requested
    bool success = true;
    for (int i = 0; i < N; i++){
        if ((a[i] + b[i]) != c[i]){
            std::cout << "Error: " << a[i] << " + " << b[i] << " != " << c[i] << std::endl;
            success = false;
        }
    }
    if (success) std::cout << "We did it!" << std::endl;

    // free the memory allocated on the GPU
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);

    return 0;
}
053b72d1679a5e5293252ce649385573cb418ad9.cu
#include <iostream>

#define N (33 * 1024)

__global__ void add(int *a, int *b, int *c){
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while (tid < N){
        c[tid] = a[tid] + b[tid];
        tid += blockDim.x * gridDim.x;
    }
}

int main(void){
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // allocate the memory on the GPU
    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_b, N * sizeof(int));
    cudaMalloc((void**)&dev_c, N * sizeof(int));

    // fill the arrays 'a' and 'b' on the CPU
    for (int i = 0; i < N; i++){
        a[i] = i;
        b[i] = i * i;
    }

    // copy the arrays 'a' and 'b' to the GPU
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);

    // launch kernel
    add<<<128,128>>>(dev_a, dev_b, dev_c);

    // copy the array 'c' back from the GPU to the CPU
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);

    // verify that the GPU did the work we requested
    bool success = true;
    for (int i = 0; i < N; i++){
        if ((a[i] + b[i]) != c[i]){
            std::cout << "Error: " << a[i] << " + " << b[i] << " != " << c[i] << std::endl;
            success = false;
        }
    }
    if (success) std::cout << "We did it!" << std::endl;

    // free the memory allocated on the GPU
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    return 0;
}
caacc8beebbb47c9d4151927cbf41dbbde9c4371.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>   // for malloc/free and exit

#define blocksizex 16
#define blocksizey 16
#define N 40000
#define DOT(a,b) a[0]*b[0]+a[1]*b[1]+a[2]*b[2]
#define DOTVA(a,b) a.x*b[0]+a.y*b[1]+a.z*b[2]

// a 3D point or direction
typedef struct {
    float x;
    float y;
    float z;
} vertex;

// one row of the ICP linear system: a = cross(p, n), n = the source normal
typedef struct {
    vertex a;
    vertex n;
} mata;

__global__ void RunICP(
        int validPoints[N],
        vertex verProjD[N],
        vertex verNormD[N],
        float matRgPre[9],
        float matTransltgPre[3],
        const int focus,
        vertex verProS[N],
        vertex verNormS[N],
        const float epsD,
        const float epsN,
        const float epsDs,
        float Energy[N],
        mata Ak[N],
        float Bk[N],
        int flag[N])
{
    int gidx = blockIdx.x*blockDim.x + threadIdx.x;
    int gidy = blockIdx.y*blockDim.y + threadIdx.y;
    int i = gidx + (gidy-1)*gridDim.y;   // flatten the 2D launch into a point index

    float vertexProj[3];
    float normalProj[3];
    float disVdiff[3];
    float disNdiff[3];
    float error;

    // guard out-of-range threads before touching any per-point array
    if (i < 0 || i >= N) return;

    if (validPoints[i]) {
        // transform the vertex (rotation + translation) and the normal (rotation only)
        vertexProj[0] = verProjD[i].x*matRgPre[0]+verProjD[i].y*matRgPre[1]+verProjD[i].z*matRgPre[2]+matTransltgPre[0];
        vertexProj[1] = verProjD[i].x*matRgPre[3]+verProjD[i].y*matRgPre[4]+verProjD[i].z*matRgPre[5]+matTransltgPre[1];
        vertexProj[2] = verProjD[i].x*matRgPre[6]+verProjD[i].y*matRgPre[7]+verProjD[i].z*matRgPre[8]+matTransltgPre[2];
        normalProj[0] = verNormD[i].x*matRgPre[0]+verNormD[i].y*matRgPre[1]+verNormD[i].z*matRgPre[2];
        normalProj[1] = verNormD[i].x*matRgPre[3]+verNormD[i].y*matRgPre[4]+verNormD[i].z*matRgPre[5];
        normalProj[2] = verNormD[i].x*matRgPre[6]+verNormD[i].y*matRgPre[7]+verNormD[i].z*matRgPre[8];

        // project into the 640x480 image to find the corresponding source pixel
        int x = (vertexProj[0] / vertexProj[2] * focus) + 320;
        int y = (vertexProj[1] / vertexProj[2] * focus) + 240;
        int index = y*640 + x;
        if (index < 0 || index >= N) {
            Energy[i] = 0;
            flag[i] = 0;
            return;
        }

        disVdiff[0] = verProS[index].x - vertexProj[0];
        disNdiff[0] = verNormS[index].x - normalProj[0];
        disVdiff[1] = verProS[index].y - vertexProj[1];
        disNdiff[1] = verNormS[index].y - normalProj[1];
        disVdiff[2] = verProS[index].z - vertexProj[2];
        disNdiff[2] = verNormS[index].z - normalProj[2];

        float disV = DOT(disVdiff, disVdiff);
        float disN = DOT(disNdiff, disNdiff);

        if (disV < epsD && disN >= epsN && disV > epsDs) {
            error = DOTVA(verNormS[index], disVdiff);
            Energy[i] = error*error/10;   // remember to reduce sumEnergy afterwards

            // Ak = [ p x n , n ], Bk = n . (q - p): one row of the point-to-plane system
            Ak[i].a.x = vertexProj[1]*verNormS[index].z - vertexProj[2]*verNormS[index].y;
            Ak[i].a.y = vertexProj[2]*verNormS[index].x - vertexProj[0]*verNormS[index].z;
            Ak[i].a.z = vertexProj[0]*verNormS[index].y - vertexProj[1]*verNormS[index].x;
            Ak[i].n.x = verNormS[index].x;
            Ak[i].n.y = verNormS[index].y;
            Ak[i].n.z = verNormS[index].z;
            Bk[i] = DOTVA(verNormS[index], disVdiff);
            flag[i] = 1;
        } else {
            flag[i] = 0;
        }
    } else {
        flag[i] = 0;
    }
}

int main()
{
    // kernel invocation with N threads
    /*
    int validPoints[N];
    float verProjD[N][3];
    float verNormD[N][3];
    float matRgPre[3];
    float matTransltgPre[3];
    float verProS[N][3];
    float verNormS[N][3];
    float Energy[N];
    float Ak[N][6];
    float Bk[N];
    int flag[N];
    */
    const int focus = 550;
    const float epsD  = 40000.0;
    const float epsN  = 0.6;
    const float epsDs = 100.0;

    //----------------------------------
    // allocation for host
    //----------------------------------
    size_t flsizeN  = N*sizeof(float);
    size_t flsize3N = 3*N*sizeof(float);
    size_t flsize3  = 3*sizeof(float);
    size_t flsize6N = 6*N*sizeof(float);

    int*    validPoints    = (int*)malloc(flsizeN);
    vertex* verProjD       = (vertex*)malloc(flsize3N);
    vertex* verNormD       = (vertex*)malloc(flsize3N);
    float*  matRgPre       = (float*)malloc(9*sizeof(float));
    float*  matTransltgPre = (float*)malloc(flsize3);
    vertex* verProS        = (vertex*)malloc(flsize3N);
    vertex* verNormS       = (vertex*)malloc(flsize3N);
    float*  Energy         = (float*)malloc(flsizeN);
    mata*   Ak             = (mata*)malloc(flsize6N);
    float*  Bk             = (float*)malloc(flsizeN);
    int*    flag           = (int*)malloc(flsizeN);

    if (validPoints == NULL || verProjD == NULL || verNormD == NULL || matRgPre == NULL ||
        matTransltgPre == NULL || verProS == NULL || verNormS == NULL ||
        Energy == NULL || Ak == NULL || Bk == NULL || flag == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    // NOTE: the host input buffers are expected to be filled with real depth-map
    // data before the copies below; this harness leaves them uninitialized.

    //-----------------------------------
    // allocation for device
    //-----------------------------------
    hipError_t err = hipSuccess;

    int* d_validPoints = NULL;
    err = hipMalloc((void **)&d_validPoints, flsizeN);
    if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector d_validPoints (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    vertex* d_verProjD = NULL;
    err = hipMalloc((void **)&d_verProjD, flsize3N);
    if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device d_verProjD (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    vertex* d_verNormD = NULL;
    err = hipMalloc((void **)&d_verNormD, flsize3N);
    if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device d_verNormD (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    float* d_matRgPre = NULL;
    err = hipMalloc((void **)&d_matRgPre, 9*sizeof(float));
    if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device d_matRgPre (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    float* d_matTransltgPre = NULL;
    err = hipMalloc((void **)&d_matTransltgPre, flsize3);
    if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device d_matTransltgPre (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    vertex* d_verProS = NULL;
    err = hipMalloc((void **)&d_verProS, flsize3N);
    if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device d_verProS (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    vertex* d_verNormS = NULL;
    err = hipMalloc((void **)&d_verNormS, flsize3N);
    if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device d_verNormS (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    float* d_Energy = NULL;
    err = hipMalloc((void **)&d_Energy, flsizeN);
    if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device d_Energy (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    mata* d_Ak = NULL;
    err = hipMalloc((void **)&d_Ak, flsize6N);
    if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device d_Ak (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    float* d_Bk = NULL;
    err = hipMalloc((void **)&d_Bk, flsizeN);
    if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device d_Bk (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    int* d_flag = NULL;
    err = hipMalloc((void **)&d_flag, flsizeN);
    if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device d_flag (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    //--------------------------------------------------------------------------
    // Copy the host input variables in host memory
    // to the device input variables in device memory
    //--------------------------------------------------------------------------
    err = hipMemcpy(d_validPoints, validPoints, flsizeN, hipMemcpyHostToDevice);
    if (err != hipSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    err = hipMemcpy(d_verProjD, verProjD, flsize3N, hipMemcpyHostToDevice);
    if (err != hipSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    err = hipMemcpy(d_verNormD, verNormD, flsize3N, hipMemcpyHostToDevice);
    if (err != hipSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    err = hipMemcpy(d_matRgPre, matRgPre, 9*sizeof(float), hipMemcpyHostToDevice);
    if (err != hipSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    err = hipMemcpy(d_matTransltgPre, matTransltgPre, flsize3, hipMemcpyHostToDevice);
    if (err != hipSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    err = hipMemcpy(d_verProS, verProS, flsize3N, hipMemcpyHostToDevice);
    if (err != hipSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    err = hipMemcpy(d_verNormS, verNormS, flsize3N, hipMemcpyHostToDevice);
    if (err != hipSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    // output ----------------------
    dim3 threadsPerBlock(blocksizex, blocksizey);
    dim3 numBlocks(N/threadsPerBlock.x + 1, N/threadsPerBlock.y + 1);
    hipLaunchKernelGGL(RunICP, numBlocks, threadsPerBlock, 0, 0,
                       d_validPoints, d_verProjD, d_verNormD,
                       d_matRgPre, d_matTransltgPre, focus,
                       d_verProS, d_verNormS,
                       epsD, epsN, epsDs,
                       d_Energy, d_Ak, d_Bk, d_flag);
    err = hipGetLastError();
    if (err != hipSuccess) { fprintf(stderr, "Failed to launch RunICP kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    //--------------------------------------------------------------------------
    // Copy the device result variables in device memory to the host result variables
    // in host memory.
    //--------------------------------------------------------------------------
    printf("Copy output data from the CUDA device to the host memory\n");

    err = hipMemcpy(Energy, d_Energy, flsizeN, hipMemcpyDeviceToHost);
    if (err != hipSuccess) { fprintf(stderr, "Failed to copy variable cube from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    err = hipMemcpy(Ak, d_Ak, flsize6N, hipMemcpyDeviceToHost);
    if (err != hipSuccess) { fprintf(stderr, "Failed to copy variable cube from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    err = hipMemcpy(Bk, d_Bk, flsizeN, hipMemcpyDeviceToHost);
    if (err != hipSuccess) { fprintf(stderr, "Failed to copy variable cube from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    err = hipMemcpy(flag, d_flag, flsizeN, hipMemcpyDeviceToHost);
    if (err != hipSuccess) { fprintf(stderr, "Failed to copy variable cube from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

    // accumulate the total point-to-plane error over all matched points
    float sumEnergy = 0;
    for (int i = 0; i < N; i++) {
        if (flag[i] == 1) {
            sumEnergy += Energy[i];
            //Ak.push_back(d_Ak[i]);
            //Bk.push_back(d_Bk[i]);
        }
    }

    // release device and host buffers
    hipFree(d_validPoints); hipFree(d_verProjD); hipFree(d_verNormD);
    hipFree(d_matRgPre); hipFree(d_matTransltgPre); hipFree(d_verProS);
    hipFree(d_verNormS); hipFree(d_Energy); hipFree(d_Ak); hipFree(d_Bk); hipFree(d_flag);
    free(validPoints); free(verProjD); free(verNormD); free(matRgPre); free(matTransltgPre);
    free(verProS); free(verNormS); free(Energy); free(Ak); free(Bk); free(flag);

    return 0;
}
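
/*
 * Illustrative sketch (not part of the original file): the kernel's note
 * "remember to reduce sumEnergy" and the host-side loop above imply that the
 * per-point energies eventually have to be summed. One minimal way to do that
 * reduction on the device instead of the host is a block-wise shared-memory
 * sum followed by a single atomicAdd per block, as below. The kernel name
 * reduceEnergy, the d_sum output buffer and the launch shape are assumptions,
 * not something the original program defines.
 */
__global__ void reduceEnergy(const float* Energy, const int* flag, float* sum, int n)
{
    __shared__ float partial[256];                  // one slot per thread; launch with 256 threads/block
    int tid = threadIdx.x;
    int gid = blockIdx.x*blockDim.x + threadIdx.x;

    // load one matched point's energy (0 for unmatched or out-of-range points)
    partial[tid] = (gid < n && flag[gid] == 1) ? Energy[gid] : 0.0f;
    __syncthreads();

    // tree reduction within the block
    for (int stride = blockDim.x/2; stride > 0; stride >>= 1) {
        if (tid < stride) partial[tid] += partial[tid + stride];
        __syncthreads();
    }

    // one atomic per block accumulates into the global total
    if (tid == 0) atomicAdd(sum, partial[0]);
}
// Possible launch (assuming a device buffer d_sum initialised to 0):
// hipLaunchKernelGGL(reduceEnergy, dim3((N+255)/256), dim3(256), 0, 0,
//                    d_Energy, d_flag, d_sum, N);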
caacc8beebbb47c9d4151927cbf41dbbde9c4371.cu
#include <stdio.h>
#include <stdlib.h>   // for malloc/free and exit

#define blocksizex 16
#define blocksizey 16
#define N 40000
#define DOT(a,b) a[0]*b[0]+a[1]*b[1]+a[2]*b[2]
#define DOTVA(a,b) a.x*b[0]+a.y*b[1]+a.z*b[2]

// a 3D point or direction
typedef struct {
    float x;
    float y;
    float z;
} vertex;

// one row of the ICP linear system: a = cross(p, n), n = the source normal
typedef struct {
    vertex a;
    vertex n;
} mata;

__global__ void RunICP(
        int validPoints[N],
        vertex verProjD[N],
        vertex verNormD[N],
        float matRgPre[9],
        float matTransltgPre[3],
        const int focus,
        vertex verProS[N],
        vertex verNormS[N],
        const float epsD,
        const float epsN,
        const float epsDs,
        float Energy[N],
        mata Ak[N],
        float Bk[N],
        int flag[N])
{
    int gidx = blockIdx.x*blockDim.x + threadIdx.x;
    int gidy = blockIdx.y*blockDim.y + threadIdx.y;
    int i = gidx + (gidy-1)*gridDim.y;   // flatten the 2D launch into a point index

    float vertexProj[3];
    float normalProj[3];
    float disVdiff[3];
    float disNdiff[3];
    float error;

    // guard out-of-range threads before touching any per-point array
    if (i < 0 || i >= N) return;

    if (validPoints[i]) {
        // transform the vertex (rotation + translation) and the normal (rotation only)
        vertexProj[0] = verProjD[i].x*matRgPre[0]+verProjD[i].y*matRgPre[1]+verProjD[i].z*matRgPre[2]+matTransltgPre[0];
        vertexProj[1] = verProjD[i].x*matRgPre[3]+verProjD[i].y*matRgPre[4]+verProjD[i].z*matRgPre[5]+matTransltgPre[1];
        vertexProj[2] = verProjD[i].x*matRgPre[6]+verProjD[i].y*matRgPre[7]+verProjD[i].z*matRgPre[8]+matTransltgPre[2];
        normalProj[0] = verNormD[i].x*matRgPre[0]+verNormD[i].y*matRgPre[1]+verNormD[i].z*matRgPre[2];
        normalProj[1] = verNormD[i].x*matRgPre[3]+verNormD[i].y*matRgPre[4]+verNormD[i].z*matRgPre[5];
        normalProj[2] = verNormD[i].x*matRgPre[6]+verNormD[i].y*matRgPre[7]+verNormD[i].z*matRgPre[8];

        // project into the 640x480 image to find the corresponding source pixel
        int x = (vertexProj[0] / vertexProj[2] * focus) + 320;
        int y = (vertexProj[1] / vertexProj[2] * focus) + 240;
        int index = y*640 + x;
        if (index < 0 || index >= N) {
            Energy[i] = 0;
            flag[i] = 0;
            return;
        }

        disVdiff[0] = verProS[index].x - vertexProj[0];
        disNdiff[0] = verNormS[index].x - normalProj[0];
        disVdiff[1] = verProS[index].y - vertexProj[1];
        disNdiff[1] = verNormS[index].y - normalProj[1];
        disVdiff[2] = verProS[index].z - vertexProj[2];
        disNdiff[2] = verNormS[index].z - normalProj[2];

        float disV = DOT(disVdiff, disVdiff);
        float disN = DOT(disNdiff, disNdiff);

        if (disV < epsD && disN >= epsN && disV > epsDs) {
            error = DOTVA(verNormS[index], disVdiff);
            Energy[i] = error*error/10;   // remember to reduce sumEnergy afterwards

            // Ak = [ p x n , n ], Bk = n . (q - p): one row of the point-to-plane system
            Ak[i].a.x = vertexProj[1]*verNormS[index].z - vertexProj[2]*verNormS[index].y;
            Ak[i].a.y = vertexProj[2]*verNormS[index].x - vertexProj[0]*verNormS[index].z;
            Ak[i].a.z = vertexProj[0]*verNormS[index].y - vertexProj[1]*verNormS[index].x;
            Ak[i].n.x = verNormS[index].x;
            Ak[i].n.y = verNormS[index].y;
            Ak[i].n.z = verNormS[index].z;
            Bk[i] = DOTVA(verNormS[index], disVdiff);
            flag[i] = 1;
        } else {
            flag[i] = 0;
        }
    } else {
        flag[i] = 0;
    }
}

int main()
{
    // kernel invocation with N threads
    /*
    int validPoints[N];
    float verProjD[N][3];
    float verNormD[N][3];
    float matRgPre[3];
    float matTransltgPre[3];
    float verProS[N][3];
    float verNormS[N][3];
    float Energy[N];
    float Ak[N][6];
    float Bk[N];
    int flag[N];
    */
    const int focus = 550;
    const float epsD  = 40000.0;
    const float epsN  = 0.6;
    const float epsDs = 100.0;

    //----------------------------------
    // allocation for host
    //----------------------------------
    size_t flsizeN  = N*sizeof(float);
    size_t flsize3N = 3*N*sizeof(float);
    size_t flsize3  = 3*sizeof(float);
    size_t flsize6N = 6*N*sizeof(float);

    int*    validPoints    = (int*)malloc(flsizeN);
    vertex* verProjD       = (vertex*)malloc(flsize3N);
    vertex* verNormD       = (vertex*)malloc(flsize3N);
    float*  matRgPre       = (float*)malloc(9*sizeof(float));
    float*  matTransltgPre = (float*)malloc(flsize3);
    vertex* verProS        = (vertex*)malloc(flsize3N);
    vertex* verNormS       = (vertex*)malloc(flsize3N);
    float*  Energy         = (float*)malloc(flsizeN);
    mata*   Ak             = (mata*)malloc(flsize6N);
    float*  Bk             = (float*)malloc(flsizeN);
    int*    flag           = (int*)malloc(flsizeN);

    if (validPoints == NULL || verProjD == NULL || verNormD == NULL || matRgPre == NULL ||
        matTransltgPre == NULL || verProS == NULL || verNormS == NULL ||
        Energy == NULL || Ak == NULL || Bk == NULL || flag == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    // NOTE: the host input buffers are expected to be filled with real depth-map
    // data before the copies below; this harness leaves them uninitialized.

    //-----------------------------------
    // allocation for device
    //-----------------------------------
    cudaError_t err = cudaSuccess;

    int* d_validPoints = NULL;
    err = cudaMalloc((void **)&d_validPoints, flsizeN);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector d_validPoints (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    vertex* d_verProjD = NULL;
    err = cudaMalloc((void **)&d_verProjD, flsize3N);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device d_verProjD (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    vertex* d_verNormD = NULL;
    err = cudaMalloc((void **)&d_verNormD, flsize3N);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device d_verNormD (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    float* d_matRgPre = NULL;
    err = cudaMalloc((void **)&d_matRgPre, 9*sizeof(float));
    if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device d_matRgPre (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    float* d_matTransltgPre = NULL;
    err = cudaMalloc((void **)&d_matTransltgPre, flsize3);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device d_matTransltgPre (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    vertex* d_verProS = NULL;
    err = cudaMalloc((void **)&d_verProS, flsize3N);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device d_verProS (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    vertex* d_verNormS = NULL;
    err = cudaMalloc((void **)&d_verNormS, flsize3N);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device d_verNormS (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    float* d_Energy = NULL;
    err = cudaMalloc((void **)&d_Energy, flsizeN);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device d_Energy (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    mata* d_Ak = NULL;
    err = cudaMalloc((void **)&d_Ak, flsize6N);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device d_Ak (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    float* d_Bk = NULL;
    err = cudaMalloc((void **)&d_Bk, flsizeN);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device d_Bk (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    int* d_flag = NULL;
    err = cudaMalloc((void **)&d_flag, flsizeN);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device d_flag (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    //--------------------------------------------------------------------------
    // Copy the host input variables in host memory
    // to the device input variables in device memory
    //--------------------------------------------------------------------------
    err = cudaMemcpy(d_validPoints, validPoints, flsizeN, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    err = cudaMemcpy(d_verProjD, verProjD, flsize3N, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    err = cudaMemcpy(d_verNormD, verNormD, flsize3N, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    err = cudaMemcpy(d_matRgPre, matRgPre, 9*sizeof(float), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    err = cudaMemcpy(d_matTransltgPre, matTransltgPre, flsize3, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    err = cudaMemcpy(d_verProS, verProS, flsize3N, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    err = cudaMemcpy(d_verNormS, verNormS, flsize3N, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to copy variables from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    // output ----------------------
    dim3 threadsPerBlock(blocksizex, blocksizey);
    dim3 numBlocks(N/threadsPerBlock.x + 1, N/threadsPerBlock.y + 1);
    RunICP<<<numBlocks, threadsPerBlock>>>(
        d_validPoints, d_verProjD, d_verNormD,
        d_matRgPre, d_matTransltgPre, focus,
        d_verProS, d_verNormS,
        epsD, epsN, epsDs,
        d_Energy, d_Ak, d_Bk, d_flag);
    err = cudaGetLastError();
    if (err != cudaSuccess) { fprintf(stderr, "Failed to launch RunICP kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    //--------------------------------------------------------------------------
    // Copy the device result variables in device memory to the host result variables
    // in host memory.
    //--------------------------------------------------------------------------
    printf("Copy output data from the CUDA device to the host memory\n");

    err = cudaMemcpy(Energy, d_Energy, flsizeN, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to copy variable cube from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    err = cudaMemcpy(Ak, d_Ak, flsize6N, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to copy variable cube from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    err = cudaMemcpy(Bk, d_Bk, flsizeN, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to copy variable cube from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    err = cudaMemcpy(flag, d_flag, flsizeN, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) { fprintf(stderr, "Failed to copy variable cube from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }

    // accumulate the total point-to-plane error over all matched points
    float sumEnergy = 0;
    for (int i = 0; i < N; i++) {
        if (flag[i] == 1) {
            sumEnergy += Energy[i];
            //Ak.push_back(d_Ak[i]);
            //Bk.push_back(d_Bk[i]);
        }
    }

    // release device and host buffers
    cudaFree(d_validPoints); cudaFree(d_verProjD); cudaFree(d_verNormD);
    cudaFree(d_matRgPre); cudaFree(d_matTransltgPre); cudaFree(d_verProS);
    cudaFree(d_verNormS); cudaFree(d_Energy); cudaFree(d_Ak); cudaFree(d_Bk); cudaFree(d_flag);
    free(validPoints); free(verProjD); free(verNormD); free(matRgPre); free(matTransltgPre);
    free(verProS); free(verNormS); free(Energy); free(Ak); free(Bk); free(flag);

    return 0;
}
1c909adcd2a10e59c27b9cfe2d0b5d2048056dde.hip
// !!! This is a file automatically generated by hipify!!! /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include "testlayers.h" #include <array/NDArray.h> #include <array/NDArrayFactory.h> #include <graph/Context.h> #include <graph/Node.h> #include <graph/Variable.h> #include <graph/VariableSpace.h> #include <ops/specials_cuda.h> #include <helpers/TAD.h> #include <helpers/MmulHelper.h> #include <helpers/PointersManager.h> #include <hip/hip_runtime.h> #include <helpers/RandomLauncher.h> #include <helpers/ConstantShapeHelper.h> #include <helpers/ConstantTadHelper.h> #include <array/ShapeDescriptor.h> #include <array/ConstantDataBuffer.h> #include <helpers/ShapeUtils.h> #include <exceptions/cuda_exception.h> using namespace sd; using namespace sd::graph; class CudaBasicsTests1 : public testing::Test { public: }; ////////////////////////////////////////////////////////////////////////// static hipError_t allocateDeviceMem(LaunchContext& lc, std::vector<void*>& devicePtrs, const std::vector<std::pair<void*,size_t>>& hostData) { if(devicePtrs.size() != hostData.size()) throw std::invalid_argument("prepareDataForCuda: two input sts::vectors should same sizes !"); hipError_t cudaResult; void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); if(cudaResult != 0) return cudaResult; int* allocationPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); if(cudaResult != 0) return cudaResult; lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); hipStream_t stream = *lc.getCudaStream(); for(int i = 0; i < devicePtrs.size(); ++i) { cudaResult = hipMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second); if(cudaResult != 0) return cudaResult; hipMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, hipMemcpyHostToDevice, stream); } return cudaResult; } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, TestPairwise_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }, {0,0,0,0,0}); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); ASSERT_EQ(0, res); res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); ASSERT_EQ(0, res); res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); ASSERT_EQ(0, res); Nd4jPointer nativeStream = 
(Nd4jPointer)malloc(sizeof(hipStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(hipStream_t)); hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream)); auto stream = reinterpret_cast<hipStream_t *>(&nativeStream); x.dataBuffer()->allocatePrimary(); x.syncToHost(); hipMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), hipMemcpyHostToDevice, *stream); hipMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), hipMemcpyHostToDevice, *stream); res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); LaunchContext lc(stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr, z.shapeInfo(), devBufferPtrZ, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr); res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); z.dataBuffer()->allocatePrimary(); hipMemcpyAsync(z.buffer(), devBufferPtrZ, z.lengthOf() * x.sizeOfT(), hipMemcpyDeviceToHost, *stream); res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); hipFree(devBufferPtrX); hipFree(devBufferPtrZ); hipFree(devShapePtrX); // needed due to memcpy z.tickWriteHost(); for (int e = 0; e < z.lengthOf(); e++) { //nd4j_printf("step %i\n", e); ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execIndexReduceScalar_1) { NDArray x1('c', {2,2}, {0, 1, 2, 3}, sd::DataType::INT32); NDArray x2('c', {2,2}, {0.5, 1.5, -4.5, 3.5}, sd::DataType::BFLOAT16); NDArray x3('c', {2,2}, {0, -1, 0, 1}, sd::DataType::BOOL); NDArray scalar('c', {}, std::vector<double>{0}, sd::DataType::INT64); NDArray exp1('c', {}, std::vector<double>{3}, sd::DataType::INT64); NDArray exp2('c', {}, std::vector<double>{2}, sd::DataType::INT64); NDArray exp3('c', {}, std::vector<double>{1}, sd::DataType::INT64); void *dX1, *dX2, *dX3, *dZ; Nd4jLong *dX1ShapeInfo, *dX2ShapeInfo, *dX3ShapeInfo, *dZShapeInfo; hipError_t cudaResult; cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ), scalar.lengthOf() * scalar.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dX2ShapeInfo), shape::shapeInfoByteLength(x2.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dZShapeInfo), shape::shapeInfoByteLength(scalar.shapeInfo())); ASSERT_EQ(0, cudaResult); hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); x1.syncToHost(); x2.syncToHost(); x3.syncToHost(); scalar.syncToHost(); hipMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dX3, 
x3.buffer(), x3.lengthOf() * x3.sizeOfT(), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dX1ShapeInfo, x1.shapeInfo(), shape::shapeInfoByteLength(x1.shapeInfo()), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dX2ShapeInfo, x2.shapeInfo(), shape::shapeInfoByteLength(x2.shapeInfo()), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dX3ShapeInfo, x3.shapeInfo(), shape::shapeInfoByteLength(x3.shapeInfo()), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dZShapeInfo, scalar.shapeInfo(), shape::shapeInfoByteLength(scalar.shapeInfo()), hipMemcpyHostToDevice, stream); void* reductionPointer = nullptr; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); cudaResult = hipMemset(reductionPointer, 0, 1024 * 1024); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream, LaunchContext::defaultContext()->getReductionPointer(), LaunchContext::defaultContext()->getScalarPointer(), LaunchContext::defaultContext()->getAllocationPointer()); /***************************************/ NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, x1.buffer(), x1.shapeInfo(), dX1, dX1ShapeInfo, nullptr, scalar.buffer(), scalar.shapeInfo(), dZ, dZShapeInfo); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); hipMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), hipMemcpyDeviceToHost, stream); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); scalar.tickWriteHost(); ASSERT_NEAR(exp1.e<float>(0), scalar.e<float>(0), 1e-5); /***************************************/ NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, nullptr, x2.shapeInfo(), dX2, dX2ShapeInfo, nullptr, nullptr, scalar.shapeInfo(), dZ, dZShapeInfo); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); hipMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), hipMemcpyDeviceToHost, stream); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); ASSERT_NEAR(exp2.e<float>(0), scalar.e<float>(0), 1e-5); // ************************************* NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, nullptr, x3.shapeInfo(), dX3, dX3ShapeInfo, nullptr, nullptr, scalar.shapeInfo(), dZ, dZShapeInfo); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); hipMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), hipMemcpyDeviceToHost, stream); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); ASSERT_NEAR(exp3.e<float>(0), scalar.e<float>(0), 1e-5); /***************************************/ hipFree(dX1); hipFree(dX2); hipFree(dX3); hipFree(dZ); hipFree(dX1ShapeInfo); hipFree(dX2ShapeInfo); hipFree(dX3ShapeInfo); hipFree(dZShapeInfo); /***************************************/ cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3Scalar_1) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x1('c', {2,2}, {1,2,3,4}, sd::DataType::INT32); NDArray x2('c', {2,2}, {-1,-2,-3,-4}, sd::DataType::INT32); NDArray x3('c', {2,2}, {1.5,1.5,1.5,1.5}, sd::DataType::DOUBLE); NDArray x4('c', {2,2}, {1,2,3,4}, sd::DataType::DOUBLE); NDArray exp1('c', {}, std::vector<double>{-30.f}, sd::DataType::FLOAT32); NDArray exp2('c', {}, std::vector<double>{15.}, sd::DataType::DOUBLE); NDArray scalar1('c', {}, std::vector<double>{100.f}, sd::DataType::FLOAT32); NDArray 
scalar2('c', {}, std::vector<double>{100.}, sd::DataType::DOUBLE); void *dX1, *dX2, *dX3, *dX4, *dZ1, *dZ2; Nd4jLong *dX1ShapeInfo, *dX3ShapeInfo, *dZ1ShapeInfo, *dZ2ShapeInfo; hipError_t cudaResult; cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dX4), x4.lengthOf() * x4.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ1), scalar1.lengthOf() * scalar1.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ2), scalar2.lengthOf() * scalar2.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ1ShapeInfo), shape::shapeInfoByteLength(scalar1.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ2ShapeInfo), shape::shapeInfoByteLength(scalar2.shapeInfo())); ASSERT_EQ(0, cudaResult); hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); x1.syncToHost(); x2.syncToHost(); x3.syncToHost(); x4.syncToHost(); scalar1.syncToHost(); scalar2.syncToHost(); hipMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dX3, x3.buffer(), x3.lengthOf() * x3.sizeOfT(), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dX4, x4.buffer(), x4.lengthOf() * x4.sizeOfT(), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dX1ShapeInfo, x1.shapeInfo(), shape::shapeInfoByteLength(x1.shapeInfo()), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dX3ShapeInfo, x3.shapeInfo(), shape::shapeInfoByteLength(x3.shapeInfo()), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dZ1ShapeInfo, scalar1.shapeInfo(), shape::shapeInfoByteLength(scalar1.shapeInfo()), hipMemcpyHostToDevice, stream); hipMemcpyAsync(dZ2ShapeInfo, scalar2.shapeInfo(), shape::shapeInfoByteLength(scalar2.shapeInfo()), hipMemcpyHostToDevice, stream); /***************************************/ void* reductionPointer = nullptr; int* allocationPointer = nullptr; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream, reductionPointer, nullptr, allocationPointer); /***************************************/ NativeOpExecutioner::execReduce3Scalar(&lc, sd::reduce3::Dot,nullptr, x1.shapeInfo(),dX1, dX1ShapeInfo, nullptr, nullptr, x2.shapeInfo(),dX2, dX1ShapeInfo,nullptr, scalar1.shapeInfo(),dZ1, dZ1ShapeInfo); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); scalar1.tickWriteHost(); scalar2.tickWriteHost(); hipMemcpyAsync(scalar1.buffer(), dZ1, scalar1.lengthOf() * scalar1.sizeOfT(), hipMemcpyDeviceToHost, stream); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); ASSERT_NEAR(exp1.e<float>(0), scalar1.e<float>(0), 1e-5); 
/***************************************/ NativeOpExecutioner::execReduce3Scalar(&lc, sd::reduce3::Dot,nullptr, x3.shapeInfo(),dX3, dX3ShapeInfo, nullptr, nullptr, x4.shapeInfo(),dX4, dX3ShapeInfo,nullptr, scalar2.shapeInfo(),dZ2, dZ2ShapeInfo); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); hipMemcpyAsync(scalar2.buffer(), dZ2, scalar2.lengthOf() * scalar2.sizeOfT(), hipMemcpyDeviceToHost, stream); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); ASSERT_NEAR(exp2.e<float>(0), scalar2.e<float>(0), 1e-5); /***************************************/ hipFree(dX1); hipFree(dX2); hipFree(dX3); hipFree(dX4); hipFree(dZ1); hipFree(dZ2); hipFree(dX1ShapeInfo); hipFree(dX3ShapeInfo); hipFree(dZ1ShapeInfo); hipFree(dZ2ShapeInfo); /***************************************/ cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3_1) { NDArray x('c', {2,2}, {1,2,3,4}, sd::DataType::INT32); NDArray y('c', {2,2}, {-1,-2,-3,-4}, sd::DataType::INT32); NDArray exp('c', {}, std::vector<double>{-30.f}, sd::DataType::FLOAT32); NDArray z('c', {}, std::vector<double>{100.f}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0, 1}; x.syncToHost(); y.syncToHost(); z.syncToHost(); std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions std::vector<void*> devicePtrs(hostData.size(), nullptr); hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), nullptr, nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3_2) { NDArray x('c', {2,2}, {1.5,1.5,1.5,1.5}, sd::DataType::DOUBLE); NDArray y('c', {2,2}, {1,2,3,4}, sd::DataType::DOUBLE); NDArray exp('c', {}, std::vector<double>{15.}, sd::DataType::DOUBLE); NDArray z('c', {}, std::vector<double>{100.}, sd::DataType::DOUBLE); std::vector<int> dimensions = {0, 1}; // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = 
allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), nullptr, nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3_3) { NDArray x('c', {2,3}, {1,2,3,4,5,6}, sd::DataType::INT32); NDArray y('c', {2,3}, {-6,-5,-4,-3,-2,-1}, sd::DataType::INT32); NDArray exp('c', {3}, {-18,-20,-18}, sd::DataType::FLOAT32); NDArray z('c', {3}, {100,100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // evaluate yTad data shape::TAD yTad; yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size()); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3_4) { NDArray x('c', {2,3}, 
{1,2,3,4,5,6}, sd::DataType::DOUBLE); NDArray y('c', {2,3}, {1.5,1.5,1.5,1.5,1.5,1.5}, sd::DataType::DOUBLE); NDArray exp('c', {2}, {9,22.5}, sd::DataType::DOUBLE); NDArray z('c', {2}, {100,100}, sd::DataType::DOUBLE); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // evaluate yTad data shape::TAD yTad; yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size()); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3_5) { NDArray x('c', {2,2,3}, {1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5}, sd::DataType::FLOAT32); NDArray y('c', {2,2,3}, {1,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::FLOAT32); NDArray exp('c', {2,3}, {7.5, 10.5, 13.5, 25.5, 28.5, 31.5}, sd::DataType::FLOAT32); NDArray z('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // evaluate yTad data shape::TAD yTad; yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size()); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo 
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3All_1) { NDArray x('c', {2,2}, {1,2,3,4}, sd::DataType::INT32); NDArray y('c', {2,3}, {-1,1,-1,1,-1,1}, sd::DataType::INT32); NDArray exp('c', {2,3}, {2,-2,2,2,-2,2}, sd::DataType::FLOAT32); NDArray z('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // evaluate yTad data shape::TAD yTad; yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size()); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4 -- yTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3All(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), 
z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3All_2) { NDArray x('c', {2,2}, {1,2,3,4}, sd::DataType::DOUBLE); NDArray y('c', {2,3}, {1.5,1.5,1.5,1.5,1.5,1.5}, sd::DataType::DOUBLE); NDArray exp('c', {2,3}, {6,6,6,9,9,9}, sd::DataType::DOUBLE); NDArray z('c', {2,3}, {100,100,100,100,100,100,},sd::DataType::DOUBLE); std::vector<int> dimensions = {0}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // evaluate yTad data shape::TAD yTad; yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size()); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3All(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execIndexReduce_1) { NDArray x('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::DOUBLE); x.linspace(-2.); x.syncToDevice(); NDArray exp('c', {2}, {2, 2}, sd::DataType::INT64); NDArray z('c', {2}, {100,100}, sd::DataType::INT64); std::vector<int> dimensions = {1}; // evaluate 
xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]); cudaResult = hipStreamSynchronize(stream); if (cudaResult != 0) throw sd::cuda_exception::build("execIndexReduce failed", cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execIndexReduce_2) { NDArray x('c', {2,3,4,5}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::FLOAT32); x.linspace(-2.f); x.syncToDevice(); NDArray exp('c', {2,5}, {11,11,11,11,11,11,11,11,11,11}, sd::DataType::INT64); NDArray z('c', {2,5}, {100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT64); std::vector<int> dimensions = {1,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = 
allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execIndexReduce_3) { NDArray x('c', {2,3,4,5}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::DOUBLE); x.linspace(-2.); x.syncToDevice(); NDArray exp('c', {3}, {39, 39, 39}, sd::DataType::INT64); NDArray z('c', {3}, {100,100,100}, sd::DataType::INT64); std::vector<int> dimensions = {0,2,3}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execScalar_1) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', 
{2,3}, {0,1,2,3,4,5}, sd::DataType::INT64); NDArray exp('c',{2,3}, {0,0,1,1,2,2}, sd::DataType::INT64); NDArray scalar('c',{}, std::vector<double>{2.f}, sd::DataType::FLOAT32); NDArray z('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::INT64); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execScalar(&lc, sd::scalar::Divide, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, scalar.shapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(), nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execScalar_2) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', {2,3}, {-1,-2,-3,-4,-5,-6}, sd::DataType::INT64); NDArray exp('c',{2,3}, {10,10,10,10,10,10}, sd::DataType::FLOAT32); NDArray scalar('c',{}, std::vector<double>{10.f}, sd::DataType::FLOAT32); NDArray z('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::FLOAT32); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execScalar(&lc, sd::scalar::CopyPws, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, scalar.shapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(), nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execScalar_3) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', {2,3,2}, {0,1,2,3,4,5,6,7,8,9,10,11}, sd::DataType::INT64); NDArray scalars('c',{2,2}, {1,2,3,4}, sd::DataType::FLOAT32); NDArray exp('c', {2,3,2}, {0,0,2,1,4,2, 2,1,2,2,3,2}, sd::DataType::INT64); NDArray z('c', {2,3,2}, {100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT64); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); 
ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execScalar(&lc, sd::scalar::Divide, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, scalars.shapeInfo(), scalars.specialBuffer(), scalars.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execScalarBool_1) { NDArray x('c', {2,3}, {-1,-2,0,1,2,3}, sd::DataType::BFLOAT16); NDArray scalar('c',{}, std::vector<double>{0}, sd::DataType::BFLOAT16); NDArray exp('c',{2,3}, {0,0,0,1,1,1}, sd::DataType::BOOL); NDArray z('c', {2,3}, {100,100,100,100,100,100,}, sd::DataType::BOOL); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execScalarBool(&lc, sd::scalar::GreaterThan, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, scalar.shapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(), nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execScalarBool_2) { NDArray x('c', {2,3}, {0,1,2,3,4,5}, sd::DataType::FLOAT32); NDArray scalars('c',{2}, {-1,4}, sd::DataType::FLOAT32); NDArray exp('c', {2,3}, {1,1,1,0,0,1}, sd::DataType::BOOL); NDArray z('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::BOOL); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it 
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execScalarBool(&lc, sd::scalar::GreaterThan, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, scalars.shapeInfo(), scalars.specialBuffer(), scalars.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execBroadcast_1) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT32); NDArray y('c', {3}, {10, 20, 30}, sd::DataType::INT64); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT32); NDArray exp('c', {2,3,4}, {10, 11, 12, 13,24, 25, 26, 27,38, 39, 40, 41,22, 23, 24, 25,36, 37, 38, 39,50, 51, 52, 53}, sd::DataType::INT32); x.linspace(0); x.syncToDevice(); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(&lc, sd::broadcast::Add, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execBroadcast_2) { if 
(!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT32); NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, sd::DataType::FLOAT32); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::FLOAT32); NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, sd::DataType::FLOAT32); x.linspace(0); x.syncToDevice(); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(&lc, sd::broadcast::Add, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execBroadcastBool_1) { NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT32); NDArray y('c', {3}, {2, 12, 22}, sd::DataType::INT32); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,}, sd::DataType::BOOL); NDArray exp('c', {2,3,4}, {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0}, sd::DataType::BOOL); x.linspace(1); x.syncToDevice(); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- 
xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcastBool(&lc, sd::broadcast::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execBroadcastBool_2) { NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100},sd::DataType::FLOAT32); NDArray y('c', {2,4}, {1,10,10,15,20,20,20,24}, sd::DataType::FLOAT32); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::BOOL); NDArray exp('c', {2,3,4}, {1, 0, 0, 0,0, 0, 0, 0,0, 1, 0, 0,0, 0, 0, 0,0, 0, 0, 0,0, 0, 0, 1}, sd::DataType::BOOL); x.linspace(1); x.syncToDevice(); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcastBool(&lc, sd::broadcast::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) 
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execPairwiseTransform_1) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', {2,2,2}, {1,5,3,7,2,6,4,8}, sd::DataType::INT32); NDArray y('c', {4,2}, {0.1,0.2,0.3,0.4,1.5,0.6,0.7,1.8}, sd::DataType::DOUBLE); NDArray z('c', {8}, {100,100,100,100,100,100,100,100}, sd::DataType::INT32); NDArray exp('c', {8}, {0,1,2,3,3,5,6,6}, sd::DataType::INT32); x.permutei({2,1,0}); // -> {1,2,3,4,5,6,7,8} x.syncShape(); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execPairwiseTransform(&lc, sd::pairwise::Subtract, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execPairwiseBoolTransform_1) { NDArray x('c', {2,2,2}, {1,5,3,7,2,6,4,8}, sd::DataType::INT64); NDArray y('c', {4,2}, {0,2,0,4,0,6,0,8}, sd::DataType::INT64); NDArray z('c', {8}, {100,100,100,100,100,100,100,100}, sd::DataType::BOOL); NDArray exp('c', {8}, {0,1,0,1,0,1,0,1}, sd::DataType::BOOL); x.permutei({2,1,0}); // -> {1,2,3,4,5,6,7,8} x.syncShape(); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execPairwiseBoolTransform(&lc, sd::pairwise::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformFloat_1) { NDArray x('c', {2,2}, {0, 6.25, 2.25, 12.25}, sd::DataType::DOUBLE); NDArray z('c', {4}, {100,100,100,100}, sd::DataType::FLOAT32); NDArray exp('c', {4}, {0, 1.5, 2.5, 3.5}, sd::DataType::FLOAT32); x.permutei({1,0}); x.syncShape(); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformFloat(&lc, sd::transform::Sqrt, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), 
z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformFloat_2) { NDArray x('c', {1,4}, {0, 4, 9, 16}, sd::DataType::INT64); NDArray z('c', {2,2}, {100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {2,2}, {0, 2, 3, 4}, sd::DataType::DOUBLE); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformFloat(&lc, sd::transform::Sqrt, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformAny_1) { NDArray x('c', {2,2}, {0, 6.25, 2.25, 12.25}, sd::DataType::DOUBLE); NDArray z('c', {4,1}, {100,100,100,100}, sd::DataType::INT32); NDArray exp('c', {4,1}, {0, 2, 6, 12}, sd::DataType::INT32); x.permutei({1,0}); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformAny(&lc, sd::transform::Assign, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformAny_2) { NDArray x('c', {1,4}, {0, 6.25, 2.25, 12.25}, sd::DataType::BFLOAT16); NDArray z('c', {2,2}, {100,100,100,100}, sd::DataType::FLOAT32); NDArray exp('c', {2,2}, {0, 6.25, 2.25, 12.25}, sd::DataType::FLOAT32); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformAny(&lc, sd::transform::Assign, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } 
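// descriptive note (added): the two execTransformStrict tests below apply sd::transform::CubeDerivative;
// judging by their exp arrays, the op computes z[i] = 3*x[i]*x[i] (the derivative of x^3),
// so inputs 0..5 map to {0, 3, 12, 27, 48, 75}.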
//////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformStrict_1) { NDArray x('c', {2,3}, {0,2,4,1,3,5}, sd::DataType::DOUBLE); NDArray z('c', {3,2}, {100,100,100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {3,2}, {0, 3, 12, 27, 48, 75}, sd::DataType::DOUBLE); x.permutei({1,0}); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformStrict(&lc, sd::transform::CubeDerivative, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformStrict_2) { NDArray x('c', {6}, {0,1,2,3,4,5}, sd::DataType::FLOAT32); NDArray z('c', {3,2}, {100,100,100,100,100,100}, sd::DataType::FLOAT32); NDArray exp('c', {3,2}, {0, 3, 12, 27, 48, 75}, sd::DataType::FLOAT32); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformStrict(&lc, sd::transform::CubeDerivative, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformSame_1) { NDArray x('c', {2,3}, {0,2.5,4.5,1.5,3.5,5.5}, sd::DataType::DOUBLE); NDArray z('c', {1,6}, {100,100,100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {1,6}, {0,2.25,6.25,12.25,20.25,30.25}, sd::DataType::DOUBLE); x.permutei({1,0}); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformSame(&lc, sd::transform::Square, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformSame_2) { NDArray x('c', {6}, {0,1,2,3,4,5}, sd::DataType::INT32); NDArray z('c', {3,2}, {100,100,100,100,100,100}, sd::DataType::INT32); NDArray exp('c', {3,2}, 
{0,1,4,9,16,25}, sd::DataType::INT32); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformSame(&lc, sd::transform::Square, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformBool_1) { NDArray x('c', {2,3}, {0,2,4,-1,-3,-5}, sd::DataType::DOUBLE); NDArray z('c', {1,6}, {100,100,100,100,100,100}, sd::DataType::BOOL); NDArray exp('c', {1,6}, {0,0,1,0,1,0}, sd::DataType::BOOL); x.permutei({1,0}); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformBool(&lc, sd::transform::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformBool_2) { NDArray x('c', {6}, {0,-1,2,-3,4,-5}, sd::DataType::INT32); NDArray z('c', {3,2}, {100,100,100,100,100,100}, sd::DataType::BOOL); NDArray exp('c', {3,2}, {0,0,1,0,1,0}, sd::DataType::BOOL); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformBool(&lc, sd::transform::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceFloat_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {3}, {100,100,100}, sd::DataType::FLOAT32); NDArray exp('c', {3}, {2.5, 6.5, 10.5}, sd::DataType::FLOAT32); x.permutei({2,1,0}); std::vector<int> dimensions = {0,2}; // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims 
= sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceFloat(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceFloat_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {2,4}, {-1., 0., 1., 2.,11., 12., 13., 14.}, sd::DataType::DOUBLE); std::vector<int> dimensions = {1}; // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceFloat(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceSame_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {3}, {100,100,100}, sd::DataType::INT32); NDArray exp('c', {3}, {20, 52, 84}, sd::DataType::INT32); x.permutei({2,1,0}); std::vector<int> dimensions = {0,2}; // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceSame(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceSame_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::FLOAT32); NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, sd::DataType::FLOAT32); NDArray exp('c', {2,4}, {-3., 0., 3., 6.,33., 36., 39., 42.}, sd::DataType::FLOAT32); std::vector<int> dimensions = {1}; // create cuda stream and LaunchContext hipError_t cudaResult; 
hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceSame(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceBool_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, sd::DataType::INT32); NDArray z('c', {3}, {100,100,100}, sd::DataType::BOOL); NDArray exp('c', {3}, {0, 1, 1}, sd::DataType::BOOL); x.permutei({2,1,0}); std::vector<int> dimensions = {0,2}; // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceBool(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceBool_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, sd::DataType::FLOAT32); NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, sd::DataType::BOOL); NDArray exp('c', {2,4}, {1, 1, 1, 1, 0, 0, 0, 0}, sd::DataType::BOOL); std::vector<int> dimensions = {1}; // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceBool(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceLong_1) { NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, sd::DataType::INT32); NDArray z('c', {3}, {100,100,100}, sd::DataType::INT64); NDArray exp('c', {3}, 
{5,6,6}, sd::DataType::INT64); x.permutei({2,1,0}); std::vector<int> dimensions = {0,2}; // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceLong(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceLong_2) { NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, sd::DataType::FLOAT32); NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, sd::DataType::INT64); NDArray exp('c', {2,4}, {3, 1, 3, 2, 2, 1, 2, 3}, sd::DataType::INT64); std::vector<int> dimensions = {1}; // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceLong(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceFloatScalar_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32); NDArray exp('c', {}, std::vector<double>{6.5}, sd::DataType::FLOAT32); x.permutei({2,1,0}); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceFloatScalar(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream 
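// descriptive note (added): the reductionPointer and allocationPointer buffers hipMalloc'ed above are not explicitly hipFree'd before this test returns.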
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceFloatScalar_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::DOUBLE); NDArray exp('c', {}, std::vector<double>{6.5}, sd::DataType::DOUBLE); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceFloatScalar(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceSameScalar_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT32); NDArray exp('c', {}, std::vector<double>{156}, sd::DataType::INT32); x.permutei({2,1,0}); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceSameScalar(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceSameScalar_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::DOUBLE); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::DOUBLE); NDArray exp('c', {}, std::vector<double>{156}, sd::DataType::DOUBLE); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = 
hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceSameScalar(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceBoolScalar_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, sd::DataType::INT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::BOOL); NDArray exp('c', {}, std::vector<double>{1}, sd::DataType::BOOL); x.permutei({2,1,0}); x.syncShape(); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceBoolScalar(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceBoolScalar_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, sd::DataType::DOUBLE); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::BOOL); NDArray exp('c', {}, std::vector<double>{1}, sd::DataType::BOOL); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceBoolScalar(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, 
cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceLongScalar_1) { NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, sd::DataType::INT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT64); NDArray exp('c', {}, std::vector<double>{17}, sd::DataType::INT64); x.permutei({2,1,0}); x.syncShape(); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceLongScalar(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceLongScalar_2) { NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, sd::DataType::DOUBLE); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT64); NDArray exp('c', {}, std::vector<double>{17}, sd::DataType::INT64); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceLongScalar(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3TAD_1) { NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::FLOAT32); NDArray y('c', {2,2}, {1,2,3,4}, sd::DataType::FLOAT32); NDArray exp('c', {3}, {10,20,30}, sd::DataType::DOUBLE); NDArray z('c', {3}, {100,100,100}, sd::DataType::DOUBLE); std::vector<int> dimensions = 
{0,1}; auto packX = ConstantTadHelper::getInstance().tadForDimensions(x.shapeInfo(), dimensions); LaunchContext* context = x.getContext(); x.syncToDevice(); y.syncToDevice(); PointersManager pm(context, "execReduce3TAD_1"); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3TAD(context, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, dimensions.size(), packX.specialShapeInfo(), packX.specialOffsets(), nullptr, nullptr); pm.synchronize(); // cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // z.printIndexedBuffer("OutputReduce3TAD"); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3TAD_2) { NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::INT64); NDArray y('c', {2,3}, {1,2,3,4,5,6}, sd::DataType::INT64); NDArray exp('c', {2}, {10,73}, sd::DataType::FLOAT32); NDArray z('c', {2}, {100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3TAD(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3TAD_3) { NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::INT64); NDArray y('c', {3}, {1,2,3}, sd::DataType::INT64); NDArray exp('c', {2,2}, {-22,-4,14,32}, sd::DataType::FLOAT32); NDArray z('c', {2,2}, {100,100,100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), 
dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3TAD(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3TAD_4) { NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::DOUBLE); NDArray y('c', {2,2,3}, {10,20,30,40,50,60,70,80,90,100,110,120}, sd::DataType::DOUBLE); NDArray exp('c', {}, std::vector<double>{1820}, sd::DataType::FLOAT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0,1,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3TAD(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], 
(Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execSummaryStats_1) { // FIXME: Yurii, this test should be fixed if (1 > 0) return; NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::INT64); NDArray exp('c', {}, std::vector<double>{3.605551}, sd::DataType::FLOAT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); // call cuda kernel which calculates result NativeOpExecutioner::execSummaryStats(&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), true); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execSummaryStats_2) { NDArray x('c', {2,2,3}, {-5,-4,-3,-20,-1,0,1,2,3,4,5,6}, sd::DataType::DOUBLE); NDArray exp('c', {2}, {3.405877, 9.715966}, sd::DataType::FLOAT32); NDArray z('c', {2}, {100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execSummaryStats(&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], true); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, 
cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } /* //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execSummaryStats_3) { NDArray x('c', {2,2,3}, {-5,-4,-3,-20,-1,0,1,2,3,4,5,6}, sd::DataType::DOUBLE); NDArray exp('c', {2}, {10.606602, 2.121320}, sd::DataType::FLOAT32); NDArray z('c', {2}, {100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execSummaryStats(&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.special(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], true); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } */ //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execSummaryStatsScalar_1) { NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::INT64); NDArray exp('c', {}, std::vector<double>{3.605551}, sd::DataType::FLOAT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); // call cuda kernel which calculates result NativeOpExecutioner::execSummaryStatsScalar(&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), true); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) 
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execRandom_1) { // NDArray z('c', {10}, {100,0,0,0,0,0,0,0,0,0}, sd::DataType::DOUBLE); NDArray z('c', {10}, {100,0,0,0,0,0,0,0,0,100}, sd::DataType::FLOAT32); NDArray exp('c', {10}, {0.050942, -0.183229, -0.093921, 0.075469, 0.257166, -0.254838, 0.342227, -0.682188, -0.004345, 0.464633}, sd::DataType::FLOAT32); sd::graph::RandomGenerator gen(119,5); hipError_t cudaResult; NDArray* array = &z; ExtraArguments arguments({0.f, 0.5f}); auto context = z.getContext(); PointersManager pm(context, "tests::execRandom_1"); // z.printIndexedBuffer("Input data"); // z.syncToDevice(); NativeOpExecutioner::execRandom(context, random::GaussianDistribution, &gen, array->buffer(), array->shapeInfo(), array->specialBuffer(), array->specialShapeInfo(), array->buffer(), array->shapeInfo(), array->specialBuffer(), array->specialShapeInfo(), array->buffer(), array->shapeInfo(), array->specialBuffer(), array->specialShapeInfo(), arguments.argumentsAsT(array->dataType())); pm.synchronize(); z.tickWriteDevice(); // z.printIndexedBuffer("Output Gaussian"); // RandomLauncher::fillGaussian(context, gen, &z, 0.f, 0.5f); // pm.synchronize(); // z.tickWriteDevice(); // z.printIndexedBuffer("Output Gaussian"); // hipStream_t stream; // cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); // LaunchContext lc(&stream); // // // ::execRandom(extraPointers, random::GaussianDistribution, &gen, z.buffer(), z.shapeInfo(), z.specialBuffer(), z.special(), &extra); // // call cuda kernel which calculates result // NativeOpExecutioner::execRandom(&lc, sd::random::GaussianDistribution, // &gen, // nullptr, z.shapeInfo(), z.specialBuffer(), z.special(), // nullptr, z.shapeInfo(), z.specialBuffer(), z.special(), // nullptr, z.shapeInfo(), z.specialBuffer(), z.special(), // extraArguments.argumentsAsT(z.dataType())); // // cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); // ASSERT_EQ(cudaResult, 0); // z.tickWriteDevice(); // z.syncToHost(); // z.printIndexedBuffer("Random1"); ASSERT_EQ(exp, z); // // verify results // for (int e = 0; e < z.lengthOf(); e++) // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // hipFree(dExtraArgs); // free allocated global device memory // hipFree(dGen); // delete cuda stream // cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execRandom_2) { NDArray x('c', {10}, {0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1}, sd::DataType::DOUBLE); NDArray z('c', {2,5}, {100,100,100,100,100,100,100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {10}, {0., 0., 0.3, 0., 0.5, 0., 0.7, 0., 0., 1.}, sd::DataType::DOUBLE); ExtraArguments extraArguments({0.7}); sd::graph::RandomGenerator gen(119,5); // // prepare input arrays for prepareDataForCuda function // std::vector<std::pair<void*,size_t>> hostData; // hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- dimensions // std::vector<void*> devicePtrs(hostData.size(), nullptr); // // create cuda stream and LaunchContext hipError_t cudaResult; // hipStream_t stream; // cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext* lc = x.getContext(); //(&stream); // allocate required amount of global device memory and copy 
host data to it // cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execRandom(lc, sd::random::DropOut, &gen, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), extraArguments.argumentsAsT(z.dataType())); cudaResult = hipStreamSynchronize(*lc->getCudaStream()); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); z.syncToHost(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory // for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream // cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execRandom_3) { NDArray z('c', {10}, {100,100,100,100,100,100,100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {10}, {2.373649, 2.239791, 1.887353, 2.488636, 2.068904, 2.281399, 1.828228, 2.228222, 2.490847, 1.669537}, sd::DataType::DOUBLE); std::vector<double> extraArguments = {1.5, 2.5}; sd::graph::RandomGenerator gen(119,5); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- dimensions std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execRandom(&lc, sd::random::UniformDistribution, &gen, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), devicePtrs[0]); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execRandom_4) { NDArray z('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, sd::DataType::FLOAT32); NDArray exp('c', {10}, {2.373649, 2.281399, 2.239791, 1.828228, 1.887353, 2.228222, 2.488636, 2.490847, 2.068904, 1.669537}, sd::DataType::FLOAT32); z.permutei({1,0}); ExtraArguments extraArguments({1.5, 2.5}); sd::graph::RandomGenerator gen(119,5); // // prepare input arrays for prepareDataForCuda function // std::vector<std::pair<void*,size_t>> hostData; // hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- dimensions // std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext // hipError_t cudaResult; // hipStream_t stream; // cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); // LaunchContext lc(&stream); // // // allocate required amount of global device memory and copy host data to it // cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); auto context = z.getContext(); 
PointersManager pm(context, "execRandom4"); // call cuda kernel which calculates result NativeOpExecutioner::execRandom(context, sd::random::UniformDistribution, &gen, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), extraArguments.argumentsAsT(z.dataType())); // cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // z.printIndexedBuffer("Output Uniform4"); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory // for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream // cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); }
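////////////////////////////////////////////////////////////////////////////
// Illustrative sketch only -- not part of the original test suite and not
// referenced by any test above; the helper name is arbitrary. It isolates the
// host->device staging pattern the tests build on: one hipMalloc per host
// chunk plus a hipMemcpyAsync ordered on the caller's stream (the caller
// later synchronizes the stream before reading results back on the host).
inline hipError_t stageHostChunkOnStream(const void* hostChunk, size_t numBytes, void** devicePtr, hipStream_t stream) {
    // one device allocation per host chunk
    hipError_t err = hipMalloc(devicePtr, numBytes);
    if (err != hipSuccess)
        return err;
    // asynchronous copy; kernels launched later on the same stream are ordered after it
    return hipMemcpyAsync(*devicePtr, hostChunk, numBytes, hipMemcpyHostToDevice, stream);
}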
1c909adcd2a10e59c27b9cfe2d0b5d2048056dde.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include "testlayers.h" #include <array/NDArray.h> #include <array/NDArrayFactory.h> #include <graph/Context.h> #include <graph/Node.h> #include <graph/Variable.h> #include <graph/VariableSpace.h> #include <ops/specials_cuda.h> #include <helpers/TAD.h> #include <helpers/MmulHelper.h> #include <helpers/PointersManager.h> #include <cuda.h> #include <helpers/RandomLauncher.h> #include <helpers/ConstantShapeHelper.h> #include <helpers/ConstantTadHelper.h> #include <array/ShapeDescriptor.h> #include <array/ConstantDataBuffer.h> #include <helpers/ShapeUtils.h> #include <exceptions/cuda_exception.h> using namespace sd; using namespace sd::graph; class CudaBasicsTests1 : public testing::Test { public: }; ////////////////////////////////////////////////////////////////////////// static cudaError_t allocateDeviceMem(LaunchContext& lc, std::vector<void*>& devicePtrs, const std::vector<std::pair<void*,size_t>>& hostData) { if(devicePtrs.size() != hostData.size()) throw std::invalid_argument("prepareDataForCuda: the two input std::vectors should have the same size!"); cudaError_t cudaResult; void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); if(cudaResult != 0) return cudaResult; int* allocationPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); if(cudaResult != 0) return cudaResult; lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); cudaStream_t stream = *lc.getCudaStream(); for(int i = 0; i < devicePtrs.size(); ++i) { cudaResult = cudaMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second); if(cudaResult != 0) return cudaResult; cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, stream); } return cudaResult; } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, TestPairwise_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }, {0,0,0,0,0}); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); ASSERT_EQ(0, res); res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); ASSERT_EQ(0, res); res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); ASSERT_EQ(0, res); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to 
allocate memory for new CUDA stream", sizeof(cudaStream_t)); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream)); auto stream = reinterpret_cast<cudaStream_t *>(&nativeStream); x.dataBuffer()->allocatePrimary(); x.syncToHost(); cudaMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), cudaMemcpyHostToDevice, *stream); cudaMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), cudaMemcpyHostToDevice, *stream); res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); LaunchContext lc(stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr, z.shapeInfo(), devBufferPtrZ, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr); res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); z.dataBuffer()->allocatePrimary(); cudaMemcpyAsync(z.buffer(), devBufferPtrZ, z.lengthOf() * x.sizeOfT(), cudaMemcpyDeviceToHost, *stream); res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); cudaFree(devBufferPtrX); cudaFree(devBufferPtrZ); cudaFree(devShapePtrX); // needed due to memcpy z.tickWriteHost(); for (int e = 0; e < z.lengthOf(); e++) { //nd4j_printf("step %i\n", e); ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execIndexReduceScalar_1) { NDArray x1('c', {2,2}, {0, 1, 2, 3}, sd::DataType::INT32); NDArray x2('c', {2,2}, {0.5, 1.5, -4.5, 3.5}, sd::DataType::BFLOAT16); NDArray x3('c', {2,2}, {0, -1, 0, 1}, sd::DataType::BOOL); NDArray scalar('c', {}, std::vector<double>{0}, sd::DataType::INT64); NDArray exp1('c', {}, std::vector<double>{3}, sd::DataType::INT64); NDArray exp2('c', {}, std::vector<double>{2}, sd::DataType::INT64); NDArray exp3('c', {}, std::vector<double>{1}, sd::DataType::INT64); void *dX1, *dX2, *dX3, *dZ; Nd4jLong *dX1ShapeInfo, *dX2ShapeInfo, *dX3ShapeInfo, *dZShapeInfo; cudaError_t cudaResult; cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ), scalar.lengthOf() * scalar.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX2ShapeInfo), shape::shapeInfoByteLength(x2.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZShapeInfo), shape::shapeInfoByteLength(scalar.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); x1.syncToHost(); x2.syncToHost(); x3.syncToHost(); scalar.syncToHost(); cudaMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dX3, x3.buffer(), x3.lengthOf() * x3.sizeOfT(), 
cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dX1ShapeInfo, x1.shapeInfo(), shape::shapeInfoByteLength(x1.shapeInfo()), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dX2ShapeInfo, x2.shapeInfo(), shape::shapeInfoByteLength(x2.shapeInfo()), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dX3ShapeInfo, x3.shapeInfo(), shape::shapeInfoByteLength(x3.shapeInfo()), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dZShapeInfo, scalar.shapeInfo(), shape::shapeInfoByteLength(scalar.shapeInfo()), cudaMemcpyHostToDevice, stream); void* reductionPointer = nullptr; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); cudaResult = cudaMemset(reductionPointer, 0, 1024 * 1024); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream, LaunchContext::defaultContext()->getReductionPointer(), LaunchContext::defaultContext()->getScalarPointer(), LaunchContext::defaultContext()->getAllocationPointer()); /***************************************/ NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, x1.buffer(), x1.shapeInfo(), dX1, dX1ShapeInfo, nullptr, scalar.buffer(), scalar.shapeInfo(), dZ, dZShapeInfo); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); cudaMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), cudaMemcpyDeviceToHost, stream); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); scalar.tickWriteHost(); ASSERT_NEAR(exp1.e<float>(0), scalar.e<float>(0), 1e-5); /***************************************/ NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, nullptr, x2.shapeInfo(), dX2, dX2ShapeInfo, nullptr, nullptr, scalar.shapeInfo(), dZ, dZShapeInfo); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); cudaMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), cudaMemcpyDeviceToHost, stream); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); ASSERT_NEAR(exp2.e<float>(0), scalar.e<float>(0), 1e-5); // ************************************* NativeOpExecutioner::execIndexReduceScalar(&lc, sd::indexreduce::IndexAbsoluteMax, nullptr, x3.shapeInfo(), dX3, dX3ShapeInfo, nullptr, nullptr, scalar.shapeInfo(), dZ, dZShapeInfo); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); cudaMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), cudaMemcpyDeviceToHost, stream); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); ASSERT_NEAR(exp3.e<float>(0), scalar.e<float>(0), 1e-5); /***************************************/ cudaFree(dX1); cudaFree(dX2); cudaFree(dX3); cudaFree(dZ); cudaFree(dX1ShapeInfo); cudaFree(dX2ShapeInfo); cudaFree(dX3ShapeInfo); cudaFree(dZShapeInfo); /***************************************/ cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3Scalar_1) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x1('c', {2,2}, {1,2,3,4}, sd::DataType::INT32); NDArray x2('c', {2,2}, {-1,-2,-3,-4}, sd::DataType::INT32); NDArray x3('c', {2,2}, {1.5,1.5,1.5,1.5}, sd::DataType::DOUBLE); NDArray x4('c', {2,2}, {1,2,3,4}, sd::DataType::DOUBLE); NDArray exp1('c', {}, std::vector<double>{-30.f}, sd::DataType::FLOAT32); NDArray exp2('c', {}, std::vector<double>{15.}, sd::DataType::DOUBLE); NDArray scalar1('c', {}, std::vector<double>{100.f}, sd::DataType::FLOAT32); NDArray scalar2('c', 
{}, std::vector<double>{100.}, sd::DataType::DOUBLE); void *dX1, *dX2, *dX3, *dX4, *dZ1, *dZ2; Nd4jLong *dX1ShapeInfo, *dX3ShapeInfo, *dZ1ShapeInfo, *dZ2ShapeInfo; cudaError_t cudaResult; cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX4), x4.lengthOf() * x4.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ1), scalar1.lengthOf() * scalar1.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ2), scalar2.lengthOf() * scalar2.sizeOfT()); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ1ShapeInfo), shape::shapeInfoByteLength(scalar1.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ2ShapeInfo), shape::shapeInfoByteLength(scalar2.shapeInfo())); ASSERT_EQ(0, cudaResult); cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); x1.syncToHost(); x2.syncToHost(); x3.syncToHost(); x4.syncToHost(); scalar1.syncToHost(); scalar2.syncToHost(); cudaMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dX3, x3.buffer(), x3.lengthOf() * x3.sizeOfT(), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dX4, x4.buffer(), x4.lengthOf() * x4.sizeOfT(), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dX1ShapeInfo, x1.shapeInfo(), shape::shapeInfoByteLength(x1.shapeInfo()), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dX3ShapeInfo, x3.shapeInfo(), shape::shapeInfoByteLength(x3.shapeInfo()), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dZ1ShapeInfo, scalar1.shapeInfo(), shape::shapeInfoByteLength(scalar1.shapeInfo()), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(dZ2ShapeInfo, scalar2.shapeInfo(), shape::shapeInfoByteLength(scalar2.shapeInfo()), cudaMemcpyHostToDevice, stream); /***************************************/ void* reductionPointer = nullptr; int* allocationPointer = nullptr; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream, reductionPointer, nullptr, allocationPointer); /***************************************/ NativeOpExecutioner::execReduce3Scalar(&lc, sd::reduce3::Dot,nullptr, x1.shapeInfo(),dX1, dX1ShapeInfo, nullptr, nullptr, x2.shapeInfo(),dX2, dX1ShapeInfo,nullptr, scalar1.shapeInfo(),dZ1, dZ1ShapeInfo); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); scalar1.tickWriteHost(); scalar2.tickWriteHost(); cudaMemcpyAsync(scalar1.buffer(), dZ1, scalar1.lengthOf() * scalar1.sizeOfT(), cudaMemcpyDeviceToHost, stream); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); ASSERT_NEAR(exp1.e<float>(0), scalar1.e<float>(0), 1e-5); 
/***************************************/ NativeOpExecutioner::execReduce3Scalar(&lc, sd::reduce3::Dot,nullptr, x3.shapeInfo(),dX3, dX3ShapeInfo, nullptr, nullptr, x4.shapeInfo(),dX4, dX3ShapeInfo,nullptr, scalar2.shapeInfo(),dZ2, dZ2ShapeInfo); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); cudaMemcpyAsync(scalar2.buffer(), dZ2, scalar2.lengthOf() * scalar2.sizeOfT(), cudaMemcpyDeviceToHost, stream); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); ASSERT_NEAR(exp2.e<float>(0), scalar2.e<float>(0), 1e-5); /***************************************/ cudaFree(dX1); cudaFree(dX2); cudaFree(dX3); cudaFree(dX4); cudaFree(dZ1); cudaFree(dZ2); cudaFree(dX1ShapeInfo); cudaFree(dX3ShapeInfo); cudaFree(dZ1ShapeInfo); cudaFree(dZ2ShapeInfo); /***************************************/ cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3_1) { NDArray x('c', {2,2}, {1,2,3,4}, sd::DataType::INT32); NDArray y('c', {2,2}, {-1,-2,-3,-4}, sd::DataType::INT32); NDArray exp('c', {}, std::vector<double>{-30.f}, sd::DataType::FLOAT32); NDArray z('c', {}, std::vector<double>{100.f}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0, 1}; x.syncToHost(); y.syncToHost(); z.syncToHost(); std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions std::vector<void*> devicePtrs(hostData.size(), nullptr); cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), nullptr, nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3_2) { NDArray x('c', {2,2}, {1.5,1.5,1.5,1.5}, sd::DataType::DOUBLE); NDArray y('c', {2,2}, {1,2,3,4}, sd::DataType::DOUBLE); NDArray exp('c', {}, std::vector<double>{15.}, sd::DataType::DOUBLE); NDArray z('c', {}, std::vector<double>{100.}, sd::DataType::DOUBLE); std::vector<int> dimensions = {0, 1}; // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to 
it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), nullptr, nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3_3) { NDArray x('c', {2,3}, {1,2,3,4,5,6}, sd::DataType::INT32); NDArray y('c', {2,3}, {-6,-5,-4,-3,-2,-1}, sd::DataType::INT32); NDArray exp('c', {3}, {-18,-20,-18}, sd::DataType::FLOAT32); NDArray z('c', {3}, {100,100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // evaluate yTad data shape::TAD yTad; yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size()); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3_4) 
{ NDArray x('c', {2,3}, {1,2,3,4,5,6}, sd::DataType::DOUBLE); NDArray y('c', {2,3}, {1.5,1.5,1.5,1.5,1.5,1.5}, sd::DataType::DOUBLE); NDArray exp('c', {2}, {9,22.5}, sd::DataType::DOUBLE); NDArray z('c', {2}, {100,100}, sd::DataType::DOUBLE); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // evaluate yTad data shape::TAD yTad; yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size()); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3_5) { NDArray x('c', {2,2,3}, {1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5}, sd::DataType::FLOAT32); NDArray y('c', {2,2,3}, {1,2,3,4,5,6,7,8,9,10,11,12}, sd::DataType::FLOAT32); NDArray exp('c', {2,3}, {7.5, 10.5, 13.5, 25.5, 28.5, 31.5}, sd::DataType::FLOAT32); NDArray z('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // evaluate yTad data shape::TAD yTad; yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size()); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- 
xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3All_1) { NDArray x('c', {2,2}, {1,2,3,4}, sd::DataType::INT32); NDArray y('c', {2,3}, {-1,1,-1,1,-1,1}, sd::DataType::INT32); NDArray exp('c', {2,3}, {2,-2,2,2,-2,2}, sd::DataType::FLOAT32); NDArray z('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // evaluate yTad data shape::TAD yTad; yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size()); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4 -- yTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3All(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, 
z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3All_2) { NDArray x('c', {2,2}, {1,2,3,4}, sd::DataType::DOUBLE); NDArray y('c', {2,3}, {1.5,1.5,1.5,1.5,1.5,1.5}, sd::DataType::DOUBLE); NDArray exp('c', {2,3}, {6,6,6,9,9,9}, sd::DataType::DOUBLE); NDArray z('c', {2,3}, {100,100,100,100,100,100,},sd::DataType::DOUBLE); std::vector<int> dimensions = {0}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // evaluate yTad data shape::TAD yTad; yTad.init(y.shapeInfo(), dimensions.data(), dimensions.size()); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3All(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execIndexReduce_1) { NDArray x('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::DOUBLE); x.linspace(-2.); x.syncToDevice(); NDArray exp('c', {2}, {2, 2}, sd::DataType::INT64); NDArray z('c', {2}, {100,100}, sd::DataType::INT64); std::vector<int> 
dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]); cudaResult = cudaStreamSynchronize(stream); if (cudaResult != 0) throw sd::cuda_exception::build("execIndexReduce failed", cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execIndexReduce_2) { NDArray x('c', {2,3,4,5}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::FLOAT32); x.linspace(-2.f); x.syncToDevice(); NDArray exp('c', {2,5}, {11,11,11,11,11,11,11,11,11,11}, sd::DataType::INT64); NDArray z('c', {2,5}, {100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT64); std::vector<int> dimensions = {1,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host 
data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execIndexReduce_3) { NDArray x('c', {2,3,4,5}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100, 100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::DOUBLE); x.linspace(-2.); x.syncToDevice(); NDArray exp('c', {3}, {39, 39, 39}, sd::DataType::INT64); NDArray z('c', {3}, {100,100,100}, sd::DataType::INT64); std::vector<int> dimensions = {0,2,3}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execIndexReduce(&lc, sd::indexreduce::IndexMax, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execScalar_1) { if 
(!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', {2,3}, {0,1,2,3,4,5}, sd::DataType::INT64); NDArray exp('c',{2,3}, {0,0,1,1,2,2}, sd::DataType::INT64); NDArray scalar('c',{}, std::vector<double>{2.f}, sd::DataType::FLOAT32); NDArray z('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::INT64); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execScalar(&lc, sd::scalar::Divide, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, scalar.shapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(), nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execScalar_2) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', {2,3}, {-1,-2,-3,-4,-5,-6}, sd::DataType::INT64); NDArray exp('c',{2,3}, {10,10,10,10,10,10}, sd::DataType::FLOAT32); NDArray scalar('c',{}, std::vector<double>{10.f}, sd::DataType::FLOAT32); NDArray z('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::FLOAT32); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execScalar(&lc, sd::scalar::CopyPws, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, scalar.shapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(), nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execScalar_3) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', {2,3,2}, {0,1,2,3,4,5,6,7,8,9,10,11}, sd::DataType::INT64); NDArray scalars('c',{2,2}, {1,2,3,4}, sd::DataType::FLOAT32); NDArray exp('c', {2,3,2}, {0,0,2,1,4,2, 2,1,2,2,3,2}, sd::DataType::INT64); NDArray z('c', {2,3,2}, {100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT64); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext 
cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execScalar(&lc, sd::scalar::Divide, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, scalars.shapeInfo(), scalars.specialBuffer(), scalars.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execScalarBool_1) { NDArray x('c', {2,3}, {-1,-2,0,1,2,3}, sd::DataType::BFLOAT16); NDArray scalar('c',{}, std::vector<double>{0}, sd::DataType::BFLOAT16); NDArray exp('c',{2,3}, {0,0,0,1,1,1}, sd::DataType::BOOL); NDArray z('c', {2,3}, {100,100,100,100,100,100,}, sd::DataType::BOOL); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result // call cuda kernel which calculates result NativeOpExecutioner::execScalarBool(&lc, sd::scalar::GreaterThan, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, scalar.shapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(), nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execScalarBool_2) { NDArray x('c', {2,3}, {0,1,2,3,4,5}, sd::DataType::FLOAT32); NDArray scalars('c',{2}, {-1,4}, sd::DataType::FLOAT32); NDArray exp('c', {2,3}, {1,1,1,0,0,1}, sd::DataType::BOOL); NDArray z('c', {2,3}, {100,100,100,100,100,100}, sd::DataType::BOOL); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext 
lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execScalarBool(&lc, sd::scalar::GreaterThan, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, scalars.shapeInfo(), scalars.specialBuffer(), scalars.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execBroadcast_1) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT32); NDArray y('c', {3}, {10, 20, 30}, sd::DataType::INT64); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT32); NDArray exp('c', {2,3,4}, {10, 11, 12, 13,24, 25, 26, 27,38, 39, 40, 41,22, 23, 24, 25,36, 37, 38, 39,50, 51, 52, 53}, sd::DataType::INT32); x.linspace(0); x.syncToDevice(); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(&lc, sd::broadcast::Add, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } 
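// execBroadcast_2 below repeats the pattern of execBroadcast_1, but broadcasts a {2,4} FLOAT32 y
// along dimensions {0,2} of the INT32 input, producing a FLOAT32 output.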
//////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execBroadcast_2) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT32); NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, sd::DataType::FLOAT32); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::FLOAT32); NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, sd::DataType::FLOAT32); x.linspace(0); x.syncToDevice(); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(&lc, sd::broadcast::Add, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execBroadcastBool_1) { NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::INT32); NDArray y('c', {3}, {2, 12, 22}, sd::DataType::INT32); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,}, sd::DataType::BOOL); NDArray exp('c', {2,3,4}, {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0}, sd::DataType::BOOL); x.linspace(1); x.syncToDevice(); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); 
// 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcastBool(&lc, sd::broadcast::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execBroadcastBool_2) { NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100},sd::DataType::FLOAT32); NDArray y('c', {2,4}, {1,10,10,15,20,20,20,24}, sd::DataType::FLOAT32); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, sd::DataType::BOOL); NDArray exp('c', {2,3,4}, {1, 0, 0, 0,0, 0, 0, 0,0, 1, 0, 0,0, 0, 0, 0,0, 0, 0, 0,0, 0, 0, 1}, sd::DataType::BOOL); x.linspace(1); x.syncToDevice(); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcastBool(&lc, sd::broadcast::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = 
cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execPairwiseTransform_1) { if (!Environment::getInstance().isExperimentalBuild()) return; NDArray x('c', {2,2,2}, {1,5,3,7,2,6,4,8}, sd::DataType::INT32); NDArray y('c', {4,2}, {0.1,0.2,0.3,0.4,1.5,0.6,0.7,1.8}, sd::DataType::DOUBLE); NDArray z('c', {8}, {100,100,100,100,100,100,100,100}, sd::DataType::INT32); NDArray exp('c', {8}, {0,1,2,3,3,5,6,6}, sd::DataType::INT32); x.permutei({2,1,0}); // -> {1,2,3,4,5,6,7,8} x.syncShape(); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execPairwiseTransform(&lc, sd::pairwise::Subtract, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execPairwiseBoolTransform_1) { NDArray x('c', {2,2,2}, {1,5,3,7,2,6,4,8}, sd::DataType::INT64); NDArray y('c', {4,2}, {0,2,0,4,0,6,0,8}, sd::DataType::INT64); NDArray z('c', {8}, {100,100,100,100,100,100,100,100}, sd::DataType::BOOL); NDArray exp('c', {8}, {0,1,0,1,0,1,0,1}, sd::DataType::BOOL); x.permutei({2,1,0}); // -> {1,2,3,4,5,6,7,8} x.syncShape(); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execPairwiseBoolTransform(&lc, sd::pairwise::EqualTo, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformFloat_1) { NDArray x('c', {2,2}, {0, 6.25, 2.25, 12.25}, sd::DataType::DOUBLE); NDArray z('c', {4}, {100,100,100,100}, sd::DataType::FLOAT32); NDArray exp('c', {4}, {0, 1.5, 2.5, 3.5}, sd::DataType::FLOAT32); x.permutei({1,0}); x.syncShape(); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result 
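// Sqrt reads the permuted DOUBLE input in order {0, 2.25, 6.25, 12.25} and writes FLOAT32,
// hence exp = {0, 1.5, 2.5, 3.5}.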
NativeOpExecutioner::execTransformFloat(&lc, sd::transform::Sqrt, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformFloat_2) { NDArray x('c', {1,4}, {0, 4, 9, 16}, sd::DataType::INT64); NDArray z('c', {2,2}, {100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {2,2}, {0, 2, 3, 4}, sd::DataType::DOUBLE); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformFloat(&lc, sd::transform::Sqrt, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformAny_1) { NDArray x('c', {2,2}, {0, 6.25, 2.25, 12.25}, sd::DataType::DOUBLE); NDArray z('c', {4,1}, {100,100,100,100}, sd::DataType::INT32); NDArray exp('c', {4,1}, {0, 2, 6, 12}, sd::DataType::INT32); x.permutei({1,0}); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformAny(&lc, sd::transform::Assign, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformAny_2) { NDArray x('c', {1,4}, {0, 6.25, 2.25, 12.25}, sd::DataType::BFLOAT16); NDArray z('c', {2,2}, {100,100,100,100}, sd::DataType::FLOAT32); NDArray exp('c', {2,2}, {0, 6.25, 2.25, 12.25}, sd::DataType::FLOAT32); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformAny(&lc, sd::transform::Assign, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < 
z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformStrict_1) { NDArray x('c', {2,3}, {0,2,4,1,3,5}, sd::DataType::DOUBLE); NDArray z('c', {3,2}, {100,100,100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {3,2}, {0, 3, 12, 27, 48, 75}, sd::DataType::DOUBLE); x.permutei({1,0}); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformStrict(&lc, sd::transform::CubeDerivative, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformStrict_2) { NDArray x('c', {6}, {0,1,2,3,4,5}, sd::DataType::FLOAT32); NDArray z('c', {3,2}, {100,100,100,100,100,100}, sd::DataType::FLOAT32); NDArray exp('c', {3,2}, {0, 3, 12, 27, 48, 75}, sd::DataType::FLOAT32); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformStrict(&lc, sd::transform::CubeDerivative, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformSame_1) { NDArray x('c', {2,3}, {0,2.5,4.5,1.5,3.5,5.5}, sd::DataType::DOUBLE); NDArray z('c', {1,6}, {100,100,100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {1,6}, {0,2.25,6.25,12.25,20.25,30.25}, sd::DataType::DOUBLE); x.permutei({1,0}); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformSame(&lc, sd::transform::Square, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, 
execTransformSame_2) { NDArray x('c', {6}, {0,1,2,3,4,5}, sd::DataType::INT32); NDArray z('c', {3,2}, {100,100,100,100,100,100}, sd::DataType::INT32); NDArray exp('c', {3,2}, {0,1,4,9,16,25}, sd::DataType::INT32); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformSame(&lc, sd::transform::Square, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformBool_1) { NDArray x('c', {2,3}, {0,2,4,-1,-3,-5}, sd::DataType::DOUBLE); NDArray z('c', {1,6}, {100,100,100,100,100,100}, sd::DataType::BOOL); NDArray exp('c', {1,6}, {0,0,1,0,1,0}, sd::DataType::BOOL); x.permutei({1,0}); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformBool(&lc, sd::transform::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execTransformBool_2) { NDArray x('c', {6}, {0,-1,2,-3,4,-5}, sd::DataType::INT32); NDArray z('c', {3,2}, {100,100,100,100,100,100}, sd::DataType::BOOL); NDArray exp('c', {3,2}, {0,0,1,0,1,0}, sd::DataType::BOOL); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result NativeOpExecutioner::execTransformBool(&lc, sd::transform::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceFloat_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {3}, {100,100,100}, sd::DataType::FLOAT32); NDArray exp('c', {3}, {2.5, 6.5, 10.5}, sd::DataType::FLOAT32); x.permutei({2,1,0}); std::vector<int> dimensions = {0,2}; // create cuda stream and LaunchContext cudaError_t 
cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceFloat(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceFloat_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {2,4}, {-1., 0., 1., 2.,11., 12., 13., 14.}, sd::DataType::DOUBLE); std::vector<int> dimensions = {1}; // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceFloat(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceSame_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {3}, {100,100,100}, sd::DataType::INT32); NDArray exp('c', {3}, {20, 52, 84}, sd::DataType::INT32); x.permutei({2,1,0}); std::vector<int> dimensions = {0,2}; // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceSame(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceSame_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::FLOAT32); NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, 
sd::DataType::FLOAT32); NDArray exp('c', {2,4}, {-3., 0., 3., 6.,33., 36., 39., 42.}, sd::DataType::FLOAT32); std::vector<int> dimensions = {1}; // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceSame(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceBool_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, sd::DataType::INT32); NDArray z('c', {3}, {100,100,100}, sd::DataType::BOOL); NDArray exp('c', {3}, {0, 1, 1}, sd::DataType::BOOL); x.permutei({2,1,0}); std::vector<int> dimensions = {0,2}; // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceBool(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceBool_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, sd::DataType::FLOAT32); NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, sd::DataType::BOOL); NDArray exp('c', {2,4}, {1, 1, 1, 1, 0, 0, 0, 0}, sd::DataType::BOOL); std::vector<int> dimensions = {1}; // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceBool(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// 
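// The two execReduceLong tests below use CountNonZero. In execReduceLong_1 the input is permuted
// to shape {4,3,2} and reduced over dimensions {0,2}, leaving 3 TADs of 8 elements whose non-zero
// counts are {5,6,6}; execReduceLong_2 reduces over dimension {1} into a {2,4} INT64 result.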
TEST_F(CudaBasicsTests1, execReduceLong_1) { NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, sd::DataType::INT32); NDArray z('c', {3}, {100,100,100}, sd::DataType::INT64); NDArray exp('c', {3}, {5,6,6}, sd::DataType::INT64); x.permutei({2,1,0}); std::vector<int> dimensions = {0,2}; // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceLong(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceLong_2) { NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, sd::DataType::FLOAT32); NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, sd::DataType::INT64); NDArray exp('c', {2,4}, {3, 1, 3, 2, 2, 1, 2, 3}, sd::DataType::INT64); std::vector<int> dimensions = {1}; // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // call cuda kernel which calculates result std::vector<int> dims = sd::ShapeUtils::evalDimsForReduceOp(x.rankOf(), dimensions); NativeOpExecutioner::execReduceLong(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), dims.data(), dims.size()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceFloatScalar_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32); NDArray exp('c', {}, std::vector<double>{6.5}, sd::DataType::FLOAT32); x.permutei({2,1,0}); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceFloatScalar(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), 
z.specialShapeInfo()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceFloatScalar_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::DOUBLE); NDArray exp('c', {}, std::vector<double>{6.5}, sd::DataType::DOUBLE); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceFloatScalar(&lc, sd::reduce::Mean, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceSameScalar_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::INT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT32); NDArray exp('c', {}, std::vector<double>{156}, sd::DataType::INT32); x.permutei({2,1,0}); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceSameScalar(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceSameScalar_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, sd::DataType::DOUBLE); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::DOUBLE); NDArray exp('c', {}, 
std::vector<double>{156}, sd::DataType::DOUBLE); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceSameScalar(&lc, sd::reduce::Sum, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceBoolScalar_1) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, sd::DataType::INT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::BOOL); NDArray exp('c', {}, std::vector<double>{1}, sd::DataType::BOOL); x.permutei({2,1,0}); x.syncShape(); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceBoolScalar(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceBoolScalar_2) { NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, sd::DataType::DOUBLE); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::BOOL); NDArray exp('c', {}, std::vector<double>{1}, sd::DataType::BOOL); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which 
calculates result NativeOpExecutioner::execReduceBoolScalar(&lc, sd::reduce::IsPositive, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceLongScalar_1) { NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, sd::DataType::INT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT64); NDArray exp('c', {}, std::vector<double>{17}, sd::DataType::INT64); x.permutei({2,1,0}); x.syncShape(); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceLongScalar(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduceLongScalar_2) { NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, sd::DataType::DOUBLE); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::INT64); NDArray exp('c', {}, std::vector<double>{17}, sd::DataType::INT64); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); int* allocationPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); // call cuda kernel which calculates result NativeOpExecutioner::execReduceLongScalar(&lc, sd::reduce::CountNonZero, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo()); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, 
execReduce3TAD_1) { NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::FLOAT32); NDArray y('c', {2,2}, {1,2,3,4}, sd::DataType::FLOAT32); NDArray exp('c', {3}, {10,20,30}, sd::DataType::DOUBLE); NDArray z('c', {3}, {100,100,100}, sd::DataType::DOUBLE); std::vector<int> dimensions = {0,1}; auto packX = ConstantTadHelper::getInstance().tadForDimensions(x.shapeInfo(), dimensions); LaunchContext* context = x.getContext(); x.syncToDevice(); y.syncToDevice(); PointersManager pm(context, "execReduce3TAD_1"); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3TAD(context, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, dimensions.size(), packX.specialShapeInfo(), packX.specialOffsets(), nullptr, nullptr); pm.synchronize(); // cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // z.printIndexedBuffer("OutputReduce3TAD"); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3TAD_2) { NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::INT64); NDArray y('c', {2,3}, {1,2,3,4,5,6}, sd::DataType::INT64); NDArray exp('c', {2}, {10,73}, sd::DataType::FLOAT32); NDArray z('c', {2}, {100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3TAD(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3TAD_3) { NDArray x('c', {2,2,3}, 
{-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::INT64); NDArray y('c', {3}, {1,2,3}, sd::DataType::INT64); NDArray exp('c', {2,2}, {-22,-4,14,32}, sd::DataType::FLOAT32); NDArray z('c', {2,2}, {100,100,100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execReduce3TAD(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execReduce3TAD_4) { NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::DOUBLE); NDArray y('c', {2,2,3}, {10,20,30,40,50,60,70,80,90,100,110,120}, sd::DataType::DOUBLE); NDArray exp('c', {}, std::vector<double>{1820}, sd::DataType::FLOAT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0,1,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result 
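// Reducing Dot over all three dimensions {0,1,2} collapses x and y to a single scalar:
// sum(x[i] * y[i]) = 1820, which is the expected value checked below.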
NativeOpExecutioner::execReduce3TAD(&lc, sd::reduce3::Dot, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execSummaryStats_1) { // FIXME: Yurii, this test should be fixed if (1 > 0) return; NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::INT64); NDArray exp('c', {}, std::vector<double>{3.605551}, sd::DataType::FLOAT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); // call cuda kernel which calculates result NativeOpExecutioner::execSummaryStats(&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), true); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execSummaryStats_2) { NDArray x('c', {2,2,3}, {-5,-4,-3,-20,-1,0,1,2,3,4,5,6}, sd::DataType::DOUBLE); NDArray exp('c', {2}, {3.405877, 9.715966}, sd::DataType::FLOAT32); NDArray z('c', {2}, {100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execSummaryStats(&lc, 
sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], true); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } /* //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execSummaryStats_3) { NDArray x('c', {2,2,3}, {-5,-4,-3,-20,-1,0,1,2,3,4,5,6}, sd::DataType::DOUBLE); NDArray exp('c', {2}, {10.606602, 2.121320}, sd::DataType::FLOAT32); NDArray z('c', {2}, {100,100}, sd::DataType::FLOAT32); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execSummaryStats(&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.special(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], true); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } */ //////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execSummaryStatsScalar_1) { NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, sd::DataType::INT64); NDArray exp('c', {}, std::vector<double>{3.605551}, sd::DataType::FLOAT32); NDArray z('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult); lc.setReductionPointer(reductionPointer); // call cuda kernel which calculates result 
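// exp = 3.605551 == sqrt(143/11), i.e. the bias-corrected standard deviation of the twelve
// values -5..6; this is consistent with the trailing `true` argument passed to
// execSummaryStatsScalar (presumably the biasCorrected flag).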
NativeOpExecutioner::execSummaryStatsScalar(&lc, sd::variance::SummaryStatsStandardDeviation, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), true); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execRandom_1) { // NDArray z('c', {10}, {100,0,0,0,0,0,0,0,0,0}, sd::DataType::DOUBLE); NDArray z('c', {10}, {100,0,0,0,0,0,0,0,0,100}, sd::DataType::FLOAT32); NDArray exp('c', {10}, {0.050942, -0.183229, -0.093921, 0.075469, 0.257166, -0.254838, 0.342227, -0.682188, -0.004345, 0.464633}, sd::DataType::FLOAT32); sd::graph::RandomGenerator gen(119,5); cudaError_t cudaResult; NDArray* array = &z; ExtraArguments arguments({0.f, 0.5f}); auto context = z.getContext(); PointersManager pm(context, "tests::execRandom_1"); // z.printIndexedBuffer("Input data"); // z.syncToDevice(); NativeOpExecutioner::execRandom(context, random::GaussianDistribution, &gen, array->buffer(), array->shapeInfo(), array->specialBuffer(), array->specialShapeInfo(), array->buffer(), array->shapeInfo(), array->specialBuffer(), array->specialShapeInfo(), array->buffer(), array->shapeInfo(), array->specialBuffer(), array->specialShapeInfo(), arguments.argumentsAsT(array->dataType())); pm.synchronize(); z.tickWriteDevice(); // z.printIndexedBuffer("Output Gaussian"); // RandomLauncher::fillGaussian(context, gen, &z, 0.f, 0.5f); // pm.synchronize(); // z.tickWriteDevice(); // z.printIndexedBuffer("Output Gaussian"); // cudaStream_t stream; // cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); // LaunchContext lc(&stream); // // // ::execRandom(extraPointers, random::GaussianDistribution, &gen, z.buffer(), z.shapeInfo(), z.specialBuffer(), z.special(), &extra); // // call cuda kernel which calculates result // NativeOpExecutioner::execRandom(&lc, sd::random::GaussianDistribution, // &gen, // nullptr, z.shapeInfo(), z.specialBuffer(), z.special(), // nullptr, z.shapeInfo(), z.specialBuffer(), z.special(), // nullptr, z.shapeInfo(), z.specialBuffer(), z.special(), // extraArguments.argumentsAsT(z.dataType())); // // cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); // ASSERT_EQ(cudaResult, 0); // z.tickWriteDevice(); // z.syncToHost(); // z.printIndexedBuffer("Random1"); ASSERT_EQ(exp, z); // // verify results // for (int e = 0; e < z.lengthOf(); e++) // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // cudaFree(dExtraArgs); // free allocated global device memory // cudaFree(dGen); // delete cuda stream // cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execRandom_2) { NDArray x('c', {10}, {0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1}, sd::DataType::DOUBLE); NDArray z('c', {2,5}, {100,100,100,100,100,100,100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {10}, {0., 0., 0.3, 0., 0.5, 0., 0.7, 0., 0., 1.}, sd::DataType::DOUBLE); ExtraArguments extraArguments({0.7}); sd::graph::RandomGenerator gen(119,5); // // prepare input arrays for prepareDataForCuda function // std::vector<std::pair<void*,size_t>> hostData; // 
hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- dimensions // std::vector<void*> devicePtrs(hostData.size(), nullptr); // // create cuda stream and LaunchContext cudaError_t cudaResult; // cudaStream_t stream; // cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext* lc = x.getContext(); //(&stream); // allocate required amount of global device memory and copy host data to it // cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execRandom(lc, sd::random::DropOut, &gen, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), extraArguments.argumentsAsT(z.dataType())); cudaResult = cudaStreamSynchronize(*lc->getCudaStream()); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); z.syncToHost(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory // for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream // cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execRandom_3) { NDArray z('c', {10}, {100,100,100,100,100,100,100,100,100,100}, sd::DataType::DOUBLE); NDArray exp('c', {10}, {2.373649, 2.239791, 1.887353, 2.488636, 2.068904, 2.281399, 1.828228, 2.228222, 2.490847, 1.669537}, sd::DataType::DOUBLE); std::vector<double> extraArguments = {1.5, 2.5}; sd::graph::RandomGenerator gen(119,5); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- dimensions std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execRandom(&lc, sd::random::UniformDistribution, &gen, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), devicePtrs[0]); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } ////////////////////////////////////////////////////////////////////////// TEST_F(CudaBasicsTests1, execRandom_4) { NDArray z('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, sd::DataType::FLOAT32); NDArray exp('c', {10}, {2.373649, 2.281399, 2.239791, 1.828228, 1.887353, 2.228222, 2.488636, 2.490847, 2.068904, 1.669537}, sd::DataType::FLOAT32); z.permutei({1,0}); ExtraArguments extraArguments({1.5, 2.5}); sd::graph::RandomGenerator gen(119,5); // // prepare input arrays for prepareDataForCuda function // std::vector<std::pair<void*,size_t>> hostData; // hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- dimensions // 
std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext // cudaError_t cudaResult; // cudaStream_t stream; // cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); // LaunchContext lc(&stream); // // // allocate required amount of global device memory and copy host data to it // cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); auto context = z.getContext(); PointersManager pm(context, "execRandom4"); // call cuda kernel which calculates result NativeOpExecutioner::execRandom(context, sd::random::UniformDistribution, &gen, nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), extraArguments.argumentsAsT(z.dataType())); // cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // z.printIndexedBuffer("Output Uniform4"); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory // for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream // cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); }
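// A minimal, hypothetical standalone sketch (not part of the test file above)
// of the stream lifecycle most of those TEST_F bodies repeat: create a stream,
// enqueue work on it, synchronize, check the cudaError_t, destroy the stream.
#include <cassert>
#include <cuda_runtime.h>

__global__ void noop_kernel() {}

int main() {
    cudaStream_t stream;
    cudaError_t cudaResult = cudaStreamCreate(&stream);
    assert(cudaResult == cudaSuccess);

    noop_kernel<<<1, 1, 0, stream>>>();            // enqueue work on the stream

    cudaResult = cudaStreamSynchronize(stream);    // wait for the queued work
    assert(cudaResult == cudaSuccess);

    cudaResult = cudaStreamDestroy(stream);
    assert(cudaResult == cudaSuccess);
    return 0;
}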
aa756868ef14269630906f3e1a5077821a47bcca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/platform/device_context.h> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/partial_sum_op.h" #include "paddle/fluid/platform/float16.h" namespace plat = paddle::platform; namespace paddle { namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) using LoDTensor = framework::LoDTensor; using Tensor = phi::DenseTensor; template <class T> __global__ void SumArrayPartialCUDAKernel(T **in, T *out, int64_t lod_length, size_t in_size, int64_t start_index, int64_t length, int64_t row_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < lod_length) { T total = static_cast<T>(0); int b_id = id / length; int b_offset = id % length; for (int i = 0; i < in_size; ++i) { const T *tmp = in[i]; if (tmp) { total += tmp[start_index + b_id * row_length + b_offset]; } } out[id] = total; id += blockDim.x * gridDim.x; } } template <class T> __global__ void PartialSumGradCUDAKernel(T **res_grad, const T *out_grad, int64_t lod_length, size_t in_size, int64_t start_index, int64_t length, int64_t row_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < lod_length) { T total = static_cast<T>(0); int b_id = id / length; int b_offset = id % length; for (int i = 0; i < in_size; ++i) { T *tmp = res_grad[i]; tmp[start_index + b_id * row_length + b_offset] = out_grad[i]; } id += blockDim.x * gridDim.x; } } template <typename T> class PartialSumOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto in_vars = ctx.MultiInput<phi::DenseTensor>("X"); Tensor *out = ctx.Output<phi::DenseTensor>("Out"); PADDLE_ENFORCE_EQ( in_vars[0] != nullptr, true, platform::errors::InvalidArgument("The input should not be null.")); auto place = ctx.GetPlace(); // GPUPlace only now auto start_index = ctx.Attr<int>("start_index"); auto length = ctx.Attr<int>("length"); auto batch_size = in_vars[0]->dims()[0]; if (length == -1) { length = in_vars[0]->dims()[1] - start_index; } constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; auto lod_length = length * batch_size; auto row_length = in_vars[0]->dims()[1]; auto in_num = in_vars.size(); std::vector<const T *> in_data; for (int i = 0; i < in_num; ++i) { 
in_data.emplace_back(in_vars[i]->data<T>()); } if (!in_data.empty()) { auto tmp_in_array = memory::Alloc( dev_ctx.GetPlace(), in_data.size() * sizeof(T *), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); memory::Copy(dev_ctx.GetPlace(), tmp_in_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(lod_length); hipLaunchKernelGGL(( SumArrayPartialCUDAKernel<T>), dim3(grids), dim3(blocks), 0, stream, in_array_data, out->data<T>(), lod_length, in_data.size(), start_index, length, row_length); } } }; template <typename T> class PartialSumGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { const Tensor *out_grad = ctx.Input<phi::DenseTensor>(framework::GradVarName("Out")); auto ins = ctx.MultiInput<LoDTensor>("X"); auto outs = ctx.MultiOutput<LoDTensor>(framework::GradVarName("X")); PADDLE_ENFORCE_EQ( ins[0] != nullptr, true, platform::errors::InvalidArgument("The input should not be null.")); auto start_index = ctx.Attr<int>("start_index"); auto length = ctx.Attr<int>("length"); if (length == -1) { length = ins[0]->dims()[1] - start_index; } // initialize auto &place = *ctx.template device_context<phi::GPUContext>().eigen_device(); for (size_t i = 0; i < outs.size(); ++i) { outs[i]->mutable_data<T>(ctx.GetPlace()); auto dxt = framework::EigenVector<T>::Flatten(*outs[i]); dxt.device(place) = dxt.constant(static_cast<T>(0)); } auto batch_size = ins[0]->dims()[0]; if (length == -1) { length = ins[0]->dims()[1] - start_index; } auto lod_length = length * batch_size; auto row_length = ins[0]->dims()[1]; auto out_num = outs.size(); constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; std::vector<const T *> out_data; for (int i = 0; i < out_num; ++i) { out_data.emplace_back(outs[i]->data<T>()); } if (!out_data.empty()) { auto tmp_out_array = memory::Alloc( dev_ctx.GetPlace(), out_data.size() * sizeof(T *), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); memory::Copy(dev_ctx.GetPlace(), tmp_out_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(out_data.data()), out_data.size() * sizeof(T *), dev_ctx.stream()); T **out_grad_data = reinterpret_cast<T **>(tmp_out_array->ptr()); ComputeKernelParameter(lod_length); hipLaunchKernelGGL(( PartialSumGradCUDAKernel<T>) , dim3(grids), dim3(blocks), 0, stream, out_grad_data, out_grad->data<T>(), lod_length, out_data.size(), start_index, length, row_length); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(partial_sum, ops::PartialSumOpCUDAKernel<float>, ops::PartialSumOpCUDAKernel<double>, ops::PartialSumOpCUDAKernel<int>, ops::PartialSumOpCUDAKernel<int64_t>, ops::PartialSumOpCUDAKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(partial_sum_grad, ops::PartialSumGradOpCUDAKernel<float>, 
ops::PartialSumGradOpCUDAKernel<double>, ops::PartialSumGradOpCUDAKernel<int>, ops::PartialSumGradOpCUDAKernel<int64_t>, ops::PartialSumGradOpCUDAKernel<plat::float16>);
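// A hypothetical host-side restatement (illustrative names, not the file's own
// helper) of the ComputeKernelParameter lambda in PartialSumOpCUDAKernel above:
// the tile (block) size is picked from the element count relative to the
// device's thread capacity, and the grid is a ceiling division over that tile.
#include <cstddef>

struct LaunchShape { size_t grid_x; size_t block_x; };

LaunchShape compute_kernel_parameter(size_t length, size_t max_threads) {
    const size_t theory_sm_threads = 1024;
    const size_t sm_count = max_threads / theory_sm_threads;
    size_t tile_size = 256;                              // length <= sm_count * 128
    if (length >= max_threads)        tile_size = 1024;
    else if (length > sm_count * 128) tile_size = 512;
    return { (length + tile_size - 1) / tile_size,       // CEIL_DIV(length, tile_size)
             tile_size };
}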
aa756868ef14269630906f3e1a5077821a47bcca.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/platform/device_context.h> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/partial_sum_op.h" #include "paddle/fluid/platform/float16.h" namespace plat = paddle::platform; namespace paddle { namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) using LoDTensor = framework::LoDTensor; using Tensor = phi::DenseTensor; template <class T> __global__ void SumArrayPartialCUDAKernel(T **in, T *out, int64_t lod_length, size_t in_size, int64_t start_index, int64_t length, int64_t row_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < lod_length) { T total = static_cast<T>(0); int b_id = id / length; int b_offset = id % length; for (int i = 0; i < in_size; ++i) { const T *tmp = in[i]; if (tmp) { total += tmp[start_index + b_id * row_length + b_offset]; } } out[id] = total; id += blockDim.x * gridDim.x; } } template <class T> __global__ void PartialSumGradCUDAKernel(T **res_grad, const T *out_grad, int64_t lod_length, size_t in_size, int64_t start_index, int64_t length, int64_t row_length) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < lod_length) { T total = static_cast<T>(0); int b_id = id / length; int b_offset = id % length; for (int i = 0; i < in_size; ++i) { T *tmp = res_grad[i]; tmp[start_index + b_id * row_length + b_offset] = out_grad[i]; } id += blockDim.x * gridDim.x; } } template <typename T> class PartialSumOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto in_vars = ctx.MultiInput<phi::DenseTensor>("X"); Tensor *out = ctx.Output<phi::DenseTensor>("Out"); PADDLE_ENFORCE_EQ( in_vars[0] != nullptr, true, platform::errors::InvalidArgument("The input should not be null.")); auto place = ctx.GetPlace(); // GPUPlace only now auto start_index = ctx.Attr<int>("start_index"); auto length = ctx.Attr<int>("length"); auto batch_size = in_vars[0]->dims()[0]; if (length == -1) { length = in_vars[0]->dims()[1] - start_index; } constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; auto lod_length = length * batch_size; auto row_length = in_vars[0]->dims()[1]; auto in_num = in_vars.size(); std::vector<const T *> in_data; for (int i = 0; i < in_num; ++i) { in_data.emplace_back(in_vars[i]->data<T>()); } if (!in_data.empty()) { auto tmp_in_array = memory::Alloc( dev_ctx.GetPlace(), 
in_data.size() * sizeof(T *), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); memory::Copy(dev_ctx.GetPlace(), tmp_in_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(lod_length); SumArrayPartialCUDAKernel<T><<<grids, blocks, 0, stream>>>(in_array_data, out->data<T>(), lod_length, in_data.size(), start_index, length, row_length); } } }; template <typename T> class PartialSumGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { const Tensor *out_grad = ctx.Input<phi::DenseTensor>(framework::GradVarName("Out")); auto ins = ctx.MultiInput<LoDTensor>("X"); auto outs = ctx.MultiOutput<LoDTensor>(framework::GradVarName("X")); PADDLE_ENFORCE_EQ( ins[0] != nullptr, true, platform::errors::InvalidArgument("The input should not be null.")); auto start_index = ctx.Attr<int>("start_index"); auto length = ctx.Attr<int>("length"); if (length == -1) { length = ins[0]->dims()[1] - start_index; } // initialize auto &place = *ctx.template device_context<phi::GPUContext>().eigen_device(); for (size_t i = 0; i < outs.size(); ++i) { outs[i]->mutable_data<T>(ctx.GetPlace()); auto dxt = framework::EigenVector<T>::Flatten(*outs[i]); dxt.device(place) = dxt.constant(static_cast<T>(0)); } auto batch_size = ins[0]->dims()[0]; if (length == -1) { length = ins[0]->dims()[1] - start_index; } auto lod_length = length * batch_size; auto row_length = ins[0]->dims()[1]; auto out_num = outs.size(); constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = ctx.template device_context<phi::GPUContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; std::vector<const T *> out_data; for (int i = 0; i < out_num; ++i) { out_data.emplace_back(outs[i]->data<T>()); } if (!out_data.empty()) { auto tmp_out_array = memory::Alloc( dev_ctx.GetPlace(), out_data.size() * sizeof(T *), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); memory::Copy(dev_ctx.GetPlace(), tmp_out_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(out_data.data()), out_data.size() * sizeof(T *), dev_ctx.stream()); T **out_grad_data = reinterpret_cast<T **>(tmp_out_array->ptr()); ComputeKernelParameter(lod_length); PartialSumGradCUDAKernel<T> <<<grids, blocks, 0, stream>>>(out_grad_data, out_grad->data<T>(), lod_length, out_data.size(), start_index, length, row_length); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(partial_sum, ops::PartialSumOpCUDAKernel<float>, ops::PartialSumOpCUDAKernel<double>, ops::PartialSumOpCUDAKernel<int>, ops::PartialSumOpCUDAKernel<int64_t>, ops::PartialSumOpCUDAKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(partial_sum_grad, ops::PartialSumGradOpCUDAKernel<float>, ops::PartialSumGradOpCUDAKernel<double>, ops::PartialSumGradOpCUDAKernel<int>, ops::PartialSumGradOpCUDAKernel<int64_t>, ops::PartialSumGradOpCUDAKernel<plat::float16>);
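// A hypothetical standalone sketch of the pointer-table pattern both kernels
// above depend on: gather the per-tensor device pointers on the host, copy the
// table itself to device memory, and hand the kernel a T** it can index. The
// file uses Paddle's memory::Alloc / memory::Copy; plain cudaMalloc/cudaMemcpy
// stand in here.
#include <cuda_runtime.h>
#include <vector>

__global__ void sum_first_elements(float** in, float* out, int n_arrays) {
    float total = 0.f;
    for (int i = 0; i < n_arrays; ++i) total += in[i][0];   // dereference through the table
    *out = total;
}

int main() {
    const int n_arrays = 3;
    std::vector<float*> h_ptrs(n_arrays);                   // host-side table of device pointers
    for (int i = 0; i < n_arrays; ++i) {
        cudaMalloc(&h_ptrs[i], sizeof(float));
        float v = static_cast<float>(i + 1);
        cudaMemcpy(h_ptrs[i], &v, sizeof(float), cudaMemcpyHostToDevice);
    }

    float** d_ptrs = nullptr;                               // device-side copy of the table
    cudaMalloc(&d_ptrs, n_arrays * sizeof(float*));
    cudaMemcpy(d_ptrs, h_ptrs.data(), n_arrays * sizeof(float*), cudaMemcpyHostToDevice);

    float* d_out = nullptr;
    cudaMalloc(&d_out, sizeof(float));
    sum_first_elements<<<1, 1>>>(d_ptrs, d_out, n_arrays);
    cudaDeviceSynchronize();
    return 0;
}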
e767f82f1970cf7fcb18e464ef4d0b312715a873.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_z #define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)] #define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)] #define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b) #define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b) //============================================================ #define ldb m #define lda m #define ldc m #define fetch_x_A(i) (((i)<m*m)?Aval[i]:0) #define fetch_x_B(i) (((i)<m*m)?B[i]:0) // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel32( int m, int n, int kblocks, mdouble **Avals, double **Bval, double **Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = ty2*lda + tx2; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j < 64; j += 16) { for(int y=tx2; y < 64; y += 16) { Abs[y][j] = fetch_x_A(trackA + y-tx2); } trackA += 16*m; } for(int k=0; k < kblocks; k++) { B = Bval[k]; int trackB = tx2 + ty2*16*ldb; // Prefetch part of B #pragma unroll for(int y=0; y < 4; y++) { Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb); } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1 < m-16; k1 += 16) { trackB += 16; #pragma unroll for( int y=0; y < 4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) { Axs[y] = Abs[tx2+y*16][j1+k1]; } #pragma unroll for( int y=0; y < 4; y++) { Bxp[y]= Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } #pragma unroll for(int y=0; y < 4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! 
} // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + ty2*ldc; #pragma unroll for(int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1]; #pragma unroll for( int y=0; y < 4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0; y < 4; y++, gy += 16) { int gx = tx2; #pragma unroll for(int x=0; x < 4; x++, gx += 16) { if (gx < m && gy < n) { C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif #endif } // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel64( int m, int n, int kblocks, double **Avals, double **Bval, double **Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = ty2*lda + tx2; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j < 64; j += 16) { for(int y=tx2; y < 64; y += 16) { Abs[y][j] = fetch_x_A(trackA + y-tx2); } trackA += 16*m; } for(int k=0; k < kblocks; k++) { B = Bval[k]; int trackB = tx2 + ty2*4*ldb; // Prefetch part of B #pragma unroll for(int y=0; y < 4; y++) { Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb); } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1 < m-16; k1 += 16) { trackB += 16; #pragma unroll for( int y=0; y < 4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) { Axs[y] = Abs[tx2+y*16][j1+k1]; } #pragma unroll for( int y=0; y < 4; y++) { Bxp[y] = Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } __syncthreads(); #pragma unroll for(int y=0; y < 4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! } // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + ty2*ldc; #pragma unroll for(int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1]; #pragma unroll for( int y=0; y < 4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0; y < 4; y++, gy += 16) { int gx = tx2; #pragma unroll for(int x=0; x < 4; x++, gx += 16) { if (gx < m && gy < n) { C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif // PRECISION_d #endif // __CUDA_ARCH__ >= 200 } /** Purpose ------- For a Block-CSR ILU factorization, this routine updates all blocks in the trailing matrix. Arguments --------- @param[in] size_b magma_int_t blocksize in BCSR @param[in] num_brows magma_int_t number of block rows @param[in] kblocks magma_int_t number of blocks in row @param[in] dA magmaDoubleComplex** input blocks of matrix A @param[in] dB magmaDoubleComplex** input blocks of matrix B @param[in] dC magmaDoubleComplex** output blocks of matrix C @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbcsrluegemm( magma_int_t size_b, magma_int_t num_brows, magma_int_t kblocks, magmaDoubleComplex_ptr *dA, magmaDoubleComplex_ptr *dB, magmaDoubleComplex_ptr *dC, magma_queue_t queue ) { #if defined(PRECISION_d) magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { printf("error: magma_zbcsrluegemm needs a CUDA architecture" " with at least 48K shared memory (Fermi +).\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); } else { dim3 threads( 64, 4 ); dim3 grid(1, 1, num_brows); hipLaunchKernelGGL(( zbcsr_gemm_kernel64), dim3(grid), dim3(threads), 0, queue->cuda_stream() , size_b, size_b, kblocks, dA, dB, dC ); } #else printf("error: currently only supported for double precision.\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); #endif return MAGMA_SUCCESS; }
e767f82f1970cf7fcb18e464ef4d0b312715a873.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_z #define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)] #define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)] #define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b) #define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b) //============================================================ #define ldb m #define lda m #define ldc m #define fetch_x_A(i) (((i)<m*m)?Aval[i]:0) #define fetch_x_B(i) (((i)<m*m)?B[i]:0) // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel32( int m, int n, int kblocks, mdouble **Avals, double **Bval, double **Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = ty2*lda + tx2; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j < 64; j += 16) { for(int y=tx2; y < 64; y += 16) { Abs[y][j] = fetch_x_A(trackA + y-tx2); } trackA += 16*m; } for(int k=0; k < kblocks; k++) { B = Bval[k]; int trackB = tx2 + ty2*16*ldb; // Prefetch part of B #pragma unroll for(int y=0; y < 4; y++) { Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb); } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1 < m-16; k1 += 16) { trackB += 16; #pragma unroll for( int y=0; y < 4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) { Axs[y] = Abs[tx2+y*16][j1+k1]; } #pragma unroll for( int y=0; y < 4; y++) { Bxp[y]= Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } #pragma unroll for(int y=0; y < 4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! 
} // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + ty2*ldc; #pragma unroll for(int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1]; #pragma unroll for( int y=0; y < 4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0; y < 4; y++, gy += 16) { int gx = tx2; #pragma unroll for(int x=0; x < 4; x++, gx += 16) { if (gx < m && gy < n) { C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif #endif } // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel64( int m, int n, int kblocks, double **Avals, double **Bval, double **Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = ty2*lda + tx2; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j < 64; j += 16) { for(int y=tx2; y < 64; y += 16) { Abs[y][j] = fetch_x_A(trackA + y-tx2); } trackA += 16*m; } for(int k=0; k < kblocks; k++) { B = Bval[k]; int trackB = tx2 + ty2*4*ldb; // Prefetch part of B #pragma unroll for(int y=0; y < 4; y++) { Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb); } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1 < m-16; k1 += 16) { trackB += 16; #pragma unroll for( int y=0; y < 4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) { Axs[y] = Abs[tx2+y*16][j1+k1]; } #pragma unroll for( int y=0; y < 4; y++) { Bxp[y] = Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } __syncthreads(); #pragma unroll for(int y=0; y < 4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! } // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + ty2*ldc; #pragma unroll for(int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1]; #pragma unroll for( int y=0; y < 4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0; y < 4; y++, gy += 16) { int gx = tx2; #pragma unroll for(int x=0; x < 4; x++, gx += 16) { if (gx < m && gy < n) { C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif // PRECISION_d #endif // __CUDA_ARCH__ >= 200 } /** Purpose ------- For a Block-CSR ILU factorization, this routine updates all blocks in the trailing matrix. Arguments --------- @param[in] size_b magma_int_t blocksize in BCSR @param[in] num_brows magma_int_t number of block rows @param[in] kblocks magma_int_t number of blocks in row @param[in] dA magmaDoubleComplex** input blocks of matrix A @param[in] dB magmaDoubleComplex** input blocks of matrix B @param[in] dC magmaDoubleComplex** output blocks of matrix C @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbcsrluegemm( magma_int_t size_b, magma_int_t num_brows, magma_int_t kblocks, magmaDoubleComplex_ptr *dA, magmaDoubleComplex_ptr *dB, magmaDoubleComplex_ptr *dC, magma_queue_t queue ) { #if defined(PRECISION_d) magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { printf("error: magma_zbcsrluegemm needs a CUDA architecture" " with at least 48K shared memory (Fermi +).\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); } else { dim3 threads( 64, 4 ); dim3 grid(1, 1, num_brows); zbcsr_gemm_kernel64<<< grid, threads, 0, queue->cuda_stream() >>>( size_b, size_b, kblocks, dA, dB, dC ); } #else printf("error: currently only supported for double precision.\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); #endif return MAGMA_SUCCESS; }
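// A hypothetical standalone sketch of the padded shared-memory tile the
// kernels above declare ("__shared__ double Abs[64][65]"): the extra column is
// a common trick to stagger rows across shared-memory banks so column-wise
// reads by a warp do not serialize. The transpose below is illustrative only.
#include <cuda_runtime.h>

__global__ void transpose_padded(const double* in, double* out) {
    __shared__ double tile[32][33];      // 32x32 tile plus one padding column
    int x = threadIdx.x, y = threadIdx.y;
    tile[y][x] = in[y * 32 + x];         // coalesced row-wise load
    __syncthreads();
    out[y * 32 + x] = tile[x][y];        // column-wise read; padding keeps banks distinct
}

int main() {
    double *d_in = nullptr, *d_out = nullptr;    // contents left uninitialized; the launch only shows the shape
    cudaMalloc(&d_in, 32 * 32 * sizeof(double));
    cudaMalloc(&d_out, 32 * 32 * sizeof(double));
    transpose_padded<<<1, dim3(32, 32)>>>(d_in, d_out);
    cudaDeviceSynchronize();
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}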
2d3a70284f45a73b516cb67d2d73dfae49701d2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <assert.h> #include <atomic> #include <thread> #include <vector> #include <chrono> #include <pthread.h> #include <functional> // From: https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } using std_clock = std::chrono::system_clock; using sec = std::chrono::duration<double>; using namespace std; typedef void (*tfunc)(uint* arg0, uint* arg1, uint* arg2); vector<string> kernel_names; vector<tfunc*> kernels; #include "kernels.h" #include "kernel_init.h" __global__ void test_launcher_kernel(uint * x, uint * y, uint* count, tfunc f) { f(x,y,count); } #define ITERS 20 int main(int argc, char **argv) { const unsigned int total_blocks = 65532; const unsigned int bufferSize = (total_blocks * sizeof(int)); int t_num = 0; int i_num = 0; if (argc == 3) { t_num = atoi(argv[1]); i_num = atoi(argv[2]); printf("executing t: %d, i: %d\n",t_num, i_num); } uint *dBufferX; uint *dBufferY; uint *dBufferCounter; uint *hBufferX; uint *hBufferY; uint *hBufferCounter; gpuErrchk(hipMalloc(&dBufferX, bufferSize)); gpuErrchk(hipMalloc(&dBufferY, bufferSize)); gpuErrchk(hipMalloc(&dBufferCounter, sizeof(uint))); hBufferX = (uint*) malloc(bufferSize); hBufferY = (uint*) malloc(bufferSize); hBufferCounter = (uint *) malloc(sizeof(uint)); init_kernels(); int total_killed = 0; tfunc host_function_ptr; // In case we want to do it iteratively //for (int t = t_num; t < kernel_names.size(); t++) { for (int t = t_num; t < t_num+1; t++) { cout << "running test: " << kernel_names[t] << "\n"; gpuErrchk(hipMemcpyFromSymbol(&host_function_ptr, *(kernels[t]), sizeof(tfunc))); int success = 0; int killed = 0; //for (int i = 0; i < ITERS; i++) { for (int i = i_num; i < i_num+1; i++) { //printf("test: %d, i: %d\n",t,i); //fflush(stdout); for (int i = 0; i < total_blocks; i++) { hBufferX[i] = 0; hBufferY[i] = 0; } hBufferCounter[0] = 0; gpuErrchk(hipMemcpy(dBufferX, hBufferX, bufferSize, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(dBufferY, hBufferY, bufferSize, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(dBufferCounter, hBufferCounter, sizeof(uint), hipMemcpyHostToDevice)); //*(kernels[t])<<<1024,1024,1024>>>(dBufferX, dBufferY, dBufferCounter); hipLaunchKernelGGL(( test_launcher_kernel), dim3(total_blocks),dim3(1), 0, 0, dBufferX, dBufferY, dBufferCounter, host_function_ptr); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipMemcpy(hBufferCounter, dBufferCounter, sizeof(uint), hipMemcpyDeviceToHost)); assert(*hBufferCounter == total_blocks); //printf("found %d\n", *hBufferCounter); } } gpuErrchk(hipFree(dBufferX)); gpuErrchk(hipFree(dBufferY)); gpuErrchk(hipFree(dBufferCounter)); free(hBufferX); free(hBufferY); free(hBufferCounter); return 0; }
2d3a70284f45a73b516cb67d2d73dfae49701d2b.cu
#include <iostream> #include <assert.h> #include <atomic> #include <thread> #include <vector> #include <chrono> #include <pthread.h> #include <functional> // From: https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } using std_clock = std::chrono::system_clock; using sec = std::chrono::duration<double>; using namespace std; typedef void (*tfunc)(uint* arg0, uint* arg1, uint* arg2); vector<string> kernel_names; vector<tfunc*> kernels; #include "kernels.h" #include "kernel_init.h" __global__ void test_launcher_kernel(uint * x, uint * y, uint* count, tfunc f) { f(x,y,count); } #define ITERS 20 int main(int argc, char **argv) { const unsigned int total_blocks = 65532; const unsigned int bufferSize = (total_blocks * sizeof(int)); int t_num = 0; int i_num = 0; if (argc == 3) { t_num = atoi(argv[1]); i_num = atoi(argv[2]); printf("executing t: %d, i: %d\n",t_num, i_num); } uint *dBufferX; uint *dBufferY; uint *dBufferCounter; uint *hBufferX; uint *hBufferY; uint *hBufferCounter; gpuErrchk(cudaMalloc(&dBufferX, bufferSize)); gpuErrchk(cudaMalloc(&dBufferY, bufferSize)); gpuErrchk(cudaMalloc(&dBufferCounter, sizeof(uint))); hBufferX = (uint*) malloc(bufferSize); hBufferY = (uint*) malloc(bufferSize); hBufferCounter = (uint *) malloc(sizeof(uint)); init_kernels(); int total_killed = 0; tfunc host_function_ptr; // In case we want to do it iteratively //for (int t = t_num; t < kernel_names.size(); t++) { for (int t = t_num; t < t_num+1; t++) { cout << "running test: " << kernel_names[t] << "\n"; gpuErrchk(cudaMemcpyFromSymbol(&host_function_ptr, *(kernels[t]), sizeof(tfunc))); int success = 0; int killed = 0; //for (int i = 0; i < ITERS; i++) { for (int i = i_num; i < i_num+1; i++) { //printf("test: %d, i: %d\n",t,i); //fflush(stdout); for (int i = 0; i < total_blocks; i++) { hBufferX[i] = 0; hBufferY[i] = 0; } hBufferCounter[0] = 0; gpuErrchk(cudaMemcpy(dBufferX, hBufferX, bufferSize, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(dBufferY, hBufferY, bufferSize, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(dBufferCounter, hBufferCounter, sizeof(uint), cudaMemcpyHostToDevice)); //*(kernels[t])<<<1024,1024,1024>>>(dBufferX, dBufferY, dBufferCounter); test_launcher_kernel<<<total_blocks,1>>>(dBufferX, dBufferY, dBufferCounter, host_function_ptr); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaMemcpy(hBufferCounter, dBufferCounter, sizeof(uint), cudaMemcpyDeviceToHost)); assert(*hBufferCounter == total_blocks); //printf("found %d\n", *hBufferCounter); } } gpuErrchk(cudaFree(dBufferX)); gpuErrchk(cudaFree(dBufferY)); gpuErrchk(cudaFree(dBufferCounter)); free(hBufferX); free(hBufferY); free(hBufferCounter); return 0; }
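// A hypothetical minimal sketch of the device-function-pointer pattern the
// launcher above uses: a __device__ variable holds a pointer to a device
// function, cudaMemcpyFromSymbol copies that pointer to the host, and the host
// passes it to a kernel by value. Names here (bump, d_bump_ptr) are
// illustrative, not taken from kernels.h.
#include <cstdio>
#include <cuda_runtime.h>

typedef void (*ufunc)(unsigned int*);

__device__ void bump(unsigned int* p) { atomicAdd(p, 1u); }
__device__ ufunc d_bump_ptr = bump;                  // device-side symbol holding the pointer

__global__ void call_through(ufunc f, unsigned int* counter) { f(counter); }

int main() {
    ufunc h_ptr = nullptr;
    cudaMemcpyFromSymbol(&h_ptr, d_bump_ptr, sizeof(ufunc));

    unsigned int* d_counter = nullptr;
    cudaMalloc(&d_counter, sizeof(unsigned int));
    cudaMemset(d_counter, 0, sizeof(unsigned int));

    call_through<<<4, 1>>>(h_ptr, d_counter);        // one increment per block
    cudaDeviceSynchronize();

    unsigned int h_counter = 0;
    cudaMemcpy(&h_counter, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("counter = %u\n", h_counter);             // expected: 4
    return 0;
}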
a7303e64f3636e13c88b81718ef06635a88c0114.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define N (32 * 1024)

__global__ void add( int *a, int *b, int *c ) {
    int tid = blockIdx.x;
    while (tid < N) {
        c[tid] = a[tid] + b[tid];
        tid += gridDim.x;
    }
}

int main( void ) {
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;

    // allocate the memory on the CPU
    a = (int*)malloc( N * sizeof(int) );
    b = (int*)malloc( N * sizeof(int) );
    c = (int*)malloc( N * sizeof(int) );

    // allocate the memory on the GPU
    hipMalloc( (void**)&dev_a, N * sizeof(int) );
    hipMalloc( (void**)&dev_b, N * sizeof(int) );
    hipMalloc( (void**)&dev_c, N * sizeof(int) );

    // fill the arrays 'a' and 'b' on the CPU
    for (int i=0; i<N; i++) {
        a[i] = i;
        b[i] = 2 * i;
    }

    // copy the arrays 'a' and 'b' to the GPU
    hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice );
    hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice );

    hipLaunchKernelGGL(( add), dim3(128), dim3(1), 0, 0, dev_a, dev_b, dev_c );

    // copy the array 'c' back from the GPU to the CPU
    hipMemcpy( c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost );

    // verify that the GPU did the work we requested
    bool success = true;
    for (int i=0; i<N; i++) {
        if ((a[i] + b[i]) != c[i]) {
            printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
            success = false;
        }
    }
    if (success) printf( "We did it!\n" );

    // free the memory we allocated on the GPU
    hipFree( dev_a );
    hipFree( dev_b );
    hipFree( dev_c );

    // free the memory we allocated on the CPU
    free( a );
    free( b );
    free( c );

    return 0;
}
a7303e64f3636e13c88b81718ef06635a88c0114.cu
#include <stdio.h>

#define N (32 * 1024)

__global__ void add( int *a, int *b, int *c ) {
    int tid = blockIdx.x;
    while (tid < N) {
        c[tid] = a[tid] + b[tid];
        tid += gridDim.x;
    }
}

int main( void ) {
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;

    // allocate the memory on the CPU
    a = (int*)malloc( N * sizeof(int) );
    b = (int*)malloc( N * sizeof(int) );
    c = (int*)malloc( N * sizeof(int) );

    // allocate the memory on the GPU
    cudaMalloc( (void**)&dev_a, N * sizeof(int) );
    cudaMalloc( (void**)&dev_b, N * sizeof(int) );
    cudaMalloc( (void**)&dev_c, N * sizeof(int) );

    // fill the arrays 'a' and 'b' on the CPU
    for (int i=0; i<N; i++) {
        a[i] = i;
        b[i] = 2 * i;
    }

    // copy the arrays 'a' and 'b' to the GPU
    cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice );

    add<<<128,1>>>( dev_a, dev_b, dev_c );

    // copy the array 'c' back from the GPU to the CPU
    cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost );

    // verify that the GPU did the work we requested
    bool success = true;
    for (int i=0; i<N; i++) {
        if ((a[i] + b[i]) != c[i]) {
            printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
            success = false;
        }
    }
    if (success) printf( "We did it!\n" );

    // free the memory we allocated on the GPU
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );

    // free the memory we allocated on the CPU
    free( a );
    free( b );
    free( c );

    return 0;
}
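// An illustrative alternative (not the file's own code) to the kernel above,
// which launches 128 single-thread blocks and strides by gridDim.x: the usual
// grid-stride loop strides by the total thread count, so any
// <<<blocks, threads>>> shape covers all N elements.
__global__ void add_grid_stride(const int* a, const int* b, int* c, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;   // global thread index
         i < n;
         i += gridDim.x * blockDim.x)                     // stride = total launched threads
        c[i] = a[i] + b[i];
}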
108c9aab28fbd4533e2ec71bf86d1c93e97a1665.hip
// !!! This is a file automatically generated by hipify!!!
/* Copyright 2020 Stanford
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "simulator.h"
#include "model.h"

Simulator::Simulator(FFHandler _handle, void* _base_ptr, size_t _capacity)
: handle(_handle), base_ptr((char*)_base_ptr), capacity(_capacity),
  offset(0), warmup_times(5), repeat_times(10)
{
  hipEventCreate(&start_event);
  hipEventCreate(&end_event);
  conv2d_meta = new Conv2DMeta(handle);
  linear_meta = new LinearMeta(handle, 4096);
}

void Simulator::free_all()
{
  offset = 0;
}

void* Simulator::allocate(size_t num_elements, DataType type)
{
  size_t element_size = 0;
  switch (type) {
    case DT_FLOAT:
      element_size = sizeof(float);
      break;
    case DT_DOUBLE:
      element_size = sizeof(double);
      break;
    case DT_INT32:
      element_size = sizeof(int32_t);
      break;
    case DT_INT64:
      element_size = sizeof(int64_t);
      break;
    case DT_BOOLEAN:
      element_size = sizeof(bool);
      break;
    default:
      assert(false);
  }
  void* ret_ptr = base_ptr + offset;
  offset += element_size * num_elements;
  return ret_ptr;
}
108c9aab28fbd4533e2ec71bf86d1c93e97a1665.cu
/* Copyright 2020 Stanford
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "simulator.h"
#include "model.h"

Simulator::Simulator(FFHandler _handle, void* _base_ptr, size_t _capacity)
: handle(_handle), base_ptr((char*)_base_ptr), capacity(_capacity),
  offset(0), warmup_times(5), repeat_times(10)
{
  cudaEventCreate(&start_event);
  cudaEventCreate(&end_event);
  conv2d_meta = new Conv2DMeta(handle);
  linear_meta = new LinearMeta(handle, 4096);
}

void Simulator::free_all()
{
  offset = 0;
}

void* Simulator::allocate(size_t num_elements, DataType type)
{
  size_t element_size = 0;
  switch (type) {
    case DT_FLOAT:
      element_size = sizeof(float);
      break;
    case DT_DOUBLE:
      element_size = sizeof(double);
      break;
    case DT_INT32:
      element_size = sizeof(int32_t);
      break;
    case DT_INT64:
      element_size = sizeof(int64_t);
      break;
    case DT_BOOLEAN:
      element_size = sizeof(bool);
      break;
    default:
      assert(false);
  }
  void* ret_ptr = base_ptr + offset;
  offset += element_size * num_elements;
  return ret_ptr;
}
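// The constructor above creates start_event/end_event and fixes warmup_times=5
// and repeat_times=10, but the measurement loop itself is not in this file. A
// hypothetical sketch of the usual pattern such fields support: warm up, then
// bracket the repeated launches with events and average the elapsed time.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel() {}

int main() {
    cudaEvent_t start_event, end_event;
    cudaEventCreate(&start_event);
    cudaEventCreate(&end_event);

    const int warmup_times = 5, repeat_times = 10;
    for (int i = 0; i < warmup_times; ++i) dummy_kernel<<<1, 32>>>();   // warm-up launches

    cudaEventRecord(start_event);
    for (int i = 0; i < repeat_times; ++i) dummy_kernel<<<1, 32>>>();
    cudaEventRecord(end_event);
    cudaEventSynchronize(end_event);

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start_event, end_event);
    printf("average launch time: %f ms\n", ms / repeat_times);
    return 0;
}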
c4a771ab2943e8ae7a5b86bd3df5c2b090f26f0e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file writer_impl.cu * @brief cuDF-IO CSV writer class implementation */ #include "writer_impl.hpp" #include <strings/utilities.cuh> #include <cudf/copying.hpp> #include <cudf/null_mask.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/combine.hpp> #include <cudf/strings/convert/convert_booleans.hpp> #include <cudf/strings/convert/convert_datetime.hpp> #include <cudf/strings/convert/convert_floats.hpp> #include <cudf/strings/convert/convert_integers.hpp> #include <cudf/strings/detail/modify_strings.cuh> #include <cudf/strings/replace.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/thrust_rmm_allocator.h> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <thrust/count.h> #include <thrust/execution_policy.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/scan.h> #include <thrust/transform.h> #include <algorithm> #include <cstring> #include <iterator> #include <sstream> #include <type_traits> #include <utility> namespace cudf { namespace io { namespace detail { namespace csv { namespace { // anonym. // helpers: using namespace cudf::strings; // predicate to determine if a given string_view contains special characters: //{"\"", "\n", <delimiter>} // struct predicate_special_chars { explicit predicate_special_chars(string_view const& delimiter) : delimiter_(delimiter) {} __device__ bool operator()(string_view const& str_view) const { // if (any_of{"\"", "\n", <delimiter>} ) // constexpr char const* quote_str = "\""; constexpr char const* newline_str = "\n"; constexpr size_type len1byte{1}; if ((str_view.find(quote_str, len1byte) >= 0) || (str_view.find(newline_str, len1byte) >= 0) || (str_view.find(delimiter_) >= 0)) { return true; } else { return false; } } private: string_view const delimiter_; }; struct probe_special_chars { probe_special_chars(column_device_view const d_column, predicate_special_chars const& predicate) : d_column_(d_column), predicate_(predicate) { } __device__ int32_t operator()(size_type idx) const { if (d_column_.is_null(idx)) { return 0; // null string, so no-op } string_view d_str = d_column_.template element<string_view>(idx); if (predicate_(d_str)) { constexpr char const quote_char = '\"'; // count number of quotes "\"" size_type num_quotes = thrust::count_if( thrust::seq, d_str.begin(), d_str.end(), [](char_utf8 chr) { return chr == quote_char; }); return d_str.size_bytes() + num_quotes + 2; } else { return d_str.size_bytes(); } } private: column_device_view const d_column_; predicate_special_chars const predicate_; }; struct modify_special_chars { modify_special_chars(column_device_view const d_column, int32_t const* d_offsets, char* d_chars, predicate_special_chars const& predicate) : d_column_(d_column), d_offsets_(d_offsets), d_chars_(d_chars), predicate_(predicate) { } __device__ int32_t operator()(size_type 
idx) { using namespace cudf::strings::detail; if (d_column_.is_null(idx)) { return 0; // null string, so no-op } string_view d_str = d_column_.template element<string_view>(idx); size_type str_size_bytes = d_str.size_bytes(); char* d_buffer = get_output_ptr(idx); // assert( d_buffer != nullptr ); if (predicate_(d_str)) { constexpr char const quote_char = '\"'; constexpr char const* quote_str = "\""; constexpr char const* str_2quotes = "\"\""; size_type len1quote{1}; size_type len2quotes{2}; // modify d_str by duplicating all 2bl quotes // and surrounding whole string by 2bl quotes: // // pre-condition: `d_str` is _not_ modified by `d_buffer` manipulation // because it's a copy of `idx` entry in `d_column_` //(since `d_column` is const) // d_buffer = copy_and_increment(d_buffer, quote_str, len1quote); // add the quote prefix for (auto itr = d_str.begin(); itr != d_str.end(); ++itr) { char_utf8 the_chr = *itr; if (the_chr == quote_char) { d_buffer = copy_and_increment(d_buffer, str_2quotes, len2quotes); // double the quote; } else { d_buffer += from_char_utf8(the_chr, d_buffer); } } d_buffer = copy_and_increment(d_buffer, quote_str, len1quote); // add the quote suffix; } else { // copy the source string unmodified: //(pass-through) // memcpy(d_buffer, d_str.data(), str_size_bytes); } return 0; } __device__ char* get_output_ptr(size_type idx) { return d_chars_ && d_offsets_ ? d_chars_ + d_offsets_[idx] : nullptr; } private: column_device_view const d_column_; int32_t const* d_offsets_; char* d_chars_; predicate_special_chars const predicate_; }; struct column_to_strings_fn { // compile-time predicate that defines unsupported column types; // based on the conditions used for instantiations of individual // converters in strings/convert/convert_*.hpp; //(this should have been a `variable template`, // instead of a static function, but nvcc (10.0) // fails to compile var-templs); // template <typename column_type> constexpr static bool is_not_handled(void) { // Note: the case (not std::is_same<column_type, bool>::value) // is already covered by is_integral) // return not((std::is_same<column_type, cudf::string_view>::value) || (std::is_integral<column_type>::value) || (std::is_floating_point<column_type>::value) || (cudf::is_timestamp<column_type>()) || (cudf::is_duration<column_type>())); } explicit column_to_strings_fn(csv_writer_options const& options, rmm::mr::device_memory_resource* mr = nullptr, rmm::cuda_stream_view stream = nullptr) : options_(options), mr_(mr), stream_(stream) { } // Note: `null` replacement with `na_rep` deferred to `concatenate()` // instead of column-wise; might be faster // // Note: Cannot pass `stream` to detail::<fname> version of <fname> calls below, because they are // not exposed in header (see, for example, detail::concatenate(tbl_view, separator, na_rep, mr, // stream) is declared and defined in combine.cu); Possible solution: declare `extern`, or just // declare a prototype inside `namespace cudf::strings::detail`; // bools: // template <typename column_type> std::enable_if_t<std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()( column_view const& column) const { auto conv_col_ptr = cudf::strings::from_booleans( column, options_.get_true_value(), options_.get_false_value(), mr_); return conv_col_ptr; } // strings: // template <typename column_type> std::enable_if_t<std::is_same<column_type, cudf::string_view>::value, std::unique_ptr<column>> operator()(column_view const& column_v) const { using namespace cudf::strings::detail; // 
handle special characters: {delimiter, '\n', "} in row: // // algorithm outline: // // target = "\""; // repl = ""\"\"; // // str_column_ref = {}; // for each str_row: column_v { // if ((not null str_row) && // (str_row.find("\n") || str_row.find("\"") || str_row.find(delimiter) )) // str_column_modified = modify(str_row); // where modify() = duplicate the double quotes, if any; add 2bl quotes prefix/suffix; //} // string_scalar delimiter{std::string{options_.get_inter_column_delimiter()}, true, stream_}; predicate_special_chars pred{delimiter.value(stream_)}; return modify_strings<probe_special_chars, modify_special_chars>(column_v, stream_, mr_, pred); } // ints: // template <typename column_type> std::enable_if_t<std::is_integral<column_type>::value && !std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()(column_view const& column) const { auto conv_col_ptr = cudf::strings::from_integers(column, mr_); return conv_col_ptr; } // floats: // template <typename column_type> std::enable_if_t<std::is_floating_point<column_type>::value, std::unique_ptr<column>> operator()( column_view const& column) const { auto conv_col_ptr = cudf::strings::from_floats(column, mr_); return conv_col_ptr; } // timestamps: // template <typename column_type> std::enable_if_t<cudf::is_timestamp<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { std::string format = [&]() { if (std::is_same<cudf::timestamp_s, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%SZ"}; } else if (std::is_same<cudf::timestamp_ms, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%3fZ"}; } else if (std::is_same<cudf::timestamp_us, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%6fZ"}; } else if (std::is_same<cudf::timestamp_ns, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%9fZ"}; } else { return std::string{"%Y-%m-%d"}; } }(); // handle the cases where delimiter / line-terminator can be // "-" or ":", in which case they are to be dropped from the format: // std::string delimiter{options_.get_inter_column_delimiter()}; std::string newline{options_.get_line_terminator()}; constexpr char const* dash{"-"}; constexpr char const* colon{":"}; if (delimiter == dash || newline == dash) { format.erase(std::remove(format.begin(), format.end(), dash[0]), format.end()); } if (delimiter == colon || newline == colon) { format.erase(std::remove(format.begin(), format.end(), colon[0]), format.end()); } auto conv_col_ptr = cudf::strings::from_timestamps(column, format, mr_); return conv_col_ptr; } template <typename column_type> std::enable_if_t<cudf::is_duration<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { return cudf::io::detail::csv::pandas_format_durations(column, stream_); } // unsupported type of column: // template <typename column_type> std::enable_if_t<is_not_handled<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { CUDF_FAIL("Unsupported column type."); } private: csv_writer_options const& options_; rmm::mr::device_memory_resource* mr_; rmm::cuda_stream_view stream_; }; } // unnamed namespace // Forward to implementation writer::writer(std::unique_ptr<data_sink> sink, csv_writer_options const& options, rmm::mr::device_memory_resource* mr) : _impl(std::make_unique<impl>(std::move(sink), options, mr)) { } // Destructor within this translation unit writer::~writer() = default; writer::impl::impl(std::unique_ptr<data_sink> sink, csv_writer_options const& options, 
rmm::mr::device_memory_resource* mr) : out_sink_(std::move(sink)), mr_(mr), options_(options) { } // write the header: column names: // void writer::impl::write_chunked_begin(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { if ((metadata != nullptr) && (options_.is_enabled_include_header())) { CUDF_EXPECTS(metadata->column_names.size() == static_cast<size_t>(table.num_columns()), "Mismatch between number of column headers and table columns."); std::string delimiter_str{options_.get_inter_column_delimiter()}; // avoid delimiter after last element: // std::stringstream ss; std::copy(metadata->column_names.begin(), metadata->column_names.end() - 1, std::ostream_iterator<std::string>(ss, delimiter_str.c_str())); ss << metadata->column_names.back() << options_.get_line_terminator(); out_sink_->host_write(ss.str().data(), ss.str().size()); } } void writer::impl::write_chunked(strings_column_view const& str_column_view, const table_metadata* metadata, rmm::cuda_stream_view stream) { // algorithm outline: // // for_each(strings_column.begin(), strings_column.end(), // [sink = out_sink_](auto str_row) mutable { // auto host_buffer = str_row.host_buffer(); // sink->host_write(host_buffer_.data(), host_buffer_.size()); // });//or...sink->device_write(device_buffer,...); // // added line_terminator functionality // CUDF_EXPECTS(str_column_view.size() > 0, "Unexpected empty strings column."); cudf::string_scalar newline{options_.get_line_terminator()}; auto p_str_col_w_nl = cudf::strings::join_strings(str_column_view, newline); strings_column_view strings_column{std::move(p_str_col_w_nl->view())}; auto total_num_bytes = strings_column.chars_size(); char const* ptr_all_bytes = strings_column.chars().data<char>(); if (out_sink_->supports_device_write()) { // host algorithm call, but the underlying call // is a device_write taking a device buffer; // out_sink_->device_write(ptr_all_bytes, total_num_bytes, stream); out_sink_->device_write(newline.data(), newline.size(), stream); // needs newline at the end, to separate from next chunk } else { // no device write possible; // // copy the bytes to host, too: // thrust::host_vector<char> h_bytes(total_num_bytes); CUDA_TRY(hipMemcpyAsync(h_bytes.data(), ptr_all_bytes, total_num_bytes * sizeof(char), hipMemcpyDeviceToHost, stream.value())); stream.synchronize(); // host algorithm call, where the underlying call // is also host_write taking a host buffer; // char const* ptr_h_bytes = h_bytes.data(); out_sink_->host_write(ptr_h_bytes, total_num_bytes); out_sink_->host_write(options_.get_line_terminator().data(), options_.get_line_terminator() .size()); // needs newline at the end, to separate from next chunk } } void writer::impl::write(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { CUDF_EXPECTS(table.num_columns() > 0, "Empty table."); // write header: column names separated by delimiter: // (even for tables with no rows) // write_chunked_begin(table, metadata, stream); if (table.num_rows() > 0) { // no need to check same-size columns constraint; auto-enforced by table_view auto n_rows_per_chunk = options_.get_rows_per_chunk(); // // This outputs the CSV in row chunks to save memory. // Maybe we can use the total_rows*count calculation and a memory threshold // instead of an arbitrary chunk count. // The entire CSV chunk must fit in CPU memory before writing it out. 
// if (n_rows_per_chunk % 8) // must be divisible by 8 n_rows_per_chunk += 8 - (n_rows_per_chunk % 8); CUDF_EXPECTS(n_rows_per_chunk >= 8, "write_csv: invalid chunk_rows; must be at least 8"); auto exec = rmm::exec_policy(stream); auto num_rows = table.num_rows(); std::vector<table_view> vector_views; if (num_rows <= n_rows_per_chunk) { vector_views.push_back(table); } else { std::vector<size_type> splits; auto n_chunks = num_rows / n_rows_per_chunk; splits.resize(n_chunks); rmm::device_vector<size_type> d_splits(n_chunks, n_rows_per_chunk); thrust::inclusive_scan( exec->on(stream.value()), d_splits.begin(), d_splits.end(), d_splits.begin()); CUDA_TRY(hipMemcpyAsync(splits.data(), d_splits.data().get(), n_chunks * sizeof(size_type), hipMemcpyDeviceToHost, stream.value())); stream.synchronize(); // split table_view into chunks: // vector_views = cudf::split(table, splits); } // convert each chunk to CSV: // column_to_strings_fn converter{options_, mr_}; for (auto&& sub_view : vector_views) { // Skip if the table has no rows if (sub_view.num_rows() == 0) continue; std::vector<std::unique_ptr<column>> str_column_vec; // populate vector of string-converted columns: // std::transform(sub_view.begin(), sub_view.end(), std::back_inserter(str_column_vec), [converter](auto const& current_col) { return cudf::type_dispatcher(current_col.type(), converter, current_col); }); // create string table view from str_column_vec: // auto str_table_ptr = std::make_unique<cudf::table>(std::move(str_column_vec)); auto str_table_view = str_table_ptr->view(); // concatenate columns in each row into one big string column //(using null representation and delimiter): // std::string delimiter_str{options_.get_inter_column_delimiter()}; auto str_concat_col = cudf::strings::concatenate(str_table_view, delimiter_str, options_.get_na_rep(), mr_); write_chunked(str_concat_col->view(), metadata, stream); } } // finalize (no-op, for now, but offers a hook for future extensions): // write_chunked_end(table, metadata, stream); } void writer::write(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { _impl->write(table, metadata, stream); } } // namespace csv } // namespace detail } // namespace io } // namespace cudf
c4a771ab2943e8ae7a5b86bd3df5c2b090f26f0e.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file writer_impl.cu * @brief cuDF-IO CSV writer class implementation */ #include "writer_impl.hpp" #include <strings/utilities.cuh> #include <cudf/copying.hpp> #include <cudf/null_mask.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/combine.hpp> #include <cudf/strings/convert/convert_booleans.hpp> #include <cudf/strings/convert/convert_datetime.hpp> #include <cudf/strings/convert/convert_floats.hpp> #include <cudf/strings/convert/convert_integers.hpp> #include <cudf/strings/detail/modify_strings.cuh> #include <cudf/strings/replace.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/thrust_rmm_allocator.h> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <thrust/count.h> #include <thrust/execution_policy.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/scan.h> #include <thrust/transform.h> #include <algorithm> #include <cstring> #include <iterator> #include <sstream> #include <type_traits> #include <utility> namespace cudf { namespace io { namespace detail { namespace csv { namespace { // anonym. // helpers: using namespace cudf::strings; // predicate to determine if a given string_view contains special characters: //{"\"", "\n", <delimiter>} // struct predicate_special_chars { explicit predicate_special_chars(string_view const& delimiter) : delimiter_(delimiter) {} __device__ bool operator()(string_view const& str_view) const { // if (any_of{"\"", "\n", <delimiter>} ) // constexpr char const* quote_str = "\""; constexpr char const* newline_str = "\n"; constexpr size_type len1byte{1}; if ((str_view.find(quote_str, len1byte) >= 0) || (str_view.find(newline_str, len1byte) >= 0) || (str_view.find(delimiter_) >= 0)) { return true; } else { return false; } } private: string_view const delimiter_; }; struct probe_special_chars { probe_special_chars(column_device_view const d_column, predicate_special_chars const& predicate) : d_column_(d_column), predicate_(predicate) { } __device__ int32_t operator()(size_type idx) const { if (d_column_.is_null(idx)) { return 0; // null string, so no-op } string_view d_str = d_column_.template element<string_view>(idx); if (predicate_(d_str)) { constexpr char const quote_char = '\"'; // count number of quotes "\"" size_type num_quotes = thrust::count_if( thrust::seq, d_str.begin(), d_str.end(), [](char_utf8 chr) { return chr == quote_char; }); return d_str.size_bytes() + num_quotes + 2; } else { return d_str.size_bytes(); } } private: column_device_view const d_column_; predicate_special_chars const predicate_; }; struct modify_special_chars { modify_special_chars(column_device_view const d_column, int32_t const* d_offsets, char* d_chars, predicate_special_chars const& predicate) : d_column_(d_column), d_offsets_(d_offsets), d_chars_(d_chars), predicate_(predicate) { } __device__ int32_t operator()(size_type idx) { using namespace cudf::strings::detail; if 
(d_column_.is_null(idx)) { return 0; // null string, so no-op } string_view d_str = d_column_.template element<string_view>(idx); size_type str_size_bytes = d_str.size_bytes(); char* d_buffer = get_output_ptr(idx); // assert( d_buffer != nullptr ); if (predicate_(d_str)) { constexpr char const quote_char = '\"'; constexpr char const* quote_str = "\""; constexpr char const* str_2quotes = "\"\""; size_type len1quote{1}; size_type len2quotes{2}; // modify d_str by duplicating all 2bl quotes // and surrounding whole string by 2bl quotes: // // pre-condition: `d_str` is _not_ modified by `d_buffer` manipulation // because it's a copy of `idx` entry in `d_column_` //(since `d_column` is const) // d_buffer = copy_and_increment(d_buffer, quote_str, len1quote); // add the quote prefix for (auto itr = d_str.begin(); itr != d_str.end(); ++itr) { char_utf8 the_chr = *itr; if (the_chr == quote_char) { d_buffer = copy_and_increment(d_buffer, str_2quotes, len2quotes); // double the quote; } else { d_buffer += from_char_utf8(the_chr, d_buffer); } } d_buffer = copy_and_increment(d_buffer, quote_str, len1quote); // add the quote suffix; } else { // copy the source string unmodified: //(pass-through) // memcpy(d_buffer, d_str.data(), str_size_bytes); } return 0; } __device__ char* get_output_ptr(size_type idx) { return d_chars_ && d_offsets_ ? d_chars_ + d_offsets_[idx] : nullptr; } private: column_device_view const d_column_; int32_t const* d_offsets_; char* d_chars_; predicate_special_chars const predicate_; }; struct column_to_strings_fn { // compile-time predicate that defines unsupported column types; // based on the conditions used for instantiations of individual // converters in strings/convert/convert_*.hpp; //(this should have been a `variable template`, // instead of a static function, but nvcc (10.0) // fails to compile var-templs); // template <typename column_type> constexpr static bool is_not_handled(void) { // Note: the case (not std::is_same<column_type, bool>::value) // is already covered by is_integral) // return not((std::is_same<column_type, cudf::string_view>::value) || (std::is_integral<column_type>::value) || (std::is_floating_point<column_type>::value) || (cudf::is_timestamp<column_type>()) || (cudf::is_duration<column_type>())); } explicit column_to_strings_fn(csv_writer_options const& options, rmm::mr::device_memory_resource* mr = nullptr, rmm::cuda_stream_view stream = nullptr) : options_(options), mr_(mr), stream_(stream) { } // Note: `null` replacement with `na_rep` deferred to `concatenate()` // instead of column-wise; might be faster // // Note: Cannot pass `stream` to detail::<fname> version of <fname> calls below, because they are // not exposed in header (see, for example, detail::concatenate(tbl_view, separator, na_rep, mr, // stream) is declared and defined in combine.cu); Possible solution: declare `extern`, or just // declare a prototype inside `namespace cudf::strings::detail`; // bools: // template <typename column_type> std::enable_if_t<std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()( column_view const& column) const { auto conv_col_ptr = cudf::strings::from_booleans( column, options_.get_true_value(), options_.get_false_value(), mr_); return conv_col_ptr; } // strings: // template <typename column_type> std::enable_if_t<std::is_same<column_type, cudf::string_view>::value, std::unique_ptr<column>> operator()(column_view const& column_v) const { using namespace cudf::strings::detail; // handle special characters: {delimiter, '\n', "} in 
row: // // algorithm outline: // // target = "\""; // repl = ""\"\"; // // str_column_ref = {}; // for each str_row: column_v { // if ((not null str_row) && // (str_row.find("\n") || str_row.find("\"") || str_row.find(delimiter) )) // str_column_modified = modify(str_row); // where modify() = duplicate the double quotes, if any; add 2bl quotes prefix/suffix; //} // string_scalar delimiter{std::string{options_.get_inter_column_delimiter()}, true, stream_}; predicate_special_chars pred{delimiter.value(stream_)}; return modify_strings<probe_special_chars, modify_special_chars>(column_v, stream_, mr_, pred); } // ints: // template <typename column_type> std::enable_if_t<std::is_integral<column_type>::value && !std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()(column_view const& column) const { auto conv_col_ptr = cudf::strings::from_integers(column, mr_); return conv_col_ptr; } // floats: // template <typename column_type> std::enable_if_t<std::is_floating_point<column_type>::value, std::unique_ptr<column>> operator()( column_view const& column) const { auto conv_col_ptr = cudf::strings::from_floats(column, mr_); return conv_col_ptr; } // timestamps: // template <typename column_type> std::enable_if_t<cudf::is_timestamp<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { std::string format = [&]() { if (std::is_same<cudf::timestamp_s, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%SZ"}; } else if (std::is_same<cudf::timestamp_ms, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%3fZ"}; } else if (std::is_same<cudf::timestamp_us, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%6fZ"}; } else if (std::is_same<cudf::timestamp_ns, column_type>::value) { return std::string{"%Y-%m-%dT%H:%M:%S.%9fZ"}; } else { return std::string{"%Y-%m-%d"}; } }(); // handle the cases where delimiter / line-terminator can be // "-" or ":", in which case they are to be dropped from the format: // std::string delimiter{options_.get_inter_column_delimiter()}; std::string newline{options_.get_line_terminator()}; constexpr char const* dash{"-"}; constexpr char const* colon{":"}; if (delimiter == dash || newline == dash) { format.erase(std::remove(format.begin(), format.end(), dash[0]), format.end()); } if (delimiter == colon || newline == colon) { format.erase(std::remove(format.begin(), format.end(), colon[0]), format.end()); } auto conv_col_ptr = cudf::strings::from_timestamps(column, format, mr_); return conv_col_ptr; } template <typename column_type> std::enable_if_t<cudf::is_duration<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { return cudf::io::detail::csv::pandas_format_durations(column, stream_); } // unsupported type of column: // template <typename column_type> std::enable_if_t<is_not_handled<column_type>(), std::unique_ptr<column>> operator()( column_view const& column) const { CUDF_FAIL("Unsupported column type."); } private: csv_writer_options const& options_; rmm::mr::device_memory_resource* mr_; rmm::cuda_stream_view stream_; }; } // unnamed namespace // Forward to implementation writer::writer(std::unique_ptr<data_sink> sink, csv_writer_options const& options, rmm::mr::device_memory_resource* mr) : _impl(std::make_unique<impl>(std::move(sink), options, mr)) { } // Destructor within this translation unit writer::~writer() = default; writer::impl::impl(std::unique_ptr<data_sink> sink, csv_writer_options const& options, rmm::mr::device_memory_resource* mr) : 
out_sink_(std::move(sink)), mr_(mr), options_(options) { } // write the header: column names: // void writer::impl::write_chunked_begin(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { if ((metadata != nullptr) && (options_.is_enabled_include_header())) { CUDF_EXPECTS(metadata->column_names.size() == static_cast<size_t>(table.num_columns()), "Mismatch between number of column headers and table columns."); std::string delimiter_str{options_.get_inter_column_delimiter()}; // avoid delimiter after last element: // std::stringstream ss; std::copy(metadata->column_names.begin(), metadata->column_names.end() - 1, std::ostream_iterator<std::string>(ss, delimiter_str.c_str())); ss << metadata->column_names.back() << options_.get_line_terminator(); out_sink_->host_write(ss.str().data(), ss.str().size()); } } void writer::impl::write_chunked(strings_column_view const& str_column_view, const table_metadata* metadata, rmm::cuda_stream_view stream) { // algorithm outline: // // for_each(strings_column.begin(), strings_column.end(), // [sink = out_sink_](auto str_row) mutable { // auto host_buffer = str_row.host_buffer(); // sink->host_write(host_buffer_.data(), host_buffer_.size()); // });//or...sink->device_write(device_buffer,...); // // added line_terminator functionality // CUDF_EXPECTS(str_column_view.size() > 0, "Unexpected empty strings column."); cudf::string_scalar newline{options_.get_line_terminator()}; auto p_str_col_w_nl = cudf::strings::join_strings(str_column_view, newline); strings_column_view strings_column{std::move(p_str_col_w_nl->view())}; auto total_num_bytes = strings_column.chars_size(); char const* ptr_all_bytes = strings_column.chars().data<char>(); if (out_sink_->supports_device_write()) { // host algorithm call, but the underlying call // is a device_write taking a device buffer; // out_sink_->device_write(ptr_all_bytes, total_num_bytes, stream); out_sink_->device_write(newline.data(), newline.size(), stream); // needs newline at the end, to separate from next chunk } else { // no device write possible; // // copy the bytes to host, too: // thrust::host_vector<char> h_bytes(total_num_bytes); CUDA_TRY(cudaMemcpyAsync(h_bytes.data(), ptr_all_bytes, total_num_bytes * sizeof(char), cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); // host algorithm call, where the underlying call // is also host_write taking a host buffer; // char const* ptr_h_bytes = h_bytes.data(); out_sink_->host_write(ptr_h_bytes, total_num_bytes); out_sink_->host_write(options_.get_line_terminator().data(), options_.get_line_terminator() .size()); // needs newline at the end, to separate from next chunk } } void writer::impl::write(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { CUDF_EXPECTS(table.num_columns() > 0, "Empty table."); // write header: column names separated by delimiter: // (even for tables with no rows) // write_chunked_begin(table, metadata, stream); if (table.num_rows() > 0) { // no need to check same-size columns constraint; auto-enforced by table_view auto n_rows_per_chunk = options_.get_rows_per_chunk(); // // This outputs the CSV in row chunks to save memory. // Maybe we can use the total_rows*count calculation and a memory threshold // instead of an arbitrary chunk count. // The entire CSV chunk must fit in CPU memory before writing it out. 
// if (n_rows_per_chunk % 8) // must be divisible by 8 n_rows_per_chunk += 8 - (n_rows_per_chunk % 8); CUDF_EXPECTS(n_rows_per_chunk >= 8, "write_csv: invalid chunk_rows; must be at least 8"); auto exec = rmm::exec_policy(stream); auto num_rows = table.num_rows(); std::vector<table_view> vector_views; if (num_rows <= n_rows_per_chunk) { vector_views.push_back(table); } else { std::vector<size_type> splits; auto n_chunks = num_rows / n_rows_per_chunk; splits.resize(n_chunks); rmm::device_vector<size_type> d_splits(n_chunks, n_rows_per_chunk); thrust::inclusive_scan( exec->on(stream.value()), d_splits.begin(), d_splits.end(), d_splits.begin()); CUDA_TRY(cudaMemcpyAsync(splits.data(), d_splits.data().get(), n_chunks * sizeof(size_type), cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); // split table_view into chunks: // vector_views = cudf::split(table, splits); } // convert each chunk to CSV: // column_to_strings_fn converter{options_, mr_}; for (auto&& sub_view : vector_views) { // Skip if the table has no rows if (sub_view.num_rows() == 0) continue; std::vector<std::unique_ptr<column>> str_column_vec; // populate vector of string-converted columns: // std::transform(sub_view.begin(), sub_view.end(), std::back_inserter(str_column_vec), [converter](auto const& current_col) { return cudf::type_dispatcher(current_col.type(), converter, current_col); }); // create string table view from str_column_vec: // auto str_table_ptr = std::make_unique<cudf::table>(std::move(str_column_vec)); auto str_table_view = str_table_ptr->view(); // concatenate columns in each row into one big string column //(using null representation and delimiter): // std::string delimiter_str{options_.get_inter_column_delimiter()}; auto str_concat_col = cudf::strings::concatenate(str_table_view, delimiter_str, options_.get_na_rep(), mr_); write_chunked(str_concat_col->view(), metadata, stream); } } // finalize (no-op, for now, but offers a hook for future extensions): // write_chunked_end(table, metadata, stream); } void writer::write(table_view const& table, const table_metadata* metadata, rmm::cuda_stream_view stream) { _impl->write(table, metadata, stream); } } // namespace csv } // namespace detail } // namespace io } // namespace cudf
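The writer pair above escapes CSV fields on the device by doubling any embedded double quote and wrapping the whole field in quotes whenever it contains the delimiter, a newline, or a quote (the probe_special_chars / modify_special_chars pair). The following host-side sketch shows the same quoting rule in isolation; the function and variable names are illustrative and not part of libcudf.

#include <string>

// Apply the CSV special-character rule to one field:
// pass through unchanged unless it contains delimiter / '\n' / '"',
// in which case double every '"' and wrap the field in quotes.
std::string quote_csv_field(std::string const& field, char delimiter)
{
  bool needs_quotes = field.find(delimiter) != std::string::npos ||
                      field.find('\n') != std::string::npos ||
                      field.find('"') != std::string::npos;
  if (!needs_quotes) { return field; }

  std::string out;
  out.reserve(field.size() + 2);
  out.push_back('"');                      // opening quote
  for (char c : field) {
    if (c == '"') { out.push_back('"'); }  // double any embedded quote
    out.push_back(c);
  }
  out.push_back('"');                      // closing quote
  return out;
}

// Usage sketch: quote_csv_field("say \"hi\"", ',') yields "\"say \"\"hi\"\"\"".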
b23eb6bf2157ac678dac16b6a4aac12d5d7ae687.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> const int N = 1 << 20; __global__ void kernel(float *x, int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; for(int i = tid; i < n; i += blockDim.x * gridDim.x) { x[i] = sqrt(pow(3.14159,i)); } } int main(int argc, char **argv){ // seteo de numero de streams if(argc != 2){ fprintf(stderr, "run as ./prog numstreams\n"); exit(EXIT_FAILURE); } const int num_streams = atoi(argv[1]); hipStream_t streams[num_streams]; float *data[num_streams]; // creacion de streams y de datos for(int i = 0; i < num_streams; i++){ printf("creando stream %i\n", i); hipStreamCreate(&streams[i]); hipMalloc(&data[i], N * sizeof(float)); } // ejecucion de cada kernel hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); printf("ejecutando con %i streams....", num_streams); fflush(stdout); hipEventRecord(start); for (int i = 0; i < num_streams; i++) { // launch one worker kernel per stream hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, streams[i], data[i], N); hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, 0, 0); } hipDeviceSynchronize(); hipEventRecord(stop); printf("ok\n"); fflush(stdout); hipEventSynchronize(stop); float milliseconds = 0.0; hipEventElapsedTime(&milliseconds, start, stop); printf("Time GPU: %f\n", milliseconds ); hipDeviceReset(); return 0; }
b23eb6bf2157ac678dac16b6a4aac12d5d7ae687.cu
#include <cuda.h> #include <stdio.h> const int N = 1 << 20; __global__ void kernel(float *x, int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; for(int i = tid; i < n; i += blockDim.x * gridDim.x) { x[i] = sqrt(pow(3.14159,i)); } } int main(int argc, char **argv){ // seteo de numero de streams if(argc != 2){ fprintf(stderr, "run as ./prog numstreams\n"); exit(EXIT_FAILURE); } const int num_streams = atoi(argv[1]); cudaStream_t streams[num_streams]; float *data[num_streams]; // creacion de streams y de datos for(int i = 0; i < num_streams; i++){ printf("creando stream %i\n", i); cudaStreamCreate(&streams[i]); cudaMalloc(&data[i], N * sizeof(float)); } // ejecucion de cada kernel cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); printf("ejecutando con %i streams....", num_streams); fflush(stdout); cudaEventRecord(start); for (int i = 0; i < num_streams; i++) { // launch one worker kernel per stream kernel<<<1, 64, 0, streams[i]>>>(data[i], N); kernel<<<1, 1>>>(0, 0); } cudaDeviceSynchronize(); cudaEventRecord(stop); printf("ok\n"); fflush(stdout); cudaEventSynchronize(stop); float milliseconds = 0.0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Time GPU: %f\n", milliseconds ); cudaDeviceReset(); return 0; }
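The main syntactic change hipify applies in the pair above is the rewrite of the triple-chevron launch into hipLaunchKernelGGL (plus the cuda*-to-hip* runtime renames). A minimal sketch of that correspondence, with the HIP form shown as a comment; the kernel and launch dimensions are illustrative.

#include <cuda_runtime.h>

__global__ void fill(float* x, int n) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) x[i] = 1.0f;
}

int main() {
  const int n = 1024;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));

  // CUDA launch syntax:
  fill<<<(n + 255) / 256, 256>>>(d, n);
  // Equivalent HIP form produced by hipify (shared-mem bytes and stream are
  // made explicit as the 4th and 5th arguments):
  //   hipLaunchKernelGGL(fill, dim3((n + 255) / 256), dim3(256), 0, 0, d, n);

  cudaDeviceSynchronize();
  cudaFree(d);
  return 0;
}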
51a2e3e92c7015ce5e52d29da3038eb657734593.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// Smooth Filter Parallel. /// /// Implementation of the smooth filter in CUDA using a convolutional operator, /// where the mask M is s.t. /// M_{ij} = 1 / (MASK_WIDTH^2), \forall (i, j) \in [0, MASK_WIDTH)^2 /// /// Authors: /// Lucas Oliveira David. /// Paulo Finardi. /// /// Note (in Brazilian Portuguese): /// Como nosso trabalho final e' relacionado `a redes convolucionais, /// possuindo um operador convolucao implementado em CUDA, ambos os alunos /// fizeram esta ultima tarefa juntos. /// /// /// Table 1: Speed-up /// ================= /// /// input | CPU_Serial | GPU_NOShared | GPU_Shared | Speedup (CPU/GPUSM) /// _______________________________________________________________________ /// | arq1.in | 0.172154 | 0.047532 | 0.044660 | 3,854769 /// | arq2.in | 0.371454 | 0.047155 | 0.043899 | 8,461559 /// | arq3.in | 1.533677 | 0.088398 | 0.073371 | 20,90304 /// /// /// Table 2: Reduction ratio /// ======================== /// /// n_elements_loaded = (O_TILE_WIDTH+MASK_WIDTH-1)^2 /// n_memory_accesses = OUT_TILE_WIDTH^2 threads * MASK_WIDTH^2 (pixels) /// * 3 (channels) /// = 3*(OUT_TILE_WIDTH * MASK_WIDTH)^2 /// /// Reduction ratio = n_memory_accesses / n_elements_loaded. /// = 3*(OUT_TILE_WIDTH*MASK_WIDTH)^2 /// / (OUT_TILE_WIDTH + MASK_WIDTH -1)^2 /// /// 8^2 |14^2 |15^2 |16^2 |32^2 /// ______|______|______|______|______ /// 5 | 33.33 48.00 60.75 71.70 81.12 /// 7 | 45.37 72.03 98.40 123.52 147.00 /// 9 | 46.75 75.00 103.36 130.68 156.48 /// 11 | 48.00 77.75 108.00 137.47 165.55 /// 13 | 59.26 104.24 155.52 210.72 268.17 /// /// License: MIT (c) 2016 /// #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #define MASK_WIDTH 5 #define OUT_TILE_WIDTH 32 #define COMMENT "Histogram_GPU" #define RGB_COMPONENT_COLOR 255 typedef struct { unsigned char red, green, blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } img = (PPMImage *) malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } if (rgb_comp_color != RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } void writePPM(PPMImage *img) { fprintf(stdout, 
"P6\n"); fprintf(stdout, "# %s\n", COMMENT); fprintf(stdout, "%d %d\n", img->x, img->y); fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR); fwrite(img->data, 3 * img->x, img->y, stdout); fclose(stdout); } __global__ void _k_conv(PPMPixel *image, PPMPixel *out, int lines, int columns) { // Loading strategy is "sliding window": // threads load subsequential elements from top to bottom, left to right. __shared__ PPMPixel image_s[OUT_TILE_WIDTH + MASK_WIDTH -1][OUT_TILE_WIDTH + MASK_WIDTH -1]; const int i_out = blockIdx.y*blockDim.y + threadIdx.y, j_out = blockIdx.x*blockDim.y + threadIdx.x, i0 = i_out - (MASK_WIDTH -1) / 2, j0 = j_out - (MASK_WIDTH -1) / 2; // Slide vertically... int i = 0; while (threadIdx.y + i < OUT_TILE_WIDTH + MASK_WIDTH - 1) { // Slide horizontally... int j = 0; while (threadIdx.x + j < OUT_TILE_WIDTH + MASK_WIDTH - 1) { if (0 <= i0 + i && i0 + i < lines && 0 <= j0 + j && j0 + j < columns) { image_s[threadIdx.y + i][threadIdx.x + j].red = image[(i0 + i) * columns + j0 + j].red; image_s[threadIdx.y + i][threadIdx.x + j].green = image[(i0 + i) * columns + j0 + j].green; image_s[threadIdx.y + i][threadIdx.x + j].blue = image[(i0 + i) * columns + j0 + j].blue; } else { image_s[threadIdx.y + i][threadIdx.x + j].red = 0; image_s[threadIdx.y + i][threadIdx.x + j].green = 0; image_s[threadIdx.y + i][threadIdx.x + j].blue = 0; } j += OUT_TILE_WIDTH; } i += OUT_TILE_WIDTH; } __syncthreads(); if (i_out < lines && j_out < columns) { int r, g, b; r=g=b=0; for (int i = 0; i < MASK_WIDTH; i++) for (int j = 0; j < MASK_WIDTH; j++) { r += image_s[threadIdx.y + i][threadIdx.x + j].red; g += image_s[threadIdx.y + i][threadIdx.x + j].green; b += image_s[threadIdx.y + i][threadIdx.x + j].blue; } out[i_out * columns + j_out].red = r / (MASK_WIDTH*MASK_WIDTH); out[i_out * columns + j_out].green = g / (MASK_WIDTH*MASK_WIDTH); out[i_out * columns + j_out].blue = b / (MASK_WIDTH*MASK_WIDTH); } } void smoothing_filter(PPMImage *image, PPMImage *output) { PPMPixel *d_image, *d_output; int size = image->x * image->y * sizeof(PPMPixel); hipMalloc((void **)&d_image, size); hipMalloc((void **)&d_output, size); hipMemcpy(d_image, image->data, size, hipMemcpyHostToDevice); dim3 dimGrid((image->x - 1) / OUT_TILE_WIDTH + 1, (image->y - 1) / OUT_TILE_WIDTH + 1, 1); dim3 dimBlock(OUT_TILE_WIDTH, OUT_TILE_WIDTH, 1); hipLaunchKernelGGL(( _k_conv), dim3(dimGrid), dim3(dimBlock), 0, 0, d_image, d_output, image->y, image->x); hipDeviceSynchronize(); hipMemcpy(output->data, d_output, size, hipMemcpyDeviceToHost); hipFree(d_image); hipFree(d_output); } int main(int argc, char *argv[]) { if( argc != 2 ) printf("Too many or no one arguments supplied.\n"); char *filename = argv[1]; PPMImage *image = readPPM(filename), *output = readPPM(filename); double t_start, t_end; t_start = rtclock(); smoothing_filter(image, output); t_end = rtclock(); // fprintf(stdout, "\n%0.6lfs\n", t_end - t_start); writePPM(output); free(image); free(output); }
51a2e3e92c7015ce5e52d29da3038eb657734593.cu
/// Smooth Filter Parallel. /// /// Implementation of the smooth filter in CUDA using a convolutional operator, /// where the mask M is s.t. /// M_{ij} = 1 / (MASK_WIDTH^2), \forall (i, j) \in [0, MASK_WIDTH)^2 /// /// Authors: /// Lucas Oliveira David. /// Paulo Finardi. /// /// Note (in Brazilian Portuguese): /// Como nosso trabalho final e' relacionado `a redes convolucionais, /// possuindo um operador convolucao implementado em CUDA, ambos os alunos /// fizeram esta ultima tarefa juntos. /// /// /// Table 1: Speed-up /// ================= /// /// input | CPU_Serial | GPU_NOShared | GPU_Shared | Speedup (CPU/GPUSM) /// _______________________________________________________________________ /// | arq1.in | 0.172154 | 0.047532 | 0.044660 | 3,854769 /// | arq2.in | 0.371454 | 0.047155 | 0.043899 | 8,461559 /// | arq3.in | 1.533677 | 0.088398 | 0.073371 | 20,90304 /// /// /// Table 2: Reduction ratio /// ======================== /// /// n_elements_loaded = (O_TILE_WIDTH+MASK_WIDTH-1)^2 /// n_memory_accesses = OUT_TILE_WIDTH^2 threads * MASK_WIDTH^2 (pixels) /// * 3 (channels) /// = 3*(OUT_TILE_WIDTH * MASK_WIDTH)^2 /// /// Reduction ratio = n_memory_accesses / n_elements_loaded. /// = 3*(OUT_TILE_WIDTH*MASK_WIDTH)^2 /// / (OUT_TILE_WIDTH + MASK_WIDTH -1)^2 /// /// 8^2 |14^2 |15^2 |16^2 |32^2 /// ______|______|______|______|______ /// 5 | 33.33 48.00 60.75 71.70 81.12 /// 7 | 45.37 72.03 98.40 123.52 147.00 /// 9 | 46.75 75.00 103.36 130.68 156.48 /// 11 | 48.00 77.75 108.00 137.47 165.55 /// 13 | 59.26 104.24 155.52 210.72 268.17 /// /// License: MIT (c) 2016 /// #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #define MASK_WIDTH 5 #define OUT_TILE_WIDTH 32 #define COMMENT "Histogram_GPU" #define RGB_COMPONENT_COLOR 255 typedef struct { unsigned char red, green, blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } img = (PPMImage *) malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } if (rgb_comp_color != RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } void writePPM(PPMImage *img) { fprintf(stdout, "P6\n"); fprintf(stdout, "# %s\n", COMMENT); fprintf(stdout, "%d %d\n", img->x, img->y); 
fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR); fwrite(img->data, 3 * img->x, img->y, stdout); fclose(stdout); } __global__ void _k_conv(PPMPixel *image, PPMPixel *out, int lines, int columns) { // Loading strategy is "sliding window": // threads load subsequential elements from top to bottom, left to right. __shared__ PPMPixel image_s[OUT_TILE_WIDTH + MASK_WIDTH -1][OUT_TILE_WIDTH + MASK_WIDTH -1]; const int i_out = blockIdx.y*blockDim.y + threadIdx.y, j_out = blockIdx.x*blockDim.y + threadIdx.x, i0 = i_out - (MASK_WIDTH -1) / 2, j0 = j_out - (MASK_WIDTH -1) / 2; // Slide vertically... int i = 0; while (threadIdx.y + i < OUT_TILE_WIDTH + MASK_WIDTH - 1) { // Slide horizontally... int j = 0; while (threadIdx.x + j < OUT_TILE_WIDTH + MASK_WIDTH - 1) { if (0 <= i0 + i && i0 + i < lines && 0 <= j0 + j && j0 + j < columns) { image_s[threadIdx.y + i][threadIdx.x + j].red = image[(i0 + i) * columns + j0 + j].red; image_s[threadIdx.y + i][threadIdx.x + j].green = image[(i0 + i) * columns + j0 + j].green; image_s[threadIdx.y + i][threadIdx.x + j].blue = image[(i0 + i) * columns + j0 + j].blue; } else { image_s[threadIdx.y + i][threadIdx.x + j].red = 0; image_s[threadIdx.y + i][threadIdx.x + j].green = 0; image_s[threadIdx.y + i][threadIdx.x + j].blue = 0; } j += OUT_TILE_WIDTH; } i += OUT_TILE_WIDTH; } __syncthreads(); if (i_out < lines && j_out < columns) { int r, g, b; r=g=b=0; for (int i = 0; i < MASK_WIDTH; i++) for (int j = 0; j < MASK_WIDTH; j++) { r += image_s[threadIdx.y + i][threadIdx.x + j].red; g += image_s[threadIdx.y + i][threadIdx.x + j].green; b += image_s[threadIdx.y + i][threadIdx.x + j].blue; } out[i_out * columns + j_out].red = r / (MASK_WIDTH*MASK_WIDTH); out[i_out * columns + j_out].green = g / (MASK_WIDTH*MASK_WIDTH); out[i_out * columns + j_out].blue = b / (MASK_WIDTH*MASK_WIDTH); } } void smoothing_filter(PPMImage *image, PPMImage *output) { PPMPixel *d_image, *d_output; int size = image->x * image->y * sizeof(PPMPixel); cudaMalloc((void **)&d_image, size); cudaMalloc((void **)&d_output, size); cudaMemcpy(d_image, image->data, size, cudaMemcpyHostToDevice); dim3 dimGrid((image->x - 1) / OUT_TILE_WIDTH + 1, (image->y - 1) / OUT_TILE_WIDTH + 1, 1); dim3 dimBlock(OUT_TILE_WIDTH, OUT_TILE_WIDTH, 1); _k_conv<<<dimGrid, dimBlock>>>(d_image, d_output, image->y, image->x); cudaDeviceSynchronize(); cudaMemcpy(output->data, d_output, size, cudaMemcpyDeviceToHost); cudaFree(d_image); cudaFree(d_output); } int main(int argc, char *argv[]) { if( argc != 2 ) printf("Too many or no one arguments supplied.\n"); char *filename = argv[1]; PPMImage *image = readPPM(filename), *output = readPPM(filename); double t_start, t_end; t_start = rtclock(); smoothing_filter(image, output); t_end = rtclock(); // fprintf(stdout, "\n%0.6lfs\n", t_end - t_start); writePPM(output); free(image); free(output); }
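The header of the smooth-filter pair above documents a reduction ratio of 3*(OUT_TILE_WIDTH*MASK_WIDTH)^2 / (OUT_TILE_WIDTH+MASK_WIDTH-1)^2 for the shared-memory tile. A small host-side check of that formula for one configuration (OUT_TILE_WIDTH = 8, MASK_WIDTH = 5), which reproduces the 33.33 entry of the table; the helper name is illustrative.

#include <cstdio>

// Documented formula: global accesses avoided per element loaded into the tile.
double reduction_ratio(int out_tile_width, int mask_width)
{
  double accesses = 3.0 * (double)(out_tile_width * mask_width)
                        * (double)(out_tile_width * mask_width);
  double loaded   = (double)(out_tile_width + mask_width - 1)
                  * (double)(out_tile_width + mask_width - 1);
  return accesses / loaded;
}

int main() {
  // 3*(8*5)^2 / (8+5-1)^2 = 4800 / 144 = 33.33
  printf("%.2f\n", reduction_ratio(8, 5));
  return 0;
}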
c1e828c8a167fe122aec9fa8dd0826874912bd33.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by Daniel Simon on 3/23/20. // #include "gtest/gtest.h" #include "cudapp/memory/allocators/managed_allocator.h" #include "cudapp_test/testing_helpers.h" template <typename T> __global__ void SaxpyKernel(T a, const T* x, const T* y, T* f, unsigned int n) { unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < n) { f[idx] = a * x[idx] + y[idx]; } } template <typename T, typename Allocator> std::vector<T, Allocator> SaxpyHost(T a, const std::vector<T, Allocator>& arr_x, const std::vector<T, Allocator>& arr_y) { assert(arr_x.size() == arr_y.size()); std::vector<T, Allocator> out(arr_x.size()); std::transform(arr_x.begin(), arr_x.end(), arr_y.begin(), out.begin(), [a](const auto& x, const auto& y)->float{ return a * x + y; }); return out; } TEST(ManagedAllocator, SaxpyVector) { unsigned int num = 32; float minimum = 0.0f; float maximum = 1024.0f; float a = cudapp::test::CreateUniformRandom(minimum, maximum); auto managed_x = cudapp::test::GenerateUniformlyRandom<float, cudapp::ManagedAllocator<float>>(num, minimum, maximum); auto managed_y = cudapp::test::GenerateUniformlyRandom<float, cudapp::ManagedAllocator<float>>(num, minimum, maximum); std::vector<float, cudapp::ManagedAllocator<float>> managed_f(num); dim3 block_size = dim3{32u, 1u, 1u}; assert(num > 0); dim3 grid_size((num - 1) / block_size.x + 1, 1u, 1u); hipLaunchKernelGGL(( SaxpyKernel), dim3(grid_size), dim3(block_size), 0, 0, a, managed_x.data(), managed_y.data(), managed_f.data(), num); EXPECT_EQ(hipDeviceSynchronize(), hipSuccess); auto expected_f = SaxpyHost(a, managed_x, managed_y); for (unsigned int i = 0; i < num; i++) { EXPECT_FLOAT_EQ(expected_f.at(i), managed_f.at(i)) << "Test failed at index: " << i; } }
c1e828c8a167fe122aec9fa8dd0826874912bd33.cu
// // Created by Daniel Simon on 3/23/20. // #include "gtest/gtest.h" #include "cudapp/memory/allocators/managed_allocator.h" #include "cudapp_test/testing_helpers.h" template <typename T> __global__ void SaxpyKernel(T a, const T* x, const T* y, T* f, unsigned int n) { unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < n) { f[idx] = a * x[idx] + y[idx]; } } template <typename T, typename Allocator> std::vector<T, Allocator> SaxpyHost(T a, const std::vector<T, Allocator>& arr_x, const std::vector<T, Allocator>& arr_y) { assert(arr_x.size() == arr_y.size()); std::vector<T, Allocator> out(arr_x.size()); std::transform(arr_x.begin(), arr_x.end(), arr_y.begin(), out.begin(), [a](const auto& x, const auto& y)->float{ return a * x + y; }); return out; } TEST(ManagedAllocator, SaxpyVector) { unsigned int num = 32; float minimum = 0.0f; float maximum = 1024.0f; float a = cudapp::test::CreateUniformRandom(minimum, maximum); auto managed_x = cudapp::test::GenerateUniformlyRandom<float, cudapp::ManagedAllocator<float>>(num, minimum, maximum); auto managed_y = cudapp::test::GenerateUniformlyRandom<float, cudapp::ManagedAllocator<float>>(num, minimum, maximum); std::vector<float, cudapp::ManagedAllocator<float>> managed_f(num); dim3 block_size = dim3{32u, 1u, 1u}; assert(num > 0); dim3 grid_size((num - 1) / block_size.x + 1, 1u, 1u); SaxpyKernel<<<grid_size, block_size>>>(a, managed_x.data(), managed_y.data(), managed_f.data(), num); EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess); auto expected_f = SaxpyHost(a, managed_x, managed_y); for (unsigned int i = 0; i < num; i++) { EXPECT_FLOAT_EQ(expected_f.at(i), managed_f.at(i)) << "Test failed at index: " << i; } }
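The test pair above relies on cudapp::ManagedAllocator so that a std::vector's storage lives in unified memory and its data() pointer can be handed directly to a kernel. A minimal sketch of such an allocator, assuming only cudaMallocManaged/cudaFree; the real cudapp type may differ in details, this version only shows the pairing.

#include <cuda_runtime.h>
#include <cstddef>
#include <new>
#include <vector>

template <typename T>
struct managed_allocator {
  using value_type = T;
  managed_allocator() = default;
  template <typename U>
  managed_allocator(managed_allocator<U> const&) noexcept {}

  T* allocate(std::size_t n) {
    void* p = nullptr;
    if (cudaMallocManaged(&p, n * sizeof(T)) != cudaSuccess) throw std::bad_alloc{};
    return static_cast<T*>(p);  // accessible from both host and device
  }
  void deallocate(T* p, std::size_t) noexcept { cudaFree(p); }
};

template <typename T, typename U>
bool operator==(managed_allocator<T> const&, managed_allocator<U> const&) { return true; }
template <typename T, typename U>
bool operator!=(managed_allocator<T> const&, managed_allocator<U> const&) { return false; }

int main() {
  std::vector<float, managed_allocator<float>> x(1 << 10, 1.0f);
  // x.data() can be passed straight to a kernel, as the SaxpyKernel test does,
  // provided the host synchronizes before reading results back.
  return 0;
}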
44d87523226717087de1f3fe78afbea4accdf976.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright (c) 2020 by Contributors * \file array/cpu/array_cumsum.cu * \brief Array cumsum GPU implementation */ #include <dgl/array.h> #include <hipcub/hipcub.hpp> #include "../../runtime/cuda/cuda_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { template <DLDeviceType XPU, typename IdType> IdArray CumSum(IdArray array, bool prepend_zero) { const int64_t len = array.NumElements(); if (len == 0) return array; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); auto device = runtime::DeviceAPI::Get(array->ctx); const IdType* in_d = array.Ptr<IdType>(); IdArray ret; IdType* out_d = nullptr; if (prepend_zero) { ret = aten::Full(0, len + 1, array->dtype.bits, array->ctx); out_d = ret.Ptr<IdType>() + 1; } else { ret = aten::NewIdArray(len, array->ctx, array->dtype.bits); out_d = ret.Ptr<IdType>(); } // Allocate workspace size_t workspace_size = 0; hipcub::DeviceScan::InclusiveSum(nullptr, workspace_size, in_d, out_d, len, thr_entry->stream); void* workspace = device->AllocWorkspace(array->ctx, workspace_size); // Compute cumsum hipcub::DeviceScan::InclusiveSum(workspace, workspace_size, in_d, out_d, len, thr_entry->stream); device->FreeWorkspace(array->ctx, workspace); return ret; } template IdArray CumSum<kDLGPU, int32_t>(IdArray, bool); template IdArray CumSum<kDLGPU, int64_t>(IdArray, bool); } // namespace impl } // namespace aten } // namespace dgl
44d87523226717087de1f3fe78afbea4accdf976.cu
/*! * Copyright (c) 2020 by Contributors * \file array/cpu/array_cumsum.cu * \brief Array cumsum GPU implementation */ #include <dgl/array.h> #include <cub/cub.cuh> #include "../../runtime/cuda/cuda_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { template <DLDeviceType XPU, typename IdType> IdArray CumSum(IdArray array, bool prepend_zero) { const int64_t len = array.NumElements(); if (len == 0) return array; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); auto device = runtime::DeviceAPI::Get(array->ctx); const IdType* in_d = array.Ptr<IdType>(); IdArray ret; IdType* out_d = nullptr; if (prepend_zero) { ret = aten::Full(0, len + 1, array->dtype.bits, array->ctx); out_d = ret.Ptr<IdType>() + 1; } else { ret = aten::NewIdArray(len, array->ctx, array->dtype.bits); out_d = ret.Ptr<IdType>(); } // Allocate workspace size_t workspace_size = 0; cub::DeviceScan::InclusiveSum(nullptr, workspace_size, in_d, out_d, len, thr_entry->stream); void* workspace = device->AllocWorkspace(array->ctx, workspace_size); // Compute cumsum cub::DeviceScan::InclusiveSum(workspace, workspace_size, in_d, out_d, len, thr_entry->stream); device->FreeWorkspace(array->ctx, workspace); return ret; } template IdArray CumSum<kDLGPU, int32_t>(IdArray, bool); template IdArray CumSum<kDLGPU, int64_t>(IdArray, bool); } // namespace impl } // namespace aten } // namespace dgl
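The cumsum pair above uses the standard two-phase CUB pattern: the first DeviceScan::InclusiveSum call only reports the required temporary-storage size, the second performs the scan. A standalone sketch of that pattern on a plain int array; the allocation strategy here (raw cudaMalloc instead of DGL's workspace allocator) is illustrative.

#include <cub/cub.cuh>
#include <cuda_runtime.h>

void inclusive_sum(const int* d_in, int* d_out, int n, cudaStream_t stream)
{
  void*  d_temp     = nullptr;
  size_t temp_bytes = 0;
  // 1st call: d_temp == nullptr, so CUB only writes the workspace size.
  cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n, stream);
  cudaMalloc(&d_temp, temp_bytes);
  // 2nd call: run the scan with the allocated workspace.
  cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n, stream);
  cudaFree(d_temp);
}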
13a698c9a826e3683b828c12108d08c8646c7e5b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel2_z [4][2]; static int dims_advec_mom_kernel2_z_h [4][2] = {0}; //user function __device__ inline void advec_mom_kernel2_z_gpu(ACC<double> &vel1, const ACC<double> &node_mass_post, const ACC<double> &node_mass_pre, const ACC<double> &mom_flux) { vel1(0,0,0) = ( vel1(0,0,0) * node_mass_pre(0,0,0) + mom_flux(0,0,-1) - mom_flux(0,0,0) ) / node_mass_post(0,0,0); } __global__ void ops_advec_mom_kernel2_z( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[0][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[0][0] * dims_advec_mom_kernel2_z[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[1][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[1][0] * dims_advec_mom_kernel2_z[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[2][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[2][0] * dims_advec_mom_kernel2_z[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[3][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[3][0] * dims_advec_mom_kernel2_z[3][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_advec_mom_kernel2_z[0][0], dims_advec_mom_kernel2_z[0][1], arg0); const ACC<double> argp1(dims_advec_mom_kernel2_z[1][0], dims_advec_mom_kernel2_z[1][1], arg1); const ACC<double> argp2(dims_advec_mom_kernel2_z[2][0], dims_advec_mom_kernel2_z[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel2_z[3][0], dims_advec_mom_kernel2_z[3][1], arg3); advec_mom_kernel2_z_gpu(argp0, argp1, argp2, argp3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel2_z_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,138)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(138,"advec_mom_kernel2_z"); OPS_kernels[138].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != dims_advec_mom_kernel2_z_h[0][0] || ydim0 != dims_advec_mom_kernel2_z_h[0][1] || xdim1 != dims_advec_mom_kernel2_z_h[1][0] || ydim1 != 
dims_advec_mom_kernel2_z_h[1][1] || xdim2 != dims_advec_mom_kernel2_z_h[2][0] || ydim2 != dims_advec_mom_kernel2_z_h[2][1] || xdim3 != dims_advec_mom_kernel2_z_h[3][0] || ydim3 != dims_advec_mom_kernel2_z_h[3][1]) { dims_advec_mom_kernel2_z_h[0][0] = xdim0; dims_advec_mom_kernel2_z_h[0][1] = ydim0; dims_advec_mom_kernel2_z_h[1][0] = xdim1; dims_advec_mom_kernel2_z_h[1][1] = ydim1; dims_advec_mom_kernel2_z_h[2][0] = xdim2; dims_advec_mom_kernel2_z_h[2][1] = ydim2; dims_advec_mom_kernel2_z_h[3][0] = xdim3; dims_advec_mom_kernel2_z_h[3][1] = ydim3; cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel2_z, dims_advec_mom_kernel2_z_h, sizeof(dims_advec_mom_kernel2_z))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[138].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel2_z), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[138].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[138].mpi_time += t2-t1; OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, 
&arg1); OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 138; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 138; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_advec_mom_kernel2_z_execute; if (OPS_diags > 1) { ops_timing_realloc(138,"advec_mom_kernel2_z"); } ops_enqueue_kernel(desc); } #endif
13a698c9a826e3683b828c12108d08c8646c7e5b.cu
// // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel2_z [4][2]; static int dims_advec_mom_kernel2_z_h [4][2] = {0}; //user function __device__ inline void advec_mom_kernel2_z_gpu(ACC<double> &vel1, const ACC<double> &node_mass_post, const ACC<double> &node_mass_pre, const ACC<double> &mom_flux) { vel1(0,0,0) = ( vel1(0,0,0) * node_mass_pre(0,0,0) + mom_flux(0,0,-1) - mom_flux(0,0,0) ) / node_mass_post(0,0,0); } __global__ void ops_advec_mom_kernel2_z( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[0][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[0][0] * dims_advec_mom_kernel2_z[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[1][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[1][0] * dims_advec_mom_kernel2_z[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[2][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[2][0] * dims_advec_mom_kernel2_z[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[3][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[3][0] * dims_advec_mom_kernel2_z[3][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_advec_mom_kernel2_z[0][0], dims_advec_mom_kernel2_z[0][1], arg0); const ACC<double> argp1(dims_advec_mom_kernel2_z[1][0], dims_advec_mom_kernel2_z[1][1], arg1); const ACC<double> argp2(dims_advec_mom_kernel2_z[2][0], dims_advec_mom_kernel2_z[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel2_z[3][0], dims_advec_mom_kernel2_z[3][1], arg3); advec_mom_kernel2_z_gpu(argp0, argp1, argp2, argp3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel2_z_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,138)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(138,"advec_mom_kernel2_z"); OPS_kernels[138].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != dims_advec_mom_kernel2_z_h[0][0] || ydim0 != dims_advec_mom_kernel2_z_h[0][1] || xdim1 != dims_advec_mom_kernel2_z_h[1][0] || ydim1 != dims_advec_mom_kernel2_z_h[1][1] || xdim2 != dims_advec_mom_kernel2_z_h[2][0] || ydim2 != 
dims_advec_mom_kernel2_z_h[2][1] || xdim3 != dims_advec_mom_kernel2_z_h[3][0] || ydim3 != dims_advec_mom_kernel2_z_h[3][1]) { dims_advec_mom_kernel2_z_h[0][0] = xdim0; dims_advec_mom_kernel2_z_h[0][1] = ydim0; dims_advec_mom_kernel2_z_h[1][0] = xdim1; dims_advec_mom_kernel2_z_h[1][1] = ydim1; dims_advec_mom_kernel2_z_h[2][0] = xdim2; dims_advec_mom_kernel2_z_h[2][1] = ydim2; dims_advec_mom_kernel2_z_h[3][0] = xdim3; dims_advec_mom_kernel2_z_h[3][1] = ydim3; cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel2_z, dims_advec_mom_kernel2_z_h, sizeof(dims_advec_mom_kernel2_z))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[138].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel2_z<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[138].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[138].mpi_time += t2-t1; OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[138].transfer += 
ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 138; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 138; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_advec_mom_kernel2_z_execute; if (OPS_diags > 1) { ops_timing_realloc(138,"advec_mom_kernel2_z"); } ops_enqueue_kernel(desc); } #endif
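The OPS-generated wrapper above advances each argument pointer by idx_x + idx_y*xdim + idx_z*xdim*ydim before wrapping it in an ACC<double> accessor, i.e. plain flattened 3D indexing. The standalone sketch below shows only that indexing pattern; the kernel name, array sizes, and the copy body are hypothetical stand-ins for the OPS accessor machinery, not part of the generated code.

#include <cuda_runtime.h>
#include <cstdio>

// Flattened 3D indexing as used by the generated wrapper above:
// the linear offset of element (x, y, z) is x + y*xdim + z*xdim*ydim.
__global__ void copy3d(const double* __restrict__ in, double* __restrict__ out,
                       int xdim, int ydim, int zdim)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x < xdim && y < ydim && z < zdim) {
        size_t idx = (size_t)x + (size_t)y * xdim + (size_t)z * xdim * ydim;
        out[idx] = in[idx];
    }
}

int main()
{
    const int xdim = 8, ydim = 8, zdim = 8;      // hypothetical sizes
    size_t bytes = (size_t)xdim * ydim * zdim * sizeof(double);
    double *d_in, *d_out;
    cudaMalloc(&d_in, bytes);
    cudaMalloc(&d_out, bytes);
    cudaMemset(d_in, 0, bytes);

    dim3 block(8, 8, 8);
    dim3 grid((xdim + block.x - 1) / block.x,
              (ydim + block.y - 1) / block.y,
              (zdim + block.z - 1) / block.z);
    copy3d<<<grid, block>>>(d_in, d_out, xdim, ydim, zdim);
    cudaDeviceSynchronize();
    printf("last error: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}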
1092ec09462affb18d2cab07a0354c7447dd41c5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <rmm/rmm_api.h> #include <rmm/thrust_rmm_allocator.h> namespace cudf { // Trivially copy all members but the children column_device_view::column_device_view(column_view source) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.null_count(), source.offset()}, _num_children{source.num_children()} {} // Free device memory allocated for children void column_device_view::destroy() { RMM_FREE(d_children,0); delete this; } // Place any child objects in host memory (h_ptr) and use the device // memory ptr (d_ptr) to set any child object pointers. column_device_view::column_device_view( column_view source, ptrdiff_t h_ptr, ptrdiff_t d_ptr ) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.null_count(), source.offset()}, _num_children{source.num_children()} { size_type num_children = source.num_children(); if( num_children > 0 ) { // The beginning of the memory must be the fixed-sized column_device_view // struct objects in order for d_children to be used as an array. // Therefore, any child data is assigned past the end of this array. auto h_column = reinterpret_cast<column_device_view*>(h_ptr); auto d_column = reinterpret_cast<column_device_view*>(d_ptr); auto h_end = reinterpret_cast<int8_t*>(h_column + num_children); auto d_end = reinterpret_cast<int8_t*>(d_column + num_children); d_children = d_column; // set member ptr to device memory for( size_type idx=0; idx < _num_children; ++idx ) { // inplace-new each child into host memory column_view child = source.child(idx); new(h_column) column_device_view(child,reinterpret_cast<ptrdiff_t>(h_end),reinterpret_cast<ptrdiff_t>(d_end)); h_column++; // adv to next child // update the pointers for holding this child column's child data auto col_child_data_size = extent(child) - sizeof(child); h_end += col_child_data_size; d_end += col_child_data_size; } } } // Construct a unique_ptr that invokes `destroy()` as it's deleter std::unique_ptr<column_device_view, std::function<void(column_device_view*)>> column_device_view::create(column_view source, hipStream_t stream) { auto deleter = [](column_device_view* v) { v->destroy(); }; size_type num_children = source.num_children(); if( num_children == 0 ) { std::unique_ptr<column_device_view, decltype(deleter)> p{ new column_device_view(source), deleter}; return p; } // First calculate the size of memory needed to hold the // child columns. This is done by calling extent() // for each of the children. size_type size_bytes = 0; for( size_type idx=0; idx < num_children; ++idx ) size_bytes += extent(source.child(idx)); // A buffer of CPU memory is allocated to hold the column_device_view // objects. 
Once filled, the CPU memory is copied to device memory // and then set into the d_children member pointer. std::vector<int8_t> h_buffer(size_bytes); auto h_start = h_buffer.data(); // Each column_device_view instance may have child objects that // require setting some internal device pointers before being copied // from CPU to device. int8_t* d_start; RMM_TRY(RMM_ALLOC(&d_start, size_bytes, stream)); std::unique_ptr<column_device_view, decltype(deleter)> p{ new column_device_view(source,reinterpret_cast<ptrdiff_t>(h_start),reinterpret_cast<ptrdiff_t>(d_start)), deleter}; // copy the CPU memory with all the children into device memory CUDA_TRY(hipMemcpyAsync(d_start, h_start, size_bytes, hipMemcpyHostToDevice, stream)); p->_num_children = num_children; p->d_children = reinterpret_cast<column_device_view*>(d_start); CUDA_TRY(hipStreamSynchronize(stream)); return p; } size_type column_device_view::extent(column_view source) { size_type data_size = sizeof(column_device_view); for( size_type idx=0; idx < source.num_children(); ++idx ) data_size += extent(source.child(idx)); return data_size; } // For use with inplace-new to pre-fill memory to be copied to device mutable_column_device_view::mutable_column_device_view( mutable_column_view source ) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.null_count(), source.offset()} { // TODO children may not be actually possible for mutable columns } mutable_column_device_view::mutable_column_device_view( mutable_column_view source, ptrdiff_t h_ptr, ptrdiff_t d_ptr ) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.null_count(), source.offset()} { // TODO children may not be actually possible for mutable columns } // Handle freeing children void mutable_column_device_view::destroy() { RMM_FREE(mutable_children,0); delete this; } // Construct a unique_ptr that invokes `destroy()` as it's deleter std::unique_ptr<mutable_column_device_view, std::function<void(mutable_column_device_view*)>> mutable_column_device_view::create(mutable_column_view source, hipStream_t stream) { // TODO children may not be actually possible for mutable columns auto deleter = [](mutable_column_device_view* v) { v->destroy(); }; std::unique_ptr<mutable_column_device_view, decltype(deleter)> p{ new mutable_column_device_view(source), deleter}; return p; } size_type mutable_column_device_view::extent(mutable_column_view source) { size_type data_size = sizeof(mutable_column_device_view); for( size_type idx=0; idx < source.num_children(); ++idx ) data_size += extent(source.child(idx)); return data_size; } } // namespace cudf
1092ec09462affb18d2cab07a0354c7447dd41c5.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <rmm/rmm_api.h> #include <rmm/thrust_rmm_allocator.h> namespace cudf { // Trivially copy all members but the children column_device_view::column_device_view(column_view source) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.null_count(), source.offset()}, _num_children{source.num_children()} {} // Free device memory allocated for children void column_device_view::destroy() { RMM_FREE(d_children,0); delete this; } // Place any child objects in host memory (h_ptr) and use the device // memory ptr (d_ptr) to set any child object pointers. column_device_view::column_device_view( column_view source, ptrdiff_t h_ptr, ptrdiff_t d_ptr ) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.null_count(), source.offset()}, _num_children{source.num_children()} { size_type num_children = source.num_children(); if( num_children > 0 ) { // The beginning of the memory must be the fixed-sized column_device_view // struct objects in order for d_children to be used as an array. // Therefore, any child data is assigned past the end of this array. auto h_column = reinterpret_cast<column_device_view*>(h_ptr); auto d_column = reinterpret_cast<column_device_view*>(d_ptr); auto h_end = reinterpret_cast<int8_t*>(h_column + num_children); auto d_end = reinterpret_cast<int8_t*>(d_column + num_children); d_children = d_column; // set member ptr to device memory for( size_type idx=0; idx < _num_children; ++idx ) { // inplace-new each child into host memory column_view child = source.child(idx); new(h_column) column_device_view(child,reinterpret_cast<ptrdiff_t>(h_end),reinterpret_cast<ptrdiff_t>(d_end)); h_column++; // adv to next child // update the pointers for holding this child column's child data auto col_child_data_size = extent(child) - sizeof(child); h_end += col_child_data_size; d_end += col_child_data_size; } } } // Construct a unique_ptr that invokes `destroy()` as it's deleter std::unique_ptr<column_device_view, std::function<void(column_device_view*)>> column_device_view::create(column_view source, cudaStream_t stream) { auto deleter = [](column_device_view* v) { v->destroy(); }; size_type num_children = source.num_children(); if( num_children == 0 ) { std::unique_ptr<column_device_view, decltype(deleter)> p{ new column_device_view(source), deleter}; return p; } // First calculate the size of memory needed to hold the // child columns. This is done by calling extent() // for each of the children. size_type size_bytes = 0; for( size_type idx=0; idx < num_children; ++idx ) size_bytes += extent(source.child(idx)); // A buffer of CPU memory is allocated to hold the column_device_view // objects. 
Once filled, the CPU memory is copied to device memory // and then set into the d_children member pointer. std::vector<int8_t> h_buffer(size_bytes); auto h_start = h_buffer.data(); // Each column_device_view instance may have child objects that // require setting some internal device pointers before being copied // from CPU to device. int8_t* d_start; RMM_TRY(RMM_ALLOC(&d_start, size_bytes, stream)); std::unique_ptr<column_device_view, decltype(deleter)> p{ new column_device_view(source,reinterpret_cast<ptrdiff_t>(h_start),reinterpret_cast<ptrdiff_t>(d_start)), deleter}; // copy the CPU memory with all the children into device memory CUDA_TRY(cudaMemcpyAsync(d_start, h_start, size_bytes, cudaMemcpyHostToDevice, stream)); p->_num_children = num_children; p->d_children = reinterpret_cast<column_device_view*>(d_start); CUDA_TRY(cudaStreamSynchronize(stream)); return p; } size_type column_device_view::extent(column_view source) { size_type data_size = sizeof(column_device_view); for( size_type idx=0; idx < source.num_children(); ++idx ) data_size += extent(source.child(idx)); return data_size; } // For use with inplace-new to pre-fill memory to be copied to device mutable_column_device_view::mutable_column_device_view( mutable_column_view source ) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.null_count(), source.offset()} { // TODO children may not be actually possible for mutable columns } mutable_column_device_view::mutable_column_device_view( mutable_column_view source, ptrdiff_t h_ptr, ptrdiff_t d_ptr ) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.null_count(), source.offset()} { // TODO children may not be actually possible for mutable columns } // Handle freeing children void mutable_column_device_view::destroy() { RMM_FREE(mutable_children,0); delete this; } // Construct a unique_ptr that invokes `destroy()` as it's deleter std::unique_ptr<mutable_column_device_view, std::function<void(mutable_column_device_view*)>> mutable_column_device_view::create(mutable_column_view source, cudaStream_t stream) { // TODO children may not be actually possible for mutable columns auto deleter = [](mutable_column_device_view* v) { v->destroy(); }; std::unique_ptr<mutable_column_device_view, decltype(deleter)> p{ new mutable_column_device_view(source), deleter}; return p; } size_type mutable_column_device_view::extent(mutable_column_view source) { size_type data_size = sizeof(mutable_column_device_view); for( size_type idx=0; idx < source.num_children(); ++idx ) data_size += extent(source.child(idx)); return data_size; } } // namespace cudf
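Both the HIP and CUDA versions above build the child column_device_view objects in a single host buffer while wiring their internal pointers to the addresses those children will occupy on the device, then ship the whole buffer with one memcpy. The sketch below shows that "construct on host with device-side links, bulk-copy once" pattern in isolation, using a made-up Node struct rather than cudf's real types.

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

struct Node {
    int value;
    Node* children;      // device pointer once the buffer has been copied
    int num_children;
};

__global__ void read_first_child(const Node* root, int* out)
{
    *out = (root->num_children > 0) ? root->children[0].value : -1;
}

int main()
{
    const int num_children = 3;
    // One root followed by its children, laid out contiguously.
    std::vector<Node> h_buf(1 + num_children);

    // Allocate the device buffer first so host-side construction can
    // record device addresses for the children.
    Node* d_buf = nullptr;
    cudaMalloc(&d_buf, h_buf.size() * sizeof(Node));

    h_buf[0].value = 0;
    h_buf[0].children = d_buf + 1;   // device address, not a host address
    h_buf[0].num_children = num_children;
    for (int i = 0; i < num_children; ++i)
        h_buf[1 + i] = Node{10 * (i + 1), nullptr, 0};

    // Single bulk copy: all objects and their device-side links arrive together.
    cudaMemcpy(d_buf, h_buf.data(), h_buf.size() * sizeof(Node), cudaMemcpyHostToDevice);

    int* d_out; int h_out = 0;
    cudaMalloc(&d_out, sizeof(int));
    read_first_child<<<1, 1>>>(d_buf, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("first child value: %d\n", h_out);   // expected: 10

    cudaFree(d_out);
    cudaFree(d_buf);
    return 0;
}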
a28fdc10eafbca611d9c6d0d26e285bd376f6dcd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THHUNN/generic/RReLU.hip" #else #include <THHUNN/common.h> #include <ATen/CUDAGenerator.h> void THNN_(RReLU_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *noise, double lower, double upper, bool train, bool inplace, void *generator) { THCUNN_assertSameGPU(state, 3, input, output, noise); auto gen = at::cuda::detail::getDefaultCUDAGenerator(); if (train) { input = THCTensor_(newContiguous)(state, input); THCTensor_(resizeAs)(state, noise, input); scalar_t *input_data = THCTensor_(data)(state, input); scalar_t *noise_data = THCTensor_(data)(state, noise); ptrdiff_t n = THCTensor_(nElement)(state, input); // philox offset calculation for grid-stride loop utilizing hiprand4 const uint32_t curand4_engine_calls = 4; dim3 grid = NUM_BLOCKS(n); uint64_t counter_offset = ((n - 1) / (BLOCK_SIZE * grid.x) + 1) * curand4_engine_calls; std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(counter_offset); } if (inplace) { hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(grid), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), n, rng_engine_inputs, input_data, noise_data, input_data, lower, upper); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); scalar_t *output_data = THCTensor_(data)(state, output); hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(grid), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), n, rng_engine_inputs, input_data, noise_data, output_data, lower, upper); } THCudaCheck(hipGetLastError()); THCTensor_(free)(state, input); } else { const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2); if (inplace) { THC_pointwiseApply1<scalar_t>(state, input, RReLUUpdateOutputEvalIP_functor<scalar_t>(negSlope)); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, RReLUUpdateOutputEval_functor<scalar_t>(negSlope)); } } } void THNN_(RReLU_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *noise, double lower, double upper, bool train, bool inplace) { THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise); gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (train && upper - lower > 1E-6) // e.g. 
if upper == lower, RReLU behaves like LeakyReLU { // multiply the gradient by the noise tensor if (inplace) { THCTensor_(cmul)(state, gradOutput, gradOutput, noise); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(cmul)(state, gradInput, gradOutput, noise); } } else { // use constant factor for negative input values const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2); if (inplace) { THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor<scalar_t>(negSlope)); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor<scalar_t>(negSlope)); } } THCTensor_(free)(state, gradOutput); } #endif
a28fdc10eafbca611d9c6d0d26e285bd376f6dcd.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THCUNN/generic/RReLU.cu" #else #include <THCUNN/common.h> #include <ATen/CUDAGenerator.h> void THNN_(RReLU_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *noise, double lower, double upper, bool train, bool inplace, void *generator) { THCUNN_assertSameGPU(state, 3, input, output, noise); auto gen = at::cuda::detail::getDefaultCUDAGenerator(); if (train) { input = THCTensor_(newContiguous)(state, input); THCTensor_(resizeAs)(state, noise, input); scalar_t *input_data = THCTensor_(data)(state, input); scalar_t *noise_data = THCTensor_(data)(state, noise); ptrdiff_t n = THCTensor_(nElement)(state, input); // philox offset calculation for grid-stride loop utilizing curand4 const uint32_t curand4_engine_calls = 4; dim3 grid = NUM_BLOCKS(n); uint64_t counter_offset = ((n - 1) / (BLOCK_SIZE * grid.x) + 1) * curand4_engine_calls; std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(counter_offset); } if (inplace) { rreluUpdateOutputTrain<<<grid, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( n, rng_engine_inputs, input_data, noise_data, input_data, lower, upper); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); scalar_t *output_data = THCTensor_(data)(state, output); rreluUpdateOutputTrain<<<grid, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( n, rng_engine_inputs, input_data, noise_data, output_data, lower, upper); } THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, input); } else { const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2); if (inplace) { THC_pointwiseApply1<scalar_t>(state, input, RReLUUpdateOutputEvalIP_functor<scalar_t>(negSlope)); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, RReLUUpdateOutputEval_functor<scalar_t>(negSlope)); } } } void THNN_(RReLU_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *noise, double lower, double upper, bool train, bool inplace) { THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise); gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU { // multiply the gradient by the noise tensor if (inplace) { THCTensor_(cmul)(state, gradOutput, gradOutput, noise); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(cmul)(state, gradInput, gradOutput, noise); } } else { // use constant factor for negative input values const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2); if (inplace) { THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor<scalar_t>(negSlope)); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor<scalar_t>(negSlope)); } } THCTensor_(free)(state, gradOutput); } #endif
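Both RReLU variants above size the Philox counter offset as ceil(n / (BLOCK_SIZE * grid.x)) * 4: the number of grid-stride iterations each thread runs, times the four engine calls a curand4/hiprand4 draw consumes per iteration. The helper below merely restates that arithmetic; BLOCK_SIZE and the grid size come from THCUNN macros not shown here, so the sample numbers are assumptions.

#include <cstdint>
#include <cstdio>

// Counter offset for a grid-stride loop that draws 4 values per curand4 call:
// iterations per thread = ceil(n / (block_size * num_blocks)), and each
// iteration advances the Philox counter by 4.
uint64_t philox_counter_offset(int64_t n, int block_size, int num_blocks)
{
    const uint64_t curand4_engine_calls = 4;
    uint64_t iterations = (n - 1) / ((int64_t)block_size * num_blocks) + 1;
    return iterations * curand4_engine_calls;
}

int main()
{
    // Hypothetical sizes: 1,000,000 elements, 256 threads/block, 200 blocks.
    // ceil(1e6 / 51200) = 20 iterations per thread -> offset 80.
    printf("%llu\n",
           (unsigned long long)philox_counter_offset(1000000, 256, 200));
    return 0;
}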
82706463bbc5b0294704d21890917cc0fd90029d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file alg5.cu * @brief CUDA device code for GPU-Efficient Recursive Filtering Algorithm 5 * @author Rodolfo Lima * @date September, 2011 */ //== INCLUDES ================================================================= #include <cmath> #include <cstdio> #include <cfloat> #include <cassert> #include <iostream> #include <algorithm> #include <symbol.h> #include <dvector.h> #include <extension.h> #include <gpufilter.h> #include <gpuconsts.cuh> #include <alg5.cuh> //== NAMESPACES =============================================================== namespace gpufilter { //== IMPLEMENTATION =========================================================== //-- Algorithm 5_1 Stage 1 ---------------------------------------------------- __global__ __launch_bounds__(WS*DW, ONB) void alg5_stage1( float *g_transp_pybar, float *g_transp_ezhat, float *g_ptucheck, float *g_etvtilde ) { int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y; // Each cuda block will work on two horizontally adjacent WSxWS // input data blocks, so allocate enough shared memory for these. __shared__ float s_block[WS*2][WS+1]; float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx], (*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx]; // Load data into shared memory float tu = ((m-c_border)*WS+tx+.5f)*c_inv_width, tv = ((n-c_border)*WS+ty+.5f)*c_inv_height; int i; #pragma unroll for (i=0; i<WS-(WS%DW); i+=DW) { **bdata = tex2D(t_in, tu, tv); bdata += DW; **bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv); bdata2 += DW; tv += DW*c_inv_height; } if (ty < WS%DW) { **bdata = tex2D(t_in, tu, tv); **bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv); } m += ty; // We use a transposed matrix for pybar and ezhat to have // coalesced memory accesses. This is the index for these // transposed buffers. 
g_transp_pybar += m*c_carry_height + n*WS + tx; g_transp_ezhat += m*c_carry_height + n*WS + tx; g_ptucheck += n*c_carry_width + m*WS + tx; g_etvtilde += n*c_carry_width + m*WS + tx; __syncthreads(); if (m >= c_m_size) return; float prev; if (ty < 2) { { // scan rows float *bdata = s_block[tx+ty*WS]; // calculate pybar, scan left -> right prev = *bdata++; #pragma unroll for (int j=1; j<WS; ++j, ++bdata) prev = *bdata -= prev*c_a1; *g_transp_pybar = prev*c_b0; // calculate ezhat, scan right -> left prev = *--bdata; --bdata; #pragma unroll for (int j=WS-2; j>=0; --j, --bdata) prev = *bdata -= prev*c_a1; *g_transp_ezhat = prev*(c_b0*c_b0); } { // scan columns // ty*WS makes this warp's bdata point to the right data float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty*WS][tx]; // calculate ptucheck, scan top -> down prev = **bdata++; #pragma unroll for (int i=1; i<WS; ++i, ++bdata) prev = **bdata -= prev*c_a1; *g_ptucheck = prev*c_b0*c_b0*c_b0; // calculate etvtilde, scan bottom -> up if (n > 0) { prev = **--bdata; --bdata; #pragma unroll for (int i=WS-2; i>=0; --i, --bdata) prev = **bdata - prev*c_a1; *g_etvtilde = prev*c_b0*c_b0*c_b0*c_b0; } } } } //-- Algorithm 5_1 Stage 2 and 3 ---------------------------------------------- __global__ __launch_bounds__(WS*DW, DNB) void alg5_stage2_3( float *g_transp_pybar, float *g_transp_ezhat ) { int tx = threadIdx.x, ty = threadIdx.y, n = blockIdx.y; __shared__ float s_transp_block[DW][WS]; float *bdata = &s_transp_block[ty][tx]; // P(ybar) -> P(y) processing -------------------------------------- if (c_m_size<=1) return; float *transp_pybar = g_transp_pybar + ty*c_carry_height + n*WS+tx; // first column-block // read P(ybar) *bdata = *transp_pybar; float py; // P(Y) __syncthreads(); if (ty == 0) { float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; // (24): P_m(y) = P_m(ybar) + A^b_F * P_{m-1}(y) py = **bdata++; #pragma unroll for (int m=1; m<blockDim.y; ++m, ++bdata) **bdata = py = **bdata + c_AbF*py; } __syncthreads(); // write P(y) if (ty > 0) // first one doesn't need fixing *transp_pybar = *bdata; transp_pybar += c_carry_height*blockDim.y; // middle column-blocks int m = blockDim.y; if (m == DW) { int mmax = c_m_size-(c_m_size%DW)-1; for (; m<mmax; m+=DW) { *bdata = *transp_pybar; __syncthreads(); if (ty == 0) { float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; #pragma unroll for (int dm=0; dm<DW; ++dm, ++bdata) **bdata = py = **bdata + c_AbF*py; } __syncthreads(); *transp_pybar = *bdata; transp_pybar += c_carry_height*DW; } } // remaining column-blocks if (m < c_m_size-1) { if (m+ty < c_m_size-1) *bdata = *transp_pybar; int remaining = c_m_size-1 - m; __syncthreads(); if (ty == 0) { float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; #pragma unroll for (int dm=0; dm<remaining; ++dm, ++bdata) **bdata = py = **bdata + c_AbF*py; } __syncthreads(); if (m+ty < c_m_size-1) *transp_pybar = *bdata; } // E(zhat) -> E(z) processing -------------------------------------- int idx = (c_m_size-1-ty)*c_carry_height + n*WS+tx; const float *transp_pm1y = g_transp_pybar + idx - c_carry_height; // last column-block float *transp_ezhat = g_transp_ezhat + idx; m = c_m_size-1; // all pybars must be updated! 
__syncthreads(); float ez; { *bdata = *transp_ezhat; if (m-ty > 0) *bdata += *transp_pm1y*c_HARB_AFP; __syncthreads(); if (ty == 0) { float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; ez = **bdata++; for (int dm=1; dm<blockDim.y; ++dm, ++bdata) **bdata = ez = **bdata + c_AbR*ez; } __syncthreads(); *transp_ezhat = *bdata; } transp_ezhat -= c_carry_height*blockDim.y; transp_pm1y -= c_carry_height*blockDim.y; // middle column-blocks m = c_m_size-1 - blockDim.y; if (blockDim.y == DW) { int mmin = c_m_size%DW; for (; m>=mmin; m-=DW) { if (m > 0) { *bdata = *transp_ezhat; if (m-ty > 0) *bdata += *transp_pm1y*c_HARB_AFP; __syncthreads(); if (ty == 0) { float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; #pragma unroll for (int dm=0; dm<DW; ++dm, ++bdata) **bdata = ez = **bdata + c_AbR*ez; } __syncthreads(); *transp_ezhat = *bdata; } transp_ezhat -= DW*c_carry_height; transp_pm1y -= DW*c_carry_height; } } // remaining column-blocks (except first column-block, it isn't needed) if (m > 0) { if (m-ty > 0) { *bdata = *transp_ezhat; if (m-ty > 0) *bdata += *transp_pm1y*c_HARB_AFP; } __syncthreads(); if (ty == 0) { int remaining = m; float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; // (24): P_m(y) = P_m(ybar) + A^b_F * P_{m-1}(y) #pragma unroll for (int dm=0; dm<remaining; ++dm, ++bdata) **bdata = ez = **bdata + c_AbR*ez; } __syncthreads(); if (m-ty > 0) *transp_ezhat = *bdata; } } //-- Algorithm 5_1 Stage 4 and 5 ---------------------------------------------- __global__ __launch_bounds__(WS*CHW, ONB) void alg5_stage4_5( float *g_ptucheck, float *g_etvtilde, const float *g_transp_py, const float *g_transp_ez ) { int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x; __shared__ float s_block[CHW][WS]; float *bdata = &s_block[ty][tx]; // P(ucheck) -> P(u) processing -------------------------------------- volatile __shared__ float s_block_RD_raw[CHW][WS/2+WS+1]; volatile float (*block_RD)[WS/2+WS+1] = (float (*)[WS/2+WS+1]) &s_block_RD_raw[0][WS/2]; if (ty < CHW) s_block_RD_raw[ty][tx] = 0; #define CALC_DOT(RES, V1, V2, last) \ block_RD[ty][tx] = V1*V2; \ block_RD[ty][tx] += block_RD[ty][tx-1]; \ block_RD[ty][tx] += block_RD[ty][tx-2]; \ block_RD[ty][tx] += block_RD[ty][tx-4]; \ block_RD[ty][tx] += block_RD[ty][tx-8]; \ block_RD[ty][tx] += block_RD[ty][tx-16]; \ float RES = block_RD[ty][last]; { float *ptucheck = g_ptucheck + m*WS+tx + ty*c_carry_width; // first row-block int idx = m*c_carry_height + ty*WS+tx; const float *transp_pm1y = g_transp_py + idx - c_carry_height, *transp_em1z = g_transp_ez + idx + c_carry_height; float ptu; if (ty < c_n_size-1) { // read P(ucheck) *bdata = *ptucheck; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_TAFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_TAFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } transp_pm1y += WS*blockDim.y; transp_em1z += WS*blockDim.y; __syncthreads(); if (ty == 0) { float (*bdata2)[WS] = (float (*)[WS]) bdata; ptu = **bdata2++; #pragma unroll for (int n=1; n<blockDim.y; ++n, ++bdata2) **bdata2 = ptu = **bdata2 + c_AbF*ptu; } __syncthreads(); // write P(u) *ptucheck = *bdata; } ptucheck += blockDim.y*c_carry_width; // middle row-blocks int n = blockDim.y; if (n == CHW) { int nmax = c_n_size-(c_n_size%CHW); for (; n<nmax; n+=CHW) { if (n < c_n_size-1) { *bdata = *ptucheck; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_TAFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_TAFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } transp_pm1y += 
WS*CHW; transp_em1z += WS*CHW; __syncthreads(); if (ty == 0) { float (*bdata2)[WS] = (float (*)[WS]) bdata; #pragma unroll for (int dn=0; dn<CHW; ++dn, ++bdata2) **bdata2 = ptu = **bdata2 + c_AbF*ptu; } __syncthreads(); *ptucheck = *bdata; } ptucheck += CHW*c_carry_width; } } // remaining row-blocks if (n < c_n_size-1) { if (n+ty < c_n_size-1) { *bdata = *ptucheck; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_TAFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_TAFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } } __syncthreads(); if (ty == 0) { int remaining = c_n_size-1-n; float (*bdata2)[WS] = (float (*)[WS]) bdata; #pragma unroll for (int dn=0; dn<remaining; ++dn, ++bdata2) **bdata2 = ptu = **bdata2 + c_AbF*ptu; } __syncthreads(); if (n+ty < c_n_size-1) *ptucheck = *bdata; } } // E(utilde) -> E(u) processing -------------------------------------- // last row-block int idx = (c_n_size-1-ty)*c_carry_width + m*WS+tx; float *etvtilde = g_etvtilde + idx; const float *ptmn1u = g_ptucheck + idx - c_carry_width; int transp_idx = m*c_carry_height + (c_n_size-1-ty)*WS+tx; const float *transp_pm1y = g_transp_py + transp_idx-c_carry_height; const float *transp_em1z = g_transp_ez + transp_idx+c_carry_height; // all ptuchecks must be updated! __syncthreads(); float etv; int n = c_n_size-1 - ty; { *bdata = *etvtilde; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } if (n > 0) *bdata += *ptmn1u*c_HARB_AFP; transp_pm1y -= WS*blockDim.y; transp_em1z -= WS*blockDim.y; ptmn1u -= c_carry_width*blockDim.y; __syncthreads(); if (ty == 0) { float (*bdata2)[WS] = (float (*)[WS]) bdata; etv = **bdata2++; #pragma unroll for (int dn=1; dn<blockDim.y; ++dn, ++bdata2) **bdata2 = etv = **bdata2 + c_AbR*etv; } __syncthreads(); *etvtilde = *bdata; etvtilde -= c_carry_width*blockDim.y; n -= blockDim.y; } // middle row-blocks if (blockDim.y == CHW) { int nmin = c_n_size%CHW; for (; n>=nmin; n-=CHW) { *bdata = *etvtilde; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } if (n > 0) *bdata += *ptmn1u*c_HARB_AFP; transp_pm1y -= WS*CHW; transp_em1z -= WS*CHW; ptmn1u -= CHW*c_carry_width; __syncthreads(); if (ty == 0) { float (*bdata2)[WS] = (float (*)[WS]) bdata; #pragma unroll for (int dn=0; dn<CHW; ++dn, ++bdata2) **bdata2 = etv = **bdata2 + c_AbR*etv; } __syncthreads(); *etvtilde = *bdata; etvtilde -= CHW*c_carry_width; } } // remaining row-blocks if (n+ty >= 0) { if (n > 0) { *bdata = *etvtilde + *ptmn1u*c_HARB_AFP; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } } __syncthreads(); if (ty == 0) { int remaining = n+1; float (*bdata2)[WS] = (float (*)[WS]) bdata; #pragma unroll for (int dn=0; dn<remaining; ++dn, ++bdata2) **bdata2 = etv = **bdata2 + c_AbR*etv; } __syncthreads(); if (n > 0) *etvtilde = *bdata; } #undef CALC_DOT } //-- Algorithm 5_1 Stage 6 ---------------------------------------------------- __global__ __launch_bounds__(WS*DW, CHB) void alg5_stage6( float *g_out, const float *g_transp_py, const float *g_transp_ez, const float *g_ptu, const float *g_etv ) { int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y; 
__shared__ float s_block[2*WS][WS+1]; __shared__ float s_py[2][WS], s_ez[2][WS], s_ptu[2][WS], s_etv[2][WS]; float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx], (*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx]; bool inside = m+1 >= c_border && m <= c_last_m && n >= c_border && n <= c_last_n; if (inside) { { // load data into shared memory float tu = ((m-c_border)*WS+tx + 0.5f)*c_inv_width, tv = ((n-c_border)*WS+ty + 0.5f)*c_inv_height; #pragma unroll for (int i=0; i<WS-(WS%DW); i+=DW) { **bdata = tex2D(t_in, tu, tv); bdata += DW; **bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv); bdata2 += DW; tv += DW*c_inv_height; } if (ty < WS%DW) { **bdata = tex2D(t_in, tu, tv); **bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv); } } if (ty < 2) { m += ty; if (m >= c_border && m <= c_last_m) { if (m > 0) s_py[ty][tx] = g_transp_py[(n*WS + tx) + (m-1)*c_carry_height] * c_inv_b0; else s_py[ty][tx] = 0; } } else if (ty < 4) { m += ty-2; if (m >= c_border && m <= c_last_m) { if (m < c_m_size-1) s_ez[ty-2][tx] = g_transp_ez[(n*WS + tx) + (m+1)*c_carry_height]; else s_ez[ty-2][tx] = 0; } } else if (ty < 6) { m += ty-4; if (m >= c_border && m <= c_last_m) { if (n > 0) s_ptu[ty-4][tx] = g_ptu[(m*WS + tx) + (n-1)*c_carry_width] * c_inv_b0; else s_ptu[ty-4][tx] = 0; } } else if (ty < 8) { m += ty-6; if (m >= c_border && m <= c_last_m) { if (n < c_n_size-1) s_etv[ty-6][tx] = g_etv[(m*WS + tx) + (n+1)*c_carry_width]; else s_etv[ty-6][tx] = 0; } } } __syncthreads(); if (!inside || m < c_border || m > c_last_m) return; if (ty < 2) { const float b0_2 = c_b0*c_b0; // scan rows { float *bdata = s_block[tx+ty*WS]; // calculate y --------------------- float prev = s_py[ty][tx]; #pragma unroll for (int j=0; j<WS; ++j, ++bdata) *bdata = prev = *bdata - prev*c_a1; // calculate z --------------------- prev = s_ez[ty][tx]; --bdata; #pragma unroll for (int j=WS-1; j>=0; --j, --bdata) *bdata = prev = *bdata*b0_2 - prev*c_a1; } // scan columns { float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty*WS][tx]; // calculate u --------------------- float prev = s_ptu[ty][tx]; #pragma unroll for (int i=0; i<WS; ++i, ++bdata) **bdata = prev = **bdata - prev*c_a1; // calculate v --------------------- int x = (m-c_border)*WS+tx; if (x >= c_width) return; prev = s_etv[ty][tx]; --bdata; int y = (n-c_border+1)*WS-1; if (y >= c_height) { int i; #pragma unroll for (i=y; i>=c_height; --i) prev = **bdata-- *b0_2 - prev*c_a1; float *out = g_out + (c_height-1)*c_width + x; #pragma unroll for (;i>=(n-c_border)*WS; --i) { *out = prev = **bdata-- *b0_2 - prev*c_a1; out -= c_width; } } else { float *out = g_out + y*c_width + x; #pragma unroll for (int i=WS-1; i>=0; --i) { *out = prev = **bdata-- *b0_2 - prev*c_a1; out -= c_width; } } } } } //-- Host --------------------------------------------------------------------- __host__ void prepare_alg5( alg_setup& algs, dvector<float>& d_out, dvector<float>& d_transp_pybar, dvector<float>& d_transp_ezhat, dvector<float>& d_ptucheck, dvector<float>& d_etvtilde, hipArray *& a_in, const float *h_in, const int& w, const int& h, const float& b0, const float& a1, const int& extb, const initcond& ic ) { up_constants_coefficients1( b0, a1 ); d_out.resize( w * h ); calc_alg_setup( algs, w, h, extb ); up_alg_setup( algs ); d_transp_pybar.resize( algs.m_size * algs.carry_height ); d_transp_ezhat.resize( algs.m_size * algs.carry_height ); d_ptucheck.resize( algs.n_size * algs.carry_width ); d_etvtilde.resize( algs.n_size * algs.carry_width ); d_transp_pybar.fill_zero(); d_transp_ezhat.fill_zero(); 
d_ptucheck.fill_zero(); d_etvtilde.fill_zero(); up_texture( a_in, h_in, w, h, ic ); } __host__ void alg5( dvector<float>& d_out, dvector<float>& d_transp_pybar, dvector<float>& d_transp_ezhat, dvector<float>& d_ptucheck, dvector<float>& d_etvtilde, const hipArray *a_in, const alg_setup& algs ) { dvector<float> d_transp_py, d_transp_ez, d_ptu, d_etv; hipBindTextureToArray( t_in, a_in ); hipLaunchKernelGGL(( alg5_stage1), dim3(dim3((algs.m_size+2-1)/2, algs.n_size)), dim3(dim3(WS, DW)) , 0, 0, d_transp_pybar, d_transp_ezhat, d_ptucheck, d_etvtilde ); hipLaunchKernelGGL(( alg5_stage2_3), dim3(dim3(1, algs.n_size)), dim3(dim3(WS, std::min<int>(algs.m_size, DW))) , 0, 0, d_transp_pybar, d_transp_ezhat ); swap(d_transp_pybar, d_transp_py); swap(d_transp_ezhat, d_transp_ez); hipLaunchKernelGGL(( alg5_stage4_5), dim3(dim3(algs.m_size, 1)), dim3(dim3(WS, std::min<int>(algs.n_size, CHW))) , 0, 0, d_ptucheck, d_etvtilde, d_transp_py, d_transp_ez ); swap(d_ptucheck, d_ptu); swap(d_etvtilde, d_etv); hipLaunchKernelGGL(( alg5_stage6), dim3(dim3((algs.m_size+2-1)/2, algs.n_size)), dim3(dim3(WS, DW)) , 0, 0, d_out, d_transp_py, d_transp_ez, d_ptu, d_etv ); swap(d_etv, d_etvtilde); swap(d_ptu, d_ptucheck); swap(d_transp_ez, d_transp_ezhat); swap(d_transp_py, d_transp_pybar); hipUnbindTexture( t_in ); } __host__ void alg5( float *h_inout, const int& w, const int& h, const float& b0, const float& a1, const int& extb, const initcond& ic ) { alg_setup algs; dvector<float> d_out; dvector<float> d_transp_pybar, d_transp_ezhat, d_ptucheck, d_etvtilde; hipArray *a_in; prepare_alg5( algs, d_out, d_transp_pybar, d_transp_ezhat, d_ptucheck, d_etvtilde, a_in, h_inout, w, h, b0, a1, extb, ic ); alg5( d_out, d_transp_pybar, d_transp_ezhat, d_ptucheck, d_etvtilde, a_in, algs ); d_out.copy_to( h_inout, w * h ); hipFreeArray( a_in ); } //============================================================================= } // namespace gpufilter //============================================================================= // vi: ai ts=4 sw=4
82706463bbc5b0294704d21890917cc0fd90029d.cu
/** * @file alg5.cu * @brief CUDA device code for GPU-Efficient Recursive Filtering Algorithm 5 * @author Rodolfo Lima * @date September, 2011 */ //== INCLUDES ================================================================= #include <cmath> #include <cstdio> #include <cfloat> #include <cassert> #include <iostream> #include <algorithm> #include <symbol.h> #include <dvector.h> #include <extension.h> #include <gpufilter.h> #include <gpuconsts.cuh> #include <alg5.cuh> //== NAMESPACES =============================================================== namespace gpufilter { //== IMPLEMENTATION =========================================================== //-- Algorithm 5_1 Stage 1 ---------------------------------------------------- __global__ __launch_bounds__(WS*DW, ONB) void alg5_stage1( float *g_transp_pybar, float *g_transp_ezhat, float *g_ptucheck, float *g_etvtilde ) { int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y; // Each cuda block will work on two horizontally adjacent WSxWS // input data blocks, so allocate enough shared memory for these. __shared__ float s_block[WS*2][WS+1]; float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx], (*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx]; // Load data into shared memory float tu = ((m-c_border)*WS+tx+.5f)*c_inv_width, tv = ((n-c_border)*WS+ty+.5f)*c_inv_height; int i; #pragma unroll for (i=0; i<WS-(WS%DW); i+=DW) { **bdata = tex2D(t_in, tu, tv); bdata += DW; **bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv); bdata2 += DW; tv += DW*c_inv_height; } if (ty < WS%DW) { **bdata = tex2D(t_in, tu, tv); **bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv); } m += ty; // We use a transposed matrix for pybar and ezhat to have // coalesced memory accesses. This is the index for these // transposed buffers. 
g_transp_pybar += m*c_carry_height + n*WS + tx; g_transp_ezhat += m*c_carry_height + n*WS + tx; g_ptucheck += n*c_carry_width + m*WS + tx; g_etvtilde += n*c_carry_width + m*WS + tx; __syncthreads(); if (m >= c_m_size) return; float prev; if (ty < 2) { { // scan rows float *bdata = s_block[tx+ty*WS]; // calculate pybar, scan left -> right prev = *bdata++; #pragma unroll for (int j=1; j<WS; ++j, ++bdata) prev = *bdata -= prev*c_a1; *g_transp_pybar = prev*c_b0; // calculate ezhat, scan right -> left prev = *--bdata; --bdata; #pragma unroll for (int j=WS-2; j>=0; --j, --bdata) prev = *bdata -= prev*c_a1; *g_transp_ezhat = prev*(c_b0*c_b0); } { // scan columns // ty*WS makes this warp's bdata point to the right data float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty*WS][tx]; // calculate ptucheck, scan top -> down prev = **bdata++; #pragma unroll for (int i=1; i<WS; ++i, ++bdata) prev = **bdata -= prev*c_a1; *g_ptucheck = prev*c_b0*c_b0*c_b0; // calculate etvtilde, scan bottom -> up if (n > 0) { prev = **--bdata; --bdata; #pragma unroll for (int i=WS-2; i>=0; --i, --bdata) prev = **bdata - prev*c_a1; *g_etvtilde = prev*c_b0*c_b0*c_b0*c_b0; } } } } //-- Algorithm 5_1 Stage 2 and 3 ---------------------------------------------- __global__ __launch_bounds__(WS*DW, DNB) void alg5_stage2_3( float *g_transp_pybar, float *g_transp_ezhat ) { int tx = threadIdx.x, ty = threadIdx.y, n = blockIdx.y; __shared__ float s_transp_block[DW][WS]; float *bdata = &s_transp_block[ty][tx]; // P(ybar) -> P(y) processing -------------------------------------- if (c_m_size<=1) return; float *transp_pybar = g_transp_pybar + ty*c_carry_height + n*WS+tx; // first column-block // read P(ybar) *bdata = *transp_pybar; float py; // P(Y) __syncthreads(); if (ty == 0) { float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; // (24): P_m(y) = P_m(ybar) + A^b_F * P_{m-1}(y) py = **bdata++; #pragma unroll for (int m=1; m<blockDim.y; ++m, ++bdata) **bdata = py = **bdata + c_AbF*py; } __syncthreads(); // write P(y) if (ty > 0) // first one doesn't need fixing *transp_pybar = *bdata; transp_pybar += c_carry_height*blockDim.y; // middle column-blocks int m = blockDim.y; if (m == DW) { int mmax = c_m_size-(c_m_size%DW)-1; for (; m<mmax; m+=DW) { *bdata = *transp_pybar; __syncthreads(); if (ty == 0) { float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; #pragma unroll for (int dm=0; dm<DW; ++dm, ++bdata) **bdata = py = **bdata + c_AbF*py; } __syncthreads(); *transp_pybar = *bdata; transp_pybar += c_carry_height*DW; } } // remaining column-blocks if (m < c_m_size-1) { if (m+ty < c_m_size-1) *bdata = *transp_pybar; int remaining = c_m_size-1 - m; __syncthreads(); if (ty == 0) { float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; #pragma unroll for (int dm=0; dm<remaining; ++dm, ++bdata) **bdata = py = **bdata + c_AbF*py; } __syncthreads(); if (m+ty < c_m_size-1) *transp_pybar = *bdata; } // E(zhat) -> E(z) processing -------------------------------------- int idx = (c_m_size-1-ty)*c_carry_height + n*WS+tx; const float *transp_pm1y = g_transp_pybar + idx - c_carry_height; // last column-block float *transp_ezhat = g_transp_ezhat + idx; m = c_m_size-1; // all pybars must be updated! 
__syncthreads(); float ez; { *bdata = *transp_ezhat; if (m-ty > 0) *bdata += *transp_pm1y*c_HARB_AFP; __syncthreads(); if (ty == 0) { float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; ez = **bdata++; for (int dm=1; dm<blockDim.y; ++dm, ++bdata) **bdata = ez = **bdata + c_AbR*ez; } __syncthreads(); *transp_ezhat = *bdata; } transp_ezhat -= c_carry_height*blockDim.y; transp_pm1y -= c_carry_height*blockDim.y; // middle column-blocks m = c_m_size-1 - blockDim.y; if (blockDim.y == DW) { int mmin = c_m_size%DW; for (; m>=mmin; m-=DW) { if (m > 0) { *bdata = *transp_ezhat; if (m-ty > 0) *bdata += *transp_pm1y*c_HARB_AFP; __syncthreads(); if (ty == 0) { float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; #pragma unroll for (int dm=0; dm<DW; ++dm, ++bdata) **bdata = ez = **bdata + c_AbR*ez; } __syncthreads(); *transp_ezhat = *bdata; } transp_ezhat -= DW*c_carry_height; transp_pm1y -= DW*c_carry_height; } } // remaining column-blocks (except first column-block, it isn't needed) if (m > 0) { if (m-ty > 0) { *bdata = *transp_ezhat; if (m-ty > 0) *bdata += *transp_pm1y*c_HARB_AFP; } __syncthreads(); if (ty == 0) { int remaining = m; float (*bdata)[WS] = (float (*)[WS]) &s_transp_block[0][tx]; // (24): P_m(y) = P_m(ybar) + A^b_F * P_{m-1}(y) #pragma unroll for (int dm=0; dm<remaining; ++dm, ++bdata) **bdata = ez = **bdata + c_AbR*ez; } __syncthreads(); if (m-ty > 0) *transp_ezhat = *bdata; } } //-- Algorithm 5_1 Stage 4 and 5 ---------------------------------------------- __global__ __launch_bounds__(WS*CHW, ONB) void alg5_stage4_5( float *g_ptucheck, float *g_etvtilde, const float *g_transp_py, const float *g_transp_ez ) { int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x; __shared__ float s_block[CHW][WS]; float *bdata = &s_block[ty][tx]; // P(ucheck) -> P(u) processing -------------------------------------- volatile __shared__ float s_block_RD_raw[CHW][WS/2+WS+1]; volatile float (*block_RD)[WS/2+WS+1] = (float (*)[WS/2+WS+1]) &s_block_RD_raw[0][WS/2]; if (ty < CHW) s_block_RD_raw[ty][tx] = 0; #define CALC_DOT(RES, V1, V2, last) \ block_RD[ty][tx] = V1*V2; \ block_RD[ty][tx] += block_RD[ty][tx-1]; \ block_RD[ty][tx] += block_RD[ty][tx-2]; \ block_RD[ty][tx] += block_RD[ty][tx-4]; \ block_RD[ty][tx] += block_RD[ty][tx-8]; \ block_RD[ty][tx] += block_RD[ty][tx-16]; \ float RES = block_RD[ty][last]; { float *ptucheck = g_ptucheck + m*WS+tx + ty*c_carry_width; // first row-block int idx = m*c_carry_height + ty*WS+tx; const float *transp_pm1y = g_transp_py + idx - c_carry_height, *transp_em1z = g_transp_ez + idx + c_carry_height; float ptu; if (ty < c_n_size-1) { // read P(ucheck) *bdata = *ptucheck; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_TAFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_TAFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } transp_pm1y += WS*blockDim.y; transp_em1z += WS*blockDim.y; __syncthreads(); if (ty == 0) { float (*bdata2)[WS] = (float (*)[WS]) bdata; ptu = **bdata2++; #pragma unroll for (int n=1; n<blockDim.y; ++n, ++bdata2) **bdata2 = ptu = **bdata2 + c_AbF*ptu; } __syncthreads(); // write P(u) *ptucheck = *bdata; } ptucheck += blockDim.y*c_carry_width; // middle row-blocks int n = blockDim.y; if (n == CHW) { int nmax = c_n_size-(c_n_size%CHW); for (; n<nmax; n+=CHW) { if (n < c_n_size-1) { *bdata = *ptucheck; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_TAFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_TAFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } transp_pm1y += 
WS*CHW; transp_em1z += WS*CHW; __syncthreads(); if (ty == 0) { float (*bdata2)[WS] = (float (*)[WS]) bdata; #pragma unroll for (int dn=0; dn<CHW; ++dn, ++bdata2) **bdata2 = ptu = **bdata2 + c_AbF*ptu; } __syncthreads(); *ptucheck = *bdata; } ptucheck += CHW*c_carry_width; } } // remaining row-blocks if (n < c_n_size-1) { if (n+ty < c_n_size-1) { *bdata = *ptucheck; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_TAFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_TAFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } } __syncthreads(); if (ty == 0) { int remaining = c_n_size-1-n; float (*bdata2)[WS] = (float (*)[WS]) bdata; #pragma unroll for (int dn=0; dn<remaining; ++dn, ++bdata2) **bdata2 = ptu = **bdata2 + c_AbF*ptu; } __syncthreads(); if (n+ty < c_n_size-1) *ptucheck = *bdata; } } // E(utilde) -> E(u) processing -------------------------------------- // last row-block int idx = (c_n_size-1-ty)*c_carry_width + m*WS+tx; float *etvtilde = g_etvtilde + idx; const float *ptmn1u = g_ptucheck + idx - c_carry_width; int transp_idx = m*c_carry_height + (c_n_size-1-ty)*WS+tx; const float *transp_pm1y = g_transp_py + transp_idx-c_carry_height; const float *transp_em1z = g_transp_ez + transp_idx+c_carry_height; // all ptuchecks must be updated! __syncthreads(); float etv; int n = c_n_size-1 - ty; { *bdata = *etvtilde; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } if (n > 0) *bdata += *ptmn1u*c_HARB_AFP; transp_pm1y -= WS*blockDim.y; transp_em1z -= WS*blockDim.y; ptmn1u -= c_carry_width*blockDim.y; __syncthreads(); if (ty == 0) { float (*bdata2)[WS] = (float (*)[WS]) bdata; etv = **bdata2++; #pragma unroll for (int dn=1; dn<blockDim.y; ++dn, ++bdata2) **bdata2 = etv = **bdata2 + c_AbR*etv; } __syncthreads(); *etvtilde = *bdata; etvtilde -= c_carry_width*blockDim.y; n -= blockDim.y; } // middle row-blocks if (blockDim.y == CHW) { int nmin = c_n_size%CHW; for (; n>=nmin; n-=CHW) { *bdata = *etvtilde; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } if (n > 0) *bdata += *ptmn1u*c_HARB_AFP; transp_pm1y -= WS*CHW; transp_em1z -= WS*CHW; ptmn1u -= CHW*c_carry_width; __syncthreads(); if (ty == 0) { float (*bdata2)[WS] = (float (*)[WS]) bdata; #pragma unroll for (int dn=0; dn<CHW; ++dn, ++bdata2) **bdata2 = etv = **bdata2 + c_AbR*etv; } __syncthreads(); *etvtilde = *bdata; etvtilde -= CHW*c_carry_width; } } // remaining row-blocks if (n+ty >= 0) { if (n > 0) { *bdata = *etvtilde + *ptmn1u*c_HARB_AFP; if (m < c_m_size-1) { CALC_DOT(dot, *transp_em1z, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARE[tx]; } if (m > 0) { CALC_DOT(dot, *transp_pm1y, c_HARB_AFB[tx], WS-1); *bdata += dot*c_ARB_AFP_T[tx]; } } __syncthreads(); if (ty == 0) { int remaining = n+1; float (*bdata2)[WS] = (float (*)[WS]) bdata; #pragma unroll for (int dn=0; dn<remaining; ++dn, ++bdata2) **bdata2 = etv = **bdata2 + c_AbR*etv; } __syncthreads(); if (n > 0) *etvtilde = *bdata; } #undef CALC_DOT } //-- Algorithm 5_1 Stage 6 ---------------------------------------------------- __global__ __launch_bounds__(WS*DW, CHB) void alg5_stage6( float *g_out, const float *g_transp_py, const float *g_transp_ez, const float *g_ptu, const float *g_etv ) { int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y; 
__shared__ float s_block[2*WS][WS+1]; __shared__ float s_py[2][WS], s_ez[2][WS], s_ptu[2][WS], s_etv[2][WS]; float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx], (*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx]; bool inside = m+1 >= c_border && m <= c_last_m && n >= c_border && n <= c_last_n; if (inside) { { // load data into shared memory float tu = ((m-c_border)*WS+tx + 0.5f)*c_inv_width, tv = ((n-c_border)*WS+ty + 0.5f)*c_inv_height; #pragma unroll for (int i=0; i<WS-(WS%DW); i+=DW) { **bdata = tex2D(t_in, tu, tv); bdata += DW; **bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv); bdata2 += DW; tv += DW*c_inv_height; } if (ty < WS%DW) { **bdata = tex2D(t_in, tu, tv); **bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv); } } if (ty < 2) { m += ty; if (m >= c_border && m <= c_last_m) { if (m > 0) s_py[ty][tx] = g_transp_py[(n*WS + tx) + (m-1)*c_carry_height] * c_inv_b0; else s_py[ty][tx] = 0; } } else if (ty < 4) { m += ty-2; if (m >= c_border && m <= c_last_m) { if (m < c_m_size-1) s_ez[ty-2][tx] = g_transp_ez[(n*WS + tx) + (m+1)*c_carry_height]; else s_ez[ty-2][tx] = 0; } } else if (ty < 6) { m += ty-4; if (m >= c_border && m <= c_last_m) { if (n > 0) s_ptu[ty-4][tx] = g_ptu[(m*WS + tx) + (n-1)*c_carry_width] * c_inv_b0; else s_ptu[ty-4][tx] = 0; } } else if (ty < 8) { m += ty-6; if (m >= c_border && m <= c_last_m) { if (n < c_n_size-1) s_etv[ty-6][tx] = g_etv[(m*WS + tx) + (n+1)*c_carry_width]; else s_etv[ty-6][tx] = 0; } } } __syncthreads(); if (!inside || m < c_border || m > c_last_m) return; if (ty < 2) { const float b0_2 = c_b0*c_b0; // scan rows { float *bdata = s_block[tx+ty*WS]; // calculate y --------------------- float prev = s_py[ty][tx]; #pragma unroll for (int j=0; j<WS; ++j, ++bdata) *bdata = prev = *bdata - prev*c_a1; // calculate z --------------------- prev = s_ez[ty][tx]; --bdata; #pragma unroll for (int j=WS-1; j>=0; --j, --bdata) *bdata = prev = *bdata*b0_2 - prev*c_a1; } // scan columns { float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty*WS][tx]; // calculate u --------------------- float prev = s_ptu[ty][tx]; #pragma unroll for (int i=0; i<WS; ++i, ++bdata) **bdata = prev = **bdata - prev*c_a1; // calculate v --------------------- int x = (m-c_border)*WS+tx; if (x >= c_width) return; prev = s_etv[ty][tx]; --bdata; int y = (n-c_border+1)*WS-1; if (y >= c_height) { int i; #pragma unroll for (i=y; i>=c_height; --i) prev = **bdata-- *b0_2 - prev*c_a1; float *out = g_out + (c_height-1)*c_width + x; #pragma unroll for (;i>=(n-c_border)*WS; --i) { *out = prev = **bdata-- *b0_2 - prev*c_a1; out -= c_width; } } else { float *out = g_out + y*c_width + x; #pragma unroll for (int i=WS-1; i>=0; --i) { *out = prev = **bdata-- *b0_2 - prev*c_a1; out -= c_width; } } } } } //-- Host --------------------------------------------------------------------- __host__ void prepare_alg5( alg_setup& algs, dvector<float>& d_out, dvector<float>& d_transp_pybar, dvector<float>& d_transp_ezhat, dvector<float>& d_ptucheck, dvector<float>& d_etvtilde, cudaArray *& a_in, const float *h_in, const int& w, const int& h, const float& b0, const float& a1, const int& extb, const initcond& ic ) { up_constants_coefficients1( b0, a1 ); d_out.resize( w * h ); calc_alg_setup( algs, w, h, extb ); up_alg_setup( algs ); d_transp_pybar.resize( algs.m_size * algs.carry_height ); d_transp_ezhat.resize( algs.m_size * algs.carry_height ); d_ptucheck.resize( algs.n_size * algs.carry_width ); d_etvtilde.resize( algs.n_size * algs.carry_width ); d_transp_pybar.fill_zero(); d_transp_ezhat.fill_zero(); 
d_ptucheck.fill_zero(); d_etvtilde.fill_zero(); up_texture( a_in, h_in, w, h, ic ); } __host__ void alg5( dvector<float>& d_out, dvector<float>& d_transp_pybar, dvector<float>& d_transp_ezhat, dvector<float>& d_ptucheck, dvector<float>& d_etvtilde, const cudaArray *a_in, const alg_setup& algs ) { dvector<float> d_transp_py, d_transp_ez, d_ptu, d_etv; cudaBindTextureToArray( t_in, a_in ); alg5_stage1<<< dim3((algs.m_size+2-1)/2, algs.n_size), dim3(WS, DW) >>>( d_transp_pybar, d_transp_ezhat, d_ptucheck, d_etvtilde ); alg5_stage2_3<<< dim3(1, algs.n_size), dim3(WS, std::min<int>(algs.m_size, DW)) >>>( d_transp_pybar, d_transp_ezhat ); swap(d_transp_pybar, d_transp_py); swap(d_transp_ezhat, d_transp_ez); alg5_stage4_5<<< dim3(algs.m_size, 1), dim3(WS, std::min<int>(algs.n_size, CHW)) >>>( d_ptucheck, d_etvtilde, d_transp_py, d_transp_ez ); swap(d_ptucheck, d_ptu); swap(d_etvtilde, d_etv); alg5_stage6<<< dim3((algs.m_size+2-1)/2, algs.n_size), dim3(WS, DW) >>>( d_out, d_transp_py, d_transp_ez, d_ptu, d_etv ); swap(d_etv, d_etvtilde); swap(d_ptu, d_ptucheck); swap(d_transp_ez, d_transp_ezhat); swap(d_transp_py, d_transp_pybar); cudaUnbindTexture( t_in ); } __host__ void alg5( float *h_inout, const int& w, const int& h, const float& b0, const float& a1, const int& extb, const initcond& ic ) { alg_setup algs; dvector<float> d_out; dvector<float> d_transp_pybar, d_transp_ezhat, d_ptucheck, d_etvtilde; cudaArray *a_in; prepare_alg5( algs, d_out, d_transp_pybar, d_transp_ezhat, d_ptucheck, d_etvtilde, a_in, h_inout, w, h, b0, a1, extb, ic ); alg5( d_out, d_transp_pybar, d_transp_ezhat, d_ptucheck, d_etvtilde, a_in, algs ); d_out.copy_to( h_inout, w * h ); cudaFreeArray( a_in ); } //============================================================================= } // namespace gpufilter //============================================================================= // vi: ai ts=4 sw=4
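alg5_stage1 and alg5_stage6 let one thread block process two horizontally adjacent WS×WS tiles, which is why the host code above sizes the grid with the ceiling division (m_size + 2 - 1) / 2. A minimal sketch of that launch geometry follows; the WS and DW values, the tile counts, and the toy kernel body are assumptions for illustration and are not the gpufilter kernels.

#include <cuda_runtime.h>
#include <cstdio>

const int WS = 32;   // tile width; assumed to match the warp-sized tiles above
const int DW = 8;    // thread rows per block; value assumed for illustration

// Toy kernel: like alg5_stage1, each block owns tiles m = blockIdx.x*2 and m+1,
// so the grid only needs ceil(m_size/2) block columns.
__global__ void touch_two_tiles(int* tile_hits, int m_size)
{
    if (threadIdx.x == 0 && threadIdx.y == 0) {
        int m = blockIdx.x * 2;
        if (m < m_size)     atomicAdd(&tile_hits[m], 1);
        if (m + 1 < m_size) atomicAdd(&tile_hits[m + 1], 1);
    }
}

int main()
{
    int m_size = 7, n_size = 5;                  // hypothetical tile counts
    dim3 grid((m_size + 2 - 1) / 2, n_size);     // ceil(m_size / 2) block columns
    dim3 block(WS, DW);

    int* d_hits;
    cudaMalloc(&d_hits, m_size * sizeof(int));
    cudaMemset(d_hits, 0, m_size * sizeof(int));
    touch_two_tiles<<<grid, block>>>(d_hits, m_size);
    cudaDeviceSynchronize();
    printf("grid = (%u, %u), block = (%u, %u)\n", grid.x, grid.y, block.x, block.y);

    cudaFree(d_hits);
    return 0;
}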
c42127eecc5e4c2c332f4e1b95246c161f1609ed.hip
// !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include <hip/hip_runtime.h>

const int blockCount = 60;
const int threadsPerBlock = 256;
const int radius = 3;
const int arraySize = blockCount * threadsPerBlock;
const int arraySizeWithHalos = arraySize + 2 * radius;

texture<int, hipTextureType1D, hipReadModeElementType> texRef;

__global__ void Stencil_1d(int* out)
{
    __shared__ int temp[threadsPerBlock + 2 * radius];
    int gindex = threadIdx.x + blockIdx.x * blockDim.x + radius;
    int lindex = threadIdx.x + radius;

    temp[lindex] = tex1D(texRef, gindex);
    if (threadIdx.x < radius)
    {
        temp[lindex - radius] = tex1D(texRef, gindex - radius);
        temp[lindex + threadsPerBlock] = tex1D(texRef, gindex + threadsPerBlock);
    }

    // Synchronize (ensure all the data is available)
    __syncthreads();

    // Apply the stencil
    int result = 0;
    for (int offset = -radius; offset <= radius; offset++)
        result += temp[lindex + offset];

    // Store the result
    out[gindex - radius] = result;
}

int main()
{
    int h_in[arraySizeWithHalos]; // add halos in the main input array too for simplicity in the kernel code
    int h_out[arraySize];

    for (int i = 0; i < arraySize; i++)
    {
        h_in[radius + i] = i + 1;
        h_out[i] = 0;
    }
    for (int i = 0; i < radius; i++)
    {
        h_in[i] = h_in[arraySizeWithHalos - i - 1] = 0;
    }

    int* d_out;
    hipMalloc(&d_out, arraySize * sizeof(int));
    hipMemcpy(d_out, h_out, arraySize * sizeof(int), hipMemcpyHostToDevice);

    // Bind the device input array to the texture reference
    hipArray_t d_in;
    hipChannelFormatDesc channel = hipCreateChannelDesc<int>();
    hipMallocArray(&d_in, &channel, arraySizeWithHalos, 1, hipArrayDefault);
    hipMemcpyToArray(d_in, 0, 0, h_in, arraySizeWithHalos * sizeof(int), hipMemcpyHostToDevice);
    hipBindTextureToArray(texRef, d_in, channel);

    hipLaunchKernelGGL(( Stencil_1d) , dim3(blockCount), dim3(threadsPerBlock) , 0, 0, d_out);

    hipMemcpy(h_out, d_out, arraySize * sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d_out);

    printf("Radius: %d\n", radius);
    printf("Input:\t\t|\t\tOutput:\n");
    for (int i = 0; i < arraySize; i++)
        printf("%d\t\t|\t\t%d\n", h_in[i + radius], h_out[i]);

    return 0;
}
c42127eecc5e4c2c332f4e1b95246c161f1609ed.cu
#include "stdio.h" #include <cuda_runtime.h> const int blockCount = 60; const int threadsPerBlock = 256; const int radius = 3; const int arraySize = blockCount * threadsPerBlock; const int arraySizeWithHalos = arraySize + 2 * radius; texture<int, cudaTextureType1D, cudaReadModeElementType> texRef; __global__ void Stencil_1d(int* out) { __shared__ int temp[threadsPerBlock + 2 * radius]; int gindex = threadIdx.x + blockIdx.x * blockDim.x + radius; int lindex = threadIdx.x + radius; temp[lindex] = tex1D(texRef, gindex); if (threadIdx.x < radius) { temp[lindex - radius] = tex1D(texRef, gindex - radius); temp[lindex + threadsPerBlock] = tex1D(texRef, gindex + threadsPerBlock); } // Synchronize (ensure all the data is available) __syncthreads(); // Apply the stencil int result = 0; for (int offset = -radius; offset <= radius; offset++) result += temp[lindex + offset]; // Store the result out[gindex - radius] = result; } int main() { int h_in[arraySizeWithHalos]; // add halos in the main input array too for simplicity in the kernel code int h_out[arraySize]; for (int i = 0; i < arraySize; i++) { h_in[radius + i] = i + 1; h_out[i] = 0; } for (int i = 0; i < radius; i++) { h_in[i] = h_in[arraySizeWithHalos - i - 1] = 0; } int* d_out; cudaMalloc(&d_out, arraySize * sizeof(int)); cudaMemcpy(d_out, h_out, arraySize * sizeof(int), cudaMemcpyHostToDevice); // Bind the device input array to the texture reference cudaArray_t d_in; cudaChannelFormatDesc channel = cudaCreateChannelDesc<int>(); cudaMallocArray(&d_in, &channel, arraySizeWithHalos, 1, cudaArrayDefault); cudaMemcpyToArray(d_in, 0, 0, h_in, arraySizeWithHalos * sizeof(int), cudaMemcpyHostToDevice); cudaBindTextureToArray(texRef, d_in, channel); Stencil_1d <<<blockCount, threadsPerBlock >>> (d_out); cudaMemcpy(h_out, d_out, arraySize * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_out); printf("Radius: %d\n", radius); printf("Input:\t\t|\t\tOutput:\n"); for (int i = 0; i < arraySize; i++) printf("%d\t\t|\t\t%d\n", h_in[i + radius], h_out[i]); return 0; }
a3527ab76875267d1b537c7939e3922b50f6c8b7.hip
// !!! This is a file automatically generated by hipify!!! // // This reduction code worked most of the time, but failed on memorial_large.exr // produced : // min is -3.109206, max is 2.350199, range is 5.459405 // should be : // min is -3.122315, max is 2.350199, range is 5.472514 // debugged how it failed - error due to running min and max reduction in a loop, // but not re-initializing num_elem_in, when running second loop. // num_elem_in had been shrinking. // Took a lot of debugging code, but realized issue was with the input when // I switched the order of the min max reductions and got the wrong answer for // the other operation. #include <hip/hip_runtime.h> #include "utils.h" #define MAX_THREADS_PER_BLOCK 32 //1024 //? __global__ void reduce_min_max_kernel(float* d_out, float* d_in, int array_len, bool use_min) /* Does a reduction of d_in into one final d_out array, where d_out is an array with length of the number of blocks in the kernel. Then only expectation is that blocks and threads are one dimensional. \Params: * array_len - length of d_in array * use_min - boolean to use minimum reduction operator if true, else use maximium. */ { // Set up shared memory extern __shared__ float input_array[]; int global_idx = blockIdx.x*blockDim.x + threadIdx.x; int th_idx = threadIdx.x; // If this thread index takes us outside of the array, fill it with // the first value of the actual global array. if (global_idx >= array_len) input_array[th_idx] = d_in[0]; else input_array[th_idx] = d_in[global_idx]; __syncthreads(); // syncs up all threads within the block. // Do reduction in shared memory. All elements in input_array are // filled (some with duplicate values from first element of global // input array which wont effect final result). for (int neighbor = 1; neighbor<=blockDim.x/2; neighbor *=2){ int skip = 2 * neighbor; if ((th_idx % skip) == 0) { if ((th_idx + neighbor) < blockDim.x) { if (use_min) input_array[th_idx] = min(input_array[th_idx], input_array[th_idx + neighbor]); else input_array[th_idx] = max(input_array[th_idx], input_array[th_idx + neighbor]); } } __syncthreads(); } // only thread 0 writes result for this block to d_out: if (th_idx == 0) { d_out[blockIdx.x] = input_array[0]; } } void reduce_min_max(const float* const d_input_array, unsigned num_elem_in, float& minimum, float& maximum) /* Split up array into blocks of MAX_THREADS_PER_BLOCK length each, and reduce (find extrema) of each block, writing the output of the block to a new d_out array. Then the new d_out array becomes the input array to perform reduction on, until the length of the d_out array is 1 element and extremum is found. 
*/ { // We can't change original array, so copy it here on device so that // we can modify it: float * d_in; const unsigned int num_elem_store = num_elem_in; for (int min_or_max = 0; min_or_max < 2; min_or_max++) { num_elem_in = num_elem_store; checkCudaErrors(hipMalloc((void**)&d_in, num_elem_in * sizeof(float))); checkCudaErrors(hipMemcpy(d_in, d_input_array, num_elem_in * sizeof(float), hipMemcpyDeviceToDevice)); int nthreads = MAX_THREADS_PER_BLOCK; int num_elem_out = (num_elem_in - 1) / MAX_THREADS_PER_BLOCK + 1; int nblocks = num_elem_out; while (true) { float * d_out; checkCudaErrors(hipMalloc((void**)&d_out, num_elem_out * sizeof(float))); reduce_min_max_kernel << <nblocks, nthreads, nthreads * sizeof(float) >> > (d_out, d_in, num_elem_in, (bool)min_or_max); checkCudaErrors(hipFree(d_in)); if (num_elem_out <= 1) { //check this // Copy output to h_out float* h_out = (float*)malloc(sizeof(float)); if (min_or_max) { checkCudaErrors(hipMemcpy(h_out, d_out, sizeof(float), hipMemcpyDeviceToHost)); minimum = h_out[0]; } else { checkCudaErrors(hipMemcpy(h_out, d_out, sizeof(float), hipMemcpyDeviceToHost)); maximum = h_out[0]; } free(h_out); checkCudaErrors(hipFree(d_out)); break; } // Now output array becomes new input array: num_elem_in = num_elem_out; num_elem_out = (num_elem_in - 1) / MAX_THREADS_PER_BLOCK + 1; nblocks = num_elem_out; d_in = d_out; } } return; } __global__ void shmem_reduce_kernel_min(float * d_out, const float * d_in, int arraysize) { extern __shared__ float sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; if (myId > arraysize) sdata[tid] = FLT_MAX; else sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] = min(sdata[tid], sdata[tid + s]); } __syncthreads(); } if (tid == 0) d_out[blockIdx.x] = sdata[0]; } __global__ void shmem_reduce_kernel_max(float * d_out, const float * d_in, int arraysize) { extern __shared__ float sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; if (myId > arraysize) sdata[tid] = FLT_MIN; else sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! 
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] = max(sdata[tid], sdata[tid + s]); } __syncthreads(); } if (tid == 0) d_out[blockIdx.x] = sdata[0]; } int nextpower(int in) { if (in > 1024 || in < 1) { printf("Array Size %d out of bounds!\n"); return(in); } if (in > 512) return 1024; if (in > 256) return 512; if (in > 128) return 256; if (in > 64) return 128; if (in > 32) return 64; if (in > 16) return 32; if (in > 8) return 16; if (in > 4) return 8; if (in > 2) return 4; if (in == 1) return 2; return(in); } //void reduce_min_max2(float * d_outmin, float * d_outmax, const float* const d_in, float * d_intermin, float * d_intermax, const size_t numRows, const size_t numCols) { void reduce_min_max2(float& d_outmin, float& d_outmax, const float* const d_in, const size_t numRows, const size_t numCols) { const int maxThreadsPerBlock = 1024; // BLOCKSIZE; float * d_out; checkCudaErrors(hipMalloc((void**)&d_out, sizeof(float))); float* h_out = (float*)malloc(sizeof(float)); int arraysize = numRows*numCols; int threads = maxThreadsPerBlock; // launch one thread for each block in prev step int blocks = numRows*numCols / maxThreadsPerBlock; blocks = nextpower(blocks); float * d_intermax; checkCudaErrors(hipMalloc((void**)&d_intermax, blocks * sizeof(float))); shmem_reduce_kernel_max << <blocks, threads, threads * sizeof(float) >> > (d_intermax, d_in, arraysize); threads = blocks; // launch one thread for each block in prev step actually move up to next power of 2 blocks = 1; shmem_reduce_kernel_max << <blocks, threads, threads * sizeof(float) >> > (d_out, d_intermax, arraysize/maxThreadsPerBlock); checkCudaErrors(hipMemcpy(h_out, d_out, sizeof(float), hipMemcpyDeviceToHost)); d_outmax = h_out[0]; threads = maxThreadsPerBlock; blocks = numRows*numCols / maxThreadsPerBlock; blocks = nextpower(blocks); float * d_intermin; checkCudaErrors(hipMalloc((void**)&d_intermin, blocks * sizeof(float))); shmem_reduce_kernel_min << <blocks, threads, threads * sizeof(float) >> > (d_intermin, d_in, arraysize); threads = blocks; // launch one thread for each block in prev step blocks = 1; printf("launching %d threads in last reduction of single block\n", threads); shmem_reduce_kernel_min << < blocks, threads, threads * sizeof(float) >> > (d_out, d_intermin, arraysize/maxThreadsPerBlock); checkCudaErrors(hipMemcpy(h_out, d_out, sizeof(float), hipMemcpyDeviceToHost)); d_outmin = h_out[0]; checkCudaErrors(hipFree(d_intermin)); free(h_out); checkCudaErrors(hipFree(d_intermax)); checkCudaErrors(hipFree(d_out)); }
a3527ab76875267d1b537c7939e3922b50f6c8b7.cu
// // This reduction code worked most of the time, but failed on memorial_large.exr // produced : // min is -3.109206, max is 2.350199, range is 5.459405 // should be : // min is -3.122315, max is 2.350199, range is 5.472514 // debugged how it failed - error due to running min and max reduction in a loop, // but not re-initializing num_elem_in, when running second loop. // num_elem_in had been shrinking. // Took a lot of debugging code, but realized issue was with the input when // I switched the order of the min max reductions and got the wrong answer for // the other operation. #include <cuda_runtime.h> #include "utils.h" #define MAX_THREADS_PER_BLOCK 32 //1024 //? __global__ void reduce_min_max_kernel(float* d_out, float* d_in, int array_len, bool use_min) /* Does a reduction of d_in into one final d_out array, where d_out is an array with length of the number of blocks in the kernel. Then only expectation is that blocks and threads are one dimensional. \Params: * array_len - length of d_in array * use_min - boolean to use minimum reduction operator if true, else use maximium. */ { // Set up shared memory extern __shared__ float input_array[]; int global_idx = blockIdx.x*blockDim.x + threadIdx.x; int th_idx = threadIdx.x; // If this thread index takes us outside of the array, fill it with // the first value of the actual global array. if (global_idx >= array_len) input_array[th_idx] = d_in[0]; else input_array[th_idx] = d_in[global_idx]; __syncthreads(); // syncs up all threads within the block. // Do reduction in shared memory. All elements in input_array are // filled (some with duplicate values from first element of global // input array which wont effect final result). for (int neighbor = 1; neighbor<=blockDim.x/2; neighbor *=2){ int skip = 2 * neighbor; if ((th_idx % skip) == 0) { if ((th_idx + neighbor) < blockDim.x) { if (use_min) input_array[th_idx] = min(input_array[th_idx], input_array[th_idx + neighbor]); else input_array[th_idx] = max(input_array[th_idx], input_array[th_idx + neighbor]); } } __syncthreads(); } // only thread 0 writes result for this block to d_out: if (th_idx == 0) { d_out[blockIdx.x] = input_array[0]; } } void reduce_min_max(const float* const d_input_array, unsigned num_elem_in, float& minimum, float& maximum) /* Split up array into blocks of MAX_THREADS_PER_BLOCK length each, and reduce (find extrema) of each block, writing the output of the block to a new d_out array. Then the new d_out array becomes the input array to perform reduction on, until the length of the d_out array is 1 element and extremum is found. 
*/ { // We can't change original array, so copy it here on device so that // we can modify it: float * d_in; const unsigned int num_elem_store = num_elem_in; for (int min_or_max = 0; min_or_max < 2; min_or_max++) { num_elem_in = num_elem_store; checkCudaErrors(cudaMalloc((void**)&d_in, num_elem_in * sizeof(float))); checkCudaErrors(cudaMemcpy(d_in, d_input_array, num_elem_in * sizeof(float), cudaMemcpyDeviceToDevice)); int nthreads = MAX_THREADS_PER_BLOCK; int num_elem_out = (num_elem_in - 1) / MAX_THREADS_PER_BLOCK + 1; int nblocks = num_elem_out; while (true) { float * d_out; checkCudaErrors(cudaMalloc((void**)&d_out, num_elem_out * sizeof(float))); reduce_min_max_kernel << <nblocks, nthreads, nthreads * sizeof(float) >> > (d_out, d_in, num_elem_in, (bool)min_or_max); checkCudaErrors(cudaFree(d_in)); if (num_elem_out <= 1) { //check this // Copy output to h_out float* h_out = (float*)malloc(sizeof(float)); if (min_or_max) { checkCudaErrors(cudaMemcpy(h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost)); minimum = h_out[0]; } else { checkCudaErrors(cudaMemcpy(h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost)); maximum = h_out[0]; } free(h_out); checkCudaErrors(cudaFree(d_out)); break; } // Now output array becomes new input array: num_elem_in = num_elem_out; num_elem_out = (num_elem_in - 1) / MAX_THREADS_PER_BLOCK + 1; nblocks = num_elem_out; d_in = d_out; } } return; } __global__ void shmem_reduce_kernel_min(float * d_out, const float * d_in, int arraysize) { extern __shared__ float sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; if (myId > arraysize) sdata[tid] = FLT_MAX; else sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] = min(sdata[tid], sdata[tid + s]); } __syncthreads(); } if (tid == 0) d_out[blockIdx.x] = sdata[0]; } __global__ void shmem_reduce_kernel_max(float * d_out, const float * d_in, int arraysize) { extern __shared__ float sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; if (myId > arraysize) sdata[tid] = FLT_MIN; else sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! 
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] = max(sdata[tid], sdata[tid + s]); } __syncthreads(); } if (tid == 0) d_out[blockIdx.x] = sdata[0]; } int nextpower(int in) { if (in > 1024 || in < 1) { printf("Array Size %d out of bounds!\n"); return(in); } if (in > 512) return 1024; if (in > 256) return 512; if (in > 128) return 256; if (in > 64) return 128; if (in > 32) return 64; if (in > 16) return 32; if (in > 8) return 16; if (in > 4) return 8; if (in > 2) return 4; if (in == 1) return 2; return(in); } //void reduce_min_max2(float * d_outmin, float * d_outmax, const float* const d_in, float * d_intermin, float * d_intermax, const size_t numRows, const size_t numCols) { void reduce_min_max2(float& d_outmin, float& d_outmax, const float* const d_in, const size_t numRows, const size_t numCols) { const int maxThreadsPerBlock = 1024; // BLOCKSIZE; float * d_out; checkCudaErrors(cudaMalloc((void**)&d_out, sizeof(float))); float* h_out = (float*)malloc(sizeof(float)); int arraysize = numRows*numCols; int threads = maxThreadsPerBlock; // launch one thread for each block in prev step int blocks = numRows*numCols / maxThreadsPerBlock; blocks = nextpower(blocks); float * d_intermax; checkCudaErrors(cudaMalloc((void**)&d_intermax, blocks * sizeof(float))); shmem_reduce_kernel_max << <blocks, threads, threads * sizeof(float) >> > (d_intermax, d_in, arraysize); threads = blocks; // launch one thread for each block in prev step actually move up to next power of 2 blocks = 1; shmem_reduce_kernel_max << <blocks, threads, threads * sizeof(float) >> > (d_out, d_intermax, arraysize/maxThreadsPerBlock); checkCudaErrors(cudaMemcpy(h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost)); d_outmax = h_out[0]; threads = maxThreadsPerBlock; blocks = numRows*numCols / maxThreadsPerBlock; blocks = nextpower(blocks); float * d_intermin; checkCudaErrors(cudaMalloc((void**)&d_intermin, blocks * sizeof(float))); shmem_reduce_kernel_min << <blocks, threads, threads * sizeof(float) >> > (d_intermin, d_in, arraysize); threads = blocks; // launch one thread for each block in prev step blocks = 1; printf("launching %d threads in last reduction of single block\n", threads); shmem_reduce_kernel_min << < blocks, threads, threads * sizeof(float) >> > (d_out, d_intermin, arraysize/maxThreadsPerBlock); checkCudaErrors(cudaMemcpy(h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost)); d_outmin = h_out[0]; checkCudaErrors(cudaFree(d_intermin)); free(h_out); checkCudaErrors(cudaFree(d_intermax)); checkCudaErrors(cudaFree(d_out)); }
552a7c84094ce2bc2b1cfda5ad6697353cdadb0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void getSum_kernel(float *g_idata, float *result, long long n) { //load shared_mem unsigned int THREAD_NUM = blockDim.x; extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; sdata[tid] = 0.0f; for (long long i=tid; i<n; i+=THREAD_NUM) sdata[tid] += g_idata[i]; __syncthreads(); unsigned int k = THREAD_NUM/2; while (k>0) { if (tid<k) sdata[tid]+=sdata[tid+k]; k = k/2; __syncthreads(); } if (tid == 0) result[0] = sdata[0]; // __syncthreads(); } __global__ void L2_norm_kernel(float *d_1, float *d_2) { long long i = blockIdx.z*blockDim.x*gridDim.x*gridDim.y + blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x; d_1[i] = (d_1[i]-d_2[i])*(d_1[i]-d_2[i]); } __global__ void TV_norm_kernel(float *d_TV, float *d_volume) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = blockIdx.y; int z = blockIdx.z; unsigned int i = z* M*N + y*M + x; unsigned int j = z* M*N + y*M + (x+1); unsigned int k = z* M*N + (y+1)*M + x; unsigned int l = (z+1)* M*N + y*M + x; if ((x<M-1)&&(y<N-1)&&(z<ZETA-1)) d_TV[i]=sqrt( (d_volume[i]-d_volume[j])*(d_volume[i]-d_volume[j])+(d_volume[i]-d_volume[k])*(d_volume[i]-d_volume[k])+(d_volume[i]-d_volume[l])*(d_volume[i]-d_volume[l]) ); else if ((x==M-1)&&(y<N-1)&&(z<ZETA-1)) d_TV[i]=sqrt( (d_volume[i]-d_volume[k])*(d_volume[i]-d_volume[k])+(d_volume[i]-d_volume[l])*(d_volume[i]-d_volume[l]) ); else if ((x<M-1)&&(y==N-1)&&(z<ZETA-1)) d_TV[i]=sqrt( (d_volume[i]-d_volume[j])*(d_volume[i]-d_volume[j])+(d_volume[i]-d_volume[l])*(d_volume[i]-d_volume[l]) ); else if ((x<M-1)&&(y<N-1)&&(z==ZETA-1)) d_TV[i]=sqrt( (d_volume[i]-d_volume[j])*(d_volume[i]-d_volume[j])+(d_volume[i]-d_volume[k])*(d_volume[i]-d_volume[k]) ); else if ((x==M-1)&&(y==N-1)&&(z<ZETA-1)) d_TV[i]=abs( d_volume[i]-d_volume[l]); else if ((x==M-1)&&(y<N-1)&&(z==ZETA-1)) d_TV[i]=abs( d_volume[i]-d_volume[k]); else if ((x<M-1)&&(y==N-1)&&(z==ZETA-1)) d_TV[i]=abs( d_volume[i]-d_volume[j]); } __global__ void substract_3d_kernel(float *d_1, float *d_2, float *d_result) { long i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x; d_result[i] = d_1[i] -d_2[i]; }
552a7c84094ce2bc2b1cfda5ad6697353cdadb0d.cu
__global__ void getSum_kernel(float *g_idata, float *result, long long n)
{
    //load shared_mem
    unsigned int THREAD_NUM = blockDim.x;
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    sdata[tid] = 0.0f;
    for (long long i=tid; i<n; i+=THREAD_NUM)
        sdata[tid] += g_idata[i];
    __syncthreads();

    unsigned int k = THREAD_NUM/2;
    while (k>0)
    {
        if (tid<k)
            sdata[tid]+=sdata[tid+k];
        k = k/2;
        __syncthreads();
    }

    if (tid == 0)
        result[0] = sdata[0];
    // __syncthreads();
}

__global__ void L2_norm_kernel(float *d_1, float *d_2)
{
    long long i = blockIdx.z*blockDim.x*gridDim.x*gridDim.y + blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
    d_1[i] = (d_1[i]-d_2[i])*(d_1[i]-d_2[i]);
}

__global__ void TV_norm_kernel(float *d_TV, float *d_volume)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = blockIdx.y;
    int z = blockIdx.z;

    unsigned int i = z* M*N + y*M + x;
    unsigned int j = z* M*N + y*M + (x+1);
    unsigned int k = z* M*N + (y+1)*M + x;
    unsigned int l = (z+1)* M*N + y*M + x;

    if ((x<M-1)&&(y<N-1)&&(z<ZETA-1))
        d_TV[i]=sqrt( (d_volume[i]-d_volume[j])*(d_volume[i]-d_volume[j])+(d_volume[i]-d_volume[k])*(d_volume[i]-d_volume[k])+(d_volume[i]-d_volume[l])*(d_volume[i]-d_volume[l]) );
    else if ((x==M-1)&&(y<N-1)&&(z<ZETA-1))
        d_TV[i]=sqrt( (d_volume[i]-d_volume[k])*(d_volume[i]-d_volume[k])+(d_volume[i]-d_volume[l])*(d_volume[i]-d_volume[l]) );
    else if ((x<M-1)&&(y==N-1)&&(z<ZETA-1))
        d_TV[i]=sqrt( (d_volume[i]-d_volume[j])*(d_volume[i]-d_volume[j])+(d_volume[i]-d_volume[l])*(d_volume[i]-d_volume[l]) );
    else if ((x<M-1)&&(y<N-1)&&(z==ZETA-1))
        d_TV[i]=sqrt( (d_volume[i]-d_volume[j])*(d_volume[i]-d_volume[j])+(d_volume[i]-d_volume[k])*(d_volume[i]-d_volume[k]) );
    else if ((x==M-1)&&(y==N-1)&&(z<ZETA-1))
        d_TV[i]=abs( d_volume[i]-d_volume[l]);
    else if ((x==M-1)&&(y<N-1)&&(z==ZETA-1))
        d_TV[i]=abs( d_volume[i]-d_volume[k]);
    else if ((x<M-1)&&(y==N-1)&&(z==ZETA-1))
        d_TV[i]=abs( d_volume[i]-d_volume[j]);
}

__global__ void substract_3d_kernel(float *d_1, float *d_2, float *d_result)
{
    long i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
    d_result[i] = d_1[i] -d_2[i];
}
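getSum_kernel above encodes its launch assumptions only implicitly: it expects a single block, a power-of-two blockDim.x for the halving loop, and a dynamic shared-memory allocation of blockDim.x floats. A minimal host-side driver under those assumptions — the main function, the array size, and the block width below are invented for illustration and are not part of the file above:

#include <cuda_runtime.h>
#include <cstdio>

// Kernel defined in the file above; declared here so the sketch is self-contained.
__global__ void getSum_kernel(float *g_idata, float *result, long long n);

int main()
{
    const long long n = 1 << 20;   // assumed problem size
    const int threads = 256;       // must be a power of two for the halving loop in the kernel

    float *h_data = new float[n];
    for (long long i = 0; i < n; ++i) h_data[i] = 1.0f;

    float *d_data, *d_result;
    cudaMalloc(&d_data, n * sizeof(float));
    cudaMalloc(&d_result, sizeof(float));
    cudaMemcpy(d_data, h_data, n * sizeof(float), cudaMemcpyHostToDevice);

    // One block only: each thread strides over the whole array, then the block reduces in shared memory.
    // The third launch argument sizes the extern __shared__ float sdata[] buffer (blockDim.x floats).
    getSum_kernel<<<1, threads, threads * sizeof(float)>>>(d_data, d_result, n);

    float sum = 0.0f;
    cudaMemcpy(&sum, d_result, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f (expected %lld)\n", sum, n);

    cudaFree(d_data);
    cudaFree(d_result);
    delete[] h_data;
    return 0;
}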
81b29272a3a26f5ffdba2667d24124b36a3a5f5b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file multi_sum_sq.cu * \brief vectorized sums of squares norm over multiple arrays operators * \author Clement Fuji Tsang, Andrei Ivanov, Moises Hernandez, Shuai Zheng */ #include "./multi_sum_sq-inl.h" #include <hipcub/hipcub.hpp> #define ILP 4 #define BLOCK_LIMIT 320 #define ARRAY_LIMIT 110 namespace mxnet { namespace op { // Shamelessly gotten from: // https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_apply.cuh // https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_l2norm_kernel.cu // https://github.com/NVIDIA/apex/blob/master/csrc/type_shim.h const int chunk_size = 32768; template <typename DType> struct MultiSumSqKernelParam { DType* addresses[ARRAY_LIMIT]; int sizes[ARRAY_LIMIT]; unsigned char block_to_tensor[BLOCK_LIMIT]; int block_to_chunk[BLOCK_LIMIT]; int max_chunks_per_tensor = -1; }; template <typename DType> __device__ __forceinline__ DType ReduceBlockIntoLanes(DType* x, DType val) { int tid = threadIdx.x; int block_size = blockDim.x; if (block_size >= 64) { x[tid] = val; __syncthreads(); } #pragma unroll for (int i = (block_size >> 1); i >= 64; i >>= 1) { if (tid < i) x[tid] = x[tid] + x[tid + i]; __syncthreads(); } DType final; if (tid < 32) { if (block_size >= 64) final = x[tid] + x[tid + 32]; else final = val; #pragma unroll for (int i = 16; i >= 1; i >>= 1) final = final + __shfl_down_sync(0xffffffff, final, i); } return final; } template <typename DType> __global__ void MultiSumSqKernel(int chunk_size, MultiSumSqKernelParam<DType> param, float* block_reductions, int start_tensor_id, float scale) { const int tensor_loc = param.block_to_tensor[blockIdx.x]; const int chunk_len = param.block_to_chunk[blockIdx.x] * chunk_size; const int n = param.sizes[tensor_loc] - chunk_len; const DType* x = param.addresses[tensor_loc] + chunk_len; const auto i_max = n <= chunk_size ? 
n : chunk_size; __shared__ float vals[512]; // Non-divergent exit condition for __syncthreads, not necessary here float val = 0; for (int i_start = 0; i_start < i_max; i_start += blockDim.x * ILP) { int i = i_start + threadIdx.x; #pragma unroll for (int ii = 0; ii < ILP && i < i_max; ++ii, i += blockDim.x) { auto incoming_val = static_cast<float>(x[i]); if (scale != 1.0f) { incoming_val *= scale; } val += incoming_val * incoming_val; } } const float final = ReduceBlockIntoLanes(vals, val); if (threadIdx.x == 0) { block_reductions[(start_tensor_id + tensor_loc) * param.max_chunks_per_tensor + param.block_to_chunk[blockIdx.x]] = final; } } template <typename DType> __global__ void GlobalReductionKernel(MultiSumSqKernelParam<DType> param, float* block_reductions, float* output) { __shared__ float vals[512]; float* reductions_this_tensor = block_reductions + blockIdx.x * param.max_chunks_per_tensor; float val = 0; for (int i = threadIdx.x; i < param.max_chunks_per_tensor; i += blockDim.x) val += reductions_this_tensor[i]; float final = ReduceBlockIntoLanes(vals, val); if (threadIdx.x == 0) output[blockIdx.x] = final; } template <> size_t GetRequiredStorageMultiSumSq<gpu>(const std::vector<TBlob>& inputs, int* param_max_chunks_per_tensor) { // find max num of chunks in tensors int max_chunks_per_tensor = -1; for (size_t t = 0; t < inputs.size(); t++) { int chunks_this_tensor = (inputs[t].shape_.Size() + chunk_size - 1) / chunk_size; if (chunks_this_tensor > max_chunks_per_tensor) max_chunks_per_tensor = chunks_this_tensor; } if (param_max_chunks_per_tensor != nullptr) *param_max_chunks_per_tensor = max_chunks_per_tensor; return inputs.size() * max_chunks_per_tensor * sizeof(float); } template <> void MultiSumSqRun<gpu>(const std::vector<TBlob>& inputs, int n_inputs, float* out_ptr, const OpContext& ctx, float scale) { const int block_size = 512; using namespace mxnet_op; auto s = ctx.get_stream<gpu>(); auto stream = mshadow::Stream<gpu>::GetStream(s); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { MultiSumSqKernelParam<DType> param; size_t workspace_size = GetRequiredStorageMultiSumSq<gpu>(inputs, &param.max_chunks_per_tensor); Tensor<gpu, 1, char> workspace = ctx.requested[multi_sum_sq::kTempSpace].get_space_typed<gpu, 1, char>( Shape1(workspace_size), s); Tensor<gpu, 1, float> block_reductions( reinterpret_cast<float*>(&workspace[0]), Shape1(n_inputs * param.max_chunks_per_tensor), s); CUDA_CALL(hipMemsetAsync( block_reductions.dptr_, 0, n_inputs * param.max_chunks_per_tensor * sizeof(float), stream)); int loc_block_info = 0; // position in param.block_to_tensor and param.block_to_chunck int loc_tensor_info = 0; // position in param.sizes and param.addresses int start_tensor_id = 0; for (int t = 0; t < n_inputs; t++, loc_tensor_info++) { // array index in inputs param.sizes[loc_tensor_info] = inputs[t].shape_.Size(); param.addresses[loc_tensor_info] = inputs[t].FlatTo2D<gpu, DType>(s).dptr_; const int chunks_this_tensor = (inputs[t].shape_.Size() - 1) / chunk_size; for (int chunk = 0; chunk <= chunks_this_tensor; ++chunk) { // array chunk index param.block_to_tensor[loc_block_info] = loc_tensor_info; param.block_to_chunk[loc_block_info] = chunk; loc_block_info++; const bool last_curr_chunk = chunk == chunks_this_tensor; const bool tensors_full = last_curr_chunk && loc_tensor_info == (ARRAY_LIMIT - 1); const bool blocks_full = (loc_block_info == BLOCK_LIMIT); const bool last_chunk = last_curr_chunk && t == n_inputs - 1; if (!(tensors_full || blocks_full || last_chunk)) continue; 
hipLaunchKernelGGL(( MultiSumSqKernel), dim3(loc_block_info), dim3(block_size), 0, stream, chunk_size, param, block_reductions.dptr_, start_tensor_id, scale); MSHADOW_CUDA_POST_KERNEL_CHECK(MultiSumSqKernel); loc_block_info = 0; if (last_curr_chunk) { // if you start from a new tensor loc_tensor_info = -1; start_tensor_id = t + 1; } else { // if you start from the same tensor param.sizes[0] = param.sizes[loc_tensor_info]; param.addresses[0] = param.addresses[loc_tensor_info]; loc_tensor_info = 0; start_tensor_id = t; } } } // Global reduction hipLaunchKernelGGL(( GlobalReductionKernel), dim3(n_inputs), dim3(block_size), 0, stream, param, block_reductions.dptr_, out_ptr); }); } NNVM_REGISTER_OP(multi_sum_sq).set_attr<FCompute>("FCompute<gpu>", MultiSumSq<gpu>); } // namespace op } // namespace mxnet
81b29272a3a26f5ffdba2667d24124b36a3a5f5b.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file multi_sum_sq.cu * \brief vectorized sums of squares norm over multiple arrays operators * \author Clement Fuji Tsang, Andrei Ivanov, Moises Hernandez, Shuai Zheng */ #include "./multi_sum_sq-inl.h" #include <cub/cub.cuh> #define ILP 4 #define BLOCK_LIMIT 320 #define ARRAY_LIMIT 110 namespace mxnet { namespace op { // Shamelessly gotten from: // https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_apply.cuh // https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_l2norm_kernel.cu // https://github.com/NVIDIA/apex/blob/master/csrc/type_shim.h const int chunk_size = 32768; template <typename DType> struct MultiSumSqKernelParam { DType* addresses[ARRAY_LIMIT]; int sizes[ARRAY_LIMIT]; unsigned char block_to_tensor[BLOCK_LIMIT]; int block_to_chunk[BLOCK_LIMIT]; int max_chunks_per_tensor = -1; }; template <typename DType> __device__ __forceinline__ DType ReduceBlockIntoLanes(DType* x, DType val) { int tid = threadIdx.x; int block_size = blockDim.x; if (block_size >= 64) { x[tid] = val; __syncthreads(); } #pragma unroll for (int i = (block_size >> 1); i >= 64; i >>= 1) { if (tid < i) x[tid] = x[tid] + x[tid + i]; __syncthreads(); } DType final; if (tid < 32) { if (block_size >= 64) final = x[tid] + x[tid + 32]; else final = val; #pragma unroll for (int i = 16; i >= 1; i >>= 1) final = final + __shfl_down_sync(0xffffffff, final, i); } return final; } template <typename DType> __global__ void MultiSumSqKernel(int chunk_size, MultiSumSqKernelParam<DType> param, float* block_reductions, int start_tensor_id, float scale) { const int tensor_loc = param.block_to_tensor[blockIdx.x]; const int chunk_len = param.block_to_chunk[blockIdx.x] * chunk_size; const int n = param.sizes[tensor_loc] - chunk_len; const DType* x = param.addresses[tensor_loc] + chunk_len; const auto i_max = n <= chunk_size ? 
n : chunk_size; __shared__ float vals[512]; // Non-divergent exit condition for __syncthreads, not necessary here float val = 0; for (int i_start = 0; i_start < i_max; i_start += blockDim.x * ILP) { int i = i_start + threadIdx.x; #pragma unroll for (int ii = 0; ii < ILP && i < i_max; ++ii, i += blockDim.x) { auto incoming_val = static_cast<float>(x[i]); if (scale != 1.0f) { incoming_val *= scale; } val += incoming_val * incoming_val; } } const float final = ReduceBlockIntoLanes(vals, val); if (threadIdx.x == 0) { block_reductions[(start_tensor_id + tensor_loc) * param.max_chunks_per_tensor + param.block_to_chunk[blockIdx.x]] = final; } } template <typename DType> __global__ void GlobalReductionKernel(MultiSumSqKernelParam<DType> param, float* block_reductions, float* output) { __shared__ float vals[512]; float* reductions_this_tensor = block_reductions + blockIdx.x * param.max_chunks_per_tensor; float val = 0; for (int i = threadIdx.x; i < param.max_chunks_per_tensor; i += blockDim.x) val += reductions_this_tensor[i]; float final = ReduceBlockIntoLanes(vals, val); if (threadIdx.x == 0) output[blockIdx.x] = final; } template <> size_t GetRequiredStorageMultiSumSq<gpu>(const std::vector<TBlob>& inputs, int* param_max_chunks_per_tensor) { // find max num of chunks in tensors int max_chunks_per_tensor = -1; for (size_t t = 0; t < inputs.size(); t++) { int chunks_this_tensor = (inputs[t].shape_.Size() + chunk_size - 1) / chunk_size; if (chunks_this_tensor > max_chunks_per_tensor) max_chunks_per_tensor = chunks_this_tensor; } if (param_max_chunks_per_tensor != nullptr) *param_max_chunks_per_tensor = max_chunks_per_tensor; return inputs.size() * max_chunks_per_tensor * sizeof(float); } template <> void MultiSumSqRun<gpu>(const std::vector<TBlob>& inputs, int n_inputs, float* out_ptr, const OpContext& ctx, float scale) { const int block_size = 512; using namespace mxnet_op; auto s = ctx.get_stream<gpu>(); auto stream = mshadow::Stream<gpu>::GetStream(s); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { MultiSumSqKernelParam<DType> param; size_t workspace_size = GetRequiredStorageMultiSumSq<gpu>(inputs, &param.max_chunks_per_tensor); Tensor<gpu, 1, char> workspace = ctx.requested[multi_sum_sq::kTempSpace].get_space_typed<gpu, 1, char>( Shape1(workspace_size), s); Tensor<gpu, 1, float> block_reductions( reinterpret_cast<float*>(&workspace[0]), Shape1(n_inputs * param.max_chunks_per_tensor), s); CUDA_CALL(cudaMemsetAsync( block_reductions.dptr_, 0, n_inputs * param.max_chunks_per_tensor * sizeof(float), stream)); int loc_block_info = 0; // position in param.block_to_tensor and param.block_to_chunck int loc_tensor_info = 0; // position in param.sizes and param.addresses int start_tensor_id = 0; for (int t = 0; t < n_inputs; t++, loc_tensor_info++) { // array index in inputs param.sizes[loc_tensor_info] = inputs[t].shape_.Size(); param.addresses[loc_tensor_info] = inputs[t].FlatTo2D<gpu, DType>(s).dptr_; const int chunks_this_tensor = (inputs[t].shape_.Size() - 1) / chunk_size; for (int chunk = 0; chunk <= chunks_this_tensor; ++chunk) { // array chunk index param.block_to_tensor[loc_block_info] = loc_tensor_info; param.block_to_chunk[loc_block_info] = chunk; loc_block_info++; const bool last_curr_chunk = chunk == chunks_this_tensor; const bool tensors_full = last_curr_chunk && loc_tensor_info == (ARRAY_LIMIT - 1); const bool blocks_full = (loc_block_info == BLOCK_LIMIT); const bool last_chunk = last_curr_chunk && t == n_inputs - 1; if (!(tensors_full || blocks_full || last_chunk)) continue; 
MultiSumSqKernel<<<loc_block_info, block_size, 0, stream>>>( chunk_size, param, block_reductions.dptr_, start_tensor_id, scale); MSHADOW_CUDA_POST_KERNEL_CHECK(MultiSumSqKernel); loc_block_info = 0; if (last_curr_chunk) { // if you start from a new tensor loc_tensor_info = -1; start_tensor_id = t + 1; } else { // if you start from the same tensor param.sizes[0] = param.sizes[loc_tensor_info]; param.addresses[0] = param.addresses[loc_tensor_info]; loc_tensor_info = 0; start_tensor_id = t; } } } // Global reduction GlobalReductionKernel<<<n_inputs, block_size, 0, stream>>>( param, block_reductions.dptr_, out_ptr); }); } NNVM_REGISTER_OP(multi_sum_sq).set_attr<FCompute>("FCompute<gpu>", MultiSumSq<gpu>); } // namespace op } // namespace mxnet
75bfdb144332cd21583e054cfebdac1797abf68d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define M 3 #define N 3 #define P 3 __global__ void kernel(float*,float*,float*); void random_floats(float*,int); void print_matrix(float*,int,int); int main(int argc,char** argv) { /** * Init all variables */ int a_size = sizeof(float)*M*N, b_size = sizeof(float)*N*P, result_size = sizeof(float)*M*P; float a[] = {1,2,3,4,5,6,7,8,9}, b[] = {9,8,7,6,5,4,3,2,1}, answer[] = {30,24,18,84,69,54,138,114,90}, *result = (float*)malloc(result_size), *d_a, *d_b, *d_result; /** * Setup device memory */ hipMalloc((void**)&d_a,a_size); hipMalloc((void**)&d_b,b_size); hipMalloc((void**)&d_result,result_size); hipMemcpy(d_a,a,a_size,hipMemcpyHostToDevice); hipMemcpy(d_b,b,b_size,hipMemcpyHostToDevice); /** * Start GPU */ hipLaunchKernelGGL(( kernel), dim3(P),dim3(M), 0, 0, d_a,d_b,d_result); /** * Copy results back to host */ hipMemcpy(result,d_result,sizeof(float)* M * P,hipMemcpyDeviceToHost); /** * Print results */ printf("Result: \n"); print_matrix(result,M,P); printf("Expected: \n"); print_matrix(answer,M,P); /** * Cleanup memory */ hipFree(d_a); hipFree(d_b); hipFree(d_result); free(result); return 0; } void print_matrix(float *a,int cols,int rows) { int i,j; for(i=0;i<cols;i++) { for(j=0;j<rows;j++) printf("%f ",a[i*M+j]); printf("\n"); } } __global__ void kernel(float *a,float *b,float *result) { bool extra_a; int row = blockIdx.x, col = threadIdx.x, a_count, offset, i; /** * Allocate shared memory */ __shared__ float local_a[M]; __shared__ float local_b[N*P]; /** * Each thread is responsible for loading: * 1. An entire column from table b * 2. Thread 0 loads row from a */ extra_a = M%blockDim.x>0&&M%blockDim.x<threadIdx.x; a_count = (extra_a)?M/blockDim.x+1:M/blockDim.x; offset = (extra_a)?a_count*threadIdx.x:a_count*threadIdx.x+M%blockDim.x; for(i=0;i<a_count;i++) local_a[offset+i] = a[row*M+offset+i]; for(i=0;i<P;i++) { offset = i*N+threadIdx.x; local_b[offset] = b[offset]; } __syncthreads(); /** * Computer cell value */ for(result[row*M+col]=0,i=0;i<N;i++) result[row*M+col] += local_a[i] * local_b[i*N+col]; } void random_floats(float* a, int size) { int i; for(i=0;i<size;i++) a[i] = rand() % 8 + 1; //generate a number betwee 1 and 9 }
75bfdb144332cd21583e054cfebdac1797abf68d.cu
#include <stdio.h> #define M 3 #define N 3 #define P 3 __global__ void kernel(float*,float*,float*); void random_floats(float*,int); void print_matrix(float*,int,int); int main(int argc,char** argv) { /** * Init all variables */ int a_size = sizeof(float)*M*N, b_size = sizeof(float)*N*P, result_size = sizeof(float)*M*P; float a[] = {1,2,3,4,5,6,7,8,9}, b[] = {9,8,7,6,5,4,3,2,1}, answer[] = {30,24,18,84,69,54,138,114,90}, *result = (float*)malloc(result_size), *d_a, *d_b, *d_result; /** * Setup device memory */ cudaMalloc((void**)&d_a,a_size); cudaMalloc((void**)&d_b,b_size); cudaMalloc((void**)&d_result,result_size); cudaMemcpy(d_a,a,a_size,cudaMemcpyHostToDevice); cudaMemcpy(d_b,b,b_size,cudaMemcpyHostToDevice); /** * Start GPU */ kernel<<<P,M>>>(d_a,d_b,d_result); /** * Copy results back to host */ cudaMemcpy(result,d_result,sizeof(float)* M * P,cudaMemcpyDeviceToHost); /** * Print results */ printf("Result: \n"); print_matrix(result,M,P); printf("Expected: \n"); print_matrix(answer,M,P); /** * Cleanup memory */ cudaFree(d_a); cudaFree(d_b); cudaFree(d_result); free(result); return 0; } void print_matrix(float *a,int cols,int rows) { int i,j; for(i=0;i<cols;i++) { for(j=0;j<rows;j++) printf("%f ",a[i*M+j]); printf("\n"); } } __global__ void kernel(float *a,float *b,float *result) { bool extra_a; int row = blockIdx.x, col = threadIdx.x, a_count, offset, i; /** * Allocate shared memory */ __shared__ float local_a[M]; __shared__ float local_b[N*P]; /** * Each thread is responsible for loading: * 1. An entire column from table b * 2. Thread 0 loads row from a */ extra_a = M%blockDim.x>0&&M%blockDim.x<threadIdx.x; a_count = (extra_a)?M/blockDim.x+1:M/blockDim.x; offset = (extra_a)?a_count*threadIdx.x:a_count*threadIdx.x+M%blockDim.x; for(i=0;i<a_count;i++) local_a[offset+i] = a[row*M+offset+i]; for(i=0;i<P;i++) { offset = i*N+threadIdx.x; local_b[offset] = b[offset]; } __syncthreads(); /** * Computer cell value */ for(result[row*M+col]=0,i=0;i<N;i++) result[row*M+col] += local_a[i] * local_b[i*N+col]; } void random_floats(float* a, int size) { int i; for(i=0;i<size;i++) a[i] = rand() % 8 + 1; //generate a number betwee 1 and 9 }
26b7e94d4ea82fbe78958e2b4522b2c5d029f1f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define DIMENSIONS 2 #define GPU_DEVICE_ZERO 0 __global__ void distanceArrCalc(int pointsCounter, int threads, double *distanceFromPointToCluster, double *pointsInGpu, double *clustersInGpu) { /** This Function computes distances. Every index is a point. Every value inside an index is a distance. **/ double distanceX = 0; double distanceY= 0; int threadsLeft=pointsCounter % blockDim.x; if ((threadsLeft > threadIdx.x) || (blockIdx.x+1 != gridDim.x)) { int offsetPointIndex=(blockIdx.x * threads + threadIdx.x)*DIMENSIONS; int offsetClusterIndexForPoint=threadIdx.y * DIMENSIONS; //calc X double a=pointsInGpu[offsetPointIndex]; double b= clustersInGpu[offsetClusterIndexForPoint]; distanceX = (a - b); distanceX*=distanceX; //calc Y a=pointsInGpu[offsetPointIndex+1]; b= clustersInGpu[offsetClusterIndexForPoint+1]; distanceY =(a - b); distanceY*=distanceY; double totalDistance=sqrt(distanceY+distanceX); int currentPointIndexY = pointsCounter*threadIdx.y; int currentPointIndexX=(blockIdx.x * threads + threadIdx.x); int pointIndex=currentPointIndexY+currentPointIndexX; distanceFromPointToCluster[pointIndex] = totalDistance; } }
26b7e94d4ea82fbe78958e2b4522b2c5d029f1f5.cu
#include "includes.h" #define DIMENSIONS 2 #define GPU_DEVICE_ZERO 0 __global__ void distanceArrCalc(int pointsCounter, int threads, double *distanceFromPointToCluster, double *pointsInGpu, double *clustersInGpu) { /** This Function computes distances. Every index is a point. Every value inside an index is a distance. **/ double distanceX = 0; double distanceY= 0; int threadsLeft=pointsCounter % blockDim.x; if ((threadsLeft > threadIdx.x) || (blockIdx.x+1 != gridDim.x)) { int offsetPointIndex=(blockIdx.x * threads + threadIdx.x)*DIMENSIONS; int offsetClusterIndexForPoint=threadIdx.y * DIMENSIONS; //calc X double a=pointsInGpu[offsetPointIndex]; double b= clustersInGpu[offsetClusterIndexForPoint]; distanceX = (a - b); distanceX*=distanceX; //calc Y a=pointsInGpu[offsetPointIndex+1]; b= clustersInGpu[offsetClusterIndexForPoint+1]; distanceY =(a - b); distanceY*=distanceY; double totalDistance=sqrt(distanceY+distanceX); int currentPointIndexY = pointsCounter*threadIdx.y; int currentPointIndexX=(blockIdx.x * threads + threadIdx.x); int pointIndex=currentPointIndexY+currentPointIndexX; distanceFromPointToCluster[pointIndex] = totalDistance; } }
cc4511e9ddd3057fe6189c21cb215dd862717cfe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Example 4.2.2 - Julia Set #include "../common/book.h" #include "../common/cpu_bitmap.h" #include <stdio.h> #define DIM 512 struct hipComplex { float r; float i; __device__ hipComplex(float a, float b) : r(a), i(b) {} __device__ float magnitude2(void) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r*a.r, i + a.i); } }; __device__ int julia(int x, int y) { const float scale = 1.5; float jx = scale * (float)(DIM / 2 - x) / (DIM / 2); float jy = scale * (float)(DIM / 2 - y) / (DIM / 2); hipComplex c(-0.8, 0.156); hipComplex a(jx, jy); int i = 0; for (i = 0; i < 200; i++) { a = a*a + c; if (a.magnitude2() > 1000) return 0; } return 1; } __global__ void kernel(unsigned char *ptr) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; int juliaValue = julia(x, y); ptr[offset * 4 + 0] = 255 * juliaValue; ptr[offset * 4 + 1] = 0; ptr[offset * 4 + 2] = 0; ptr[offset * 4 + 3] = 255; } // globals needed by the update routine struct DataBlock { unsigned char *dev_bitmap; }; int main(void) { DataBlock data; CPUBitmap bitmap(DIM, DIM, &data); unsigned char *dev_bitmap; HANDLE_ERROR(hipMalloc((void**)&dev_bitmap, bitmap.image_size())); dim3 grid(DIM, DIM); kernel << <grid, 1 >> > (dev_bitmap); HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost)); HANDLE_ERROR(hipFree(dev_bitmap)); bitmap.display_and_exit(); }
cc4511e9ddd3057fe6189c21cb215dd862717cfe.cu
//Example 4.2.2 - Julia Set
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#include <stdio.h>

#define DIM 512

struct cuComplex {
    float r;
    float i;
    __device__ cuComplex(float a, float b) : r(a), i(b) {}
    __device__ float magnitude2(void) {
        return r * r + i * i;
    }
    __device__ cuComplex operator*(const cuComplex& a) {
        return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
    }
    __device__ cuComplex operator+(const cuComplex& a) {
        return cuComplex(r + a.r, i + a.i); // complex addition: real parts and imaginary parts add
    }
};

__device__ int julia(int x, int y) {
    const float scale = 1.5;
    float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
    float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);

    cuComplex c(-0.8, 0.156);
    cuComplex a(jx, jy);

    int i = 0;
    for (i = 0; i < 200; i++) {
        a = a*a + c;
        if (a.magnitude2() > 1000)
            return 0;
    }
    return 1;
}

__global__ void kernel(unsigned char *ptr) {
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x + y * gridDim.x;

    int juliaValue = julia(x, y);
    ptr[offset * 4 + 0] = 255 * juliaValue;
    ptr[offset * 4 + 1] = 0;
    ptr[offset * 4 + 2] = 0;
    ptr[offset * 4 + 3] = 255;
}

// globals needed by the update routine
struct DataBlock {
    unsigned char *dev_bitmap;
};

int main(void) {
    DataBlock data;
    CPUBitmap bitmap(DIM, DIM, &data);
    unsigned char *dev_bitmap;

    HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap, bitmap.image_size()));

    dim3 grid(DIM, DIM);
    kernel << <grid, 1 >> > (dev_bitmap);

    HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
    HANDLE_ERROR(cudaFree(dev_bitmap));

    bitmap.display_and_exit();
}
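Taken together, the pairs in this section show how mechanical the hipify translation is: each cuda* runtime call is renamed to its hip* counterpart and the triple-chevron launch becomes hipLaunchKernelGGL, while kernels, indexing, and control flow are left untouched. A minimal standalone illustration of that mapping — the fill kernel and its sizes are invented for this sketch and do not come from any file above; the corresponding hipify output is shown in the comments:

#include <cuda_runtime.h>
#include <cstdio>

__global__ void fill(int *out, int value, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}

int main()
{
    const int n = 1024;
    int *d_out;

    cudaMalloc(&d_out, n * sizeof(int));               // hipify: hipMalloc(&d_out, n * sizeof(int));

    // hipify rewrites the triple-chevron launch as:
    //   hipLaunchKernelGGL(fill, dim3((n + 255) / 256), dim3(256), 0, 0, d_out, 7, n);
    fill<<<(n + 255) / 256, 256>>>(d_out, 7, n);

    int h_first = 0;
    cudaMemcpy(&h_first, d_out, sizeof(int), cudaMemcpyDeviceToHost);  // hipify: hipMemcpy(..., hipMemcpyDeviceToHost)
    cudaFree(d_out);                                    // hipify: hipFree(d_out);

    printf("first element = %d\n", h_first);
    return 0;
}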