Dataset columns:
  hip_filename   string, lengths 5 to 84
  hip_content    string, lengths 79 to 9.69M
  cuda_filename  string, lengths 4 to 83
  cuda_content   string, lengths 19 to 9.69M
e169007e8eea29389a4eb5f17b38aa75fa44950e.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================================================================= Adapted from https://github.com/faebstn96/trainable-joint-bilateral-filter-source which has the following license... https://github.com/faebstn96/trainable-joint-bilateral-filter-source/blob/main/LICENSE Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "trainable_joint_bilateral.h" //#include "../utils/cuda_error_check.h" #include "utils/meta_macros.h" #include "utils/tensor_description.h" __constant__ int cBatchStrideBack; __constant__ int cColorStrideBack; __constant__ int cSizesBack[3]; __constant__ int cStridesBack[3]; __constant__ int cKernelSizesBack[3]; __constant__ int cHalfWindowSize_arrBack[3]; __constant__ float cGaussianKernel_xBack[256]; __constant__ float cGaussianKernel_yBack[256]; __constant__ float cGaussianKernel_zBack[256]; __constant__ float cXDistanceSquaredBack[256]; __constant__ float cYDistanceSquaredBack[256]; __constant__ float cZDistanceSquaredBack[256]; __constant__ float cColorExponentConstantBack; __constant__ float cSigma_xBack; __constant__ float cSigma_yBack; __constant__ float cSigma_zBack; __constant__ float cColorSigmaBack; template <typename scalar_t, int C> __global__ void JointBilateralFilterCudaKernel3DBackward( scalar_t* gradientInputTensor, scalar_t* gradientGuidanceTensor, scalar_t* gradientOutputTensor, scalar_t* inputTensor, scalar_t* guidanceTensor, scalar_t* outputTensor, scalar_t* outputWeightsTensor, scalar_t* dO_dz_ki) { int homeOffset = blockIdx.x * blockDim.x + threadIdx.x; int batchOffset = blockIdx.y * cBatchStrideBack; if (homeOffset >= cColorStrideBack) return; int homeX = homeOffset / cStridesBack[0]; int homeY = (homeOffset - homeX * cStridesBack[0]) / cStridesBack[1]; int homeZ = (homeOffset - homeX * cStridesBack[0] - homeY * cStridesBack[1]) / cStridesBack[2]; int homeIndex[] = {homeX, homeY, homeZ}; // Zero kernel aggregates. 
scalar_t valueSumGuidance = 0; scalar_t valueSumInput = 0; for (int kernelX = 0; kernelX < cKernelSizesBack[0]; kernelX++) { int neighbourX = max(0, min(homeX + (kernelX - cHalfWindowSize_arrBack[0]), cSizesBack[0] - 1)); scalar_t gaussianX = cGaussianKernel_xBack[kernelX]; for (int kernelY = 0; kernelY < cKernelSizesBack[1]; kernelY++) { int neighbourY = max(0, min(homeY + (kernelY - cHalfWindowSize_arrBack[1]), cSizesBack[1] - 1)); scalar_t gaussianY = cGaussianKernel_yBack[kernelY]; for (int kernelZ = 0; kernelZ < cKernelSizesBack[2]; kernelZ++) { int neighbourZ = max(0, min(homeZ + (kernelZ - cHalfWindowSize_arrBack[2]), cSizesBack[2] - 1)); scalar_t gaussianZ = cGaussianKernel_zBack[kernelZ]; int neighbourOffset = neighbourX * cStridesBack[0] + neighbourY * cStridesBack[1] + neighbourZ; bool flagNotClamped = true; int kernelIndex[] = {kernelX, kernelY, kernelZ}; int dimensions = 3; // Must equal the number of spatial dimensions. for (int i = 0; i < dimensions; i++) { int HalfWindowSizeBack = cHalfWindowSize_arrBack[i]; // Define constant memory as new variable here (!!), // otherwise: hipErrorMisalignedAddress int neighbourIndex = homeIndex[i] + kernelIndex[i] - HalfWindowSizeBack; int neighbourIndexClamped = min(cSizesBack[i] - 1, max(0, neighbourIndex)); if (neighbourIndex != neighbourIndexClamped) { flagNotClamped = false; } } scalar_t colorDistance = 0; scalar_t colorDistanceSquared = 0; #pragma unroll for (int c = 0; c < C; c++) { scalar_t a = guidanceTensor[batchOffset + neighbourOffset + c * cColorStrideBack]; scalar_t b = guidanceTensor[batchOffset + homeOffset + c * cColorStrideBack]; // Be careful: Here it is (Z_k - // Z_i) and not (Z_i - Z_q) scalar_t diff = a - b; colorDistance += diff; // Do not take the absolute value here. Be careful with the signs. colorDistanceSquared += diff * diff; } scalar_t spatialWeight = gaussianX * gaussianY * gaussianZ; scalar_t colorWeight = exp(cColorExponentConstantBack * colorDistanceSquared); scalar_t totalWeight = spatialWeight * colorWeight; // Aggregating values. Only do this if flagNotClamped: Pixels outside the image are disregarded. if (flagNotClamped) { scalar_t filter_kernel_guidance_back; #pragma unroll for (int c = 0; c < C; c++) { // Distinguish cases for k!=i (calculation is done here) // and k==i (partial derivatives are precalculated). // If statement replaces center element of neighborhood/kernel. if (kernelX != cHalfWindowSize_arrBack[0] || kernelY != cHalfWindowSize_arrBack[1] || kernelZ != cHalfWindowSize_arrBack[2]) { filter_kernel_guidance_back = -(1 / outputWeightsTensor[batchOffset + neighbourOffset + c * cColorStrideBack]) * outputTensor[batchOffset + neighbourOffset + c * cColorStrideBack] * totalWeight * colorDistance / (cColorSigmaBack * cColorSigmaBack) + (1 / outputWeightsTensor[batchOffset + neighbourOffset + c * cColorStrideBack]) * totalWeight * (inputTensor[batchOffset + homeOffset + c * cColorStrideBack] * colorDistance / (cColorSigmaBack * cColorSigmaBack)); // inputTensorData[homeOffset] !!, no +1!! 
} else { filter_kernel_guidance_back = dO_dz_ki[batchOffset + homeOffset + c * cColorStrideBack]; } valueSumGuidance += gradientInputTensor[batchOffset + neighbourOffset + c * cColorStrideBack] * filter_kernel_guidance_back; valueSumInput += gradientInputTensor[batchOffset + neighbourOffset + c * cColorStrideBack] * (1 / outputWeightsTensor[batchOffset + neighbourOffset + c * cColorStrideBack]) * totalWeight; } } } } } #pragma unroll for (int c = 0; c < C; c++) { gradientGuidanceTensor[batchOffset + homeOffset + c * cColorStrideBack] = valueSumGuidance; gradientOutputTensor[batchOffset + homeOffset + c * cColorStrideBack] = valueSumInput; } } template <int C, int D> void JointBilateralFilterCudaBackwardFunction( torch::Tensor gradientInputTensor, torch::Tensor gradientGuidanceTensor, torch::Tensor gradientOutputTensor, torch::Tensor inputTensor, torch::Tensor guidanceTensor, torch::Tensor outputTensor, torch::Tensor outputWeightsTensor, torch::Tensor dO_dz_ki, float sigma_x, float sigma_y, float sigma_z, float colorSigma) { // Getting tensor description. TensorDescription desc = TensorDescription(inputTensor); // Pre-calculating gaussian kernel. int windowSize_x = ::max(((int)ceil(5.0f * sigma_x) | 1), 5); // ORing last bit to ensure odd window size int windowSize_y = ::max(((int)ceil(5.0f * sigma_y) | 1), 5); // ORing last bit to ensure odd window size int windowSize_z = ::max(((int)ceil(5.0f * sigma_z) | 1), 5); // ORing last bit to ensure odd window size int halfWindowSize_x = floor(0.5f * windowSize_x); int halfWindowSize_y = floor(0.5f * windowSize_y); int halfWindowSize_z = floor(0.5f * windowSize_z); int halfWindowSize_arr[] = {halfWindowSize_x, halfWindowSize_y, halfWindowSize_z}; float spatialExpConstant_x = -1.0f / (2 * sigma_x * sigma_x); float spatialExpConstant_y = -1.0f / (2 * sigma_y * sigma_y); float spatialExpConstant_z = -1.0f / (2 * sigma_z * sigma_z); float colorExpConstant = -1.0f / (2 * colorSigma * colorSigma); int* kernelSizes = new int[desc.dimensions]; kernelSizes[0] = windowSize_x; kernelSizes[1] = windowSize_y; kernelSizes[2] = windowSize_z; auto* gaussianKernel_x = new float[windowSize_x]; auto* gaussianKernel_y = new float[windowSize_y]; auto* gaussianKernel_z = new float[windowSize_z]; auto* xDistanceSquared = new float[windowSize_x]; auto* yDistanceSquared = new float[windowSize_y]; auto* zDistanceSquared = new float[windowSize_z]; for (int i = 0; i < windowSize_x; i++) { int distance = i - halfWindowSize_x; gaussianKernel_x[i] = exp(distance * distance * spatialExpConstant_x); xDistanceSquared[i] = distance * distance; } for (int i = 0; i < windowSize_y; i++) { int distance = i - halfWindowSize_y; gaussianKernel_y[i] = exp(distance * distance * spatialExpConstant_y); yDistanceSquared[i] = distance * distance; } for (int i = 0; i < windowSize_z; i++) { int distance = i - halfWindowSize_z; gaussianKernel_z[i] = exp(distance * distance * spatialExpConstant_z); zDistanceSquared[i] = distance * distance; } // Writing constant memory. 
hipMemcpyToSymbol(cBatchStrideBack, &desc.batchStride, sizeof(int)); hipMemcpyToSymbol(cColorStrideBack, &desc.channelStride, sizeof(int)); hipMemcpyToSymbol(cSizesBack, desc.sizes, sizeof(int) * 3); hipMemcpyToSymbol(cStridesBack, desc.strides, sizeof(int) * 3); hipMemcpyToSymbol(cKernelSizesBack, kernelSizes, sizeof(int) * desc.dimensions); hipMemcpyToSymbol(cHalfWindowSize_arrBack, halfWindowSize_arr, sizeof(int) * desc.dimensions); hipMemcpyToSymbol(cGaussianKernel_xBack, gaussianKernel_x, sizeof(float) * windowSize_x); hipMemcpyToSymbol(cGaussianKernel_yBack, gaussianKernel_y, sizeof(float) * windowSize_y); hipMemcpyToSymbol(cGaussianKernel_zBack, gaussianKernel_z, sizeof(float) * windowSize_z); hipMemcpyToSymbol(cXDistanceSquaredBack, xDistanceSquared, sizeof(float) * windowSize_x); hipMemcpyToSymbol(cYDistanceSquaredBack, yDistanceSquared, sizeof(float) * windowSize_y); hipMemcpyToSymbol(cZDistanceSquaredBack, zDistanceSquared, sizeof(float) * windowSize_z); hipMemcpyToSymbol(cColorExponentConstantBack, &colorExpConstant, sizeof(float)); hipMemcpyToSymbol(cSigma_xBack, &sigma_x, sizeof(float)); hipMemcpyToSymbol(cSigma_yBack, &sigma_y, sizeof(float)); hipMemcpyToSymbol(cSigma_zBack, &sigma_z, sizeof(float)); hipMemcpyToSymbol(cColorSigmaBack, &colorSigma, sizeof(float)); // cuda_error_check("Cuda check before kernel call."); #define BLOCK_SIZE 32 AT_DISPATCH_FLOATING_TYPES_AND_HALF( inputTensor.scalar_type(), "JointBilateralFilterCudaKernel3DBackward", ([&] { hipLaunchKernelGGL(( JointBilateralFilterCudaKernel3DBackward<scalar_t, C>) , dim3(dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount)), dim3(dim3(BLOCK_SIZE, 1)), 0, 0, gradientInputTensor.data_ptr<scalar_t>(), gradientGuidanceTensor.data_ptr<scalar_t>(), gradientOutputTensor.data_ptr<scalar_t>(), inputTensor.data_ptr<scalar_t>(), guidanceTensor.data_ptr<scalar_t>(), outputTensor.data_ptr<scalar_t>(), outputWeightsTensor.data_ptr<scalar_t>(), dO_dz_ki.data_ptr<scalar_t>()); })); // cuda_error_check("Cuda check after kernel call."); // delete[] kernel; delete[] kernelSizes; delete[] gaussianKernel_x; delete[] gaussianKernel_y; delete[] gaussianKernel_z; delete[] xDistanceSquared; delete[] yDistanceSquared; delete[] zDistanceSquared; } // Function to choose template implementation based on dynamic, channels and dimensions std::tuple<torch::Tensor, torch::Tensor> JointBilateralFilterCudaBackward( torch::Tensor gradientInputTensor, torch::Tensor inputTensor, torch::Tensor guidanceTensor, torch::Tensor outputTensor, torch::Tensor outputWeightsTensor, torch::Tensor dO_dz_ki, float sigma_x, float sigma_y, float sigma_z, float colorSigma) { torch::Tensor gradientOutputTensor = torch::zeros_like(gradientInputTensor); torch::Tensor gradientGuidanceTensor = torch::zeros_like(gradientInputTensor); // cuda_error_check("beginning"); #define CASE(c, d) \ JointBilateralFilterCudaBackwardFunction<c, d>( \ gradientInputTensor, \ gradientGuidanceTensor, \ gradientOutputTensor, \ inputTensor, \ guidanceTensor, \ outputTensor, \ outputWeightsTensor, \ dO_dz_ki, \ sigma_x, \ sigma_y, \ sigma_z, \ colorSigma); SWITCH_AB( CASE, BF_CUDA_MAX_CHANNELS, BF_CUDA_MAX_SPATIAL_DIMENSION, gradientInputTensor.size(1), gradientInputTensor.dim() - 2); return {gradientOutputTensor, gradientGuidanceTensor}; }
e169007e8eea29389a4eb5f17b38aa75fa44950e.cu
/* Copyright (c) MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================================================================= Adapted from https://github.com/faebstn96/trainable-joint-bilateral-filter-source which has the following license... https://github.com/faebstn96/trainable-joint-bilateral-filter-source/blob/main/LICENSE Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cuda.h> #include <cuda_runtime.h> #include "trainable_joint_bilateral.h" //#include "../utils/cuda_error_check.h" #include "utils/meta_macros.h" #include "utils/tensor_description.h" __constant__ int cBatchStrideBack; __constant__ int cColorStrideBack; __constant__ int cSizesBack[3]; __constant__ int cStridesBack[3]; __constant__ int cKernelSizesBack[3]; __constant__ int cHalfWindowSize_arrBack[3]; __constant__ float cGaussianKernel_xBack[256]; __constant__ float cGaussianKernel_yBack[256]; __constant__ float cGaussianKernel_zBack[256]; __constant__ float cXDistanceSquaredBack[256]; __constant__ float cYDistanceSquaredBack[256]; __constant__ float cZDistanceSquaredBack[256]; __constant__ float cColorExponentConstantBack; __constant__ float cSigma_xBack; __constant__ float cSigma_yBack; __constant__ float cSigma_zBack; __constant__ float cColorSigmaBack; template <typename scalar_t, int C> __global__ void JointBilateralFilterCudaKernel3DBackward( scalar_t* gradientInputTensor, scalar_t* gradientGuidanceTensor, scalar_t* gradientOutputTensor, scalar_t* inputTensor, scalar_t* guidanceTensor, scalar_t* outputTensor, scalar_t* outputWeightsTensor, scalar_t* dO_dz_ki) { int homeOffset = blockIdx.x * blockDim.x + threadIdx.x; int batchOffset = blockIdx.y * cBatchStrideBack; if (homeOffset >= cColorStrideBack) return; int homeX = homeOffset / cStridesBack[0]; int homeY = (homeOffset - homeX * cStridesBack[0]) / cStridesBack[1]; int homeZ = (homeOffset - homeX * cStridesBack[0] - homeY * cStridesBack[1]) / cStridesBack[2]; int homeIndex[] = {homeX, homeY, homeZ}; // Zero kernel aggregates. 
scalar_t valueSumGuidance = 0; scalar_t valueSumInput = 0; for (int kernelX = 0; kernelX < cKernelSizesBack[0]; kernelX++) { int neighbourX = max(0, min(homeX + (kernelX - cHalfWindowSize_arrBack[0]), cSizesBack[0] - 1)); scalar_t gaussianX = cGaussianKernel_xBack[kernelX]; for (int kernelY = 0; kernelY < cKernelSizesBack[1]; kernelY++) { int neighbourY = max(0, min(homeY + (kernelY - cHalfWindowSize_arrBack[1]), cSizesBack[1] - 1)); scalar_t gaussianY = cGaussianKernel_yBack[kernelY]; for (int kernelZ = 0; kernelZ < cKernelSizesBack[2]; kernelZ++) { int neighbourZ = max(0, min(homeZ + (kernelZ - cHalfWindowSize_arrBack[2]), cSizesBack[2] - 1)); scalar_t gaussianZ = cGaussianKernel_zBack[kernelZ]; int neighbourOffset = neighbourX * cStridesBack[0] + neighbourY * cStridesBack[1] + neighbourZ; bool flagNotClamped = true; int kernelIndex[] = {kernelX, kernelY, kernelZ}; int dimensions = 3; // Must equal the number of spatial dimensions. for (int i = 0; i < dimensions; i++) { int HalfWindowSizeBack = cHalfWindowSize_arrBack[i]; // Define constant memory as new variable here (!!), // otherwise: cudaErrorMisalignedAddress int neighbourIndex = homeIndex[i] + kernelIndex[i] - HalfWindowSizeBack; int neighbourIndexClamped = min(cSizesBack[i] - 1, max(0, neighbourIndex)); if (neighbourIndex != neighbourIndexClamped) { flagNotClamped = false; } } scalar_t colorDistance = 0; scalar_t colorDistanceSquared = 0; #pragma unroll for (int c = 0; c < C; c++) { scalar_t a = guidanceTensor[batchOffset + neighbourOffset + c * cColorStrideBack]; scalar_t b = guidanceTensor[batchOffset + homeOffset + c * cColorStrideBack]; // Be careful: Here it is (Z_k - // Z_i) and not (Z_i - Z_q) scalar_t diff = a - b; colorDistance += diff; // Do not take the absolute value here. Be careful with the signs. colorDistanceSquared += diff * diff; } scalar_t spatialWeight = gaussianX * gaussianY * gaussianZ; scalar_t colorWeight = exp(cColorExponentConstantBack * colorDistanceSquared); scalar_t totalWeight = spatialWeight * colorWeight; // Aggregating values. Only do this if flagNotClamped: Pixels outside the image are disregarded. if (flagNotClamped) { scalar_t filter_kernel_guidance_back; #pragma unroll for (int c = 0; c < C; c++) { // Distinguish cases for k!=i (calculation is done here) // and k==i (partial derivatives are precalculated). // If statement replaces center element of neighborhood/kernel. if (kernelX != cHalfWindowSize_arrBack[0] || kernelY != cHalfWindowSize_arrBack[1] || kernelZ != cHalfWindowSize_arrBack[2]) { filter_kernel_guidance_back = -(1 / outputWeightsTensor[batchOffset + neighbourOffset + c * cColorStrideBack]) * outputTensor[batchOffset + neighbourOffset + c * cColorStrideBack] * totalWeight * colorDistance / (cColorSigmaBack * cColorSigmaBack) + (1 / outputWeightsTensor[batchOffset + neighbourOffset + c * cColorStrideBack]) * totalWeight * (inputTensor[batchOffset + homeOffset + c * cColorStrideBack] * colorDistance / (cColorSigmaBack * cColorSigmaBack)); // inputTensorData[homeOffset] !!, no +1!! 
} else { filter_kernel_guidance_back = dO_dz_ki[batchOffset + homeOffset + c * cColorStrideBack]; } valueSumGuidance += gradientInputTensor[batchOffset + neighbourOffset + c * cColorStrideBack] * filter_kernel_guidance_back; valueSumInput += gradientInputTensor[batchOffset + neighbourOffset + c * cColorStrideBack] * (1 / outputWeightsTensor[batchOffset + neighbourOffset + c * cColorStrideBack]) * totalWeight; } } } } } #pragma unroll for (int c = 0; c < C; c++) { gradientGuidanceTensor[batchOffset + homeOffset + c * cColorStrideBack] = valueSumGuidance; gradientOutputTensor[batchOffset + homeOffset + c * cColorStrideBack] = valueSumInput; } } template <int C, int D> void JointBilateralFilterCudaBackwardFunction( torch::Tensor gradientInputTensor, torch::Tensor gradientGuidanceTensor, torch::Tensor gradientOutputTensor, torch::Tensor inputTensor, torch::Tensor guidanceTensor, torch::Tensor outputTensor, torch::Tensor outputWeightsTensor, torch::Tensor dO_dz_ki, float sigma_x, float sigma_y, float sigma_z, float colorSigma) { // Getting tensor description. TensorDescription desc = TensorDescription(inputTensor); // Pre-calculating gaussian kernel. int windowSize_x = std::max(((int)ceil(5.0f * sigma_x) | 1), 5); // ORing last bit to ensure odd window size int windowSize_y = std::max(((int)ceil(5.0f * sigma_y) | 1), 5); // ORing last bit to ensure odd window size int windowSize_z = std::max(((int)ceil(5.0f * sigma_z) | 1), 5); // ORing last bit to ensure odd window size int halfWindowSize_x = floor(0.5f * windowSize_x); int halfWindowSize_y = floor(0.5f * windowSize_y); int halfWindowSize_z = floor(0.5f * windowSize_z); int halfWindowSize_arr[] = {halfWindowSize_x, halfWindowSize_y, halfWindowSize_z}; float spatialExpConstant_x = -1.0f / (2 * sigma_x * sigma_x); float spatialExpConstant_y = -1.0f / (2 * sigma_y * sigma_y); float spatialExpConstant_z = -1.0f / (2 * sigma_z * sigma_z); float colorExpConstant = -1.0f / (2 * colorSigma * colorSigma); int* kernelSizes = new int[desc.dimensions]; kernelSizes[0] = windowSize_x; kernelSizes[1] = windowSize_y; kernelSizes[2] = windowSize_z; auto* gaussianKernel_x = new float[windowSize_x]; auto* gaussianKernel_y = new float[windowSize_y]; auto* gaussianKernel_z = new float[windowSize_z]; auto* xDistanceSquared = new float[windowSize_x]; auto* yDistanceSquared = new float[windowSize_y]; auto* zDistanceSquared = new float[windowSize_z]; for (int i = 0; i < windowSize_x; i++) { int distance = i - halfWindowSize_x; gaussianKernel_x[i] = exp(distance * distance * spatialExpConstant_x); xDistanceSquared[i] = distance * distance; } for (int i = 0; i < windowSize_y; i++) { int distance = i - halfWindowSize_y; gaussianKernel_y[i] = exp(distance * distance * spatialExpConstant_y); yDistanceSquared[i] = distance * distance; } for (int i = 0; i < windowSize_z; i++) { int distance = i - halfWindowSize_z; gaussianKernel_z[i] = exp(distance * distance * spatialExpConstant_z); zDistanceSquared[i] = distance * distance; } // Writing constant memory. 
cudaMemcpyToSymbol(cBatchStrideBack, &desc.batchStride, sizeof(int)); cudaMemcpyToSymbol(cColorStrideBack, &desc.channelStride, sizeof(int)); cudaMemcpyToSymbol(cSizesBack, desc.sizes, sizeof(int) * 3); cudaMemcpyToSymbol(cStridesBack, desc.strides, sizeof(int) * 3); cudaMemcpyToSymbol(cKernelSizesBack, kernelSizes, sizeof(int) * desc.dimensions); cudaMemcpyToSymbol(cHalfWindowSize_arrBack, halfWindowSize_arr, sizeof(int) * desc.dimensions); cudaMemcpyToSymbol(cGaussianKernel_xBack, gaussianKernel_x, sizeof(float) * windowSize_x); cudaMemcpyToSymbol(cGaussianKernel_yBack, gaussianKernel_y, sizeof(float) * windowSize_y); cudaMemcpyToSymbol(cGaussianKernel_zBack, gaussianKernel_z, sizeof(float) * windowSize_z); cudaMemcpyToSymbol(cXDistanceSquaredBack, xDistanceSquared, sizeof(float) * windowSize_x); cudaMemcpyToSymbol(cYDistanceSquaredBack, yDistanceSquared, sizeof(float) * windowSize_y); cudaMemcpyToSymbol(cZDistanceSquaredBack, zDistanceSquared, sizeof(float) * windowSize_z); cudaMemcpyToSymbol(cColorExponentConstantBack, &colorExpConstant, sizeof(float)); cudaMemcpyToSymbol(cSigma_xBack, &sigma_x, sizeof(float)); cudaMemcpyToSymbol(cSigma_yBack, &sigma_y, sizeof(float)); cudaMemcpyToSymbol(cSigma_zBack, &sigma_z, sizeof(float)); cudaMemcpyToSymbol(cColorSigmaBack, &colorSigma, sizeof(float)); // cuda_error_check("Cuda check before kernel call."); #define BLOCK_SIZE 32 AT_DISPATCH_FLOATING_TYPES_AND_HALF( inputTensor.scalar_type(), "JointBilateralFilterCudaKernel3DBackward", ([&] { JointBilateralFilterCudaKernel3DBackward<scalar_t, C> <<<dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount), dim3(BLOCK_SIZE, 1)>>>( gradientInputTensor.data_ptr<scalar_t>(), gradientGuidanceTensor.data_ptr<scalar_t>(), gradientOutputTensor.data_ptr<scalar_t>(), inputTensor.data_ptr<scalar_t>(), guidanceTensor.data_ptr<scalar_t>(), outputTensor.data_ptr<scalar_t>(), outputWeightsTensor.data_ptr<scalar_t>(), dO_dz_ki.data_ptr<scalar_t>()); })); // cuda_error_check("Cuda check after kernel call."); // delete[] kernel; delete[] kernelSizes; delete[] gaussianKernel_x; delete[] gaussianKernel_y; delete[] gaussianKernel_z; delete[] xDistanceSquared; delete[] yDistanceSquared; delete[] zDistanceSquared; } // Function to choose template implementation based on dynamic, channels and dimensions std::tuple<torch::Tensor, torch::Tensor> JointBilateralFilterCudaBackward( torch::Tensor gradientInputTensor, torch::Tensor inputTensor, torch::Tensor guidanceTensor, torch::Tensor outputTensor, torch::Tensor outputWeightsTensor, torch::Tensor dO_dz_ki, float sigma_x, float sigma_y, float sigma_z, float colorSigma) { torch::Tensor gradientOutputTensor = torch::zeros_like(gradientInputTensor); torch::Tensor gradientGuidanceTensor = torch::zeros_like(gradientInputTensor); // cuda_error_check("beginning"); #define CASE(c, d) \ JointBilateralFilterCudaBackwardFunction<c, d>( \ gradientInputTensor, \ gradientGuidanceTensor, \ gradientOutputTensor, \ inputTensor, \ guidanceTensor, \ outputTensor, \ outputWeightsTensor, \ dO_dz_ki, \ sigma_x, \ sigma_y, \ sigma_z, \ colorSigma); SWITCH_AB( CASE, BF_CUDA_MAX_CHANNELS, BF_CUDA_MAX_SPATIAL_DIMENSION, gradientInputTensor.size(1), gradientInputTensor.dim() - 2); return {gradientOutputTensor, gradientGuidanceTensor}; }
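The pair above shows the two most visible rewrites hipify performs: CUDA runtime calls gain a hip prefix (cudaMemcpyToSymbol becomes hipMemcpyToSymbol) and the triple-chevron kernel launch becomes a hipLaunchKernelGGL call. Below is a minimal, self-contained sketch of that launch rewrite; the scaleKernel example is illustrative and not taken from the dataset.

#include <cuda_runtime.h>                       // hipify: #include <hip/hip_runtime.h>

__global__ void scaleKernel(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;               // simple element-wise scale
}

void launchScale(float* d_data, float factor, int n, cudaStream_t stream) {
    const int blockSize = 256;
    const int gridSize = (n + blockSize - 1) / blockSize;
    // CUDA form, as in the .cu column above:
    scaleKernel<<<gridSize, blockSize, 0, stream>>>(d_data, factor, n);
    // Equivalent HIP form emitted by hipify, as in the .hip column above:
    // hipLaunchKernelGGL(scaleKernel, dim3(gridSize), dim3(blockSize), 0, stream,
    //                    d_data, factor, n);
}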
11b25623850738c692b382b6a744378f99f0c82f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file mpcd/CommunicatorGPU.cu * \brief Implementation of communication algorithms on the GPU */ #ifdef ENABLE_MPI #include "CommunicatorGPU.cuh" #include "CommunicatorUtilities.h" #include "ReductionOperators.h" #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <thrust/transform.h> #include "hoomd/extern/cub/hipcub/hipcub.hpp" namespace mpcd { namespace gpu { namespace kernel { //! Select a particle for migration /*! * \param d_comm_flag Communication flags to write out * \param d_pos Device array of particle positions * \param N Number of local particles * \param box Local box * * Checks for particles being out of bounds, and aggregates send flags. */ __global__ void stage_particles(unsigned int *d_comm_flag, const Scalar4 *d_pos, unsigned int N, const BoxDim box) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; const Scalar4 postype = d_pos[idx]; const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); const Scalar3 lo = box.getLo(); const Scalar3 hi = box.getHi(); unsigned int flags = 0; if (pos.x >= hi.x) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::east); else if (pos.x < lo.x) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::west); if (pos.y >= hi.y) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::north); else if (pos.y < lo.y) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::south); if (pos.z >= hi.z) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::up); else if (pos.z < lo.z) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::down); d_comm_flag[idx] = flags; } } // end namespace kernel //! Functor to select a particle for migration struct get_migrate_key : public thrust::unary_function<const unsigned int, unsigned int> { const uint3 my_pos; //!< My domain decomposition position const Index3D di; //!< Domain indexer const unsigned int mask; //!< Mask of allowed directions const unsigned int *cart_ranks; //!< Rank lookup table //! Constructor /*! * \param _my_pos Domain decomposition position * \param _di Domain indexer * \param _mask Mask of allowed directions * \param _cart_ranks Rank lookup table */ get_migrate_key(const uint3 _my_pos, const Index3D _di, const unsigned int _mask, const unsigned int *_cart_ranks) : my_pos(_my_pos), di(_di), mask(_mask), cart_ranks(_cart_ranks) { } //! Generate key for a sent particle /*! 
* \param element Particle data being sent */ __device__ __forceinline__ unsigned int operator()(const mpcd::detail::pdata_element& element) { const unsigned int flags = element.comm_flag; int ix, iy, iz; ix = iy = iz = 0; if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::east)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::east))) ix = 1; else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::west)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::west))) ix = -1; if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::north)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::north))) iy = 1; else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::south)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::south))) iy = -1; if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::up)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::up))) iz = 1; else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::down)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::down))) iz = -1; int i = my_pos.x; int j = my_pos.y; int k = my_pos.z; i += ix; if (i == (int)di.getW()) i = 0; else if (i < 0) i += di.getW(); j += iy; if (j == (int)di.getH()) j = 0; else if (j < 0) j += di.getH(); k += iz; if (k == (int)di.getD()) k = 0; else if (k < 0) k += di.getD(); return cart_ranks[di(i,j,k)]; } }; } // end namespace gpu } // end namespace mpcd /*! * \param d_comm_flag Communication flags to write out * \param d_pos Device array of particle positions * \param N Number of local particles * \param box Local box * * \returns Accumulated communication flags of all particles */ hipError_t mpcd::gpu::stage_particles(unsigned int *d_comm_flag, const Scalar4 *d_pos, const unsigned int N, const BoxDim& box, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_particles); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::stage_particles), dim3(grid), dim3(run_block_size), 0, 0, d_comm_flag, d_pos, N, box); return hipSuccess; } /*! * \param d_sendbuf Particle data buffer to sort * \param d_neigh_send Neighbor ranks that particles are being sent to (output) * \param d_num_send Number of particles being sent to each neighbor * \param d_tmp_keys Temporary array (size \a Nsend) used for sorting * \param grid_pos Grid position of the rank * \param di Domain decomposition indexer * \param mask Sending mask for the current stage * \param d_cart_ranks Cartesian array of domains * \param Nsend Number of particles in send buffer * * \returns The number of unique neighbor ranks to send to * * The communication flags in \a d_sendbuf are first transformed into a destination * rank (see mpcd::gpu::get_migrate_key). The send buffer is then sorted using * the destination rank as the key. Run-length encoding is then performed to * determine the number of particles going to each destination rank, and how * many ranks will be sent to. 
*/ size_t mpcd::gpu::sort_comm_send_buffer(mpcd::detail::pdata_element *d_sendbuf, unsigned int *d_neigh_send, unsigned int *d_num_send, unsigned int *d_tmp_keys, const uint3 grid_pos, const Index3D& di, const unsigned int mask, const unsigned int *d_cart_ranks, const unsigned int Nsend) { // transform extracted communication flags into destination rank thrust::device_ptr<mpcd::detail::pdata_element> sendbuf(d_sendbuf); thrust::device_ptr<unsigned int> keys(d_tmp_keys); thrust::transform(sendbuf, sendbuf + Nsend, keys, mpcd::gpu::get_migrate_key(grid_pos, di, mask, d_cart_ranks)); // sort the destination ranks thrust::sort_by_key(keys, keys + Nsend, sendbuf); // run length encode to get the number going to each rank thrust::device_ptr<unsigned int> neigh_send(d_neigh_send); thrust::device_ptr<unsigned int> num_send(d_num_send); size_t num_neigh = thrust::reduce_by_key(keys, keys + Nsend, thrust::constant_iterator<int>(1), neigh_send, num_send).first - neigh_send; return num_neigh; } /*! * \param d_req_flags Reduced requested communication flags (output) * \param d_tmp Temporary storage for reduction * \param tmp_bytes Number of temporary storage bytes requested * \param d_comm_flags Communication flags to reduce * \param N Number of local particles * * Bitwise OR reduction is performed on the communication flags to determine * requested migration direction. * * \note This function must be called \b twice. The first call sizes the temporary * arrays. The caller must then allocate the necessary temporary storage, and then * call again to perform the reduction. */ void mpcd::gpu::reduce_comm_flags(unsigned int *d_req_flags, void *d_tmp, size_t& tmp_bytes, const unsigned int *d_comm_flags, const unsigned int N) { mpcd::ops::BitwiseOr bit_or; hipcub::DeviceReduce::Reduce(d_tmp, tmp_bytes, d_comm_flags, d_req_flags, N, bit_or, (unsigned int)0); } namespace mpcd { namespace gpu { //! Wrap a particle in a pdata_element struct wrap_particle_op : public thrust::unary_function<const mpcd::detail::pdata_element, mpcd::detail::pdata_element> { const BoxDim box; //!< The box for which we are applying boundary conditions //! Constructor /*! * \param _box Shifted simulation box for wrapping */ wrap_particle_op(const BoxDim _box) : box(_box) { } //! Wrap position information inside particle data element /*! * \param p Particle data element * \returns The particle data element with wrapped coordinates */ __device__ mpcd::detail::pdata_element operator()(const mpcd::detail::pdata_element p) { mpcd::detail::pdata_element ret = p; int3 image = make_int3(0,0,0); box.wrap(ret.pos, image); return ret; } }; } // end namespace gpu } // end namespace mpcd /*! * \param n_recv Number of particles in buffer * \param d_in Buffer of particle data elements * \param box Box for which to apply boundary conditions */ void mpcd::gpu::wrap_particles(const unsigned int n_recv, mpcd::detail::pdata_element *d_in, const BoxDim& box) { // Wrap device ptr thrust::device_ptr<mpcd::detail::pdata_element> in_ptr(d_in); // Apply box wrap to input buffer thrust::transform(in_ptr, in_ptr + n_recv, in_ptr, mpcd::gpu::wrap_particle_op(box)); } #endif // ENABLE_MPI
11b25623850738c692b382b6a744378f99f0c82f.cu
// Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file mpcd/CommunicatorGPU.cu * \brief Implementation of communication algorithms on the GPU */ #ifdef ENABLE_MPI #include "CommunicatorGPU.cuh" #include "CommunicatorUtilities.h" #include "ReductionOperators.h" #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <thrust/transform.h> #include "hoomd/extern/cub/cub/device/device_reduce.cuh" namespace mpcd { namespace gpu { namespace kernel { //! Select a particle for migration /*! * \param d_comm_flag Communication flags to write out * \param d_pos Device array of particle positions * \param N Number of local particles * \param box Local box * * Checks for particles being out of bounds, and aggregates send flags. */ __global__ void stage_particles(unsigned int *d_comm_flag, const Scalar4 *d_pos, unsigned int N, const BoxDim box) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; const Scalar4 postype = d_pos[idx]; const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); const Scalar3 lo = box.getLo(); const Scalar3 hi = box.getHi(); unsigned int flags = 0; if (pos.x >= hi.x) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::east); else if (pos.x < lo.x) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::west); if (pos.y >= hi.y) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::north); else if (pos.y < lo.y) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::south); if (pos.z >= hi.z) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::up); else if (pos.z < lo.z) flags |= static_cast<unsigned int>(mpcd::detail::send_mask::down); d_comm_flag[idx] = flags; } } // end namespace kernel //! Functor to select a particle for migration struct get_migrate_key : public thrust::unary_function<const unsigned int, unsigned int> { const uint3 my_pos; //!< My domain decomposition position const Index3D di; //!< Domain indexer const unsigned int mask; //!< Mask of allowed directions const unsigned int *cart_ranks; //!< Rank lookup table //! Constructor /*! * \param _my_pos Domain decomposition position * \param _di Domain indexer * \param _mask Mask of allowed directions * \param _cart_ranks Rank lookup table */ get_migrate_key(const uint3 _my_pos, const Index3D _di, const unsigned int _mask, const unsigned int *_cart_ranks) : my_pos(_my_pos), di(_di), mask(_mask), cart_ranks(_cart_ranks) { } //! Generate key for a sent particle /*! 
* \param element Particle data being sent */ __device__ __forceinline__ unsigned int operator()(const mpcd::detail::pdata_element& element) { const unsigned int flags = element.comm_flag; int ix, iy, iz; ix = iy = iz = 0; if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::east)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::east))) ix = 1; else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::west)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::west))) ix = -1; if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::north)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::north))) iy = 1; else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::south)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::south))) iy = -1; if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::up)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::up))) iz = 1; else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::down)) && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::down))) iz = -1; int i = my_pos.x; int j = my_pos.y; int k = my_pos.z; i += ix; if (i == (int)di.getW()) i = 0; else if (i < 0) i += di.getW(); j += iy; if (j == (int)di.getH()) j = 0; else if (j < 0) j += di.getH(); k += iz; if (k == (int)di.getD()) k = 0; else if (k < 0) k += di.getD(); return cart_ranks[di(i,j,k)]; } }; } // end namespace gpu } // end namespace mpcd /*! * \param d_comm_flag Communication flags to write out * \param d_pos Device array of particle positions * \param N Number of local particles * \param box Local box * * \returns Accumulated communication flags of all particles */ cudaError_t mpcd::gpu::stage_particles(unsigned int *d_comm_flag, const Scalar4 *d_pos, const unsigned int N, const BoxDim& box, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_particles); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N / run_block_size + 1); mpcd::gpu::kernel::stage_particles<<<grid, run_block_size>>>(d_comm_flag, d_pos, N, box); return cudaSuccess; } /*! * \param d_sendbuf Particle data buffer to sort * \param d_neigh_send Neighbor ranks that particles are being sent to (output) * \param d_num_send Number of particles being sent to each neighbor * \param d_tmp_keys Temporary array (size \a Nsend) used for sorting * \param grid_pos Grid position of the rank * \param di Domain decomposition indexer * \param mask Sending mask for the current stage * \param d_cart_ranks Cartesian array of domains * \param Nsend Number of particles in send buffer * * \returns The number of unique neighbor ranks to send to * * The communication flags in \a d_sendbuf are first transformed into a destination * rank (see mpcd::gpu::get_migrate_key). The send buffer is then sorted using * the destination rank as the key. Run-length encoding is then performed to * determine the number of particles going to each destination rank, and how * many ranks will be sent to. 
*/ size_t mpcd::gpu::sort_comm_send_buffer(mpcd::detail::pdata_element *d_sendbuf, unsigned int *d_neigh_send, unsigned int *d_num_send, unsigned int *d_tmp_keys, const uint3 grid_pos, const Index3D& di, const unsigned int mask, const unsigned int *d_cart_ranks, const unsigned int Nsend) { // transform extracted communication flags into destination rank thrust::device_ptr<mpcd::detail::pdata_element> sendbuf(d_sendbuf); thrust::device_ptr<unsigned int> keys(d_tmp_keys); thrust::transform(sendbuf, sendbuf + Nsend, keys, mpcd::gpu::get_migrate_key(grid_pos, di, mask, d_cart_ranks)); // sort the destination ranks thrust::sort_by_key(keys, keys + Nsend, sendbuf); // run length encode to get the number going to each rank thrust::device_ptr<unsigned int> neigh_send(d_neigh_send); thrust::device_ptr<unsigned int> num_send(d_num_send); size_t num_neigh = thrust::reduce_by_key(keys, keys + Nsend, thrust::constant_iterator<int>(1), neigh_send, num_send).first - neigh_send; return num_neigh; } /*! * \param d_req_flags Reduced requested communication flags (output) * \param d_tmp Temporary storage for reduction * \param tmp_bytes Number of temporary storage bytes requested * \param d_comm_flags Communication flags to reduce * \param N Number of local particles * * Bitwise OR reduction is performed on the communication flags to determine * requested migration direction. * * \note This function must be called \b twice. The first call sizes the temporary * arrays. The caller must then allocate the necessary temporary storage, and then * call again to perform the reduction. */ void mpcd::gpu::reduce_comm_flags(unsigned int *d_req_flags, void *d_tmp, size_t& tmp_bytes, const unsigned int *d_comm_flags, const unsigned int N) { mpcd::ops::BitwiseOr bit_or; cub::DeviceReduce::Reduce(d_tmp, tmp_bytes, d_comm_flags, d_req_flags, N, bit_or, (unsigned int)0); } namespace mpcd { namespace gpu { //! Wrap a particle in a pdata_element struct wrap_particle_op : public thrust::unary_function<const mpcd::detail::pdata_element, mpcd::detail::pdata_element> { const BoxDim box; //!< The box for which we are applying boundary conditions //! Constructor /*! * \param _box Shifted simulation box for wrapping */ wrap_particle_op(const BoxDim _box) : box(_box) { } //! Wrap position information inside particle data element /*! * \param p Particle data element * \returns The particle data element with wrapped coordinates */ __device__ mpcd::detail::pdata_element operator()(const mpcd::detail::pdata_element p) { mpcd::detail::pdata_element ret = p; int3 image = make_int3(0,0,0); box.wrap(ret.pos, image); return ret; } }; } // end namespace gpu } // end namespace mpcd /*! * \param n_recv Number of particles in buffer * \param d_in Buffer of particle data elements * \param box Box for which to apply boundary conditions */ void mpcd::gpu::wrap_particles(const unsigned int n_recv, mpcd::detail::pdata_element *d_in, const BoxDim& box) { // Wrap device ptr thrust::device_ptr<mpcd::detail::pdata_element> in_ptr(d_in); // Apply box wrap to input buffer thrust::transform(in_ptr, in_ptr + n_recv, in_ptr, mpcd::gpu::wrap_particle_op(box)); } #endif // ENABLE_MPI
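This pair also shows how device-library calls are remapped: the CUB header and namespace (cub::DeviceReduce) become hipCUB (hipcub::DeviceReduce), while Thrust calls pass through unchanged. The sketch below shows the same two-phase DeviceReduce pattern used in reduce_comm_flags, but with a plain sum rather than the project's BitwiseOr functor; the function and variable names are illustrative, not from the dataset.

#include <cuda_runtime.h>
#include <cub/device/device_reduce.cuh>         // hipify: #include <hipcub/hipcub.hpp>

void reduceFlagsSum(const unsigned int* d_in, unsigned int* d_out, int n) {
    void*  d_temp     = nullptr;
    size_t temp_bytes = 0;
    // First call with a null temporary buffer only reports the required size.
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n);
    cudaMalloc(&d_temp, temp_bytes);            // hipify: hipMalloc
    // Second call performs the reduction.
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n);   // hipcub::DeviceReduce::Sum under HIP
    cudaFree(d_temp);                           // hipify: hipFree
}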
f2ef7f0e9fd8fe5bb0f329be99eb02dd199bbc19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <Environment.h> #include <loops/transform_float.h> #include <types/types.h> #include <op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformFloatSimple(void *x, Nd4jLong *xShapeInfo, int xRank, void *params, void *z, Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { functions::transform::TransformFloat<X,Z>::template transformCuda<OpType>( x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformFloat<X,Y>::executeTransformShaped(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformFloat<X,Z>::transformCuda( void *vx, Nd4jLong *xShapeInfo, void *vparams, void *vz, Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto x = reinterpret_cast<X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto params = reinterpret_cast<Z*>(vparams); auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder) { for (Nd4jLong i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], 
params); } } else { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); auto zOffset = shape::getIndexOffset(i, zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template<typename X, typename Y> __device__ void TransformFloat<X,Y>::transformCudaLegacy( int opNum, void *x, Nd4jLong *xShapeInfo, void *params, void *z, Nd4jLong *zShapeInfo, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(transformCuda, PARAMS(x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS); } template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformFloat<X,Z>::intermediateShaped(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { hipLaunchKernelGGL(( transformFloatSimple<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); nd4j::DebugHelper::checkErrorCode(stream, "transformFloat(...) failed"); } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformFloat, , LIBND4J_TYPES, FLOAT_TYPES); } }
f2ef7f0e9fd8fe5bb0f329be99eb02dd199bbc19.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <Environment.h> #include <loops/transform_float.h> #include <types/types.h> #include <op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformFloatSimple(void *x, Nd4jLong *xShapeInfo, int xRank, void *params, void *z, Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { functions::transform::TransformFloat<X,Z>::template transformCuda<OpType>( x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformFloat<X,Y>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformFloat<X,Z>::transformCuda( void *vx, Nd4jLong *xShapeInfo, void *vparams, void *vz, Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto x = reinterpret_cast<X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto params = reinterpret_cast<Z*>(vparams); auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder) { for (Nd4jLong i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset 
= shape::getIndexOffset(i, xShapeInfo); auto zOffset = shape::getIndexOffset(i, zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template<typename X, typename Y> __device__ void TransformFloat<X,Y>::transformCudaLegacy( int opNum, void *x, Nd4jLong *xShapeInfo, void *params, void *z, Nd4jLong *zShapeInfo, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(transformCuda, PARAMS(x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS); } template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformFloat<X,Z>::intermediateShaped(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { transformFloatSimple<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); nd4j::DebugHelper::checkErrorCode(stream, "transformFloat(...) failed"); } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformFloat, , LIBND4J_TYPES, FLOAT_TYPES); } }
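When the kernel is a template instantiation and the launch carries a dynamic shared-memory size and a stream, as in intermediateShaped above, hipify passes the shared-memory size and stream as ordinary arguments to hipLaunchKernelGGL and wraps the kernel name in parentheses. A minimal sketch of that shape follows; the kernel and its arguments are illustrative, not from the dataset.

template <typename T>
__global__ void fillKernel(T* out, T value, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}

template <typename T>
void launchFill(T* d_out, T value, int n, size_t sharedBytes, cudaStream_t stream) {
    dim3 grid((n + 255) / 256), block(256);
    // CUDA form with explicit shared memory and stream, as in the .cu column above:
    fillKernel<T><<<grid, block, sharedBytes, stream>>>(d_out, value, n);
    // hipify parenthesizes the kernel name so that commas inside template argument
    // lists (as in transformFloatSimple<X, Z, OpType> above) cannot split the macro arguments:
    // hipLaunchKernelGGL((fillKernel<T>), grid, block, sharedBytes, stream, d_out, value, n);
}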
f10c63e02ffd7f3945440f698ee6efd99c67743d.hip
// !!! This is a file automatically generated by hipify!!! #include <gtest/gtest.h> #include <vector> #include <thrust/device_vector.h> #include <thrust/sequence.h> #include "../../../../src/tree/gpu_hist/row_partitioner.cuh" #include "../../helpers.h" namespace xgboost { namespace tree { void TestSortPosition(const std::vector<int>& position_in, int left_idx, int right_idx) { dh::safe_cuda(hipSetDevice(0)); std::vector<int64_t> left_count = { std::count(position_in.begin(), position_in.end(), left_idx)}; dh::caching_device_vector<int64_t> d_left_count = left_count; dh::caching_device_vector<int> position = position_in; dh::caching_device_vector<int> position_out(position.size()); dh::caching_device_vector<RowPartitioner::RowIndexT> ridx(position.size()); thrust::sequence(ridx.begin(), ridx.end()); dh::caching_device_vector<RowPartitioner::RowIndexT> ridx_out(ridx.size()); RowPartitioner rp(0,10); rp.SortPosition( common::Span<int>(position.data().get(), position.size()), common::Span<int>(position_out.data().get(), position_out.size()), common::Span<RowPartitioner::RowIndexT>(ridx.data().get(), ridx.size()), common::Span<RowPartitioner::RowIndexT>(ridx_out.data().get(), ridx_out.size()), left_idx, right_idx, d_left_count.data().get(), nullptr); thrust::host_vector<int> position_result = position_out; thrust::host_vector<int> ridx_result = ridx_out; // Check position is sorted EXPECT_TRUE(std::is_sorted(position_result.begin(), position_result.end())); // Check row indices are sorted inside left and right segment EXPECT_TRUE( std::is_sorted(ridx_result.begin(), ridx_result.begin() + left_count[0])); EXPECT_TRUE( std::is_sorted(ridx_result.begin() + left_count[0], ridx_result.end())); // Check key value pairs are the same for (auto i = 0ull; i < ridx_result.size(); i++) { EXPECT_EQ(position_result[i], position_in[ridx_result[i]]); } } TEST(GpuHist, SortPosition) { TestSortPosition({1, 2, 1, 2, 1}, 1, 2); TestSortPosition({1, 1, 1, 1}, 1, 2); TestSortPosition({2, 2, 2, 2}, 1, 2); TestSortPosition({1, 2, 1, 2, 3}, 1, 2); } void TestUpdatePosition() { const int kNumRows = 10; RowPartitioner rp(0, kNumRows); auto rows = rp.GetRowsHost(0); EXPECT_EQ(rows.size(), kNumRows); for (auto i = 0ull; i < kNumRows; i++) { EXPECT_EQ(rows[i], i); } // Send the first five training instances to the right node // and the second 5 to the left node rp.UpdatePosition(0, 1, 2, [=] __device__(RowPartitioner::RowIndexT ridx) { if (ridx > 4) { return 1; } else { return 2; } }); rows = rp.GetRowsHost(1); for (auto r : rows) { EXPECT_GT(r, 4); } rows = rp.GetRowsHost(2); for (auto r : rows) { EXPECT_LT(r, 5); } // Split the left node again rp.UpdatePosition(1, 3, 4, [=]__device__(RowPartitioner::RowIndexT ridx) { if (ridx < 7) { return 3 ; } return 4; }); EXPECT_EQ(rp.GetRows(3).size(), 2); EXPECT_EQ(rp.GetRows(4).size(), 3); // Check position is as expected EXPECT_EQ(rp.GetPositionHost(), std::vector<RowPartitioner::TreePositionT>({3,3,4,4,4,2,2,2,2,2})); } TEST(RowPartitioner, Basic) { TestUpdatePosition(); } void TestFinalise() { const int kNumRows = 10; RowPartitioner rp(0, kNumRows); rp.FinalisePosition([=]__device__(RowPartitioner::RowIndexT ridx, int position) { return 7; }); auto position = rp.GetPositionHost(); for(auto p:position) { EXPECT_EQ(p, 7); } } TEST(RowPartitioner, Finalise) { TestFinalise(); } void TestIncorrectRow() { RowPartitioner rp(0, 1); rp.UpdatePosition(0, 1, 2, [=]__device__ (RowPartitioner::RowIndexT ridx) { return 4; // This is not the left branch or the right branch }); } 
TEST(RowPartitioner, IncorrectRow) { ASSERT_DEATH({ TestIncorrectRow(); },".*"); } } // namespace tree } // namespace xgboost
f10c63e02ffd7f3945440f698ee6efd99c67743d.cu
#include <gtest/gtest.h> #include <vector> #include <thrust/device_vector.h> #include <thrust/sequence.h> #include "../../../../src/tree/gpu_hist/row_partitioner.cuh" #include "../../helpers.h" namespace xgboost { namespace tree { void TestSortPosition(const std::vector<int>& position_in, int left_idx, int right_idx) { dh::safe_cuda(cudaSetDevice(0)); std::vector<int64_t> left_count = { std::count(position_in.begin(), position_in.end(), left_idx)}; dh::caching_device_vector<int64_t> d_left_count = left_count; dh::caching_device_vector<int> position = position_in; dh::caching_device_vector<int> position_out(position.size()); dh::caching_device_vector<RowPartitioner::RowIndexT> ridx(position.size()); thrust::sequence(ridx.begin(), ridx.end()); dh::caching_device_vector<RowPartitioner::RowIndexT> ridx_out(ridx.size()); RowPartitioner rp(0,10); rp.SortPosition( common::Span<int>(position.data().get(), position.size()), common::Span<int>(position_out.data().get(), position_out.size()), common::Span<RowPartitioner::RowIndexT>(ridx.data().get(), ridx.size()), common::Span<RowPartitioner::RowIndexT>(ridx_out.data().get(), ridx_out.size()), left_idx, right_idx, d_left_count.data().get(), nullptr); thrust::host_vector<int> position_result = position_out; thrust::host_vector<int> ridx_result = ridx_out; // Check position is sorted EXPECT_TRUE(std::is_sorted(position_result.begin(), position_result.end())); // Check row indices are sorted inside left and right segment EXPECT_TRUE( std::is_sorted(ridx_result.begin(), ridx_result.begin() + left_count[0])); EXPECT_TRUE( std::is_sorted(ridx_result.begin() + left_count[0], ridx_result.end())); // Check key value pairs are the same for (auto i = 0ull; i < ridx_result.size(); i++) { EXPECT_EQ(position_result[i], position_in[ridx_result[i]]); } } TEST(GpuHist, SortPosition) { TestSortPosition({1, 2, 1, 2, 1}, 1, 2); TestSortPosition({1, 1, 1, 1}, 1, 2); TestSortPosition({2, 2, 2, 2}, 1, 2); TestSortPosition({1, 2, 1, 2, 3}, 1, 2); } void TestUpdatePosition() { const int kNumRows = 10; RowPartitioner rp(0, kNumRows); auto rows = rp.GetRowsHost(0); EXPECT_EQ(rows.size(), kNumRows); for (auto i = 0ull; i < kNumRows; i++) { EXPECT_EQ(rows[i], i); } // Send the first five training instances to the right node // and the second 5 to the left node rp.UpdatePosition(0, 1, 2, [=] __device__(RowPartitioner::RowIndexT ridx) { if (ridx > 4) { return 1; } else { return 2; } }); rows = rp.GetRowsHost(1); for (auto r : rows) { EXPECT_GT(r, 4); } rows = rp.GetRowsHost(2); for (auto r : rows) { EXPECT_LT(r, 5); } // Split the left node again rp.UpdatePosition(1, 3, 4, [=]__device__(RowPartitioner::RowIndexT ridx) { if (ridx < 7) { return 3 ; } return 4; }); EXPECT_EQ(rp.GetRows(3).size(), 2); EXPECT_EQ(rp.GetRows(4).size(), 3); // Check position is as expected EXPECT_EQ(rp.GetPositionHost(), std::vector<RowPartitioner::TreePositionT>({3,3,4,4,4,2,2,2,2,2})); } TEST(RowPartitioner, Basic) { TestUpdatePosition(); } void TestFinalise() { const int kNumRows = 10; RowPartitioner rp(0, kNumRows); rp.FinalisePosition([=]__device__(RowPartitioner::RowIndexT ridx, int position) { return 7; }); auto position = rp.GetPositionHost(); for(auto p:position) { EXPECT_EQ(p, 7); } } TEST(RowPartitioner, Finalise) { TestFinalise(); } void TestIncorrectRow() { RowPartitioner rp(0, 1); rp.UpdatePosition(0, 1, 2, [=]__device__ (RowPartitioner::RowIndexT ridx) { return 4; // This is not the left branch or the right branch }); } TEST(RowPartitioner, IncorrectRow) { ASSERT_DEATH({ TestIncorrectRow(); 
},".*"); } } // namespace tree } // namespace xgboost
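TestSortPosition above only asserts invariants of RowPartitioner::SortPosition: positions come out sorted, row indices stay ordered inside the left and right segments, and every (position, row) pair is preserved. The following host-side sketch illustrates those invariants with std::stable_partition; it mirrors what the test checks, not the CUDA implementation itself.

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

// A stable partition of the row indices by node position satisfies all three assertions
// in TestSortPosition (sorted positions, sorted indices per segment, preserved pairs).
int main() {
  std::vector<int> position = {1, 2, 1, 2, 1};   // same input as the first TestSortPosition call
  const int left_idx = 1;

  std::vector<int> ridx(position.size());
  std::iota(ridx.begin(), ridx.end(), 0);        // rows 0..n-1, as thrust::sequence does in the test

  std::stable_partition(ridx.begin(), ridx.end(),
                        [&](int r) { return position[r] == left_idx; });

  for (int r : ridx)
    printf("row %d -> position %d\n", r, position[r]);
  // Prints rows 0, 2, 4 (position 1) followed by rows 1, 3 (position 2).
  return 0;
}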
2b0049ff68dd3c12377da8f8d1348e02a00c5490.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void histgramMakerKernel_SharedMemAtomics(int *d_histgram, const unsigned char *d_text, int textLength) { __shared__ int sh_histgram[256]; for (int histPos = threadIdx.x; histPos < 256; histPos += blockDim.x) sh_histgram[histPos] = 0; __syncthreads(); int stride = gridDim.x * blockDim.x; int gid = blockDim.x * blockIdx.x + threadIdx.x; for (int pos = gid; pos < textLength; pos += stride) { int ch = d_text[pos]; atomicAdd(&sh_histgram[ch], 1); } __syncthreads(); for (int histPos = threadIdx.x; histPos < 256; histPos += blockDim.x) atomicAdd(&d_histgram[histPos], sh_histgram[histPos]); }
2b0049ff68dd3c12377da8f8d1348e02a00c5490.cu
extern "C" __global__ void histgramMakerKernel_SharedMemAtomics(int *d_histgram, const unsigned char *d_text, int textLength) { __shared__ int sh_histgram[256]; for (int histPos = threadIdx.x; histPos < 256; histPos += blockDim.x) sh_histgram[histPos] = 0; __syncthreads(); int stride = gridDim.x * blockDim.x; int gid = blockDim.x * blockIdx.x + threadIdx.x; for (int pos = gid; pos < textLength; pos += stride) { int ch = d_text[pos]; atomicAdd(&sh_histgram[ch], 1); } __syncthreads(); for (int histPos = threadIdx.x; histPos < 256; histPos += blockDim.x) atomicAdd(&d_histgram[histPos], sh_histgram[histPos]); }
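A minimal host-side driver for the shared-memory-atomics histogram kernel above. It assumes this file is compiled and linked together with that kernel under nvcc; the input size, fill value, and launch configuration are illustrative choices, not taken from the original project.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstring>

extern "C" __global__ void histgramMakerKernel_SharedMemAtomics(int *d_histgram,
                                                                const unsigned char *d_text,
                                                                int textLength);

int main() {
  const int textLength = 1 << 20;                 // 1 MiB of sample text (illustrative)
  unsigned char *h_text = new unsigned char[textLength];
  memset(h_text, 'a', textLength);                // degenerate input: every byte is 'a'

  unsigned char *d_text = nullptr;
  int *d_histgram = nullptr;
  cudaMalloc(&d_text, textLength);
  cudaMalloc(&d_histgram, 256 * sizeof(int));
  cudaMemcpy(d_text, h_text, textLength, cudaMemcpyHostToDevice);
  cudaMemset(d_histgram, 0, 256 * sizeof(int));   // the kernel accumulates into this buffer

  // Grid-stride kernel: any reasonable grid works; 256 threads cover the 256 shared-memory bins.
  histgramMakerKernel_SharedMemAtomics<<<64, 256>>>(d_histgram, d_text, textLength);

  int h_histgram[256];
  cudaMemcpy(h_histgram, d_histgram, sizeof(h_histgram), cudaMemcpyDeviceToHost);
  printf("count('a') = %d\n", h_histgram['a']);   // expected: textLength

  cudaFree(d_text);
  cudaFree(d_histgram);
  delete[] h_text;
  return 0;
}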
14efc9c9ec049fceaf05be0060b7042aeb69dc11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height){ int j = blockIdx.x*blockDim.x + threadIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y; if ((i<0)||(i>=height)||(j<0)||(j>=width)) {} else { Resultat[j*height + i] = Source[i*width + j]; } }
14efc9c9ec049fceaf05be0060b7042aeb69dc11.cu
#include "includes.h" __global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height){ int j = blockIdx.x*blockDim.x + threadIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y; if ((i<0)||(i>=height)||(j<0)||(j>=width)) {} else { Resultat[j*height + i] = Source[i*width + j]; } }
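A minimal launch sketch for the naive transpose kernel above, assuming both files are compiled with nvcc and linked together. The u_char typedef is assumed to resolve to unsigned char (as it does via sys/types.h on typical Linux setups); the image size and block shape are illustrative.

#include <cuda_runtime.h>
#include <cstdio>

typedef unsigned char u_char;   // assumption: matches the u_char brought in by includes.h

__global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height);

int main() {
  const unsigned width = 640, height = 480;       // illustrative image size
  u_char *d_src = nullptr, *d_dst = nullptr;
  cudaMalloc(&d_src, width * height);
  cudaMalloc(&d_dst, width * height);
  cudaMemset(d_src, 0, width * height);           // real code would upload pixel data here

  // One thread per pixel: x indexes columns (width), y indexes rows (height).
  dim3 block(32, 32);
  dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
  gpu_transpo_kernel_naive<<<grid, block>>>(d_src, d_dst, width, height);

  cudaError_t err = cudaDeviceSynchronize();
  printf("transpose: %s\n", cudaGetErrorString(err));

  cudaFree(d_src);
  cudaFree(d_dst);
  return 0;
}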
7c04ff04074938c3ff431d48150983e21385fb25.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<dense_mv.hpp> using namespace livai::tts::sys; __global__ void dense_mv_add(size_t sz, float_t* src, float_t* dest) { size_t index = blockIdx.x*blockDim.x + threadIdx.x; if(index < sz) { dest[index] += src[index]; } } dense_mv::dense_mv() { } void dense_mv::init(const cnpy::NpyArray& h_kernel, const cnpy::NpyArray& h_bias) { checkCUBLAS(hipblasCreate (& handle )); // load kernel d_kernel.init(h_kernel.shape); hipMemcpy(d_kernel.ptr, h_kernel.data<float_t>(), d_kernel.size()*sizeof(float_t), hipMemcpyHostToDevice); // load bias d_bias.init(h_bias.shape); hipMemcpy(d_bias.ptr, h_bias.data<float_t>(), d_bias.size()*sizeof(float_t), hipMemcpyHostToDevice); hasbias = true; } void dense_mv::init(const cnpy::NpyArray& h_kernel) { checkCUBLAS(hipblasCreate (& handle )); // load kernel d_kernel.init(h_kernel.shape); hipMemcpy(d_kernel.ptr, h_kernel.data<float_t>(), d_kernel.size()*sizeof(float_t), hipMemcpyHostToDevice); hasbias = false; } void dense_mv::operator () (cudnnHandle_t& cudnn, const gpu_float_array& d_input, gpu_float_array& d_output) { const float alpha = 1, beta = 0; size_t m = 1; size_t k = d_input.shape[1]; size_t n = d_input.shape[0]; hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alpha, d_kernel.ptr, m, d_input.ptr, k, &beta, d_output.ptr, m); // add bias if(hasbias) { hipLaunchKernelGGL(( dense_mv_add), dim3(1), dim3(m), 0, 0, m, d_bias.ptr, d_output.ptr); } } // free host & device memory dense_mv::~dense_mv() { hipblasDestroy ( handle ); }
7c04ff04074938c3ff431d48150983e21385fb25.cu
#include<dense_mv.hpp> using namespace livai::tts::sys; __global__ void dense_mv_add(size_t sz, float_t* src, float_t* dest) { size_t index = blockIdx.x*blockDim.x + threadIdx.x; if(index < sz) { dest[index] += src[index]; } } dense_mv::dense_mv() { } void dense_mv::init(const cnpy::NpyArray& h_kernel, const cnpy::NpyArray& h_bias) { checkCUBLAS(cublasCreate (& handle )); // load kernel d_kernel.init(h_kernel.shape); cudaMemcpy(d_kernel.ptr, h_kernel.data<float_t>(), d_kernel.size()*sizeof(float_t), cudaMemcpyHostToDevice); // load bias d_bias.init(h_bias.shape); cudaMemcpy(d_bias.ptr, h_bias.data<float_t>(), d_bias.size()*sizeof(float_t), cudaMemcpyHostToDevice); hasbias = true; } void dense_mv::init(const cnpy::NpyArray& h_kernel) { checkCUBLAS(cublasCreate (& handle )); // load kernel d_kernel.init(h_kernel.shape); cudaMemcpy(d_kernel.ptr, h_kernel.data<float_t>(), d_kernel.size()*sizeof(float_t), cudaMemcpyHostToDevice); hasbias = false; } void dense_mv::operator () (cudnnHandle_t& cudnn, const gpu_float_array& d_input, gpu_float_array& d_output) { const float alpha = 1, beta = 0; size_t m = 1; size_t k = d_input.shape[1]; size_t n = d_input.shape[0]; cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, d_kernel.ptr, m, d_input.ptr, k, &beta, d_output.ptr, m); // add bias if(hasbias) { dense_mv_add<<<1, m>>>(m, d_bias.ptr, d_output.ptr); } } // free host & device memory dense_mv::~dense_mv() { cublasDestroy ( handle ); }
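Because m is fixed to 1, the cublasSgemm call in dense_mv::operator() reduces to a matrix-vector product: each output element is the dot product of one input row (of length k = shape[1]) with the kernel vector. The sketch below expresses the same product with cublasSgemv, assuming the row-major [n, k] layout implied by how operator() reads the shape; dense_mv_gemv is a hypothetical helper, not part of the source above.

#include <cublas_v2.h>

// Hypothetical helper: out[j] = dot(kernel, row j of input), which is what the m == 1 Sgemm evaluates.
static void dense_mv_gemv(cublasHandle_t handle,
                          const float* d_input,   // row-major, n rows of k floats (assumption)
                          const float* d_kernel,  // k floats
                          float* d_output,        // n floats
                          int n, int k) {
  const float alpha = 1.0f, beta = 0.0f;
  // cuBLAS is column-major: the row-major (n x k) buffer is a column-major (k x n) matrix,
  // so multiplying its transpose by the kernel vector yields the n dot products.
  cublasSgemv(handle, CUBLAS_OP_T, k, n, &alpha, d_input, k, d_kernel, 1, &beta, d_output, 1);
}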
427915b8ebdfd94a20e8e36315509805290cef6e.hip
// !!! This is a file automatically generated by hipify!!! //Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> #include <stdio.h> #include <hip/hip_runtime.h> /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ //#define DEBUGGING1 // bitmasks #define b0 0x00000001 #define b1 0x00000002 #define b2 0x00000004 #define b3 0x00000008 #define b4 0x00000010 #define b5 0x00000020 #define b6 0x00000040 #define b7 0x00000080 #define b8 0x00000100 #define b9 0x00000200 #define b10 0x00000400 #define b11 0x00000800 #define b12 0x00001000 #define b13 0x00002000 #define b14 0x00004000 #define b15 0x00008000 #define b16 0x00010000 #define b17 0x00020000 #define b18 0x00040000 #define b19 0x00080000 #define b20 0x00100000 #define b21 0x00200000 #define b22 0x00400000 #define b23 0x00800000 #define b24 0x01000000 #define b25 0x02000000 #define b26 0x04000000 #define b27 0x08000000 #define b28 0x10000000 #define b29 0x20000000 #define b30 0x40000000 #define b31 0x80000000 unsigned int bMasks[32] = { b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31 }; __global__ void lsbHisto_kernel(unsigned int* d_binHistogram, unsigned int numBins, unsigned int* const d_inVals, const size_t numElems) { //1) loop from 0 to biggest value; //2) perform check to see if the value is unsigned int bMasks[32] = { b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31 }; extern __shared__ unsigned int s_vals[]; if (numBins > 32) numBins = 32; unsigned int tIdx = threadIdx.x; unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (gIdx < numElems) { s_vals[tIdx] = d_inVals[gIdx]; for (int i = 0; i < numBins; i++) { if ((bMasks[i]&s_vals[tIdx]) == bMasks[i]) { atomicAdd(&(d_binHistogram[i]), 1); } } } #ifdef DEBUGGING2 if (blockIdx.x < 1) { printf("d_inVals[ %d ] = %d == %d , checkval j= %d .\n", gIdx, s_vals[tIdx], d_inVals[gIdx], j); printf("%d - %d , %d, %d ==> %d, %d, %d .\n", s_vals[tIdx], 
bMasks[0], bMasks[1], bMasks[2], s_vals[tIdx] & bMasks[0], s_vals[tIdx] & bMasks[1], s_vals[tIdx] & bMasks[2]); } #endif } __global__ void incSumScan_kernel(unsigned int* d_outVals, unsigned int* d_inVals, size_t numVals) { unsigned int tIdx = threadIdx.x; unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ unsigned int s_incScan[]; if (gIdx >= numVals) return; s_incScan[tIdx] = d_inVals[tIdx]; __syncthreads(); for (int offset = 1; offset <= numVals; offset = offset * 2) { unsigned int temp = s_incScan[tIdx]; unsigned int neighbor = 0; if (tIdx >= offset ) { neighbor = s_incScan[tIdx - offset]; __syncthreads(); s_incScan[tIdx] = temp + neighbor; } __syncthreads(); } d_outVals[tIdx] = s_incScan[tIdx]; } //first part of inclusive sum scan of an array larger than a single block. __global__ void incSumScanB1_kernel(unsigned int* d_outVals, unsigned int* d_inVals, size_t numVals, unsigned int* d_blockOffset, unsigned int valOffset) { unsigned int tIdx = threadIdx.x; unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ unsigned int s_incScan[]; if (gIdx >= numVals) return; //if it is the first element of a block then we need to add the offset to it. s_incScan[tIdx] = (tIdx == 0)? d_inVals[gIdx] + valOffset: d_inVals[gIdx]; // if (tIdx == 0) printf("gIdx = %d, d_inVals[ %d ] = %d , s_incScan[ %d ] = %d , valOffset = %d .\n", gIdx, gIdx, d_inVals[gIdx], tIdx, s_incScan[tIdx], valOffset); __syncthreads(); //for (int offset = 1; offset <= numVals; offset = offset * 2) for (int offset = 1; offset <= blockDim.x; offset = offset * 2) { unsigned int temp = s_incScan[tIdx]; unsigned int neighbor = 0; if (tIdx >= offset) { neighbor = s_incScan[tIdx - offset]; __syncthreads(); s_incScan[tIdx] = temp + neighbor; } __syncthreads(); } d_outVals[gIdx] = s_incScan[tIdx]; //now set the cumulative sum for this block in the the blockoffsetarray if ((tIdx + 1) == blockDim.x) { if ((blockIdx.x + 1) < gridDim.x) { d_blockOffset[blockIdx.x + 1] = s_incScan[tIdx]; //this will still need to be summed with other blocks } } // if (gIdx < 10 || gIdx > (numVals - 10)) printf("gIdx = %d, d_inVals[ %d ] = %d, d_outvals[ %d ] = %d , s_incScan[ %d ] = %d , valOffset = %d .\n", // gIdx, gIdx, d_inVals[gIdx], gIdx, d_outVals[gIdx], tIdx, s_incScan[tIdx], valOffset); } //finishes the multi-part sumScan of an array larger than blockSize - __global__ void incSumScanB2_kernel(unsigned int* d_outVals, unsigned int* d_inVals, size_t numVals, unsigned int* d_blockOffset) { // unsigned int tIdx = threadIdx.x; unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ unsigned int s_incScan[]; if (gIdx >= numVals) return; d_outVals[gIdx] = ( blockIdx.x > 0) ? d_inVals[gIdx] + d_blockOffset[blockIdx.x]: d_inVals[gIdx]; } __global__ void arraySet_kernel(unsigned int* d_vals, unsigned int value, size_t num_vals) { // tIdx = threadIdx.x; unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (gIdx < num_vals) d_vals[gIdx] = value; } __global__ void getPredicate_kernel(unsigned int * d_inVal, unsigned int * d_predVal, unsigned int numElems, unsigned int bitMask) { unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (gIdx < numElems) { // if bitmask matches inputvale then assign 1 to the position otherwise set to 0 // we'll need to run an inclusive scan later to get the position d_predVal[gIdx] = ((d_inVal[gIdx] & bitMask) == bitMask) ? 1 : 0; //d_npredVal[gIdx] = ((d_inVal[gIdx] & bitMask) == bitMask) ? 
0 : 1; } } __global__ void swapLocations_kernel(unsigned int * d_outVals, unsigned int * d_inVals, unsigned int * d_outPos, unsigned int * d_inPos, unsigned int * d_swapPred, /*unsigned int * d_swapnPred,*/ unsigned int numElems, unsigned int bitmask) { unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int offset = d_swapPred[numElems-1]; int swapmove; __syncthreads(); if (gIdx < numElems) { //unsigned int swapmove = ((d_inVals[gIdx] & bitmask) == bitmask) ? d_swapPred[gIdx]-1 : (gIdx - (d_swapPred[gIdx]-1))+offset-1; if ((d_inVals[gIdx] & bitmask) == bitmask) { swapmove = d_swapPred[gIdx] - 1; //if (gIdx < 10 || gIdx >(numElems - 10)) printf("gIdx = %d, swapmove = %d .\n", gIdx, swapmove); //if (swapmove < 0) swapmove = 0; } else { swapmove = (gIdx - (d_swapPred[gIdx] - 1)) + offset-1; //if (gIdx < 10 || gIdx >(numElems - 10)) printf("gIdx = %d, swapmove = %d, offset = %d .\n", gIdx, swapmove, offset); //if (swapmove < 0) swapmove = 0; } d_outVals[swapmove] = d_inVals[gIdx]; d_outPos[swapmove] = d_inPos[gIdx]; // if (gIdx < 10 || gIdx > (numElems - 10)) { // printf("gIdx = %d , bitmask = %08x , offset= %d, swapmove = %d , d_inVals[gIdx] = %d, d_inPos[gIdx] = %d .\n ", // gIdx, bitmask, offset, swapmove, d_inVals[gIdx], d_inPos[gIdx]); // } } } __global__ void swapVals_kernel(unsigned int * d_newArray, unsigned int * d_oldArray, unsigned int numElems) { unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (gIdx < numElems) { d_newArray[gIdx] = d_oldArray[gIdx]; } } __global__ void reverseSort_kernel(unsigned int * d_newArray, unsigned int * d_oldArray, unsigned int numElems) { unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (gIdx < numElems) { d_newArray[gIdx] = d_oldArray[(numElems - 1)- gIdx]; } } void your_sort(unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int* const d_outputVals, unsigned int* const d_outputPos, const size_t numElems) { //inputPos holds original position. //outputPos holds the location when resorted by Val #ifdef DEBUGGING1 std::cout << "Sort of " << numElems << " Elements through " << 8*sizeof(unsigned int)<< " loops." << std::endl; #endif unsigned int threadsperblock = 32; //Assign Histogram in device unsigned int *d_binHistogram; //for 32bit integers unsigned int numBins = 32; checkCudaErrors(hipMalloc(&d_binHistogram, numBins*sizeof(unsigned int))); //set histogram values to zero - faster than memcpy? dim3 blockSize = { threadsperblock, 1, 1 }; dim3 gridSize = { (numBins + blockSize.x - 1) / (blockSize.x), 1, 1 }; arraySet_kernel << <gridSize, blockSize >> > (d_binHistogram, (unsigned int)0, numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); blockSize = { threadsperblock, 1, 1 }; gridSize = { ((unsigned int)numElems + blockSize.x - 1) / blockSize.x, 1, 1 }; // std::cout << "blocks = " << gridSize.x << " when using " << blockSize.x << " threads per block ." 
<< std::endl; lsbHisto_kernel << <gridSize, blockSize, blockSize.x*sizeof(unsigned int) >> > (d_binHistogram, numBins, d_inputVals, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); unsigned int * h_binHistogram = (unsigned int*)std::malloc(numBins * sizeof(unsigned int)); checkCudaErrors(hipMemcpy(h_binHistogram, d_binHistogram, numBins * sizeof(unsigned int), hipMemcpyDeviceToHost)); #ifdef DEBUGGING1 std::cout << "h_binHistogram [ "; for (unsigned int i = 0; i < numBins-1; i++) { std::cout << h_binHistogram[i] << ","; } std::cout << h_binHistogram[numBins-1] << "]" << std::endl; #endif // don't need to add these up - only do one at a time. // incSumScan_kernel<< < 1, numBins, numBins * sizeof(unsigned int) >> > (d_binHistogram, d_binHistogram, numBins); // hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); #ifdef DEBUGGING1 // h_binHistogram = (unsigned int*)std::malloc(numBins * sizeof(unsigned int)); // checkCudaErrors(hipMemcpy(h_binHistogram, d_binHistogram, numBins * sizeof(unsigned int), hipMemcpyDeviceToHost)); // std::cout << "h_binHistogram [ "; // for (unsigned int i = 0; i < numBins - 1; i++) { // std::cout << h_binHistogram[i] << ","; // } // std::cout << h_binHistogram[numBins - 1] << "]" << std::endl; // free(h_binHistogram); #endif threadsperblock = 1024; blockSize = { threadsperblock, 1, 1 }; gridSize = { ((unsigned int)numElems + blockSize.x - 1) / blockSize.x, 1, 1 }; // std::cout << "Doing inclusive sumscan in " << gridSize.x << " blocks of " << blockSize.x << " threads." << std::endl; unsigned int * d_blockOffsets; checkCudaErrors(hipMalloc(&d_blockOffsets, gridSize.x * sizeof(unsigned int))); unsigned int * d_predicates; //, * d_npredicates; checkCudaErrors(hipMalloc(&d_predicates, numElems * sizeof(unsigned int))); //checkCudaErrors(hipMalloc(&d_npredicates, numElems * sizeof(unsigned int))); //for (int maskPtr = 0; maskPtr < 32; maskPtr++) //should be to 32 for (int maskPtr = 0; maskPtr < 32; maskPtr++) //should be to 32 { if (h_binHistogram[maskPtr] > 0) //don't bother if no elements to be sorted - everything will stay the same { //rad_sort(d_inputVals, d_outputVals, d_inputPos, d_outputPos, d_predicates, d_npredicates, numElems); // is npredicate == gIdx - d_predicates[gIdx] + d_binHistogram[bMasks[maskPtr]] arraySet_kernel << <1, gridSize>> > (d_blockOffsets, (unsigned int) 0, gridSize.x); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // run predicate on input vals and put result in outputPos getPredicate_kernel << <gridSize, blockSize >> > (d_inputVals, d_predicates, numElems, bMasks[maskPtr]); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // run inclusive scans on each block, putting Offset total for that block in d_blockOffsets incSumScanB1_kernel << < gridSize, blockSize, blockSize.x * sizeof(unsigned int) >> > (d_predicates, d_predicates, numElems, d_blockOffsets, 0); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // run inclusive scan on d_blockoffsets incSumScan_kernel << < 1, gridSize, gridSize.x * sizeof(unsigned int) >> > (d_blockOffsets, d_blockOffsets, gridSize.x); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //finish the sumscan accounting for all blocks incSumScanB2_kernel << < gridSize, blockSize >> > (d_predicates, d_predicates, numElems, d_blockOffsets); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); #ifdef DEBUGGING1 unsigned int * h_blockOffsets = (unsigned int*)std::malloc(gridSize.x * sizeof(unsigned int)); 
checkCudaErrors(hipMemcpy(h_blockOffsets, d_blockOffsets, gridSize.x * sizeof(unsigned int), hipMemcpyDeviceToHost)); std::cout << "h_blockOffsets [ "; for (unsigned int i = 0; i < gridSize.x - 1; i++) { std::cout << h_blockOffsets[i] << ","; } std::cout << h_blockOffsets[gridSize.x - 1] << "]" << std::endl; free(h_blockOffsets); #endif //do the gathering moving values and positions into new locations on the output arrays swapLocations_kernel << < gridSize, blockSize >> > (d_outputVals, d_inputVals, d_outputPos, d_inputPos, d_predicates, numElems, bMasks[maskPtr]); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //now move all them back to input locations so we can do it again on next loop swapVals_kernel << < gridSize, blockSize >> > (d_inputVals, d_outputVals, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); swapVals_kernel << < gridSize, blockSize >> > (d_inputPos, d_outputPos, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // std::cout << "Got to end of loop " << maskPtr << std::endl; } else { // printf("skipping loop %d because there are no matches on this bitmask.\n", maskPtr); } } threadsperblock = 1024; blockSize = { threadsperblock, 1, 1 }; gridSize = { ((unsigned int)numElems + blockSize.x - 1) / blockSize.x, 1, 1 }; //I may have sorted the wrong way! reverseSort_kernel << < gridSize, blockSize >> > (d_outputPos, d_inputPos, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); reverseSort_kernel << < gridSize, blockSize >> > (d_outputVals, d_inputVals, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); #ifdef DEBUGGING1 unsigned int * h_Vals = (unsigned int*)std::malloc(numElems * sizeof(unsigned int)); checkCudaErrors(hipMemcpy(h_Vals, d_outputVals, numElems * sizeof(unsigned int), hipMemcpyDeviceToHost)); unsigned int * h_Poss = (unsigned int*)std::malloc(numElems * sizeof(unsigned int)); checkCudaErrors(hipMemcpy(h_Poss, d_outputPos, numElems * sizeof(unsigned int), hipMemcpyDeviceToHost)); std::cout << "Pos, Val, OrigPos \n"; for (unsigned int i = 0; i < numElems; i++) { std::cout << i <<","<< h_Vals[i] << "," << h_Poss[i] << "," << std::endl; } free(h_Vals); free(h_Poss); #endif free(h_binHistogram); checkCudaErrors(hipFree(d_binHistogram)); checkCudaErrors(hipFree(d_blockOffsets)); }
427915b8ebdfd94a20e8e36315509805290cef6e.cu
//Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> #include <stdio.h> #include <cuda_runtime.h> /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ //#define DEBUGGING1 // bitmasks #define b0 0x00000001 #define b1 0x00000002 #define b2 0x00000004 #define b3 0x00000008 #define b4 0x00000010 #define b5 0x00000020 #define b6 0x00000040 #define b7 0x00000080 #define b8 0x00000100 #define b9 0x00000200 #define b10 0x00000400 #define b11 0x00000800 #define b12 0x00001000 #define b13 0x00002000 #define b14 0x00004000 #define b15 0x00008000 #define b16 0x00010000 #define b17 0x00020000 #define b18 0x00040000 #define b19 0x00080000 #define b20 0x00100000 #define b21 0x00200000 #define b22 0x00400000 #define b23 0x00800000 #define b24 0x01000000 #define b25 0x02000000 #define b26 0x04000000 #define b27 0x08000000 #define b28 0x10000000 #define b29 0x20000000 #define b30 0x40000000 #define b31 0x80000000 unsigned int bMasks[32] = { b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31 }; __global__ void lsbHisto_kernel(unsigned int* d_binHistogram, unsigned int numBins, unsigned int* const d_inVals, const size_t numElems) { //1) loop from 0 to biggest value; //2) perform check to see if the value is unsigned int bMasks[32] = { b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31 }; extern __shared__ unsigned int s_vals[]; if (numBins > 32) numBins = 32; unsigned int tIdx = threadIdx.x; unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (gIdx < numElems) { s_vals[tIdx] = d_inVals[gIdx]; for (int i = 0; i < numBins; i++) { if ((bMasks[i]&s_vals[tIdx]) == bMasks[i]) { atomicAdd(&(d_binHistogram[i]), 1); } } } #ifdef DEBUGGING2 if (blockIdx.x < 1) { printf("d_inVals[ %d ] = %d == %d , checkval j= %d .\n", gIdx, s_vals[tIdx], d_inVals[gIdx], j); printf("%d - %d , %d, %d ==> %d, %d, %d .\n", s_vals[tIdx], bMasks[0], bMasks[1], bMasks[2], s_vals[tIdx] & bMasks[0], 
s_vals[tIdx] & bMasks[1], s_vals[tIdx] & bMasks[2]); } #endif } __global__ void incSumScan_kernel(unsigned int* d_outVals, unsigned int* d_inVals, size_t numVals) { unsigned int tIdx = threadIdx.x; unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ unsigned int s_incScan[]; if (gIdx >= numVals) return; s_incScan[tIdx] = d_inVals[tIdx]; __syncthreads(); for (int offset = 1; offset <= numVals; offset = offset * 2) { unsigned int temp = s_incScan[tIdx]; unsigned int neighbor = 0; if (tIdx >= offset ) { neighbor = s_incScan[tIdx - offset]; __syncthreads(); s_incScan[tIdx] = temp + neighbor; } __syncthreads(); } d_outVals[tIdx] = s_incScan[tIdx]; } //first part of inclusive sum scan of an array larger than a single block. __global__ void incSumScanB1_kernel(unsigned int* d_outVals, unsigned int* d_inVals, size_t numVals, unsigned int* d_blockOffset, unsigned int valOffset) { unsigned int tIdx = threadIdx.x; unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ unsigned int s_incScan[]; if (gIdx >= numVals) return; //if it is the first element of a block then we need to add the offset to it. s_incScan[tIdx] = (tIdx == 0)? d_inVals[gIdx] + valOffset: d_inVals[gIdx]; // if (tIdx == 0) printf("gIdx = %d, d_inVals[ %d ] = %d , s_incScan[ %d ] = %d , valOffset = %d .\n", gIdx, gIdx, d_inVals[gIdx], tIdx, s_incScan[tIdx], valOffset); __syncthreads(); //for (int offset = 1; offset <= numVals; offset = offset * 2) for (int offset = 1; offset <= blockDim.x; offset = offset * 2) { unsigned int temp = s_incScan[tIdx]; unsigned int neighbor = 0; if (tIdx >= offset) { neighbor = s_incScan[tIdx - offset]; __syncthreads(); s_incScan[tIdx] = temp + neighbor; } __syncthreads(); } d_outVals[gIdx] = s_incScan[tIdx]; //now set the cumulative sum for this block in the the blockoffsetarray if ((tIdx + 1) == blockDim.x) { if ((blockIdx.x + 1) < gridDim.x) { d_blockOffset[blockIdx.x + 1] = s_incScan[tIdx]; //this will still need to be summed with other blocks } } // if (gIdx < 10 || gIdx > (numVals - 10)) printf("gIdx = %d, d_inVals[ %d ] = %d, d_outvals[ %d ] = %d , s_incScan[ %d ] = %d , valOffset = %d .\n", // gIdx, gIdx, d_inVals[gIdx], gIdx, d_outVals[gIdx], tIdx, s_incScan[tIdx], valOffset); } //finishes the multi-part sumScan of an array larger than blockSize - __global__ void incSumScanB2_kernel(unsigned int* d_outVals, unsigned int* d_inVals, size_t numVals, unsigned int* d_blockOffset) { // unsigned int tIdx = threadIdx.x; unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ unsigned int s_incScan[]; if (gIdx >= numVals) return; d_outVals[gIdx] = ( blockIdx.x > 0) ? d_inVals[gIdx] + d_blockOffset[blockIdx.x]: d_inVals[gIdx]; } __global__ void arraySet_kernel(unsigned int* d_vals, unsigned int value, size_t num_vals) { // tIdx = threadIdx.x; unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (gIdx < num_vals) d_vals[gIdx] = value; } __global__ void getPredicate_kernel(unsigned int * d_inVal, unsigned int * d_predVal, unsigned int numElems, unsigned int bitMask) { unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (gIdx < numElems) { // if bitmask matches inputvale then assign 1 to the position otherwise set to 0 // we'll need to run an inclusive scan later to get the position d_predVal[gIdx] = ((d_inVal[gIdx] & bitMask) == bitMask) ? 1 : 0; //d_npredVal[gIdx] = ((d_inVal[gIdx] & bitMask) == bitMask) ? 
0 : 1; } } __global__ void swapLocations_kernel(unsigned int * d_outVals, unsigned int * d_inVals, unsigned int * d_outPos, unsigned int * d_inPos, unsigned int * d_swapPred, /*unsigned int * d_swapnPred,*/ unsigned int numElems, unsigned int bitmask) { unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int offset = d_swapPred[numElems-1]; int swapmove; __syncthreads(); if (gIdx < numElems) { //unsigned int swapmove = ((d_inVals[gIdx] & bitmask) == bitmask) ? d_swapPred[gIdx]-1 : (gIdx - (d_swapPred[gIdx]-1))+offset-1; if ((d_inVals[gIdx] & bitmask) == bitmask) { swapmove = d_swapPred[gIdx] - 1; //if (gIdx < 10 || gIdx >(numElems - 10)) printf("gIdx = %d, swapmove = %d .\n", gIdx, swapmove); //if (swapmove < 0) swapmove = 0; } else { swapmove = (gIdx - (d_swapPred[gIdx] - 1)) + offset-1; //if (gIdx < 10 || gIdx >(numElems - 10)) printf("gIdx = %d, swapmove = %d, offset = %d .\n", gIdx, swapmove, offset); //if (swapmove < 0) swapmove = 0; } d_outVals[swapmove] = d_inVals[gIdx]; d_outPos[swapmove] = d_inPos[gIdx]; // if (gIdx < 10 || gIdx > (numElems - 10)) { // printf("gIdx = %d , bitmask = %08x , offset= %d, swapmove = %d , d_inVals[gIdx] = %d, d_inPos[gIdx] = %d .\n ", // gIdx, bitmask, offset, swapmove, d_inVals[gIdx], d_inPos[gIdx]); // } } } __global__ void swapVals_kernel(unsigned int * d_newArray, unsigned int * d_oldArray, unsigned int numElems) { unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (gIdx < numElems) { d_newArray[gIdx] = d_oldArray[gIdx]; } } __global__ void reverseSort_kernel(unsigned int * d_newArray, unsigned int * d_oldArray, unsigned int numElems) { unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (gIdx < numElems) { d_newArray[gIdx] = d_oldArray[(numElems - 1)- gIdx]; } } void your_sort(unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int* const d_outputVals, unsigned int* const d_outputPos, const size_t numElems) { //inputPos holds original position. //outputPos holds the location when resorted by Val #ifdef DEBUGGING1 std::cout << "Sort of " << numElems << " Elements through " << 8*sizeof(unsigned int)<< " loops." << std::endl; #endif unsigned int threadsperblock = 32; //Assign Histogram in device unsigned int *d_binHistogram; //for 32bit integers unsigned int numBins = 32; checkCudaErrors(cudaMalloc(&d_binHistogram, numBins*sizeof(unsigned int))); //set histogram values to zero - faster than memcpy? dim3 blockSize = { threadsperblock, 1, 1 }; dim3 gridSize = { (numBins + blockSize.x - 1) / (blockSize.x), 1, 1 }; arraySet_kernel << <gridSize, blockSize >> > (d_binHistogram, (unsigned int)0, numBins); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); blockSize = { threadsperblock, 1, 1 }; gridSize = { ((unsigned int)numElems + blockSize.x - 1) / blockSize.x, 1, 1 }; // std::cout << "blocks = " << gridSize.x << " when using " << blockSize.x << " threads per block ." 
<< std::endl; lsbHisto_kernel << <gridSize, blockSize, blockSize.x*sizeof(unsigned int) >> > (d_binHistogram, numBins, d_inputVals, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); unsigned int * h_binHistogram = (unsigned int*)std::malloc(numBins * sizeof(unsigned int)); checkCudaErrors(cudaMemcpy(h_binHistogram, d_binHistogram, numBins * sizeof(unsigned int), cudaMemcpyDeviceToHost)); #ifdef DEBUGGING1 std::cout << "h_binHistogram [ "; for (unsigned int i = 0; i < numBins-1; i++) { std::cout << h_binHistogram[i] << ","; } std::cout << h_binHistogram[numBins-1] << "]" << std::endl; #endif // don't need to add these up - only do one at a time. // incSumScan_kernel<< < 1, numBins, numBins * sizeof(unsigned int) >> > (d_binHistogram, d_binHistogram, numBins); // cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); #ifdef DEBUGGING1 // h_binHistogram = (unsigned int*)std::malloc(numBins * sizeof(unsigned int)); // checkCudaErrors(cudaMemcpy(h_binHistogram, d_binHistogram, numBins * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // std::cout << "h_binHistogram [ "; // for (unsigned int i = 0; i < numBins - 1; i++) { // std::cout << h_binHistogram[i] << ","; // } // std::cout << h_binHistogram[numBins - 1] << "]" << std::endl; // free(h_binHistogram); #endif threadsperblock = 1024; blockSize = { threadsperblock, 1, 1 }; gridSize = { ((unsigned int)numElems + blockSize.x - 1) / blockSize.x, 1, 1 }; // std::cout << "Doing inclusive sumscan in " << gridSize.x << " blocks of " << blockSize.x << " threads." << std::endl; unsigned int * d_blockOffsets; checkCudaErrors(cudaMalloc(&d_blockOffsets, gridSize.x * sizeof(unsigned int))); unsigned int * d_predicates; //, * d_npredicates; checkCudaErrors(cudaMalloc(&d_predicates, numElems * sizeof(unsigned int))); //checkCudaErrors(cudaMalloc(&d_npredicates, numElems * sizeof(unsigned int))); //for (int maskPtr = 0; maskPtr < 32; maskPtr++) //should be to 32 for (int maskPtr = 0; maskPtr < 32; maskPtr++) //should be to 32 { if (h_binHistogram[maskPtr] > 0) //don't bother if no elements to be sorted - everything will stay the same { //rad_sort(d_inputVals, d_outputVals, d_inputPos, d_outputPos, d_predicates, d_npredicates, numElems); // is npredicate == gIdx - d_predicates[gIdx] + d_binHistogram[bMasks[maskPtr]] arraySet_kernel << <1, gridSize>> > (d_blockOffsets, (unsigned int) 0, gridSize.x); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // run predicate on input vals and put result in outputPos getPredicate_kernel << <gridSize, blockSize >> > (d_inputVals, d_predicates, numElems, bMasks[maskPtr]); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // run inclusive scans on each block, putting Offset total for that block in d_blockOffsets incSumScanB1_kernel << < gridSize, blockSize, blockSize.x * sizeof(unsigned int) >> > (d_predicates, d_predicates, numElems, d_blockOffsets, 0); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // run inclusive scan on d_blockoffsets incSumScan_kernel << < 1, gridSize, gridSize.x * sizeof(unsigned int) >> > (d_blockOffsets, d_blockOffsets, gridSize.x); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //finish the sumscan accounting for all blocks incSumScanB2_kernel << < gridSize, blockSize >> > (d_predicates, d_predicates, numElems, d_blockOffsets); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); #ifdef DEBUGGING1 unsigned int * h_blockOffsets = (unsigned int*)std::malloc(gridSize.x * sizeof(unsigned int)); 
checkCudaErrors(cudaMemcpy(h_blockOffsets, d_blockOffsets, gridSize.x * sizeof(unsigned int), cudaMemcpyDeviceToHost)); std::cout << "h_blockOffsets [ "; for (unsigned int i = 0; i < gridSize.x - 1; i++) { std::cout << h_blockOffsets[i] << ","; } std::cout << h_blockOffsets[gridSize.x - 1] << "]" << std::endl; free(h_blockOffsets); #endif //do the gathering moving values and positions into new locations on the output arrays swapLocations_kernel << < gridSize, blockSize >> > (d_outputVals, d_inputVals, d_outputPos, d_inputPos, d_predicates, numElems, bMasks[maskPtr]); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //now move all them back to input locations so we can do it again on next loop swapVals_kernel << < gridSize, blockSize >> > (d_inputVals, d_outputVals, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); swapVals_kernel << < gridSize, blockSize >> > (d_inputPos, d_outputPos, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // std::cout << "Got to end of loop " << maskPtr << std::endl; } else { // printf("skipping loop %d because there are no matches on this bitmask.\n", maskPtr); } } threadsperblock = 1024; blockSize = { threadsperblock, 1, 1 }; gridSize = { ((unsigned int)numElems + blockSize.x - 1) / blockSize.x, 1, 1 }; //I may have sorted the wrong way! reverseSort_kernel << < gridSize, blockSize >> > (d_outputPos, d_inputPos, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); reverseSort_kernel << < gridSize, blockSize >> > (d_outputVals, d_inputVals, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); #ifdef DEBUGGING1 unsigned int * h_Vals = (unsigned int*)std::malloc(numElems * sizeof(unsigned int)); checkCudaErrors(cudaMemcpy(h_Vals, d_outputVals, numElems * sizeof(unsigned int), cudaMemcpyDeviceToHost)); unsigned int * h_Poss = (unsigned int*)std::malloc(numElems * sizeof(unsigned int)); checkCudaErrors(cudaMemcpy(h_Poss, d_outputPos, numElems * sizeof(unsigned int), cudaMemcpyDeviceToHost)); std::cout << "Pos, Val, OrigPos \n"; for (unsigned int i = 0; i < numElems; i++) { std::cout << i <<","<< h_Vals[i] << "," << h_Poss[i] << "," << std::endl; } free(h_Vals); free(h_Poss); #endif free(h_binHistogram); checkCudaErrors(cudaFree(d_binHistogram)); checkCudaErrors(cudaFree(d_blockOffsets)); }
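The header comment in the file above describes each radix pass as histogram, exclusive scan, relative offset, and scatter. The sketch below walks one pass on the comment's own example using the common zeros-first, stable formulation; it illustrates the idea rather than the exact kernels above (which scatter the one-bits to the front and reverse the buffers at the end).

#include <cstdio>
#include <vector>

int main() {
  std::vector<unsigned int> in = {0, 0, 1, 1, 0, 0, 1};   // the bit values used in the header comment
  const size_t n = in.size();

  // 1) Histogram of the digit: count how many zeros there are.
  size_t zeros = 0;
  for (unsigned int v : in) zeros += (v == 0);

  // 2) + 3) Running (exclusive) count per digit gives each element its relative offset:
  //         [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2], as in the comment.
  // 4) Digit 0 starts at slot 0, digit 1 starts after all zeros.
  std::vector<size_t> dst(n);
  size_t seen0 = 0, seen1 = 0;
  for (size_t i = 0; i < n; ++i) {
    if (in[i] == 0) dst[i] = seen0++;             // zeros keep their order at the front
    else            dst[i] = zeros + seen1++;     // ones follow, also in order (stable)
  }

  for (size_t i = 0; i < n; ++i)
    printf("element %zu (digit %u) -> output slot %zu\n", i, in[i], dst[i]);
  return 0;
}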
89dedabd58c0d7d87072b1f2e99e0b67b823102b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) // output: idx (b,m,nsample), pts_cnt (b,m) __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { int batch_index = blockIdx.x; xyz1 += n*3*batch_index; xyz2 += m*3*batch_index; idx += m*nsample*batch_index; pts_cnt += m*batch_index; // counting how many unique points selected in local region int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball float x2=xyz2[j*3+0]; float y2=xyz2[j*3+1]; float z2=xyz2[j*3+2]; float x1=xyz1[k*3+0]; float y1=xyz1[k*3+1]; float z1=xyz1[k*3+2]; float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = k; } idx[j*nsample+cnt] = k; cnt+=1; } } pts_cnt[j] = cnt; } } // input: points (b,n,c), idx (b,m,nsample) // output: out (b,m,nsample,c) __global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) { int batch_index = blockIdx.x; points += n*c*batch_index; idx += m*nsample*batch_index; out += m*nsample*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { out[j*nsample*c+k*c+l] = points[ii*c+l]; } } } } // input: grad_out (b,m,nsample,c), idx (b,m,nsample), // output: grad_points (b,n,c) __global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) { int batch_index = blockIdx.x; idx += m*nsample*batch_index; grad_out += m*nsample*c*batch_index; grad_points += n*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]); } } } } // input: k (1), distance matrix dist (b,m,n) // output: idx (b,m,n), dist_out (b,m,n) // only the top k results within n are useful __global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) { int batch_index = blockIdx.x; dist+=m*n*batch_index; outi+=m*n*batch_index; out+=m*n*batch_index; int index = threadIdx.x; int stride = blockDim.x; // copy from dist to dist_out for (int j=index;j<m;j+=stride) { for (int s=0;s<n;++s) { out[j*n+s] = dist[j*n+s]; outi[j*n+s] = s; } } float *p_dist; for (int j=index;j<m;j+=stride) { p_dist = out+j*n; // selection sort for the first k elements for (int s=0;s<k;++s) { int min=s; // find the min for (int t=s+1;t<n;++t) { if (p_dist[t]<p_dist[min]) { min = t; } } // swap min-th and i-th element if (min!=s) { float tmp = p_dist[min]; p_dist[min] = p_dist[s]; p_dist[s] = tmp; int tmpi = outi[j*n+min]; outi[j*n+min] = outi[j*n+s]; outi[j*n+s] = tmpi; } } } } void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { hipLaunchKernelGGL(( query_ball_point_gpu), dim3(b),dim3(256), 0, 0, b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt); //hipDeviceSynchronize(); } 
void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) { hipLaunchKernelGGL(( selection_sort_gpu), dim3(b),dim3(256), 0, 0, b,n,m,k,dist,outi,out); //hipDeviceSynchronize(); } void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){ hipLaunchKernelGGL(( group_point_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,points,idx,out); //hipDeviceSynchronize(); } void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){ hipLaunchKernelGGL(( group_point_grad_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,grad_out,idx,grad_points); //group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points); //hipDeviceSynchronize(); }
89dedabd58c0d7d87072b1f2e99e0b67b823102b.cu
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) // output: idx (b,m,nsample), pts_cnt (b,m) __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { int batch_index = blockIdx.x; xyz1 += n*3*batch_index; xyz2 += m*3*batch_index; idx += m*nsample*batch_index; pts_cnt += m*batch_index; // counting how many unique points selected in local region int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball float x2=xyz2[j*3+0]; float y2=xyz2[j*3+1]; float z2=xyz2[j*3+2]; float x1=xyz1[k*3+0]; float y1=xyz1[k*3+1]; float z1=xyz1[k*3+2]; float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = k; } idx[j*nsample+cnt] = k; cnt+=1; } } pts_cnt[j] = cnt; } } // input: points (b,n,c), idx (b,m,nsample) // output: out (b,m,nsample,c) __global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) { int batch_index = blockIdx.x; points += n*c*batch_index; idx += m*nsample*batch_index; out += m*nsample*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { out[j*nsample*c+k*c+l] = points[ii*c+l]; } } } } // input: grad_out (b,m,nsample,c), idx (b,m,nsample), // output: grad_points (b,n,c) __global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) { int batch_index = blockIdx.x; idx += m*nsample*batch_index; grad_out += m*nsample*c*batch_index; grad_points += n*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]); } } } } // input: k (1), distance matrix dist (b,m,n) // output: idx (b,m,n), dist_out (b,m,n) // only the top k results within n are useful __global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) { int batch_index = blockIdx.x; dist+=m*n*batch_index; outi+=m*n*batch_index; out+=m*n*batch_index; int index = threadIdx.x; int stride = blockDim.x; // copy from dist to dist_out for (int j=index;j<m;j+=stride) { for (int s=0;s<n;++s) { out[j*n+s] = dist[j*n+s]; outi[j*n+s] = s; } } float *p_dist; for (int j=index;j<m;j+=stride) { p_dist = out+j*n; // selection sort for the first k elements for (int s=0;s<k;++s) { int min=s; // find the min for (int t=s+1;t<n;++t) { if (p_dist[t]<p_dist[min]) { min = t; } } // swap min-th and i-th element if (min!=s) { float tmp = p_dist[min]; p_dist[min] = p_dist[s]; p_dist[s] = tmp; int tmpi = outi[j*n+min]; outi[j*n+min] = outi[j*n+s]; outi[j*n+s] = tmpi; } } } } void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { query_ball_point_gpu<<<b,256>>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt); //cudaDeviceSynchronize(); } void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) { 
selection_sort_gpu<<<b,256>>>(b,n,m,k,dist,outi,out); //cudaDeviceSynchronize(); } void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){ group_point_gpu<<<b,256>>>(b,n,c,m,nsample,points,idx,out); //cudaDeviceSynchronize(); } void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){ group_point_grad_gpu<<<b,256>>>(b,n,c,m,nsample,grad_out,idx,grad_points); //group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points); //cudaDeviceSynchronize(); }
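A minimal host-side sketch of driving the launchers above: ball query followed by feature grouping. It assumes linkage against the file above; the batch, point, neighbourhood, and channel sizes are illustrative, and the inputs are only zero-filled placeholders.

#include <cuda_runtime.h>

void queryBallPointLauncher(int b, int n, int m, float radius, int nsample,
                            const float *xyz1, const float *xyz2, int *idx, int *pts_cnt);
void groupPointLauncher(int b, int n, int c, int m, int nsample,
                        const float *points, const int *idx, float *out);

int main() {
  // Illustrative sizes: 2 batches, 1024 source points, 256 query centroids, 32 neighbours, 64 channels.
  const int b = 2, n = 1024, m = 256, nsample = 32, c = 64;
  const float radius = 0.1f;

  float *xyz1, *xyz2, *points, *grouped;
  int *idx, *pts_cnt;
  cudaMalloc(&xyz1,    sizeof(float) * b * n * 3);
  cudaMalloc(&xyz2,    sizeof(float) * b * m * 3);
  cudaMalloc(&points,  sizeof(float) * b * n * c);
  cudaMalloc(&grouped, sizeof(float) * b * m * nsample * c);
  cudaMalloc(&idx,     sizeof(int)   * b * m * nsample);
  cudaMalloc(&pts_cnt, sizeof(int)   * b * m);
  cudaMemset(xyz1,   0, sizeof(float) * b * n * 3);   // real code would upload point clouds here
  cudaMemset(xyz2,   0, sizeof(float) * b * m * 3);
  cudaMemset(points, 0, sizeof(float) * b * n * c);

  // idx (b, m, nsample) then selects the neighbourhood features into grouped (b, m, nsample, c).
  queryBallPointLauncher(b, n, m, radius, nsample, xyz1, xyz2, idx, pts_cnt);
  groupPointLauncher(b, n, c, m, nsample, points, idx, grouped);
  cudaDeviceSynchronize();

  cudaFree(xyz1); cudaFree(xyz2); cudaFree(points);
  cudaFree(grouped); cudaFree(idx); cudaFree(pts_cnt);
  return 0;
}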
d41c585b71ed5d9af1a87ef2572d3346ec52560b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorTopK.hip" #else #include <c10/macros/Macros.h> void THCTensor_(topk)(THCState* state, THCTensor *topK, THCudaLongTensor *indices, THCTensor *input_, int64_t k, int dim, int dir, int sorted) { THAssert(topK != NULL && indices != NULL && input_ != NULL); THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input_)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); int64_t dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); int numDims = THCTensor_(nDimensionLegacyNoScalars)(state, input_); THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range"); int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input_, dim); THArgCheck(k >= 0 && k <= sliceSize, 5, "k not in range for dimension"); THCTensor *input = THCTensor_(newContiguous)(state, input_); // Build the output size, which is the dim being selected set to // size k std::vector<int64_t> topKSize = THTensor_sizesLegacyNoScalars(input); topKSize[dim] = k; THCTensor_(resize)(state, topK, topKSize, {}); THCudaLongTensor_resize(state, indices, topKSize, {}); // static_cast is required to ensure that the correct type (INDEX_T) // is provided to the kernel for the arguments. #define RUN_K(INDEX_T, DIM, DIR) \ hipLaunchKernelGGL(( gatherTopK<scalar_t, INDEX_T, DIM, DIR>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \ inputInfo, \ static_cast<INDEX_T>(sliceSize), \ static_cast<INDEX_T>(k), \ static_cast<INDEX_T>(inputSlices), \ /* The actual dimension that the k-selection is running in */ \ /* may have changed from collapseDims() */ \ static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \ topKInfo, \ static_cast<INDEX_T>(topKSlices), \ static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \ indicesInfo, \ static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])) #define RUN_DIR(INDEX_T, DIM) \ if (dir) { \ RUN_K(INDEX_T, DIM, true); \ } else { \ RUN_K(INDEX_T, DIM, false); \ } #define RUN_DIM(INDEX_T) \ if (allDims == 1) { \ RUN_DIR(INDEX_T, 1); \ } else if (allDims == 2) { \ RUN_DIR(INDEX_T, 2); \ } else if (allDims == 3) { \ RUN_DIR(INDEX_T, 3); \ } else { \ RUN_DIR(INDEX_T, -1); \ } #define RUN_T(INDEX_T) \ TensorInfo<scalar_t, INDEX_T> inputInfo = \ getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, input); \ TensorInfo<scalar_t, INDEX_T> topKInfo = \ getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, topK); \ TensorInfo<int64_t, INDEX_T> indicesInfo = \ getTensorInfo<int64_t, THCudaLongTensor, INDEX_T>(state, indices); \ \ /* We use these structures solely to find the offset to */ \ /* each slice we are operating on */ \ inputInfo.sizes[dim] = 1; \ topKInfo.sizes[dim] = 1; \ indicesInfo.sizes[dim] = 1; \ \ /* Collapse all other dims */ \ int collapseInputDim = inputInfo.collapseDims(dim); \ int collapseTopKDim = topKInfo.collapseDims(dim); \ int collapseIndicesDim = indicesInfo.collapseDims(dim); \ \ int64_t inputSlices = 1; \ for (int i = 0; i < inputInfo.dims; ++i) { \ inputSlices *= inputInfo.sizes[i]; \ } \ int64_t topKSlices = 1; \ for (int i = 0; i < topKInfo.dims; ++i) { \ topKSlices *= topKInfo.sizes[i]; \ } \ \ dim3 grid; \ if (!THC_getGridFromTiles(inputSlices, grid)) { \ THError("Slice to sort is too large"); \ } \ \ dim3 
block(::min(THCRoundUp(sliceSize, (int64_t) C10_WARP_SIZE), (int64_t) 1024)); \ \ /* This is used as a template parameter to calculate indices. */ \ /* We only specialize it if all collapsed dim sizes are the */ \ /* same; otherwise, we use -1 which is the specialization */ \ /* parameter for arbitrary dimensions */ \ int allDims = inputInfo.dims; \ if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \ allDims = -1; \ } \ \ RUN_DIM(INDEX_T); if (THCTensor_nElement(state, input) > 0) { // Based on required index size, run the algorithm with the // appropriate index type if (THCTensor_canUse32BitIndexMath(state, input) && THCTensor_canUse32BitIndexMath(state, topK) && THCTensor_canUse32BitIndexMath(state, indices)) { RUN_T(uint32_t); } else { RUN_T(uint64_t); } } #undef RUN_T #undef RUN_DIM #undef RUN_DIR #undef RUN_K // Sort the results if the user wants them sorted, since our // selection routine does not ensure sorting if (sorted) { // FIXME: the k/v inplace sort along slice only works for size <= // 2048 at the moment // Workaround: // CUDA 8 uses more shared memory than 7.5 for bitonicSortKVInPlace, // and so for the double word types, // we get "too many resources requested for launch" in the 2048 case #if TORCH_HIP_VERSION >= 8000 #if defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_LONG) int maxSliceSize = 1024; #else int maxSliceSize = 2048; #endif #else int maxSliceSize = 2048; #endif if (sliceSize <= maxSliceSize) { // This avoids any memory allocations and performs all sorting // work inplace along the slice THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir); } else { // Depend upon the backup sort that returns indices, which we // can use in conjunction with gather to produce the original // indices. // This is not the most efficient implementation, especially since // there are memory allocations performed here. If the user desires // greater performance, they should torch.gather() the results // themselves using the reported indices, providing previously // allocated tensors to receive the results. THCTensor* sortedTopK = THCTensor_(new)(state); THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state); THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir); THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state); THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices); THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices); THCTensor_(freeCopyTo)(state, sortedTopK, topK); THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices); THCudaLongTensor_free(state, sortedIndices); } } THCudaLongTensor_free(state, input); THCudaCheck(hipGetLastError()); } #endif // THC_GENERIC_FILE
d41c585b71ed5d9af1a87ef2572d3346ec52560b.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorTopK.cu" #else #include <c10/macros/Macros.h> void THCTensor_(topk)(THCState* state, THCTensor *topK, THCudaLongTensor *indices, THCTensor *input_, int64_t k, int dim, int dir, int sorted) { THAssert(topK != NULL && indices != NULL && input_ != NULL); THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input_)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); int64_t dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); int numDims = THCTensor_(nDimensionLegacyNoScalars)(state, input_); THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range"); int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input_, dim); THArgCheck(k >= 0 && k <= sliceSize, 5, "k not in range for dimension"); THCTensor *input = THCTensor_(newContiguous)(state, input_); // Build the output size, which is the dim being selected set to // size k std::vector<int64_t> topKSize = THTensor_sizesLegacyNoScalars(input); topKSize[dim] = k; THCTensor_(resize)(state, topK, topKSize, {}); THCudaLongTensor_resize(state, indices, topKSize, {}); // static_cast is required to ensure that the correct type (INDEX_T) // is provided to the kernel for the arguments. #define RUN_K(INDEX_T, DIM, DIR) \ gatherTopK<scalar_t, INDEX_T, DIM, DIR> \ <<<grid, block, 0, THCState_getCurrentStream(state)>>>( \ inputInfo, \ static_cast<INDEX_T>(sliceSize), \ static_cast<INDEX_T>(k), \ static_cast<INDEX_T>(inputSlices), \ /* The actual dimension that the k-selection is running in */ \ /* may have changed from collapseDims() */ \ static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \ topKInfo, \ static_cast<INDEX_T>(topKSlices), \ static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \ indicesInfo, \ static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])) #define RUN_DIR(INDEX_T, DIM) \ if (dir) { \ RUN_K(INDEX_T, DIM, true); \ } else { \ RUN_K(INDEX_T, DIM, false); \ } #define RUN_DIM(INDEX_T) \ if (allDims == 1) { \ RUN_DIR(INDEX_T, 1); \ } else if (allDims == 2) { \ RUN_DIR(INDEX_T, 2); \ } else if (allDims == 3) { \ RUN_DIR(INDEX_T, 3); \ } else { \ RUN_DIR(INDEX_T, -1); \ } #define RUN_T(INDEX_T) \ TensorInfo<scalar_t, INDEX_T> inputInfo = \ getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, input); \ TensorInfo<scalar_t, INDEX_T> topKInfo = \ getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, topK); \ TensorInfo<int64_t, INDEX_T> indicesInfo = \ getTensorInfo<int64_t, THCudaLongTensor, INDEX_T>(state, indices); \ \ /* We use these structures solely to find the offset to */ \ /* each slice we are operating on */ \ inputInfo.sizes[dim] = 1; \ topKInfo.sizes[dim] = 1; \ indicesInfo.sizes[dim] = 1; \ \ /* Collapse all other dims */ \ int collapseInputDim = inputInfo.collapseDims(dim); \ int collapseTopKDim = topKInfo.collapseDims(dim); \ int collapseIndicesDim = indicesInfo.collapseDims(dim); \ \ int64_t inputSlices = 1; \ for (int i = 0; i < inputInfo.dims; ++i) { \ inputSlices *= inputInfo.sizes[i]; \ } \ int64_t topKSlices = 1; \ for (int i = 0; i < topKInfo.dims; ++i) { \ topKSlices *= topKInfo.sizes[i]; \ } \ \ dim3 grid; \ if (!THC_getGridFromTiles(inputSlices, grid)) { \ THError("Slice to sort is too large"); \ } \ \ dim3 block(std::min(THCRoundUp(sliceSize, (int64_t) C10_WARP_SIZE), (int64_t) 1024)); \ \ /* This is used as a template 
parameter to calculate indices. */ \ /* We only specialize it if all collapsed dim sizes are the */ \ /* same; otherwise, we use -1 which is the specialization */ \ /* parameter for arbitrary dimensions */ \ int allDims = inputInfo.dims; \ if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \ allDims = -1; \ } \ \ RUN_DIM(INDEX_T); if (THCTensor_nElement(state, input) > 0) { // Based on required index size, run the algorithm with the // appropriate index type if (THCTensor_canUse32BitIndexMath(state, input) && THCTensor_canUse32BitIndexMath(state, topK) && THCTensor_canUse32BitIndexMath(state, indices)) { RUN_T(uint32_t); } else { RUN_T(uint64_t); } } #undef RUN_T #undef RUN_DIM #undef RUN_DIR #undef RUN_K // Sort the results if the user wants them sorted, since our // selection routine does not ensure sorting if (sorted) { // FIXME: the k/v inplace sort along slice only works for size <= // 2048 at the moment // Workaround: // CUDA 8 uses more shared memory than 7.5 for bitonicSortKVInPlace, // and so for the double word types, // we get "too many resources requested for launch" in the 2048 case #if CUDA_VERSION >= 8000 #if defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_LONG) int maxSliceSize = 1024; #else int maxSliceSize = 2048; #endif #else int maxSliceSize = 2048; #endif if (sliceSize <= maxSliceSize) { // This avoids any memory allocations and performs all sorting // work inplace along the slice THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir); } else { // Depend upon the backup sort that returns indices, which we // can use in conjunction with gather to produce the original // indices. // This is not the most efficient implementation, especially since // there are memory allocations performed here. If the user desires // greater performance, they should torch.gather() the results // themselves using the reported indices, providing previously // allocated tensors to receive the results. THCTensor* sortedTopK = THCTensor_(new)(state); THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state); THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir); THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state); THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices); THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices); THCTensor_(freeCopyTo)(state, sortedTopK, topK); THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices); THCudaLongTensor_free(state, sortedIndices); } } THCudaLongTensor_free(state, input); THCudaCheck(cudaGetLastError()); } #endif // THC_GENERIC_FILE
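For reference, the slice-level contract implemented by the gatherTopK/sort path above can be stated as a small CPU sketch: select the k largest (dir true) or k smallest values of a slice and report both the values and their source indices (this sketch always returns them in sorted order, which the CUDA path only guarantees when sorted is set). The helper cpu_topk_slice and the toy data below are illustrative only and not part of the THC sources.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <numeric>
#include <vector>

static void cpu_topk_slice(const std::vector<float>& slice, int64_t k, bool largest,
                           std::vector<float>& values, std::vector<int64_t>& indices) {
    std::vector<int64_t> order(slice.size());
    std::iota(order.begin(), order.end(), 0);
    auto cmp = [&](int64_t a, int64_t b) {
        return largest ? slice[a] > slice[b] : slice[a] < slice[b];
    };
    // partial_sort leaves the k best elements (already ordered) at the front;
    // the GPU path instead selects with gatherTopK and sorts afterwards if asked to.
    std::partial_sort(order.begin(), order.begin() + k, order.end(), cmp);
    values.resize(k);
    indices.resize(k);
    for (int64_t i = 0; i < k; ++i) {
        indices[i] = order[i];
        values[i]  = slice[order[i]];
    }
}

int main() {
    std::vector<float> slice = {3.f, 9.f, 1.f, 7.f, 5.f};
    std::vector<float> values;
    std::vector<int64_t> indices;
    cpu_topk_slice(slice, 3, /*largest=*/true, values, indices);
    for (size_t i = 0; i < values.size(); ++i)
        printf("value %.1f at index %lld\n", values[i], (long long)indices[i]);
    return 0;
}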
ec3b7e31f875ecffd7fbe22337e7e66ba0f94ef7.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>

// includes CUDA
#include <hip/hip_runtime.h>

// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#include <hip/device_functions.h> // helper functions for SDK examples

#include "combiStruct.h"

// Allocate the device buffers and the write-combined pinned host buffers for layers and candidates.
void allocateLayers()
{
  checkCudaErrors(hipMalloc((void **) &d_layer, GPU_MAX_LAYER * sizeof(combiLayer)));
  checkCudaErrors(hipMalloc((void **) &d_cand, GPU_MAX_CAND * sizeof(combiTrack)));
  checkCudaErrors(hipHostMalloc((void **) &h_layer, GPU_MAX_LAYER * sizeof(combiLayer), hipHostMallocWriteCombined));
  checkCudaErrors(hipHostMalloc((void **) &h_cand, GPU_MAX_CAND * sizeof(combiTrack), hipHostMallocWriteCombined));
}

void freeLayers()
{
  checkCudaErrors(hipHostFree(h_layer));
  checkCudaErrors(hipHostFree(h_cand));
  checkCudaErrors(hipFree(d_layer));
  checkCudaErrors(hipFree(d_cand));
}

void clearLayers()
{
  memset(h_layer, 0, GPU_MAX_LAYER * sizeof(combiLayer));
  checkCudaErrors(hipMemset(d_layer, 0, GPU_MAX_LAYER * sizeof(combiLayer)));
}

void clearCandidates()
{
  memset(h_cand, 0, GPU_MAX_CAND * sizeof(combiTrack));
  checkCudaErrors(hipMemset(d_cand, 0, GPU_MAX_CAND * sizeof(combiTrack)));
}

// Copy only the populated part of one layer (the stub count plus _nb stubs) to the device.
void copyLayer(uint32_t idl)
{
  checkCudaErrors(hipMemcpy(&d_layer[idl], &h_layer[idl],
                            sizeof(int) + h_layer[idl]._nb * sizeof(stubPosition),
                            hipMemcpyHostToDevice));
}

// One thread per stub: precompute r2, r and the inverse-radius coordinates xp = x/r2, yp = y/r2.
__global__ void computeLayerKernel(combiLayer* L)
{
  const unsigned int ib = threadIdx.x;
  L->stub[ib]._r2 = L->stub[ib]._x * L->stub[ib]._x + L->stub[ib]._y * L->stub[ib]._y;
  L->stub[ib]._r  = sqrt(L->stub[ib]._r2);
  L->stub[ib]._xp = L->stub[ib]._x / L->stub[ib]._r2;
  L->stub[ib]._yp = L->stub[ib]._y / L->stub[ib]._r2;
}

void computeLayer(uint32_t idl)
{
  hipLaunchKernelGGL((computeLayerKernel), dim3(1), dim3(h_layer[idl]._nb), 0, 0, &d_layer[idl]);
}
ec3b7e31f875ecffd7fbe22337e7e66ba0f94ef7.cu
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>

// includes CUDA
#include <cuda_runtime.h>

// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#include <device_functions.h> // helper functions for SDK examples

#include "combiStruct.h"

// Allocate the device buffers and the write-combined pinned host buffers for layers and candidates.
void allocateLayers()
{
  checkCudaErrors(cudaMalloc((void **) &d_layer, GPU_MAX_LAYER * sizeof(combiLayer)));
  checkCudaErrors(cudaMalloc((void **) &d_cand, GPU_MAX_CAND * sizeof(combiTrack)));
  checkCudaErrors(cudaHostAlloc((void **) &h_layer, GPU_MAX_LAYER * sizeof(combiLayer), cudaHostAllocWriteCombined));
  checkCudaErrors(cudaHostAlloc((void **) &h_cand, GPU_MAX_CAND * sizeof(combiTrack), cudaHostAllocWriteCombined));
}

void freeLayers()
{
  checkCudaErrors(cudaFreeHost(h_layer));
  checkCudaErrors(cudaFreeHost(h_cand));
  checkCudaErrors(cudaFree(d_layer));
  checkCudaErrors(cudaFree(d_cand));
}

void clearLayers()
{
  memset(h_layer, 0, GPU_MAX_LAYER * sizeof(combiLayer));
  checkCudaErrors(cudaMemset(d_layer, 0, GPU_MAX_LAYER * sizeof(combiLayer)));
}

void clearCandidates()
{
  memset(h_cand, 0, GPU_MAX_CAND * sizeof(combiTrack));
  checkCudaErrors(cudaMemset(d_cand, 0, GPU_MAX_CAND * sizeof(combiTrack)));
}

// Copy only the populated part of one layer (the stub count plus _nb stubs) to the device.
void copyLayer(uint32_t idl)
{
  checkCudaErrors(cudaMemcpy(&d_layer[idl], &h_layer[idl],
                             sizeof(int) + h_layer[idl]._nb * sizeof(stubPosition),
                             cudaMemcpyHostToDevice));
}

// One thread per stub: precompute r2, r and the inverse-radius coordinates xp = x/r2, yp = y/r2.
__global__ void computeLayerKernel(combiLayer* L)
{
  const unsigned int ib = threadIdx.x;
  L->stub[ib]._r2 = L->stub[ib]._x * L->stub[ib]._x + L->stub[ib]._y * L->stub[ib]._y;
  L->stub[ib]._r  = sqrt(L->stub[ib]._r2);
  L->stub[ib]._xp = L->stub[ib]._x / L->stub[ib]._r2;
  L->stub[ib]._yp = L->stub[ib]._y / L->stub[ib]._r2;
}

void computeLayer(uint32_t idl)
{
  computeLayerKernel<<<1, h_layer[idl]._nb, 0>>>(&d_layer[idl]);
}
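The pair of files above precomputes, for every stub in a layer, the squared radius, the radius, and the inverse-radius coordinates xp = x/r2 and yp = y/r2. The following self-contained sketch reproduces that transform with a simplified stand-in struct and plain CUDA runtime calls; all names here (stubPos, precomputeStubs) are illustrative and not part of combiStruct.h.

#include <cstdio>
#include <cuda_runtime.h>

struct stubPos { float x, y, r2, r, xp, yp; };     // hypothetical stand-in for the project's stubPosition

__global__ void precomputeStubs(stubPos* s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float r2 = s[i].x * s[i].x + s[i].y * s[i].y;  // squared radius
    s[i].r2 = r2;
    s[i].r  = sqrtf(r2);
    s[i].xp = s[i].x / r2;                         // inverse-radius coordinates
    s[i].yp = s[i].y / r2;
}

int main() {
    const int n = 4;
    stubPos h[n] = {{3, 4}, {1, 1}, {2, 2}, {6, 8}};
    stubPos* d = nullptr;
    cudaMalloc(&d, n * sizeof(stubPos));
    cudaMemcpy(d, h, n * sizeof(stubPos), cudaMemcpyHostToDevice);
    precomputeStubs<<<1, 32>>>(d, n);              // one small block is enough for this toy input
    cudaMemcpy(h, d, n * sizeof(stubPos), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i)
        printf("x=%.1f y=%.1f r=%.1f xp=%.3f yp=%.3f\n", h[i].x, h[i].y, h[i].r, h[i].xp, h[i].yp);
    cudaFree(d);
    return 0;
}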
1944f0647bb9153ff388ba3b2085ccb4c1b01219.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdlib> #include <string> #include <cstring> #include <fstream> #include "solver.h" using namespace std; typedef unsigned char uchar; int num_train = 512, num_test = 500; int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int) ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *) &n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *) &n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printTimes(vector<float> &time, string filename); int main(int argc, char *argv[]) { // int num_train = 100 * batch_size, num_val = batch_size; // void *X_train = malloc(num_train * input_channels * sizeof(float)); // int *y_train = (int *)malloc(num_train * sizeof(int)); // void *X_val = malloc(num_val * input_channels * sizeof(float)); // int *y_val = (int *)malloc(num_val * sizeof(int)); // for (int i = 0; i < num_train; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_train[i] = 0; // } // for (int i = 0; i < num_val; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_val[i] = rand() % 2; // } // int rows = 28, cols = 28, channels = 1; // vector<vector<uchar> > train_images, test_images; // vector<uchar> train_labels, test_labels; // readMNIST(train_images, test_images, train_labels, test_labels); // float *f_train_images, *f_train_labels, *f_test_images, 
*f_test_labels; float *f_train_images, *f_test_images; int *f_train_labels, *f_test_labels; int rows = 227, cols = 227, channels = 3; int input_size = rows * cols * channels; // f_train_images = (float *)malloc(num_train * input_size * sizeof(float)); // f_train_labels = (int *)malloc(num_train * sizeof(int)); checkCudaErrors(hipHostMalloc(&f_train_images, num_train * input_size * sizeof(float))); checkCudaErrors(hipHostMalloc(&f_train_labels, input_size * sizeof(int))); f_test_images = (float *)malloc(num_test * input_size * sizeof(float)); f_test_labels = (int *)malloc(num_test * sizeof(int)); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < num_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= num_train; } for (int i = 0; i < num_train; i++) { for (int j = 0; j < input_size; j++) { f_train_images[i * input_size + j] -= mean_image[j]; } } for (int i = 0; i < num_test; i++) { for (int j = 0; j < input_size; j++) { f_test_images[i * input_size + j] -= mean_image[j]; } } // int input_channels = rows * cols * channels * 3, hidden_channels1 = 50, hidden_channels2 = 100, output_channels = 10; // vector<LayerSpecifier> layer_specifier; // ConvDescriptor layer0; // LayerSpecifier temp; // layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // ActivationDescriptor layer0_actv; // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // BatchNormDescriptor layer0_bn; // for (int i = 0; i < 200; i++) { // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // } // PoolingDescriptor layer0_pool; // layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX); // temp.initPointer(POOLING); // *((PoolingDescriptor *)temp.params) = layer0_pool; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // // DropoutDescriptor layer0_dropout; // // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2); // // temp.initPointer(DROPOUT); // // *((DropoutDescriptor *)temp.params) = layer0_dropout; // // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; 
// layer_specifier.push_back(temp); // FCDescriptor layer1; // layer1.initializeValues(input_channels, hidden_channels1); // temp.initPointer(FULLY_CONNECTED); // *((FCDescriptor *)(temp.params)) = layer1; // layer_specifier.push_back(temp); // temp.initPointer(ACTV); // ActivationDescriptor layer1_actv; // layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1); // *((ActivationDescriptor *)temp.params) = layer1_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1, hidden_channels1, 1, 1); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // temp.initPointer(FULLY_CONNECTED); // FCDescriptor layer2; // layer2.initializeValues(hidden_channels1, output_channels); // *((FCDescriptor *)temp.params) = layer2; // layer_specifier.push_back(temp); // // temp.initPointer(FULLY_CONNECTED); // // FCDescriptor layer3; // // layer3.initializeValues(hidden_channels2, output_channels); // // *((FCDescriptor *)temp.params) = layer3; // // layer_specifier.push_back(temp); // temp.initPointer(SOFTMAX); // SoftmaxDescriptor smax; // smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, output_channels, 1, 1); // *((SoftmaxDescriptor *)(temp.params)) = smax; // layer_specifier.push_back(temp); // AlexNet vector<LayerSpecifier> layer_specifier; { ConvDescriptor layer0; layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer0; layer_specifier.push_back(temp); } { ActivationDescriptor layer0_actv; layer0_actv.initializeValues(RELU, 96, 55, 55); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer0_actv; layer_specifier.push_back(temp); } { PoolingDescriptor layer1; layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer1; layer_specifier.push_back(temp); } { ConvDescriptor layer2; layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer2; layer_specifier.push_back(temp); } { ActivationDescriptor layer2_actv; layer2_actv.initializeValues(RELU, 256, 27, 27); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer2_actv; layer_specifier.push_back(temp); } { PoolingDescriptor layer3; layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer3; layer_specifier.push_back(temp); } { ConvDescriptor layer4; layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer4; layer_specifier.push_back(temp); } { ActivationDescriptor layer4_actv; layer4_actv.initializeValues(RELU, 384, 13, 13); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer4_actv; layer_specifier.push_back(temp); } { ConvDescriptor layer5; layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer5; layer_specifier.push_back(temp); } { ActivationDescriptor layer5_actv; layer5_actv.initializeValues(RELU, 384, 13, 13); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer5_actv; layer_specifier.push_back(temp); } { 
ConvDescriptor layer6; layer6.initializeValues(384, 256, 3, 3, 13, 13, 1, 1, 1, 1); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer6; layer_specifier.push_back(temp); } { ActivationDescriptor layer6_actv; layer6_actv.initializeValues(RELU, 256, 13, 13); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer6_actv; layer_specifier.push_back(temp); } { PoolingDescriptor layer7; layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer7; layer_specifier.push_back(temp); } { FCDescriptor layer8; layer8.initializeValues(9216, 4096); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer8; layer_specifier.push_back(temp); } { ActivationDescriptor layer8_actv; layer8_actv.initializeValues(RELU, 4096, 1, 1); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer8_actv; layer_specifier.push_back(temp); } { FCDescriptor layer9; layer9.initializeValues(4096, 4096); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer9; layer_specifier.push_back(temp); } { ActivationDescriptor layer9_actv; layer9_actv.initializeValues(RELU, 4096, 1, 1); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer9_actv; layer_specifier.push_back(temp); } { FCDescriptor layer10; layer10.initializeValues(4096, 1000); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer10; layer_specifier.push_back(temp); } { SoftmaxDescriptor layer11; layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1); LayerSpecifier temp; temp.initPointer(SOFTMAX); *((SoftmaxDescriptor *)temp.params) = layer11; layer_specifier.push_back(temp); } ConvAlgo conv_algo = CONV_ALGO_PERFORMANCE_OPTIMAL; string filename("base_p"); if (argc == 2) { if (strcmp(argv[1], "p") == 0) { conv_algo = CONV_ALGO_PERFORMANCE_OPTIMAL; filename.assign("base_p"); } else if (strcmp(argv[1], "m") == 0) { conv_algo = CONV_ALGO_MEMORY_OPTIMAL; filename.assign("base_m"); } else { printf("invalid argument.. 
using performance optimal\n"); } } int batch_size = 256; long long dropout_seed = 1; float softmax_eps = 1e-8; float init_std_dev = 0.1; NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, conv_algo); int num_epoch = 1000; double learning_rate = 1e-3; double learning_rate_decay = 0.9; Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train); vector<float> loss; vector<float> time; solver.getTrainTime(loss, time, 100); printTimes(time, filename); } void printTimes(vector<float> &time, string filename) { float mean_time = 0.0; float std_dev = 0.0; int N = time.size(); for (int i = 0; i < N; i++) { mean_time += time[i]; } mean_time /= N; for (int i = 0; i < N; i++) { std_dev += pow(time[i] - mean_time, 2); } std_dev /= N; std_dev = pow(std_dev, 0.5); cout << "Average time: " << mean_time << endl; cout << "Standard deviation: " << std_dev << endl; filename.append(".dat"); fstream f; f.open(filename.c_str(), ios_base::out); for (int i = 0; i < N; i++) { f << time[i] << endl; } f << "mean_time: " << mean_time << endl; f << "standard_deviation: " << std_dev << endl; f.close(); filename.append(".bin"); fstream f_bin; f_bin.open(filename.c_str(), ios_base::out); f_bin.write((char *)&N, sizeof(N)); for (int i = 0; i < N; i++) { f_bin.write((char *)&time[i], sizeof(time[i])); } f_bin.close(); }
1944f0647bb9153ff388ba3b2085ccb4c1b01219.cu
#include <iostream> #include <cstdlib> #include <string> #include <cstring> #include <fstream> #include "solver.h" using namespace std; typedef unsigned char uchar; int num_train = 512, num_test = 500; int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int) ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *) &n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *) &n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printTimes(vector<float> &time, string filename); int main(int argc, char *argv[]) { // int num_train = 100 * batch_size, num_val = batch_size; // void *X_train = malloc(num_train * input_channels * sizeof(float)); // int *y_train = (int *)malloc(num_train * sizeof(int)); // void *X_val = malloc(num_val * input_channels * sizeof(float)); // int *y_val = (int *)malloc(num_val * sizeof(int)); // for (int i = 0; i < num_train; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_train[i] = 0; // } // for (int i = 0; i < num_val; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_val[i] = rand() % 2; // } // int rows = 28, cols = 28, channels = 1; // vector<vector<uchar> > train_images, test_images; // vector<uchar> train_labels, test_labels; // readMNIST(train_images, test_images, train_labels, test_labels); // float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels; float *f_train_images, *f_test_images; int 
*f_train_labels, *f_test_labels; int rows = 227, cols = 227, channels = 3; int input_size = rows * cols * channels; // f_train_images = (float *)malloc(num_train * input_size * sizeof(float)); // f_train_labels = (int *)malloc(num_train * sizeof(int)); checkCudaErrors(cudaMallocHost(&f_train_images, num_train * input_size * sizeof(float))); checkCudaErrors(cudaMallocHost(&f_train_labels, input_size * sizeof(int))); f_test_images = (float *)malloc(num_test * input_size * sizeof(float)); f_test_labels = (int *)malloc(num_test * sizeof(int)); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < num_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= num_train; } for (int i = 0; i < num_train; i++) { for (int j = 0; j < input_size; j++) { f_train_images[i * input_size + j] -= mean_image[j]; } } for (int i = 0; i < num_test; i++) { for (int j = 0; j < input_size; j++) { f_test_images[i * input_size + j] -= mean_image[j]; } } // int input_channels = rows * cols * channels * 3, hidden_channels1 = 50, hidden_channels2 = 100, output_channels = 10; // vector<LayerSpecifier> layer_specifier; // ConvDescriptor layer0; // LayerSpecifier temp; // layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // ActivationDescriptor layer0_actv; // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // BatchNormDescriptor layer0_bn; // for (int i = 0; i < 200; i++) { // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // } // PoolingDescriptor layer0_pool; // layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX); // temp.initPointer(POOLING); // *((PoolingDescriptor *)temp.params) = layer0_pool; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // // DropoutDescriptor layer0_dropout; // // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2); // // temp.initPointer(DROPOUT); // // *((DropoutDescriptor *)temp.params) = layer0_dropout; // // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // FCDescriptor 
layer1; // layer1.initializeValues(input_channels, hidden_channels1); // temp.initPointer(FULLY_CONNECTED); // *((FCDescriptor *)(temp.params)) = layer1; // layer_specifier.push_back(temp); // temp.initPointer(ACTV); // ActivationDescriptor layer1_actv; // layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1); // *((ActivationDescriptor *)temp.params) = layer1_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1, hidden_channels1, 1, 1); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // temp.initPointer(FULLY_CONNECTED); // FCDescriptor layer2; // layer2.initializeValues(hidden_channels1, output_channels); // *((FCDescriptor *)temp.params) = layer2; // layer_specifier.push_back(temp); // // temp.initPointer(FULLY_CONNECTED); // // FCDescriptor layer3; // // layer3.initializeValues(hidden_channels2, output_channels); // // *((FCDescriptor *)temp.params) = layer3; // // layer_specifier.push_back(temp); // temp.initPointer(SOFTMAX); // SoftmaxDescriptor smax; // smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, output_channels, 1, 1); // *((SoftmaxDescriptor *)(temp.params)) = smax; // layer_specifier.push_back(temp); // AlexNet vector<LayerSpecifier> layer_specifier; { ConvDescriptor layer0; layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer0; layer_specifier.push_back(temp); } { ActivationDescriptor layer0_actv; layer0_actv.initializeValues(RELU, 96, 55, 55); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer0_actv; layer_specifier.push_back(temp); } { PoolingDescriptor layer1; layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer1; layer_specifier.push_back(temp); } { ConvDescriptor layer2; layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer2; layer_specifier.push_back(temp); } { ActivationDescriptor layer2_actv; layer2_actv.initializeValues(RELU, 256, 27, 27); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer2_actv; layer_specifier.push_back(temp); } { PoolingDescriptor layer3; layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer3; layer_specifier.push_back(temp); } { ConvDescriptor layer4; layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer4; layer_specifier.push_back(temp); } { ActivationDescriptor layer4_actv; layer4_actv.initializeValues(RELU, 384, 13, 13); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer4_actv; layer_specifier.push_back(temp); } { ConvDescriptor layer5; layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer5; layer_specifier.push_back(temp); } { ActivationDescriptor layer5_actv; layer5_actv.initializeValues(RELU, 384, 13, 13); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer5_actv; layer_specifier.push_back(temp); } { ConvDescriptor layer6; layer6.initializeValues(384, 256, 
3, 3, 13, 13, 1, 1, 1, 1); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer6; layer_specifier.push_back(temp); } { ActivationDescriptor layer6_actv; layer6_actv.initializeValues(RELU, 256, 13, 13); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer6_actv; layer_specifier.push_back(temp); } { PoolingDescriptor layer7; layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer7; layer_specifier.push_back(temp); } { FCDescriptor layer8; layer8.initializeValues(9216, 4096); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer8; layer_specifier.push_back(temp); } { ActivationDescriptor layer8_actv; layer8_actv.initializeValues(RELU, 4096, 1, 1); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer8_actv; layer_specifier.push_back(temp); } { FCDescriptor layer9; layer9.initializeValues(4096, 4096); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer9; layer_specifier.push_back(temp); } { ActivationDescriptor layer9_actv; layer9_actv.initializeValues(RELU, 4096, 1, 1); LayerSpecifier temp; temp.initPointer(ACTV); *((ActivationDescriptor *)temp.params) = layer9_actv; layer_specifier.push_back(temp); } { FCDescriptor layer10; layer10.initializeValues(4096, 1000); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer10; layer_specifier.push_back(temp); } { SoftmaxDescriptor layer11; layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1); LayerSpecifier temp; temp.initPointer(SOFTMAX); *((SoftmaxDescriptor *)temp.params) = layer11; layer_specifier.push_back(temp); } ConvAlgo conv_algo = CONV_ALGO_PERFORMANCE_OPTIMAL; string filename("base_p"); if (argc == 2) { if (strcmp(argv[1], "p") == 0) { conv_algo = CONV_ALGO_PERFORMANCE_OPTIMAL; filename.assign("base_p"); } else if (strcmp(argv[1], "m") == 0) { conv_algo = CONV_ALGO_MEMORY_OPTIMAL; filename.assign("base_m"); } else { printf("invalid argument.. 
using performance optimal\n"); } } int batch_size = 256; long long dropout_seed = 1; float softmax_eps = 1e-8; float init_std_dev = 0.1; NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, conv_algo); int num_epoch = 1000; double learning_rate = 1e-3; double learning_rate_decay = 0.9; Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train); vector<float> loss; vector<float> time; solver.getTrainTime(loss, time, 100); printTimes(time, filename); } void printTimes(vector<float> &time, string filename) { float mean_time = 0.0; float std_dev = 0.0; int N = time.size(); for (int i = 0; i < N; i++) { mean_time += time[i]; } mean_time /= N; for (int i = 0; i < N; i++) { std_dev += pow(time[i] - mean_time, 2); } std_dev /= N; std_dev = pow(std_dev, 0.5); cout << "Average time: " << mean_time << endl; cout << "Standard deviation: " << std_dev << endl; filename.append(".dat"); fstream f; f.open(filename.c_str(), ios_base::out); for (int i = 0; i < N; i++) { f << time[i] << endl; } f << "mean_time: " << mean_time << endl; f << "standard_deviation: " << std_dev << endl; f.close(); filename.append(".bin"); fstream f_bin; f_bin.open(filename.c_str(), ios_base::out); f_bin.write((char *)&N, sizeof(N)); for (int i = 0; i < N; i++) { f_bin.write((char *)&time[i], sizeof(time[i])); } f_bin.close(); }
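Before training, main() above centers the data: it accumulates a per-pixel mean over the training images and subtracts that mean from both the training and the test images. A minimal standalone sketch of that step, using toy sizes and plain stack arrays instead of the pinned host buffers:

#include <cstdio>

int main() {
    const int num_train = 4, num_test = 2, input_size = 3;   // toy sizes, not the 227x227x3 images above
    float train[num_train * input_size] = {1, 2, 3,  3, 2, 1,  2, 2, 2,  2, 2, 2};
    float test[num_test * input_size]   = {5, 5, 5,  0, 0, 0};
    float mean[input_size] = {0};

    for (int j = 0; j < input_size; ++j) {                    // per-pixel mean over the training set
        for (int k = 0; k < num_train; ++k) mean[j] += train[k * input_size + j];
        mean[j] /= num_train;
    }
    for (int i = 0; i < num_train; ++i)                       // center the training images
        for (int j = 0; j < input_size; ++j) train[i * input_size + j] -= mean[j];
    for (int i = 0; i < num_test; ++i)                        // center the test images with the train mean
        for (int j = 0; j < input_size; ++j) test[i * input_size + j] -= mean[j];

    for (int j = 0; j < input_size; ++j) printf("mean[%d] = %.2f\n", j, mean[j]);
    return 0;
}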
7e718fe4febfd68f6f2e86ed9f12d93f1227cf56.hip
// !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathPointwise.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMathPointwise.cu"
#include <THH/THHGenerateByteType.h>
7e718fe4febfd68f6f2e86ed9f12d93f1227cf56.cu
#include "../THCTensorMathPointwise.cuh" #include "THCTensor.hpp" #include "THCStream.h" #include "../generic/THCTensorMathPointwise.cu" #include <THC/THCGenerateByteType.h>
a5c621af779efd8cf5f2bcea8a12fb2e3f1aaa01.hip
// !!! This is a file automatically generated by hipify!!! /* * ------------------------------------------------------------------------------ * * MIT License * * Copyright (c) 2021 Parallel Applications Modelling Group - GMAP * GMAP website: https://gmap.pucrs.br * * Pontifical Catholic University of Rio Grande do Sul (PUCRS) * Av. Ipiranga, 6681, Porto Alegre - Brazil, 90619-900 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * ------------------------------------------------------------------------------ * * The original NPB 3.4 version was written in Fortran and belongs to: * http://www.nas.nasa.gov/Software/NPB/ * * Authors of the Fortran code: * M. Yarrow * H. Jin * * ------------------------------------------------------------------------------ * * The serial C++ version is a translation of the original NPB 3.4 * Serial C++ version: https://github.com/GMAP/NPB-CPP/tree/master/NPB-SER * * Authors of the C++ code: * Dalvan Griebler <[email protected]> * Gabriell Araujo <[email protected]> * Jnior Lff <[email protected]> * * ------------------------------------------------------------------------------ * * The CUDA version is a parallel implementation of the serial C++ version * CUDA version: https://github.com/GMAP/NPB-GPU/tree/master/CUDA * * Authors of the CUDA code: * Gabriell Araujo <[email protected]> * * ------------------------------------------------------------------------------ */ #include <hip/hip_runtime.h> #include "../common/npb-CPP.hpp" #include "npbparams.hpp" #define PROFILING_TOTAL_TIME (0) #define PROFILING_CREATE (1) #define PROFILING_RANK (2) #define PROFILING_VERIFY (3) /*****************************************************************/ /* for serial IS, buckets are not really req'd to solve NPB1 IS */ /* spec, but their use on some machines improves performance, on */ /* other machines the use of buckets compromises performance, */ /* probably because it is extra computation which is not req'd. */ /* (note: mechanism not understood, probably cache related) */ /* example: SP2-66MhzWN: 50% speedup with buckets */ /* example: SGI Indy5000: 50% slowdown with buckets */ /* example: SGI O2000: 400% slowdown with buckets (Wow!) 
*/ /*****************************************************************/ /* to disable the use of buckets, comment out the following line */ #define USE_BUCKETS /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'S' #endif /*************/ /* CLASS S */ /*************/ #if CLASS == 'S' #define TOTAL_KEYS_LOG_2 (16) #define MAX_KEY_LOG_2 (11) #define NUM_BUCKETS_LOG_2 (9) #endif /*************/ /* CLASS W */ /*************/ #if CLASS == 'W' #define TOTAL_KEYS_LOG_2 (20) #define MAX_KEY_LOG_2 (16) #define NUM_BUCKETS_LOG_2 (10) #endif /*************/ /* CLASS A */ /*************/ #if CLASS == 'A' #define TOTAL_KEYS_LOG_2 (23) #define MAX_KEY_LOG_2 (19) #define NUM_BUCKETS_LOG_2 (10) #endif /*************/ /* CLASS B */ /*************/ #if CLASS == 'B' #define TOTAL_KEYS_LOG_2 (25) #define MAX_KEY_LOG_2 (21) #define NUM_BUCKETS_LOG_2 (10) #endif /*************/ /* CLASS C */ /*************/ #if CLASS == 'C' #define TOTAL_KEYS_LOG_2 (27) #define MAX_KEY_LOG_2 (23) #define NUM_BUCKETS_LOG_2 (10) #endif /*************/ /* CLASS D */ /*************/ #if CLASS == 'D' #define TOTAL_KEYS_LOG_2 (31) #define MAX_KEY_LOG_2 (27) #define NUM_BUCKETS_LOG_2 (10) #endif #if CLASS == 'D' #define TOTAL_KEYS (1L << TOTAL_KEYS_LOG_2) #else #define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2) #endif #define MAX_KEY (1 << MAX_KEY_LOG_2) #define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2) #define NUM_KEYS (TOTAL_KEYS) #define SIZE_OF_BUFFERS (NUM_KEYS) #define MAX_ITERATIONS (10) #define TEST_ARRAY_SIZE (5) /*************************************/ /* typedef: if necessary, change the */ /* size of INT_TYPE here by changing the */ /* INT_TYPE type to, say, long */ /*************************************/ #if CLASS == 'D' typedef long INT_TYPE; #else typedef int INT_TYPE; #endif /**********************/ /* partial verif info */ /**********************/ INT_TYPE test_index_array[TEST_ARRAY_SIZE], test_rank_array[TEST_ARRAY_SIZE], S_test_index_array[TEST_ARRAY_SIZE] = {48427,17148,23627,62548,4431}, S_test_rank_array[TEST_ARRAY_SIZE] = {0,18,346,64917,65463}, W_test_index_array[TEST_ARRAY_SIZE] = {357773,934767,875723,898999,404505}, W_test_rank_array[TEST_ARRAY_SIZE] = {1249,11698,1039987,1043896,1048018}, A_test_index_array[TEST_ARRAY_SIZE] = {2112377,662041,5336171,3642833,4250760}, A_test_rank_array[TEST_ARRAY_SIZE] = {104,17523,123928,8288932,8388264}, B_test_index_array[TEST_ARRAY_SIZE] = {41869,812306,5102857,18232239,26860214}, B_test_rank_array[TEST_ARRAY_SIZE] = {33422937,10244,59149,33135281,99}, C_test_index_array[TEST_ARRAY_SIZE] = {44172927,72999161,74326391,129606274,21736814}, C_test_rank_array[TEST_ARRAY_SIZE] = {61147,882988,266290,133997595,133525895}, D_test_index_array[TEST_ARRAY_SIZE] = {1317351170,995930646,1157283250,1503301535,1453734525}, D_test_rank_array[TEST_ARRAY_SIZE] = {1,36538729,1978098519,2145192618,2147425337}; /* global variables */ INT_TYPE passed_verification; INT_TYPE* key_array_device; INT_TYPE* key_buff1_device; INT_TYPE* key_buff2_device; INT_TYPE* index_array_device; INT_TYPE* rank_array_device; INT_TYPE* partial_verify_vals_device; INT_TYPE* passed_verification_device; INT_TYPE* key_scan_device; INT_TYPE* sum_device; size_t size_test_array_device; size_t size_key_array_device; size_t size_key_buff1_device; size_t size_key_buff2_device; size_t size_index_array_device; size_t size_rank_array_device; size_t size_partial_verify_vals_device; size_t size_passed_verification_device; size_t size_key_scan_device; size_t size_sum_device; size_t 
size_shared_data_on_rank_4; size_t size_shared_data_on_rank_5; size_t size_shared_data_on_full_verify_3; INT_TYPE threads_per_block_on_create_seq; INT_TYPE threads_per_block_on_rank; INT_TYPE threads_per_block_on_rank_1; INT_TYPE threads_per_block_on_rank_2; INT_TYPE threads_per_block_on_rank_3; INT_TYPE threads_per_block_on_rank_4; INT_TYPE threads_per_block_on_rank_5; INT_TYPE threads_per_block_on_rank_6; INT_TYPE threads_per_block_on_rank_7; INT_TYPE threads_per_block_on_full_verify; INT_TYPE threads_per_block_on_full_verify_1; INT_TYPE threads_per_block_on_full_verify_2; INT_TYPE threads_per_block_on_full_verify_3; INT_TYPE blocks_per_grid_on_create_seq; INT_TYPE blocks_per_grid_on_rank_1; INT_TYPE blocks_per_grid_on_rank_2; INT_TYPE blocks_per_grid_on_rank_3; INT_TYPE blocks_per_grid_on_rank_4; INT_TYPE blocks_per_grid_on_rank_5; INT_TYPE blocks_per_grid_on_rank_6; INT_TYPE blocks_per_grid_on_rank_7; INT_TYPE blocks_per_grid_on_full_verify_1; INT_TYPE blocks_per_grid_on_full_verify_2; INT_TYPE blocks_per_grid_on_full_verify_3; INT_TYPE amount_of_work_on_create_seq; INT_TYPE amount_of_work_on_rank_1; INT_TYPE amount_of_work_on_rank_2; INT_TYPE amount_of_work_on_rank_3; INT_TYPE amount_of_work_on_rank_4; INT_TYPE amount_of_work_on_rank_5; INT_TYPE amount_of_work_on_rank_6; INT_TYPE amount_of_work_on_rank_7; INT_TYPE amount_of_work_on_full_verify_1; INT_TYPE amount_of_work_on_full_verify_2; INT_TYPE amount_of_work_on_full_verify_3; int gpu_device_id; int total_devices; hipDeviceProp_t gpu_device_properties; extern __shared__ INT_TYPE extern_share_data[]; /* function declarations */ static void create_seq_gpu(double seed, double a); __global__ void create_seq_gpu_kernel(INT_TYPE* key_array, double seed, double a, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __device__ double find_my_seed_device(INT_TYPE kn, INT_TYPE np, long nn, double s, double a); static void full_verify_gpu(); __global__ void full_verify_gpu_kernel_1(INT_TYPE* key_array, INT_TYPE* key_buff2, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void full_verify_gpu_kernel_2(INT_TYPE* key_buff2, INT_TYPE* key_buff_ptr_global, INT_TYPE* key_array, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void full_verify_gpu_kernel_3(INT_TYPE* key_array, INT_TYPE* global_aux, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __device__ double randlc_device(double* X, double* A); static void rank_gpu(INT_TYPE iteration); __global__ void rank_gpu_kernel_1(INT_TYPE* key_array, INT_TYPE* partial_verify_vals, INT_TYPE* test_index_array, INT_TYPE iteration, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_2(INT_TYPE* key_buff1, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_3(INT_TYPE* key_buff_ptr, INT_TYPE* key_buff_ptr2, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_4(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE* sum, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_5(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_6(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE* offset, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_7(INT_TYPE* partial_verify_vals, INT_TYPE* key_buff_ptr, INT_TYPE* test_rank_array, INT_TYPE* passed_verification_device, INT_TYPE iteration, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); static void release_gpu(); 
static void setup_gpu(); /* is */ int main(int argc, char** argv){ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) printf(" DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n"); #endif #if defined(PROFILING) printf(" PROFILING mode on\n"); #endif INT_TYPE i, iteration; double timecounter; timer_clear(PROFILING_TOTAL_TIME); #if defined(PROFILING) timer_clear(PROFILING_CREATE); timer_clear(PROFILING_RANK); timer_clear(PROFILING_VERIFY); #endif #if defined(PROFILING) timer_start(PROFILING_TOTAL_TIME); #endif /* initialize the verification arrays if a valid class */ for(i=0; i<TEST_ARRAY_SIZE; i++){ switch(CLASS){ case 'S': test_index_array[i] = S_test_index_array[i]; test_rank_array[i] = S_test_rank_array[i]; break; case 'A': test_index_array[i] = A_test_index_array[i]; test_rank_array[i] = A_test_rank_array[i]; break; case 'W': test_index_array[i] = W_test_index_array[i]; test_rank_array[i] = W_test_rank_array[i]; break; case 'B': test_index_array[i] = B_test_index_array[i]; test_rank_array[i] = B_test_rank_array[i]; break; case 'C': test_index_array[i] = C_test_index_array[i]; test_rank_array[i] = C_test_rank_array[i]; break; case 'D': test_index_array[i] = D_test_index_array[i]; test_rank_array[i] = D_test_rank_array[i]; break; }; } /* printout initial NPB info */ printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - IS Benchmark\n\n"); printf(" Size: %ld (class %c)\n", (long)TOTAL_KEYS, CLASS); printf(" Iterations: %d\n", MAX_ITERATIONS); setup_gpu(); #if defined(PROFILING) timer_start(PROFILING_CREATE); #endif /* generate random number sequence and subsequent keys on all procs */ create_seq_gpu(314159265.00, /* random number gen seed */ 1220703125.00); /* random number gen mult */ #if defined(PROFILING) timer_stop(PROFILING_CREATE); #endif /* * do one interation for free (i.e., untimed) to guarantee initialization of * all data and code pages and respective tables */ rank_gpu(1); /* start verification counter */ passed_verification = 0; hipMemcpy(passed_verification_device, &passed_verification, size_passed_verification_device, hipMemcpyHostToDevice); if(CLASS != 'S')printf( "\n iteration\n"); #if defined(PROFILING) timer_start(PROFILING_RANK); #else timer_start(PROFILING_TOTAL_TIME); #endif /* this is the main iteration */ for(iteration=1; iteration<=MAX_ITERATIONS; iteration++){ if(CLASS != 'S')printf( " %ld\n", (long)iteration); rank_gpu(iteration); } #if defined(PROFILING) timer_stop(PROFILING_RANK); #else timer_stop(PROFILING_TOTAL_TIME); #endif hipMemcpy(&passed_verification, passed_verification_device, size_passed_verification_device, hipMemcpyDeviceToHost); /* * this tests that keys are in sequence: sorting of last ranked key seq * occurs here, but is an untimed operation */ #if defined(PROFILING) timer_start(PROFILING_VERIFY); #endif full_verify_gpu(); #if defined(PROFILING) timer_stop(PROFILING_VERIFY); #endif #if defined(PROFILING) timer_stop(PROFILING_TOTAL_TIME); timecounter = timer_read(PROFILING_RANK); #else timecounter = timer_read(PROFILING_TOTAL_TIME); #endif char gpu_config[256]; char gpu_config_string[2048]; #if defined(PROFILING) sprintf(gpu_config, "%5s\t%25s\t%25s\t%25s\n", "GPU Kernel", "Threads Per Block", "Time in Seconds", "Time in Percentage"); strcpy(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\t%25f\t%24.2f%%\n", " create", (long) threads_per_block_on_create_seq, timer_read(PROFILING_CREATE), (timer_read(PROFILING_CREATE)*100/timer_read(PROFILING_TOTAL_TIME))); 
strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\t%25f\t%24.2f%%\n", " rank", (long) threads_per_block_on_rank, timer_read(PROFILING_RANK), (timer_read(PROFILING_RANK)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\t%25f\t%24.2f%%\n", " verify", (long) threads_per_block_on_full_verify, timer_read(PROFILING_VERIFY), (timer_read(PROFILING_VERIFY)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); #else sprintf(gpu_config, "%5s\t%25s\n", "GPU Kernel", "Threads Per Block"); strcpy(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\n", " create", (long) threads_per_block_on_create_seq); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\n", " rank", (long) threads_per_block_on_rank); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\n", " verify", (long) threads_per_block_on_full_verify); strcat(gpu_config_string, gpu_config); #endif /* the final printout */ if(passed_verification != 5*MAX_ITERATIONS+1){passed_verification = 0;} c_print_results((char*)"IS", CLASS, (int)(TOTAL_KEYS/64), 64, 0, MAX_ITERATIONS, timecounter, ((double)(MAX_ITERATIONS*TOTAL_KEYS))/timecounter/1000000.0, (char*)"keys ranked", (int)passed_verification, (char*)NPBVERSION, (char*)COMPILETIME, (char*)COMPILERVERSION, (char*)LIBVERSION, (char*)CPU_MODEL, (char*)gpu_device_properties.name, (char*)gpu_config_string, (char*)CS1, (char*)CS2, (char*)CS3, (char*)CS4, (char*)CS5, (char*)CS6, (char*)CS7); release_gpu(); return 0; } static void create_seq_gpu(double seed, double a){ hipLaunchKernelGGL(( create_seq_gpu_kernel), dim3(blocks_per_grid_on_create_seq), dim3( threads_per_block_on_create_seq), 0, 0, key_array_device, seed, a, blocks_per_grid_on_create_seq, amount_of_work_on_create_seq); hipDeviceSynchronize(); } __global__ void create_seq_gpu_kernel(INT_TYPE* key_array, double seed, double a, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ double x, s; INT_TYPE i, k; INT_TYPE k1, k2; double an = a; INT_TYPE myid, num_procs; INT_TYPE mq; myid = blockIdx.x*blockDim.x+threadIdx.x; num_procs = amount_of_work; mq = (NUM_KEYS + num_procs - 1) / num_procs; k1 = mq * myid; k2 = k1 + mq; if(k2 > NUM_KEYS){k2 = NUM_KEYS;} s = find_my_seed_device(myid, num_procs, (long)4*NUM_KEYS, seed, an); k = MAX_KEY/4; for(i=k1; i<k2; i++){ x = randlc_device(&s, &an); x += randlc_device(&s, &an); x += randlc_device(&s, &an); x += randlc_device(&s, &an); key_array[i] = k*x; } } __device__ double find_my_seed_device(INT_TYPE kn, INT_TYPE np, long nn, double s, double a){ double t1,t2; long mq,nq,kk,ik; if(kn==0){return s;} mq = (nn/4 + np - 1) / np; nq = mq * 4 * kn; t1 = s; t2 = a; kk = nq; while(kk > 1){ ik = kk / 2; if(2*ik==kk){ (void)randlc_device(&t2, &t2); kk = ik; }else{ (void)randlc_device(&t1, &t2); kk = kk - 1; } } (void)randlc_device(&t1, &t2); return(t1); } static void full_verify_gpu(){ INT_TYPE* memory_aux_device; size_t size_memory_aux=sizeof(INT_TYPE)*(amount_of_work_on_full_verify_3/threads_per_block_on_full_verify_3); hipMalloc(&memory_aux_device, size_memory_aux); /* full_verify_gpu_kernel_1 */ hipLaunchKernelGGL(( full_verify_gpu_kernel_1), dim3(blocks_per_grid_on_full_verify_1), dim3( threads_per_block_on_full_verify_1), 0, 0, key_array_device, key_buff2_device, blocks_per_grid_on_full_verify_1, amount_of_work_on_full_verify_1); hipDeviceSynchronize(); /* full_verify_gpu_kernel_2 */ hipLaunchKernelGGL(( full_verify_gpu_kernel_2), 
dim3(blocks_per_grid_on_full_verify_2), dim3( threads_per_block_on_full_verify_2), 0, 0, key_buff2_device, key_buff1_device, key_array_device, blocks_per_grid_on_full_verify_2, amount_of_work_on_full_verify_2); hipDeviceSynchronize(); /* full_verify_gpu_kernel_3 */ hipLaunchKernelGGL(( full_verify_gpu_kernel_3), dim3(blocks_per_grid_on_full_verify_3), dim3( threads_per_block_on_full_verify_3), size_shared_data_on_full_verify_3, 0, key_array_device, memory_aux_device, blocks_per_grid_on_full_verify_3, amount_of_work_on_full_verify_3); hipDeviceSynchronize(); /* reduce on cpu */ INT_TYPE i, j = 0; INT_TYPE* memory_aux_host=(INT_TYPE*)malloc(size_memory_aux); hipMemcpy(memory_aux_host, memory_aux_device, size_memory_aux, hipMemcpyDeviceToHost); for(i=0; i<size_memory_aux/sizeof(INT_TYPE); i++){ j += memory_aux_host[i]; } if(j!=0){ printf( "Full_verify: number of keys out of sort: %ld\n", (long)j ); }else{ passed_verification++; } hipFree(memory_aux_device); free(memory_aux_host); } __global__ void full_verify_gpu_kernel_1(INT_TYPE* key_array, INT_TYPE* key_buff2, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE i = blockIdx.x*blockDim.x+threadIdx.x; key_buff2[i] = key_array[i]; } __global__ void full_verify_gpu_kernel_2(INT_TYPE* key_buff2, INT_TYPE* key_buff_ptr_global, INT_TYPE* key_array, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE value = key_buff2[blockIdx.x*blockDim.x+threadIdx.x]; #if CLASS == 'D' INT_TYPE index = atomicAdd( (unsigned long long int*) &key_buff_ptr_global[value], (unsigned long long int) -1) -1; #else INT_TYPE index = atomicAdd(&key_buff_ptr_global[value], -1) -1; #endif key_array[index] = value; } __global__ void full_verify_gpu_kernel_3(INT_TYPE* key_array, INT_TYPE* global_aux, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE* shared_aux = (INT_TYPE*)(extern_share_data); INT_TYPE i = (blockIdx.x*blockDim.x+threadIdx.x) + 1; if(i<NUM_KEYS){ if(key_array[i-1]>key_array[i]){shared_aux[threadIdx.x]=1;} else{shared_aux[threadIdx.x]=0;} }else{shared_aux[threadIdx.x]=0;} __syncthreads(); for(i=blockDim.x/2; i>0; i>>=1){ if(threadIdx.x<i){ shared_aux[threadIdx.x] += shared_aux[threadIdx.x+i]; } __syncthreads(); } if(threadIdx.x==0){global_aux[blockIdx.x]=shared_aux[0];} } __device__ double randlc_device(double* X, double* A){ double T1, T2, T3, T4; double A1; double A2; double X1; double X2; double Z; INT_TYPE j; /* * -------------------------------------------------------------------- * break A into two parts such that A = 2^23 * A1 + A2 and set X = N. * -------------------------------------------------------------------- */ T1 = R23 * *A; j = T1; A1 = j; A2 = *A - T23 * A1; /* * -------------------------------------------------------------------- * break X into two parts such that X = 2^23 * X1 + X2, compute * Z = A1 * X2 + A2 * X1 (mod 2^23), and then * X = 2^23 * Z + A2 * X2 (mod 2^46). 
* -------------------------------------------------------------------- */ T1 = R23 * *X; j = T1; X1 = j; X2 = *X - T23 * X1; T1 = A1 * X2 + A2 * X1; j = R23 * T1; T2 = j; Z = T1 - T23 * T2; T3 = T23 * Z + A2 * X2; j = R46 * T3; T4 = j; *X = T3 - T46 * T4; return(R46 * *X); } static void rank_gpu(INT_TYPE iteration){ /* rank_gpu_kernel_1 */ hipLaunchKernelGGL(( rank_gpu_kernel_1), dim3(blocks_per_grid_on_rank_1), dim3( threads_per_block_on_rank_1), 0, 0, key_array_device, partial_verify_vals_device, index_array_device, iteration, blocks_per_grid_on_rank_1, amount_of_work_on_rank_1); /* rank_gpu_kernel_2 */ hipLaunchKernelGGL(( rank_gpu_kernel_2), dim3(blocks_per_grid_on_rank_2), dim3( threads_per_block_on_rank_2), 0, 0, key_buff1_device, blocks_per_grid_on_rank_2, amount_of_work_on_rank_2); /* rank_gpu_kernel_3 */ hipLaunchKernelGGL(( rank_gpu_kernel_3), dim3(blocks_per_grid_on_rank_3), dim3( threads_per_block_on_rank_3), 0, 0, key_buff1_device, key_array_device, blocks_per_grid_on_rank_3, amount_of_work_on_rank_3); /* rank_gpu_kernel_4 */ hipLaunchKernelGGL(( rank_gpu_kernel_4), dim3(blocks_per_grid_on_rank_4), dim3( threads_per_block_on_rank_4), size_shared_data_on_rank_4, 0, key_buff1_device, key_buff1_device, sum_device, blocks_per_grid_on_rank_4, amount_of_work_on_rank_4); /* rank_gpu_kernel_5 */ hipLaunchKernelGGL(( rank_gpu_kernel_5), dim3(blocks_per_grid_on_rank_5), dim3( threads_per_block_on_rank_5), size_shared_data_on_rank_5, 0, sum_device, sum_device, blocks_per_grid_on_rank_5, amount_of_work_on_rank_5); /* rank_gpu_kernel_6 */ hipLaunchKernelGGL(( rank_gpu_kernel_6), dim3(blocks_per_grid_on_rank_6), dim3( threads_per_block_on_rank_6), 0, 0, key_buff1_device, key_buff1_device, sum_device, blocks_per_grid_on_rank_6, amount_of_work_on_rank_6); /* rank_gpu_kernel_7 */ hipLaunchKernelGGL(( rank_gpu_kernel_7), dim3(blocks_per_grid_on_rank_7), dim3( threads_per_block_on_rank_7), 0, 0, partial_verify_vals_device, key_buff1_device, rank_array_device, passed_verification_device, iteration, blocks_per_grid_on_rank_7, amount_of_work_on_rank_7); } __global__ void rank_gpu_kernel_1(INT_TYPE* key_array, INT_TYPE* partial_verify_vals, INT_TYPE* test_index_array, INT_TYPE iteration, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ key_array[iteration] = iteration; key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration; /* * -------------------------------------------------------------------- * determine where the partial verify test keys are, * -------------------------------------------------------------------- * load into top of array bucket_size * -------------------------------------------------------------------- */ #pragma unroll for(INT_TYPE i=0; i<TEST_ARRAY_SIZE; i++){ partial_verify_vals[i] = key_array[test_index_array[i]]; } } __global__ void rank_gpu_kernel_2(INT_TYPE* key_buff1, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ key_buff1[blockIdx.x*blockDim.x+threadIdx.x] = 0; } __global__ void rank_gpu_kernel_3(INT_TYPE* key_buff_ptr, INT_TYPE* key_buff_ptr2, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ /* * -------------------------------------------------------------------- * in this section, the keys themselves are used as their * own indexes to determine how many of each there are: their * individual population * -------------------------------------------------------------------- */ #if CLASS == 'D' atomicAdd( (unsigned long long int*) &key_buff_ptr[key_buff_ptr2[blockIdx.x*blockDim.x+threadIdx.x]], (unsigned long long int) 1); #else 
atomicAdd(&key_buff_ptr[key_buff_ptr2[blockIdx.x*blockDim.x+threadIdx.x]], 1); #endif } __global__ void rank_gpu_kernel_4(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE* sum, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE* shared_data = (INT_TYPE*)(extern_share_data); shared_data[threadIdx.x] = 0; INT_TYPE position = blockDim.x + threadIdx.x; INT_TYPE factor = MAX_KEY / number_of_blocks; INT_TYPE start = factor * blockIdx.x; INT_TYPE end = start + factor; for(INT_TYPE i=start; i<end; i+=blockDim.x){ shared_data[position] = source[i + threadIdx.x]; for(INT_TYPE offset=1; offset<blockDim.x; offset<<=1){ __syncthreads(); INT_TYPE t = shared_data[position] + shared_data[position - offset]; __syncthreads(); shared_data[position] = t; } INT_TYPE prv_val = (i == start) ? 0 : destiny[i - 1]; destiny[i + threadIdx.x] = shared_data[position] + prv_val; } __syncthreads(); if(threadIdx.x==0){sum[blockIdx.x]=destiny[end-1];} } __global__ void rank_gpu_kernel_5(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE* shared_data = (INT_TYPE*)(extern_share_data); shared_data[threadIdx.x] = 0; INT_TYPE position = blockDim.x + threadIdx.x; shared_data[position] = source[threadIdx.x]; for(INT_TYPE offset=1; offset<blockDim.x; offset<<=1){ __syncthreads(); INT_TYPE t = shared_data[position] + shared_data[position - offset]; __syncthreads(); shared_data[position] = t; } __syncthreads(); destiny[threadIdx.x] = shared_data[position - 1]; } __global__ void rank_gpu_kernel_6(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE* offset, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE factor = MAX_KEY / number_of_blocks; INT_TYPE start = factor * blockIdx.x; INT_TYPE end = start + factor; INT_TYPE sum = offset[blockIdx.x]; for(INT_TYPE i=start; i<end; i+=blockDim.x){ destiny[i + threadIdx.x] = source[i + threadIdx.x] + sum; } } __global__ void rank_gpu_kernel_7(INT_TYPE* partial_verify_vals, INT_TYPE* key_buff_ptr, INT_TYPE* test_rank_array, INT_TYPE* passed_verification_device, INT_TYPE iteration, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ /* * -------------------------------------------------------------------- * this is the partial verify test section * observe that test_rank_array vals are * shifted differently for different cases * -------------------------------------------------------------------- */ INT_TYPE i, k; INT_TYPE passed_verification = 0; for(i=0; i<TEST_ARRAY_SIZE; i++){ /* test vals were put here on partial_verify_vals */ k = partial_verify_vals[i]; if(0<k && k<=NUM_KEYS-1){ INT_TYPE key_rank = key_buff_ptr[k-1]; INT_TYPE failed = 0; switch(CLASS){ case 'S': if(i<=2){ if(key_rank != test_rank_array[i]+iteration) failed = 1; else passed_verification++; }else{ if(key_rank != test_rank_array[i]-iteration) failed = 1; else passed_verification++; } break; case 'W': if(i<2){ if(key_rank != test_rank_array[i]+(iteration-2)) failed = 1; else passed_verification++; }else{ if(key_rank != test_rank_array[i]-iteration) failed = 1; else passed_verification++; } break; case 'A': if(i<=2){ if(key_rank != test_rank_array[i]+(iteration-1)) failed = 1; else passed_verification++; }else{ if(key_rank != test_rank_array[i]-(iteration-1)) failed = 1; else passed_verification++; } break; case 'B': if(i==1 || i==2 || i==4){ if(key_rank != test_rank_array[i]+iteration) failed = 1; else passed_verification++; } else{ if(key_rank != test_rank_array[i]-iteration) failed = 1; else passed_verification++; } break; case 'C': if(i<=2){ if(key_rank != 
test_rank_array[i]+iteration) failed = 1; else passed_verification++; }else{ if(key_rank != test_rank_array[i]-iteration) failed = 1; else passed_verification++; } break; case 'D': if(i<2){ if(key_rank != test_rank_array[i]+iteration) failed = 1; else passed_verification++; }else{ if(key_rank != test_rank_array[i]-iteration) failed = 1; else passed_verification++; } break; } if(failed==1){ printf("Failed partial verification: iteration %ld, test key %ld\n", (long)iteration, (long)i); } } } *passed_verification_device += passed_verification; } static void release_gpu(){ hipFree(key_array_device); hipFree(key_buff1_device); hipFree(key_buff2_device); hipFree(index_array_device); hipFree(rank_array_device); hipFree(partial_verify_vals_device); hipFree(passed_verification_device); hipFree(key_scan_device); hipFree(sum_device); } static void setup_gpu(){ /* * struct hipDeviceProp_t{ * char name[256]; * size_t totalGlobalMem; * size_t sharedMemPerBlock; * int regsPerBlock; * int warpSize; * size_t memPitch; * int maxThreadsPerBlock; * int maxThreadsDim[3]; * int maxGridSize[3]; * size_t totalConstMem; * int major; * int minor; * int clockRate; * size_t textureAlignment; * int deviceOverlap; * int multiProcessorCount; * int kernelExecTimeoutEnabled; * int integrated; * int canMapHostMemory; * int computeMode; * int concurrentKernels; * int ECCEnabled; * int pciBusID; * int pciDeviceID; * int tccDriver; * } */ /* amount of available devices */ hipGetDeviceCount(&total_devices); /* define gpu_device */ if(total_devices==0){ printf("\n\n\nNo Nvidia GPU found!\n\n\n"); exit(-1); }else if((GPU_DEVICE>=0)&& (GPU_DEVICE<total_devices)){ gpu_device_id = GPU_DEVICE; }else{ gpu_device_id = 0; } hipSetDevice(gpu_device_id); hipGetDeviceProperties(&gpu_device_properties, gpu_device_id); /* define threads_per_block */ if((IS_THREADS_PER_BLOCK_ON_CREATE_SEQ>=1)&& (IS_THREADS_PER_BLOCK_ON_CREATE_SEQ<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_create_seq = IS_THREADS_PER_BLOCK_ON_CREATE_SEQ; }else{ threads_per_block_on_create_seq = gpu_device_properties.warpSize; } if((IS_THREADS_PER_BLOCK_ON_RANK>=1)&& (IS_THREADS_PER_BLOCK_ON_RANK<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_rank = IS_THREADS_PER_BLOCK_ON_RANK; }else{ threads_per_block_on_rank = gpu_device_properties.warpSize; } if((IS_THREADS_PER_BLOCK_ON_FULL_VERIFY>=1)&& (IS_THREADS_PER_BLOCK_ON_FULL_VERIFY<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_full_verify = IS_THREADS_PER_BLOCK_ON_FULL_VERIFY; }else{ threads_per_block_on_full_verify = gpu_device_properties.warpSize; } threads_per_block_on_rank_1=1; threads_per_block_on_rank_2=threads_per_block_on_rank; threads_per_block_on_rank_3=threads_per_block_on_rank; threads_per_block_on_rank_4=threads_per_block_on_rank; threads_per_block_on_rank_5=threads_per_block_on_rank; threads_per_block_on_rank_6=threads_per_block_on_rank; threads_per_block_on_rank_7=1; threads_per_block_on_full_verify_1=threads_per_block_on_full_verify; threads_per_block_on_full_verify_2=threads_per_block_on_full_verify; threads_per_block_on_full_verify_3=threads_per_block_on_full_verify; amount_of_work_on_create_seq=threads_per_block_on_create_seq*threads_per_block_on_create_seq; amount_of_work_on_rank_1=1; amount_of_work_on_rank_2=MAX_KEY; amount_of_work_on_rank_3=NUM_KEYS; amount_of_work_on_rank_4=threads_per_block_on_rank_4*threads_per_block_on_rank_4; amount_of_work_on_rank_5=threads_per_block_on_rank_5; 
amount_of_work_on_rank_6=threads_per_block_on_rank_6*threads_per_block_on_rank_6; amount_of_work_on_rank_7=1; amount_of_work_on_full_verify_1=NUM_KEYS; amount_of_work_on_full_verify_2=NUM_KEYS; amount_of_work_on_full_verify_3=NUM_KEYS; blocks_per_grid_on_create_seq=(ceil((double)(amount_of_work_on_create_seq)/(double)(threads_per_block_on_create_seq))); blocks_per_grid_on_rank_1=1; blocks_per_grid_on_rank_2=(ceil((double)(amount_of_work_on_rank_2)/(double)(threads_per_block_on_rank_2))); blocks_per_grid_on_rank_3=(ceil((double)(amount_of_work_on_rank_3)/(double)(threads_per_block_on_rank_3))); if(amount_of_work_on_rank_4 > MAX_KEY){amount_of_work_on_rank_4=MAX_KEY;} blocks_per_grid_on_rank_4=(ceil((double)(amount_of_work_on_rank_4)/(double)(threads_per_block_on_rank_4))); blocks_per_grid_on_rank_5=1; if(amount_of_work_on_rank_6 > MAX_KEY){amount_of_work_on_rank_6=MAX_KEY;} blocks_per_grid_on_rank_6=(ceil((double)(amount_of_work_on_rank_6)/(double)(threads_per_block_on_rank_6))); blocks_per_grid_on_rank_7=1; blocks_per_grid_on_full_verify_1=(ceil((double)(amount_of_work_on_full_verify_1)/(double)(threads_per_block_on_full_verify_1))); blocks_per_grid_on_full_verify_2=(ceil((double)(amount_of_work_on_full_verify_2)/(double)(threads_per_block_on_full_verify_2))); blocks_per_grid_on_full_verify_3=(ceil((double)(amount_of_work_on_full_verify_3)/(double)(threads_per_block_on_full_verify_3))); size_test_array_device=TEST_ARRAY_SIZE*sizeof(INT_TYPE); size_key_array_device=SIZE_OF_BUFFERS*sizeof(INT_TYPE); size_key_buff1_device=MAX_KEY*sizeof(INT_TYPE); size_key_buff2_device=SIZE_OF_BUFFERS*sizeof(INT_TYPE); size_index_array_device=TEST_ARRAY_SIZE*sizeof(INT_TYPE); size_rank_array_device=TEST_ARRAY_SIZE*sizeof(INT_TYPE); size_partial_verify_vals_device=TEST_ARRAY_SIZE*sizeof(INT_TYPE); size_passed_verification_device=1*sizeof(INT_TYPE); size_key_scan_device=MAX_KEY*sizeof(INT_TYPE); size_sum_device=threads_per_block_on_rank*sizeof(INT_TYPE); size_shared_data_on_rank_4=2*threads_per_block_on_rank_4*sizeof(INT_TYPE); size_shared_data_on_rank_5=2*threads_per_block_on_rank_5*sizeof(INT_TYPE); size_shared_data_on_full_verify_3=threads_per_block_on_full_verify_3*sizeof(INT_TYPE); hipMalloc(&key_array_device, size_key_array_device); hipMalloc(&key_buff1_device, size_key_buff1_device); hipMalloc(&key_buff2_device, size_key_buff2_device); hipMalloc(&index_array_device, size_index_array_device); hipMalloc(&rank_array_device, size_rank_array_device); hipMalloc(&partial_verify_vals_device, size_partial_verify_vals_device); hipMalloc(&passed_verification_device, size_passed_verification_device); hipMalloc(&key_scan_device, size_key_scan_device); hipMalloc(&sum_device, size_sum_device); hipMemcpy(index_array_device, test_index_array, size_index_array_device, hipMemcpyHostToDevice); hipMemcpy(rank_array_device, test_rank_array, size_rank_array_device, hipMemcpyHostToDevice); }
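/*
 * Illustrative sketch (hypothetical kernel, not taken from either file of this
 * pair): the .hip entry ending here and the .cu entry that follows differ mainly
 * in launch syntax and runtime-API prefixes. scale_kernel below shows the CUDA
 * launch form and, in the trailing comment, the hipLaunchKernelGGL form that
 * hipify emits for it.
 */
#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, float factor, int n){
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    if(i < n){ data[i] *= factor; }
}

static void launch_scale(float* data_device, float factor, int n){
    int threads_per_block = 256;
    int blocks_per_grid = (n + threads_per_block - 1) / threads_per_block;
    /* CUDA syntax, as in the .cu sibling of this pair: */
    scale_kernel<<<blocks_per_grid, threads_per_block, 0, 0>>>(data_device, factor, n);
    /*
     * hipify rewrites the launch above into the form seen throughout the .hip file:
     * hipLaunchKernelGGL((scale_kernel), dim3(blocks_per_grid),
     *     dim3(threads_per_block), 0, 0, data_device, factor, n);
     * and cudaDeviceSynchronize()/cudaMalloc/cudaMemcpy become the hip* equivalents.
     */
    cudaDeviceSynchronize();
}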
a5c621af779efd8cf5f2bcea8a12fb2e3f1aaa01.cu
/* * ------------------------------------------------------------------------------ * * MIT License * * Copyright (c) 2021 Parallel Applications Modelling Group - GMAP * GMAP website: https://gmap.pucrs.br * * Pontifical Catholic University of Rio Grande do Sul (PUCRS) * Av. Ipiranga, 6681, Porto Alegre - Brazil, 90619-900 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * ------------------------------------------------------------------------------ * * The original NPB 3.4 version was written in Fortran and belongs to: * http://www.nas.nasa.gov/Software/NPB/ * * Authors of the Fortran code: * M. Yarrow * H. Jin * * ------------------------------------------------------------------------------ * * The serial C++ version is a translation of the original NPB 3.4 * Serial C++ version: https://github.com/GMAP/NPB-CPP/tree/master/NPB-SER * * Authors of the C++ code: * Dalvan Griebler <[email protected]> * Gabriell Araujo <[email protected]> * Júnior Löff <[email protected]> * * ------------------------------------------------------------------------------ * * The CUDA version is a parallel implementation of the serial C++ version * CUDA version: https://github.com/GMAP/NPB-GPU/tree/master/CUDA * * Authors of the CUDA code: * Gabriell Araujo <[email protected]> * * ------------------------------------------------------------------------------ */ #include <cuda.h> #include "../common/npb-CPP.hpp" #include "npbparams.hpp" #define PROFILING_TOTAL_TIME (0) #define PROFILING_CREATE (1) #define PROFILING_RANK (2) #define PROFILING_VERIFY (3) /*****************************************************************/ /* for serial IS, buckets are not really req'd to solve NPB1 IS */ /* spec, but their use on some machines improves performance, on */ /* other machines the use of buckets compromises performance, */ /* probably because it is extra computation which is not req'd. */ /* (note: mechanism not understood, probably cache related) */ /* example: SP2-66MhzWN: 50% speedup with buckets */ /* example: SGI Indy5000: 50% slowdown with buckets */ /* example: SGI O2000: 400% slowdown with buckets (Wow!) 
*/ /*****************************************************************/ /* to disable the use of buckets, comment out the following line */ #define USE_BUCKETS /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'S' #endif /*************/ /* CLASS S */ /*************/ #if CLASS == 'S' #define TOTAL_KEYS_LOG_2 (16) #define MAX_KEY_LOG_2 (11) #define NUM_BUCKETS_LOG_2 (9) #endif /*************/ /* CLASS W */ /*************/ #if CLASS == 'W' #define TOTAL_KEYS_LOG_2 (20) #define MAX_KEY_LOG_2 (16) #define NUM_BUCKETS_LOG_2 (10) #endif /*************/ /* CLASS A */ /*************/ #if CLASS == 'A' #define TOTAL_KEYS_LOG_2 (23) #define MAX_KEY_LOG_2 (19) #define NUM_BUCKETS_LOG_2 (10) #endif /*************/ /* CLASS B */ /*************/ #if CLASS == 'B' #define TOTAL_KEYS_LOG_2 (25) #define MAX_KEY_LOG_2 (21) #define NUM_BUCKETS_LOG_2 (10) #endif /*************/ /* CLASS C */ /*************/ #if CLASS == 'C' #define TOTAL_KEYS_LOG_2 (27) #define MAX_KEY_LOG_2 (23) #define NUM_BUCKETS_LOG_2 (10) #endif /*************/ /* CLASS D */ /*************/ #if CLASS == 'D' #define TOTAL_KEYS_LOG_2 (31) #define MAX_KEY_LOG_2 (27) #define NUM_BUCKETS_LOG_2 (10) #endif #if CLASS == 'D' #define TOTAL_KEYS (1L << TOTAL_KEYS_LOG_2) #else #define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2) #endif #define MAX_KEY (1 << MAX_KEY_LOG_2) #define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2) #define NUM_KEYS (TOTAL_KEYS) #define SIZE_OF_BUFFERS (NUM_KEYS) #define MAX_ITERATIONS (10) #define TEST_ARRAY_SIZE (5) /*************************************/ /* typedef: if necessary, change the */ /* size of INT_TYPE here by changing the */ /* INT_TYPE type to, say, long */ /*************************************/ #if CLASS == 'D' typedef long INT_TYPE; #else typedef int INT_TYPE; #endif /**********************/ /* partial verif info */ /**********************/ INT_TYPE test_index_array[TEST_ARRAY_SIZE], test_rank_array[TEST_ARRAY_SIZE], S_test_index_array[TEST_ARRAY_SIZE] = {48427,17148,23627,62548,4431}, S_test_rank_array[TEST_ARRAY_SIZE] = {0,18,346,64917,65463}, W_test_index_array[TEST_ARRAY_SIZE] = {357773,934767,875723,898999,404505}, W_test_rank_array[TEST_ARRAY_SIZE] = {1249,11698,1039987,1043896,1048018}, A_test_index_array[TEST_ARRAY_SIZE] = {2112377,662041,5336171,3642833,4250760}, A_test_rank_array[TEST_ARRAY_SIZE] = {104,17523,123928,8288932,8388264}, B_test_index_array[TEST_ARRAY_SIZE] = {41869,812306,5102857,18232239,26860214}, B_test_rank_array[TEST_ARRAY_SIZE] = {33422937,10244,59149,33135281,99}, C_test_index_array[TEST_ARRAY_SIZE] = {44172927,72999161,74326391,129606274,21736814}, C_test_rank_array[TEST_ARRAY_SIZE] = {61147,882988,266290,133997595,133525895}, D_test_index_array[TEST_ARRAY_SIZE] = {1317351170,995930646,1157283250,1503301535,1453734525}, D_test_rank_array[TEST_ARRAY_SIZE] = {1,36538729,1978098519,2145192618,2147425337}; /* global variables */ INT_TYPE passed_verification; INT_TYPE* key_array_device; INT_TYPE* key_buff1_device; INT_TYPE* key_buff2_device; INT_TYPE* index_array_device; INT_TYPE* rank_array_device; INT_TYPE* partial_verify_vals_device; INT_TYPE* passed_verification_device; INT_TYPE* key_scan_device; INT_TYPE* sum_device; size_t size_test_array_device; size_t size_key_array_device; size_t size_key_buff1_device; size_t size_key_buff2_device; size_t size_index_array_device; size_t size_rank_array_device; size_t size_partial_verify_vals_device; size_t size_passed_verification_device; size_t size_key_scan_device; size_t size_sum_device; size_t 
size_shared_data_on_rank_4; size_t size_shared_data_on_rank_5; size_t size_shared_data_on_full_verify_3; INT_TYPE threads_per_block_on_create_seq; INT_TYPE threads_per_block_on_rank; INT_TYPE threads_per_block_on_rank_1; INT_TYPE threads_per_block_on_rank_2; INT_TYPE threads_per_block_on_rank_3; INT_TYPE threads_per_block_on_rank_4; INT_TYPE threads_per_block_on_rank_5; INT_TYPE threads_per_block_on_rank_6; INT_TYPE threads_per_block_on_rank_7; INT_TYPE threads_per_block_on_full_verify; INT_TYPE threads_per_block_on_full_verify_1; INT_TYPE threads_per_block_on_full_verify_2; INT_TYPE threads_per_block_on_full_verify_3; INT_TYPE blocks_per_grid_on_create_seq; INT_TYPE blocks_per_grid_on_rank_1; INT_TYPE blocks_per_grid_on_rank_2; INT_TYPE blocks_per_grid_on_rank_3; INT_TYPE blocks_per_grid_on_rank_4; INT_TYPE blocks_per_grid_on_rank_5; INT_TYPE blocks_per_grid_on_rank_6; INT_TYPE blocks_per_grid_on_rank_7; INT_TYPE blocks_per_grid_on_full_verify_1; INT_TYPE blocks_per_grid_on_full_verify_2; INT_TYPE blocks_per_grid_on_full_verify_3; INT_TYPE amount_of_work_on_create_seq; INT_TYPE amount_of_work_on_rank_1; INT_TYPE amount_of_work_on_rank_2; INT_TYPE amount_of_work_on_rank_3; INT_TYPE amount_of_work_on_rank_4; INT_TYPE amount_of_work_on_rank_5; INT_TYPE amount_of_work_on_rank_6; INT_TYPE amount_of_work_on_rank_7; INT_TYPE amount_of_work_on_full_verify_1; INT_TYPE amount_of_work_on_full_verify_2; INT_TYPE amount_of_work_on_full_verify_3; int gpu_device_id; int total_devices; cudaDeviceProp gpu_device_properties; extern __shared__ INT_TYPE extern_share_data[]; /* function declarations */ static void create_seq_gpu(double seed, double a); __global__ void create_seq_gpu_kernel(INT_TYPE* key_array, double seed, double a, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __device__ double find_my_seed_device(INT_TYPE kn, INT_TYPE np, long nn, double s, double a); static void full_verify_gpu(); __global__ void full_verify_gpu_kernel_1(INT_TYPE* key_array, INT_TYPE* key_buff2, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void full_verify_gpu_kernel_2(INT_TYPE* key_buff2, INT_TYPE* key_buff_ptr_global, INT_TYPE* key_array, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void full_verify_gpu_kernel_3(INT_TYPE* key_array, INT_TYPE* global_aux, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __device__ double randlc_device(double* X, double* A); static void rank_gpu(INT_TYPE iteration); __global__ void rank_gpu_kernel_1(INT_TYPE* key_array, INT_TYPE* partial_verify_vals, INT_TYPE* test_index_array, INT_TYPE iteration, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_2(INT_TYPE* key_buff1, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_3(INT_TYPE* key_buff_ptr, INT_TYPE* key_buff_ptr2, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_4(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE* sum, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_5(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_6(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE* offset, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); __global__ void rank_gpu_kernel_7(INT_TYPE* partial_verify_vals, INT_TYPE* key_buff_ptr, INT_TYPE* test_rank_array, INT_TYPE* passed_verification_device, INT_TYPE iteration, INT_TYPE number_of_blocks, INT_TYPE amount_of_work); static void release_gpu(); static 
void setup_gpu(); /* is */ int main(int argc, char** argv){ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) printf(" DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n"); #endif #if defined(PROFILING) printf(" PROFILING mode on\n"); #endif INT_TYPE i, iteration; double timecounter; timer_clear(PROFILING_TOTAL_TIME); #if defined(PROFILING) timer_clear(PROFILING_CREATE); timer_clear(PROFILING_RANK); timer_clear(PROFILING_VERIFY); #endif #if defined(PROFILING) timer_start(PROFILING_TOTAL_TIME); #endif /* initialize the verification arrays if a valid class */ for(i=0; i<TEST_ARRAY_SIZE; i++){ switch(CLASS){ case 'S': test_index_array[i] = S_test_index_array[i]; test_rank_array[i] = S_test_rank_array[i]; break; case 'A': test_index_array[i] = A_test_index_array[i]; test_rank_array[i] = A_test_rank_array[i]; break; case 'W': test_index_array[i] = W_test_index_array[i]; test_rank_array[i] = W_test_rank_array[i]; break; case 'B': test_index_array[i] = B_test_index_array[i]; test_rank_array[i] = B_test_rank_array[i]; break; case 'C': test_index_array[i] = C_test_index_array[i]; test_rank_array[i] = C_test_rank_array[i]; break; case 'D': test_index_array[i] = D_test_index_array[i]; test_rank_array[i] = D_test_rank_array[i]; break; }; } /* printout initial NPB info */ printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - IS Benchmark\n\n"); printf(" Size: %ld (class %c)\n", (long)TOTAL_KEYS, CLASS); printf(" Iterations: %d\n", MAX_ITERATIONS); setup_gpu(); #if defined(PROFILING) timer_start(PROFILING_CREATE); #endif /* generate random number sequence and subsequent keys on all procs */ create_seq_gpu(314159265.00, /* random number gen seed */ 1220703125.00); /* random number gen mult */ #if defined(PROFILING) timer_stop(PROFILING_CREATE); #endif /* * do one interation for free (i.e., untimed) to guarantee initialization of * all data and code pages and respective tables */ rank_gpu(1); /* start verification counter */ passed_verification = 0; cudaMemcpy(passed_verification_device, &passed_verification, size_passed_verification_device, cudaMemcpyHostToDevice); if(CLASS != 'S')printf( "\n iteration\n"); #if defined(PROFILING) timer_start(PROFILING_RANK); #else timer_start(PROFILING_TOTAL_TIME); #endif /* this is the main iteration */ for(iteration=1; iteration<=MAX_ITERATIONS; iteration++){ if(CLASS != 'S')printf( " %ld\n", (long)iteration); rank_gpu(iteration); } #if defined(PROFILING) timer_stop(PROFILING_RANK); #else timer_stop(PROFILING_TOTAL_TIME); #endif cudaMemcpy(&passed_verification, passed_verification_device, size_passed_verification_device, cudaMemcpyDeviceToHost); /* * this tests that keys are in sequence: sorting of last ranked key seq * occurs here, but is an untimed operation */ #if defined(PROFILING) timer_start(PROFILING_VERIFY); #endif full_verify_gpu(); #if defined(PROFILING) timer_stop(PROFILING_VERIFY); #endif #if defined(PROFILING) timer_stop(PROFILING_TOTAL_TIME); timecounter = timer_read(PROFILING_RANK); #else timecounter = timer_read(PROFILING_TOTAL_TIME); #endif char gpu_config[256]; char gpu_config_string[2048]; #if defined(PROFILING) sprintf(gpu_config, "%5s\t%25s\t%25s\t%25s\n", "GPU Kernel", "Threads Per Block", "Time in Seconds", "Time in Percentage"); strcpy(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\t%25f\t%24.2f%%\n", " create", (long) threads_per_block_on_create_seq, timer_read(PROFILING_CREATE), (timer_read(PROFILING_CREATE)*100/timer_read(PROFILING_TOTAL_TIME))); 
strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\t%25f\t%24.2f%%\n", " rank", (long) threads_per_block_on_rank, timer_read(PROFILING_RANK), (timer_read(PROFILING_RANK)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\t%25f\t%24.2f%%\n", " verify", (long) threads_per_block_on_full_verify, timer_read(PROFILING_VERIFY), (timer_read(PROFILING_VERIFY)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); #else sprintf(gpu_config, "%5s\t%25s\n", "GPU Kernel", "Threads Per Block"); strcpy(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\n", " create", (long) threads_per_block_on_create_seq); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\n", " rank", (long) threads_per_block_on_rank); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25ld\n", " verify", (long) threads_per_block_on_full_verify); strcat(gpu_config_string, gpu_config); #endif /* the final printout */ if(passed_verification != 5*MAX_ITERATIONS+1){passed_verification = 0;} c_print_results((char*)"IS", CLASS, (int)(TOTAL_KEYS/64), 64, 0, MAX_ITERATIONS, timecounter, ((double)(MAX_ITERATIONS*TOTAL_KEYS))/timecounter/1000000.0, (char*)"keys ranked", (int)passed_verification, (char*)NPBVERSION, (char*)COMPILETIME, (char*)COMPILERVERSION, (char*)LIBVERSION, (char*)CPU_MODEL, (char*)gpu_device_properties.name, (char*)gpu_config_string, (char*)CS1, (char*)CS2, (char*)CS3, (char*)CS4, (char*)CS5, (char*)CS6, (char*)CS7); release_gpu(); return 0; } static void create_seq_gpu(double seed, double a){ create_seq_gpu_kernel<<<blocks_per_grid_on_create_seq, threads_per_block_on_create_seq>>>(key_array_device, seed, a, blocks_per_grid_on_create_seq, amount_of_work_on_create_seq); cudaDeviceSynchronize(); } __global__ void create_seq_gpu_kernel(INT_TYPE* key_array, double seed, double a, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ double x, s; INT_TYPE i, k; INT_TYPE k1, k2; double an = a; INT_TYPE myid, num_procs; INT_TYPE mq; myid = blockIdx.x*blockDim.x+threadIdx.x; num_procs = amount_of_work; mq = (NUM_KEYS + num_procs - 1) / num_procs; k1 = mq * myid; k2 = k1 + mq; if(k2 > NUM_KEYS){k2 = NUM_KEYS;} s = find_my_seed_device(myid, num_procs, (long)4*NUM_KEYS, seed, an); k = MAX_KEY/4; for(i=k1; i<k2; i++){ x = randlc_device(&s, &an); x += randlc_device(&s, &an); x += randlc_device(&s, &an); x += randlc_device(&s, &an); key_array[i] = k*x; } } __device__ double find_my_seed_device(INT_TYPE kn, INT_TYPE np, long nn, double s, double a){ double t1,t2; long mq,nq,kk,ik; if(kn==0){return s;} mq = (nn/4 + np - 1) / np; nq = mq * 4 * kn; t1 = s; t2 = a; kk = nq; while(kk > 1){ ik = kk / 2; if(2*ik==kk){ (void)randlc_device(&t2, &t2); kk = ik; }else{ (void)randlc_device(&t1, &t2); kk = kk - 1; } } (void)randlc_device(&t1, &t2); return(t1); } static void full_verify_gpu(){ INT_TYPE* memory_aux_device; size_t size_memory_aux=sizeof(INT_TYPE)*(amount_of_work_on_full_verify_3/threads_per_block_on_full_verify_3); cudaMalloc(&memory_aux_device, size_memory_aux); /* full_verify_gpu_kernel_1 */ full_verify_gpu_kernel_1<<<blocks_per_grid_on_full_verify_1, threads_per_block_on_full_verify_1>>>(key_array_device, key_buff2_device, blocks_per_grid_on_full_verify_1, amount_of_work_on_full_verify_1); cudaDeviceSynchronize(); /* full_verify_gpu_kernel_2 */ full_verify_gpu_kernel_2<<<blocks_per_grid_on_full_verify_2, threads_per_block_on_full_verify_2>>>(key_buff2_device, key_buff1_device, 
key_array_device, blocks_per_grid_on_full_verify_2, amount_of_work_on_full_verify_2); cudaDeviceSynchronize(); /* full_verify_gpu_kernel_3 */ full_verify_gpu_kernel_3<<<blocks_per_grid_on_full_verify_3, threads_per_block_on_full_verify_3, size_shared_data_on_full_verify_3>>>(key_array_device, memory_aux_device, blocks_per_grid_on_full_verify_3, amount_of_work_on_full_verify_3); cudaDeviceSynchronize(); /* reduce on cpu */ INT_TYPE i, j = 0; INT_TYPE* memory_aux_host=(INT_TYPE*)malloc(size_memory_aux); cudaMemcpy(memory_aux_host, memory_aux_device, size_memory_aux, cudaMemcpyDeviceToHost); for(i=0; i<size_memory_aux/sizeof(INT_TYPE); i++){ j += memory_aux_host[i]; } if(j!=0){ printf( "Full_verify: number of keys out of sort: %ld\n", (long)j ); }else{ passed_verification++; } cudaFree(memory_aux_device); free(memory_aux_host); } __global__ void full_verify_gpu_kernel_1(INT_TYPE* key_array, INT_TYPE* key_buff2, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE i = blockIdx.x*blockDim.x+threadIdx.x; key_buff2[i] = key_array[i]; } __global__ void full_verify_gpu_kernel_2(INT_TYPE* key_buff2, INT_TYPE* key_buff_ptr_global, INT_TYPE* key_array, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE value = key_buff2[blockIdx.x*blockDim.x+threadIdx.x]; #if CLASS == 'D' INT_TYPE index = atomicAdd( (unsigned long long int*) &key_buff_ptr_global[value], (unsigned long long int) -1) -1; #else INT_TYPE index = atomicAdd(&key_buff_ptr_global[value], -1) -1; #endif key_array[index] = value; } __global__ void full_verify_gpu_kernel_3(INT_TYPE* key_array, INT_TYPE* global_aux, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE* shared_aux = (INT_TYPE*)(extern_share_data); INT_TYPE i = (blockIdx.x*blockDim.x+threadIdx.x) + 1; if(i<NUM_KEYS){ if(key_array[i-1]>key_array[i]){shared_aux[threadIdx.x]=1;} else{shared_aux[threadIdx.x]=0;} }else{shared_aux[threadIdx.x]=0;} __syncthreads(); for(i=blockDim.x/2; i>0; i>>=1){ if(threadIdx.x<i){ shared_aux[threadIdx.x] += shared_aux[threadIdx.x+i]; } __syncthreads(); } if(threadIdx.x==0){global_aux[blockIdx.x]=shared_aux[0];} } __device__ double randlc_device(double* X, double* A){ double T1, T2, T3, T4; double A1; double A2; double X1; double X2; double Z; INT_TYPE j; /* * -------------------------------------------------------------------- * break A into two parts such that A = 2^23 * A1 + A2 and set X = N. * -------------------------------------------------------------------- */ T1 = R23 * *A; j = T1; A1 = j; A2 = *A - T23 * A1; /* * -------------------------------------------------------------------- * break X into two parts such that X = 2^23 * X1 + X2, compute * Z = A1 * X2 + A2 * X1 (mod 2^23), and then * X = 2^23 * Z + A2 * X2 (mod 2^46). 
* -------------------------------------------------------------------- */ T1 = R23 * *X; j = T1; X1 = j; X2 = *X - T23 * X1; T1 = A1 * X2 + A2 * X1; j = R23 * T1; T2 = j; Z = T1 - T23 * T2; T3 = T23 * Z + A2 * X2; j = R46 * T3; T4 = j; *X = T3 - T46 * T4; return(R46 * *X); } static void rank_gpu(INT_TYPE iteration){ /* rank_gpu_kernel_1 */ rank_gpu_kernel_1<<<blocks_per_grid_on_rank_1, threads_per_block_on_rank_1>>>(key_array_device, partial_verify_vals_device, index_array_device, iteration, blocks_per_grid_on_rank_1, amount_of_work_on_rank_1); /* rank_gpu_kernel_2 */ rank_gpu_kernel_2<<<blocks_per_grid_on_rank_2, threads_per_block_on_rank_2>>>(key_buff1_device, blocks_per_grid_on_rank_2, amount_of_work_on_rank_2); /* rank_gpu_kernel_3 */ rank_gpu_kernel_3<<<blocks_per_grid_on_rank_3, threads_per_block_on_rank_3>>>(key_buff1_device, key_array_device, blocks_per_grid_on_rank_3, amount_of_work_on_rank_3); /* rank_gpu_kernel_4 */ rank_gpu_kernel_4<<<blocks_per_grid_on_rank_4, threads_per_block_on_rank_4, size_shared_data_on_rank_4>>>(key_buff1_device, key_buff1_device, sum_device, blocks_per_grid_on_rank_4, amount_of_work_on_rank_4); /* rank_gpu_kernel_5 */ rank_gpu_kernel_5<<<blocks_per_grid_on_rank_5, threads_per_block_on_rank_5, size_shared_data_on_rank_5>>>(sum_device, sum_device, blocks_per_grid_on_rank_5, amount_of_work_on_rank_5); /* rank_gpu_kernel_6 */ rank_gpu_kernel_6<<<blocks_per_grid_on_rank_6, threads_per_block_on_rank_6>>>(key_buff1_device, key_buff1_device, sum_device, blocks_per_grid_on_rank_6, amount_of_work_on_rank_6); /* rank_gpu_kernel_7 */ rank_gpu_kernel_7<<<blocks_per_grid_on_rank_7, threads_per_block_on_rank_7>>>(partial_verify_vals_device, key_buff1_device, rank_array_device, passed_verification_device, iteration, blocks_per_grid_on_rank_7, amount_of_work_on_rank_7); } __global__ void rank_gpu_kernel_1(INT_TYPE* key_array, INT_TYPE* partial_verify_vals, INT_TYPE* test_index_array, INT_TYPE iteration, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ key_array[iteration] = iteration; key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration; /* * -------------------------------------------------------------------- * determine where the partial verify test keys are, * -------------------------------------------------------------------- * load into top of array bucket_size * -------------------------------------------------------------------- */ #pragma unroll for(INT_TYPE i=0; i<TEST_ARRAY_SIZE; i++){ partial_verify_vals[i] = key_array[test_index_array[i]]; } } __global__ void rank_gpu_kernel_2(INT_TYPE* key_buff1, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ key_buff1[blockIdx.x*blockDim.x+threadIdx.x] = 0; } __global__ void rank_gpu_kernel_3(INT_TYPE* key_buff_ptr, INT_TYPE* key_buff_ptr2, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ /* * -------------------------------------------------------------------- * in this section, the keys themselves are used as their * own indexes to determine how many of each there are: their * individual population * -------------------------------------------------------------------- */ #if CLASS == 'D' atomicAdd( (unsigned long long int*) &key_buff_ptr[key_buff_ptr2[blockIdx.x*blockDim.x+threadIdx.x]], (unsigned long long int) 1); #else atomicAdd(&key_buff_ptr[key_buff_ptr2[blockIdx.x*blockDim.x+threadIdx.x]], 1); #endif } __global__ void rank_gpu_kernel_4(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE* sum, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE* shared_data = (INT_TYPE*)(extern_share_data); 
shared_data[threadIdx.x] = 0; INT_TYPE position = blockDim.x + threadIdx.x; INT_TYPE factor = MAX_KEY / number_of_blocks; INT_TYPE start = factor * blockIdx.x; INT_TYPE end = start + factor; for(INT_TYPE i=start; i<end; i+=blockDim.x){ shared_data[position] = source[i + threadIdx.x]; for(INT_TYPE offset=1; offset<blockDim.x; offset<<=1){ __syncthreads(); INT_TYPE t = shared_data[position] + shared_data[position - offset]; __syncthreads(); shared_data[position] = t; } INT_TYPE prv_val = (i == start) ? 0 : destiny[i - 1]; destiny[i + threadIdx.x] = shared_data[position] + prv_val; } __syncthreads(); if(threadIdx.x==0){sum[blockIdx.x]=destiny[end-1];} } __global__ void rank_gpu_kernel_5(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE* shared_data = (INT_TYPE*)(extern_share_data); shared_data[threadIdx.x] = 0; INT_TYPE position = blockDim.x + threadIdx.x; shared_data[position] = source[threadIdx.x]; for(INT_TYPE offset=1; offset<blockDim.x; offset<<=1){ __syncthreads(); INT_TYPE t = shared_data[position] + shared_data[position - offset]; __syncthreads(); shared_data[position] = t; } __syncthreads(); destiny[threadIdx.x] = shared_data[position - 1]; } __global__ void rank_gpu_kernel_6(INT_TYPE* source, INT_TYPE* destiny, INT_TYPE* offset, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ INT_TYPE factor = MAX_KEY / number_of_blocks; INT_TYPE start = factor * blockIdx.x; INT_TYPE end = start + factor; INT_TYPE sum = offset[blockIdx.x]; for(INT_TYPE i=start; i<end; i+=blockDim.x){ destiny[i + threadIdx.x] = source[i + threadIdx.x] + sum; } } __global__ void rank_gpu_kernel_7(INT_TYPE* partial_verify_vals, INT_TYPE* key_buff_ptr, INT_TYPE* test_rank_array, INT_TYPE* passed_verification_device, INT_TYPE iteration, INT_TYPE number_of_blocks, INT_TYPE amount_of_work){ /* * -------------------------------------------------------------------- * this is the partial verify test section * observe that test_rank_array vals are * shifted differently for different cases * -------------------------------------------------------------------- */ INT_TYPE i, k; INT_TYPE passed_verification = 0; for(i=0; i<TEST_ARRAY_SIZE; i++){ /* test vals were put here on partial_verify_vals */ k = partial_verify_vals[i]; if(0<k && k<=NUM_KEYS-1){ INT_TYPE key_rank = key_buff_ptr[k-1]; INT_TYPE failed = 0; switch(CLASS){ case 'S': if(i<=2){ if(key_rank != test_rank_array[i]+iteration) failed = 1; else passed_verification++; }else{ if(key_rank != test_rank_array[i]-iteration) failed = 1; else passed_verification++; } break; case 'W': if(i<2){ if(key_rank != test_rank_array[i]+(iteration-2)) failed = 1; else passed_verification++; }else{ if(key_rank != test_rank_array[i]-iteration) failed = 1; else passed_verification++; } break; case 'A': if(i<=2){ if(key_rank != test_rank_array[i]+(iteration-1)) failed = 1; else passed_verification++; }else{ if(key_rank != test_rank_array[i]-(iteration-1)) failed = 1; else passed_verification++; } break; case 'B': if(i==1 || i==2 || i==4){ if(key_rank != test_rank_array[i]+iteration) failed = 1; else passed_verification++; } else{ if(key_rank != test_rank_array[i]-iteration) failed = 1; else passed_verification++; } break; case 'C': if(i<=2){ if(key_rank != test_rank_array[i]+iteration) failed = 1; else passed_verification++; }else{ if(key_rank != test_rank_array[i]-iteration) failed = 1; else passed_verification++; } break; case 'D': if(i<2){ if(key_rank != test_rank_array[i]+iteration) failed = 1; else passed_verification++; }else{ 
if(key_rank != test_rank_array[i]-iteration) failed = 1; else passed_verification++; } break; } if(failed==1){ printf("Failed partial verification: iteration %ld, test key %ld\n", (long)iteration, (long)i); } } } *passed_verification_device += passed_verification; } static void release_gpu(){ cudaFree(key_array_device); cudaFree(key_buff1_device); cudaFree(key_buff2_device); cudaFree(index_array_device); cudaFree(rank_array_device); cudaFree(partial_verify_vals_device); cudaFree(passed_verification_device); cudaFree(key_scan_device); cudaFree(sum_device); } static void setup_gpu(){ /* * struct cudaDeviceProp{ * char name[256]; * size_t totalGlobalMem; * size_t sharedMemPerBlock; * int regsPerBlock; * int warpSize; * size_t memPitch; * int maxThreadsPerBlock; * int maxThreadsDim[3]; * int maxGridSize[3]; * size_t totalConstMem; * int major; * int minor; * int clockRate; * size_t textureAlignment; * int deviceOverlap; * int multiProcessorCount; * int kernelExecTimeoutEnabled; * int integrated; * int canMapHostMemory; * int computeMode; * int concurrentKernels; * int ECCEnabled; * int pciBusID; * int pciDeviceID; * int tccDriver; * } */ /* amount of available devices */ cudaGetDeviceCount(&total_devices); /* define gpu_device */ if(total_devices==0){ printf("\n\n\nNo Nvidia GPU found!\n\n\n"); exit(-1); }else if((GPU_DEVICE>=0)&& (GPU_DEVICE<total_devices)){ gpu_device_id = GPU_DEVICE; }else{ gpu_device_id = 0; } cudaSetDevice(gpu_device_id); cudaGetDeviceProperties(&gpu_device_properties, gpu_device_id); /* define threads_per_block */ if((IS_THREADS_PER_BLOCK_ON_CREATE_SEQ>=1)&& (IS_THREADS_PER_BLOCK_ON_CREATE_SEQ<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_create_seq = IS_THREADS_PER_BLOCK_ON_CREATE_SEQ; }else{ threads_per_block_on_create_seq = gpu_device_properties.warpSize; } if((IS_THREADS_PER_BLOCK_ON_RANK>=1)&& (IS_THREADS_PER_BLOCK_ON_RANK<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_rank = IS_THREADS_PER_BLOCK_ON_RANK; }else{ threads_per_block_on_rank = gpu_device_properties.warpSize; } if((IS_THREADS_PER_BLOCK_ON_FULL_VERIFY>=1)&& (IS_THREADS_PER_BLOCK_ON_FULL_VERIFY<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_full_verify = IS_THREADS_PER_BLOCK_ON_FULL_VERIFY; }else{ threads_per_block_on_full_verify = gpu_device_properties.warpSize; } threads_per_block_on_rank_1=1; threads_per_block_on_rank_2=threads_per_block_on_rank; threads_per_block_on_rank_3=threads_per_block_on_rank; threads_per_block_on_rank_4=threads_per_block_on_rank; threads_per_block_on_rank_5=threads_per_block_on_rank; threads_per_block_on_rank_6=threads_per_block_on_rank; threads_per_block_on_rank_7=1; threads_per_block_on_full_verify_1=threads_per_block_on_full_verify; threads_per_block_on_full_verify_2=threads_per_block_on_full_verify; threads_per_block_on_full_verify_3=threads_per_block_on_full_verify; amount_of_work_on_create_seq=threads_per_block_on_create_seq*threads_per_block_on_create_seq; amount_of_work_on_rank_1=1; amount_of_work_on_rank_2=MAX_KEY; amount_of_work_on_rank_3=NUM_KEYS; amount_of_work_on_rank_4=threads_per_block_on_rank_4*threads_per_block_on_rank_4; amount_of_work_on_rank_5=threads_per_block_on_rank_5; amount_of_work_on_rank_6=threads_per_block_on_rank_6*threads_per_block_on_rank_6; amount_of_work_on_rank_7=1; amount_of_work_on_full_verify_1=NUM_KEYS; amount_of_work_on_full_verify_2=NUM_KEYS; amount_of_work_on_full_verify_3=NUM_KEYS; 
blocks_per_grid_on_create_seq=(ceil((double)(amount_of_work_on_create_seq)/(double)(threads_per_block_on_create_seq))); blocks_per_grid_on_rank_1=1; blocks_per_grid_on_rank_2=(ceil((double)(amount_of_work_on_rank_2)/(double)(threads_per_block_on_rank_2))); blocks_per_grid_on_rank_3=(ceil((double)(amount_of_work_on_rank_3)/(double)(threads_per_block_on_rank_3))); if(amount_of_work_on_rank_4 > MAX_KEY){amount_of_work_on_rank_4=MAX_KEY;} blocks_per_grid_on_rank_4=(ceil((double)(amount_of_work_on_rank_4)/(double)(threads_per_block_on_rank_4))); blocks_per_grid_on_rank_5=1; if(amount_of_work_on_rank_6 > MAX_KEY){amount_of_work_on_rank_6=MAX_KEY;} blocks_per_grid_on_rank_6=(ceil((double)(amount_of_work_on_rank_6)/(double)(threads_per_block_on_rank_6))); blocks_per_grid_on_rank_7=1; blocks_per_grid_on_full_verify_1=(ceil((double)(amount_of_work_on_full_verify_1)/(double)(threads_per_block_on_full_verify_1))); blocks_per_grid_on_full_verify_2=(ceil((double)(amount_of_work_on_full_verify_2)/(double)(threads_per_block_on_full_verify_2))); blocks_per_grid_on_full_verify_3=(ceil((double)(amount_of_work_on_full_verify_3)/(double)(threads_per_block_on_full_verify_3))); size_test_array_device=TEST_ARRAY_SIZE*sizeof(INT_TYPE); size_key_array_device=SIZE_OF_BUFFERS*sizeof(INT_TYPE); size_key_buff1_device=MAX_KEY*sizeof(INT_TYPE); size_key_buff2_device=SIZE_OF_BUFFERS*sizeof(INT_TYPE); size_index_array_device=TEST_ARRAY_SIZE*sizeof(INT_TYPE); size_rank_array_device=TEST_ARRAY_SIZE*sizeof(INT_TYPE); size_partial_verify_vals_device=TEST_ARRAY_SIZE*sizeof(INT_TYPE); size_passed_verification_device=1*sizeof(INT_TYPE); size_key_scan_device=MAX_KEY*sizeof(INT_TYPE); size_sum_device=threads_per_block_on_rank*sizeof(INT_TYPE); size_shared_data_on_rank_4=2*threads_per_block_on_rank_4*sizeof(INT_TYPE); size_shared_data_on_rank_5=2*threads_per_block_on_rank_5*sizeof(INT_TYPE); size_shared_data_on_full_verify_3=threads_per_block_on_full_verify_3*sizeof(INT_TYPE); cudaMalloc(&key_array_device, size_key_array_device); cudaMalloc(&key_buff1_device, size_key_buff1_device); cudaMalloc(&key_buff2_device, size_key_buff2_device); cudaMalloc(&index_array_device, size_index_array_device); cudaMalloc(&rank_array_device, size_rank_array_device); cudaMalloc(&partial_verify_vals_device, size_partial_verify_vals_device); cudaMalloc(&passed_verification_device, size_passed_verification_device); cudaMalloc(&key_scan_device, size_key_scan_device); cudaMalloc(&sum_device, size_sum_device); cudaMemcpy(index_array_device, test_index_array, size_index_array_device, cudaMemcpyHostToDevice); cudaMemcpy(rank_array_device, test_rank_array, size_rank_array_device, cudaMemcpyHostToDevice); }
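/*
 * Illustrative sketch of the shared-memory scan pattern used by the rank kernels
 * above (rank_gpu_kernel_4/5): a buffer of 2*blockDim.x elements whose zeroed
 * lower half acts as identity padding, so the Hillis-Steele partial sums never
 * read out of range. Kernel name and launch parameters are hypothetical; this is
 * a single-block inclusive scan, not the full three-kernel pipeline of the file.
 */
#include <cuda_runtime.h>

__global__ void block_inclusive_scan(const int* in, int* out, int n){
    extern __shared__ int s[];      /* launched with 2*blockDim.x*sizeof(int) */
    int tid = threadIdx.x;
    int pos = blockDim.x + tid;     /* data lives in the upper half */
    s[tid] = 0;                     /* identity padding for reads at pos-offset */
    s[pos] = (tid < n) ? in[tid] : 0;
    for(int offset = 1; offset < blockDim.x; offset <<= 1){
        __syncthreads();
        int t = s[pos] + s[pos - offset];
        __syncthreads();
        s[pos] = t;
    }
    __syncthreads();
    if(tid < n){ out[tid] = s[pos]; }   /* inclusive prefix sum of in[0..n-1] */
}

/* Example launch for n <= threads:
 *   block_inclusive_scan<<<1, threads, 2*threads*sizeof(int)>>>(d_in, d_out, n);
 * rank_gpu_kernel_4 additionally tiles over MAX_KEY/number_of_blocks elements and
 * carries the previous tile's last value, while rank_gpu_kernel_5/6 turn the
 * per-block sums into global offsets. */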
0432433004b6f2386a3925f261bd61625326d116.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/top_k_kernel.h" #include "paddle/fluid/operators/top_k_function_cuda.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/copy_kernel.h" #include "paddle/phi/kernels/funcs/gather.cu.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { namespace ops = paddle::operators; #define FIXED_BLOCK_DIM_BASE(dim, ...) \ case (dim): { \ constexpr auto kBlockDim = (dim); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM(...) \ FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__) template <typename T, typename Context> void TopkKernel(const Context& dev_ctx, const DenseTensor& x, const Scalar& k_scalar, int axis, bool largest, bool sorted, DenseTensor* out, DenseTensor* indices) { const auto* input = &x; // get the input dims const auto& in_dims = input->dims(); // calcluate the real axis if (axis < 0) axis += in_dims.size(); int k = k_scalar.to<int>(); if (k_scalar.FromTensor()) { phi::DDim out_dims = out->dims(); out_dims[axis] = k; out->Resize(out_dims); indices->Resize(out_dims); } const auto& out_dims = out->dims(); const T* input_data = input->data<T>(); T* output_data = dev_ctx.template Alloc<T>(out); int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices); if (axis == in_dims.size() - 1) { // if get the topK from the last axis const int64_t& input_height = phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t& input_width = in_dims[in_dims.size() - 1]; if (k > input_width) { k = input_width; } // The conclusion is drawn from the data through multiple sets of // statistics if (input_width >= 128 && k >= input_width * 0.75) { auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>( &dev_ctx); if (ops::SortTopk<T>(*ctx, input, input_width, input_height, k, out, indices, largest)) { // Successed, return. return; } else { VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } #if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 9000 if (input_width >= 1024 && input_height == 1) { // 1. Gather TopK, but without sorting constexpr int max_num_threads = 1024; if (largest) { hipLaunchKernelGGL(( ops::RadixTopK< T, true>), dim3(input_height), dim3(max_num_threads), 0, dev_ctx.stream(), input_data, k, input_height, input_width, output_data, indices_data); } else { hipLaunchKernelGGL(( ops::RadixTopK< T, false>), dim3(input_height), dim3(max_num_threads), 0, dev_ctx.stream(), input_data, k, input_height, input_width, output_data, indices_data); } // 2. 
Sort if needed if (sorted) { DenseTensor sorted_output; DenseTensor sorted_indices; DenseTensor gather_indices; sorted_output.Resize(out->dims()); sorted_indices.Resize(indices->dims()); gather_indices.Resize(indices->dims()); dev_ctx.template Alloc<T>(&sorted_output); dev_ctx.template Alloc<int64_t>(&sorted_indices); dev_ctx.template Alloc<int64_t>(&gather_indices); auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>( &dev_ctx); if (ops::SortTopk<T>(*ctx, out, k, input_height, k, &sorted_output, &sorted_indices, largest)) { funcs::GPUGather<int64_t, int64_t>( dev_ctx, *indices, sorted_indices, &gather_indices); Copy(dev_ctx, gather_indices, indices->place(), false, indices); Copy(dev_ctx, sorted_output, out->place(), false, out); return; } else { VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } else { return; } } #endif // NOTE: pass lds and dim same to input width. // NOTE: old matrix implementation of stride is different to eigen. const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; switch (ops::GetDesiredBlockDim(input_width)) { #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(( FIXED_BLOCK_DIM(ops::KeMatrixTopK< T, 20, kBlockDim>), dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #else hipLaunchKernelGGL(( FIXED_BLOCK_DIM(ops::KeMatrixTopK< T, 5, kBlockDim>), dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #endif default: PADDLE_THROW(errors::Fatal( "the input data shape has error in the topk cuda kernel.")); } } else { // if get topK not from the last axis, will tranpose the tensor and get // TopK // first step, prepare the trans args for the tranpose std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.emplace_back(in_dims.size() - 1); for (int i = axis + 1; i < in_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); phi::DDim trans_dims(in_dims); phi::DDim trans_out_dims(out->dims()); for (int i = 0; i < trans.size(); i++) { trans_dims[i] = in_dims[trans[i]]; trans_out_dims[i] = out_dims[trans[i]]; } // second step, tranpose the input DenseTensor trans_input; trans_input.Resize(trans_dims); dev_ctx.template Alloc<T>(&trans_input); int ndims = trans.size(); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, *input, &trans_input, trans); // third step, calcluate the topk // allocate the tmp cuda memory for the tmp result DenseTensor trans_ind; DenseTensor trans_out; trans_ind.Resize(trans_out_dims); trans_out.Resize(trans_out_dims); dev_ctx.template Alloc<int64_t>(&trans_ind); dev_ctx.template Alloc<T>(&trans_out); const int64_t input_height = phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1)); const int64_t input_width = trans_dims[trans_dims.size() - 1]; if (k > input_width) k = input_width; // The conclusion is drawn from the data through multiple sets of // statistics if (input_width >= 128 && k >= input_width * 0.75) { auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>( &dev_ctx); if (ops::SortTopk<T>(*ctx, &trans_input, input_width, input_height, k, &trans_out, &trans_ind, largest)) { // last step, tranpose back the indices and output funcs::TransCompute<phi::GPUContext, int64_t>( ndims, dev_ctx, trans_ind, indices, trans); 
funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, trans_out, out, trans); return; } else { VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; switch (ops::GetDesiredBlockDim(input_width)) { #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(( FIXED_BLOCK_DIM(ops::KeMatrixTopK< T, 20, kBlockDim>), dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), trans_out.data<T>(), k, trans_ind.data<int64_t>(), trans_input.data<T>(), input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #else hipLaunchKernelGGL(( FIXED_BLOCK_DIM(ops::KeMatrixTopK< T, 5, kBlockDim>), dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), trans_out.data<T>(), k, trans_ind.data<int64_t>(), trans_input.data<T>(), input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #endif default: PADDLE_THROW(errors::Fatal( "the input data shape has error in the topk cuda kernel.")); } // last step, tranpose back the indices and output funcs::TransCompute<phi::GPUContext, int64_t>( ndims, dev_ctx, trans_ind, indices, trans); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, trans_out, out, trans); } } #undef FIXED_BLOCK_DIM_BASE #undef FIXED_BLOCK_DIM } // namespace phi PD_REGISTER_KERNEL(top_k, GPU, ALL_LAYOUT, phi::TopkKernel, float, double, int, int64_t, phi::dtype::float16) {}
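/*
 * Illustrative host-only sketch (hypothetical helper, not part of the Paddle
 * sources above): the non-last-axis branch of TopkKernel builds a permutation
 * that swaps `axis` with the last dimension, runs the last-axis top-k, and then
 * applies the same permutation again to transpose back -- a swap of two indices
 * is its own inverse.
 */
#include <cstdio>
#include <vector>

static std::vector<int> topk_transpose_perm(int rank, int axis){
    std::vector<int> trans;
    for(int i = 0; i < axis; i++){ trans.push_back(i); }
    trans.push_back(rank - 1);                      /* last dim moves to position `axis` */
    for(int i = axis + 1; i < rank - 1; i++){ trans.push_back(i); }
    trans.push_back(axis);                          /* `axis` moves to the end */
    return trans;
}

int main(){
    /* A [2, 5, 3, 7] tensor with axis=1 is viewed as [2, 7, 3, 5]; top-k then
     * runs over the (now last) length-5 axis. */
    int in_dims[4] = {2, 5, 3, 7};
    std::vector<int> trans = topk_transpose_perm(4, 1);
    for(size_t i = 0; i < trans.size(); i++){
        printf("trans[%zu] = %d -> dim %d\n", i, trans[i], in_dims[trans[i]]);
    }
    return 0;
}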
0432433004b6f2386a3925f261bd61625326d116.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/top_k_kernel.h" #include "paddle/fluid/operators/top_k_function_cuda.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/copy_kernel.h" #include "paddle/phi/kernels/funcs/gather.cu.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { namespace ops = paddle::operators; #define FIXED_BLOCK_DIM_BASE(dim, ...) \ case (dim): { \ constexpr auto kBlockDim = (dim); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM(...) \ FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__) template <typename T, typename Context> void TopkKernel(const Context& dev_ctx, const DenseTensor& x, const Scalar& k_scalar, int axis, bool largest, bool sorted, DenseTensor* out, DenseTensor* indices) { const auto* input = &x; // get the input dims const auto& in_dims = input->dims(); // calcluate the real axis if (axis < 0) axis += in_dims.size(); int k = k_scalar.to<int>(); if (k_scalar.FromTensor()) { phi::DDim out_dims = out->dims(); out_dims[axis] = k; out->Resize(out_dims); indices->Resize(out_dims); } const auto& out_dims = out->dims(); const T* input_data = input->data<T>(); T* output_data = dev_ctx.template Alloc<T>(out); int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices); if (axis == in_dims.size() - 1) { // if get the topK from the last axis const int64_t& input_height = phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t& input_width = in_dims[in_dims.size() - 1]; if (k > input_width) { k = input_width; } // The conclusion is drawn from the data through multiple sets of // statistics if (input_width >= 128 && k >= input_width * 0.75) { auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>( &dev_ctx); if (ops::SortTopk<T>(*ctx, input, input_width, input_height, k, out, indices, largest)) { // Successed, return. return; } else { VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } #if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 9000 if (input_width >= 1024 && input_height == 1) { // 1. Gather TopK, but without sorting constexpr int max_num_threads = 1024; if (largest) { ops::RadixTopK< T, true><<<input_height, max_num_threads, 0, dev_ctx.stream()>>>( input_data, k, input_height, input_width, output_data, indices_data); } else { ops::RadixTopK< T, false><<<input_height, max_num_threads, 0, dev_ctx.stream()>>>( input_data, k, input_height, input_width, output_data, indices_data); } // 2. 
Sort if needed if (sorted) { DenseTensor sorted_output; DenseTensor sorted_indices; DenseTensor gather_indices; sorted_output.Resize(out->dims()); sorted_indices.Resize(indices->dims()); gather_indices.Resize(indices->dims()); dev_ctx.template Alloc<T>(&sorted_output); dev_ctx.template Alloc<int64_t>(&sorted_indices); dev_ctx.template Alloc<int64_t>(&gather_indices); auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>( &dev_ctx); if (ops::SortTopk<T>(*ctx, out, k, input_height, k, &sorted_output, &sorted_indices, largest)) { funcs::GPUGather<int64_t, int64_t>( dev_ctx, *indices, sorted_indices, &gather_indices); Copy(dev_ctx, gather_indices, indices->place(), false, indices); Copy(dev_ctx, sorted_output, out->place(), false, out); return; } else { VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } else { return; } } #endif // NOTE: pass lds and dim same to input width. // NOTE: old matrix implementation of stride is different to eigen. const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; switch (ops::GetDesiredBlockDim(input_width)) { #ifdef PADDLE_WITH_HIP FIXED_BLOCK_DIM(ops::KeMatrixTopK< T, 20, kBlockDim><<<gridx, kBlockDim, 0, dev_ctx.stream()>>>( output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #else FIXED_BLOCK_DIM(ops::KeMatrixTopK< T, 5, kBlockDim><<<gridx, kBlockDim, 0, dev_ctx.stream()>>>( output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #endif default: PADDLE_THROW(errors::Fatal( "the input data shape has error in the topk cuda kernel.")); } } else { // if get topK not from the last axis, will tranpose the tensor and get // TopK // first step, prepare the trans args for the tranpose std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.emplace_back(in_dims.size() - 1); for (int i = axis + 1; i < in_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); phi::DDim trans_dims(in_dims); phi::DDim trans_out_dims(out->dims()); for (int i = 0; i < trans.size(); i++) { trans_dims[i] = in_dims[trans[i]]; trans_out_dims[i] = out_dims[trans[i]]; } // second step, tranpose the input DenseTensor trans_input; trans_input.Resize(trans_dims); dev_ctx.template Alloc<T>(&trans_input); int ndims = trans.size(); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, *input, &trans_input, trans); // third step, calcluate the topk // allocate the tmp cuda memory for the tmp result DenseTensor trans_ind; DenseTensor trans_out; trans_ind.Resize(trans_out_dims); trans_out.Resize(trans_out_dims); dev_ctx.template Alloc<int64_t>(&trans_ind); dev_ctx.template Alloc<T>(&trans_out); const int64_t input_height = phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1)); const int64_t input_width = trans_dims[trans_dims.size() - 1]; if (k > input_width) k = input_width; // The conclusion is drawn from the data through multiple sets of // statistics if (input_width >= 128 && k >= input_width * 0.75) { auto* ctx = reinterpret_cast<const paddle::platform::CUDADeviceContext*>( &dev_ctx); if (ops::SortTopk<T>(*ctx, &trans_input, input_width, input_height, k, &trans_out, &trans_ind, largest)) { // last step, tranpose back the indices and output funcs::TransCompute<phi::GPUContext, int64_t>( ndims, dev_ctx, trans_ind, indices, trans); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, trans_out, 
out, trans); return; } else { VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; switch (ops::GetDesiredBlockDim(input_width)) { #ifdef PADDLE_WITH_HIP FIXED_BLOCK_DIM(ops::KeMatrixTopK< T, 20, kBlockDim><<<gridx, kBlockDim, 0, dev_ctx.stream()>>>( trans_out.data<T>(), k, trans_ind.data<int64_t>(), trans_input.data<T>(), input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #else FIXED_BLOCK_DIM(ops::KeMatrixTopK< T, 5, kBlockDim><<<gridx, kBlockDim, 0, dev_ctx.stream()>>>( trans_out.data<T>(), k, trans_ind.data<int64_t>(), trans_input.data<T>(), input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #endif default: PADDLE_THROW(errors::Fatal( "the input data shape has error in the topk cuda kernel.")); } // last step, transpose back the indices and output funcs::TransCompute<phi::GPUContext, int64_t>( ndims, dev_ctx, trans_ind, indices, trans); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, trans_out, out, trans); } } #undef FIXED_BLOCK_DIM_BASE #undef FIXED_BLOCK_DIM } // namespace phi PD_REGISTER_KERNEL(top_k, GPU, ALL_LAYOUT, phi::TopkKernel, float, double, int, int64_t, phi::dtype::float16) {}
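The non-last-axis branch of this kernel works by permuting the reduction axis to the end, running the row-wise top-k, and then transposing the result back with the same trans vector. The standalone sketch below is illustrative only; build_topk_permutation and the example values are not Paddle code, it just reproduces the permutation logic so the index shuffling is easier to follow.

// Minimal standalone sketch (not Paddle code): builds the same axis
// permutation the kernel above uses, moving `axis` to the last position
// so a row-wise top-k can run on the transposed tensor.
#include <cstdio>
#include <vector>

// Hypothetical helper; mirrors the `trans` construction in the kernel above.
std::vector<int> build_topk_permutation(int rank, int axis) {
  std::vector<int> trans;
  for (int i = 0; i < axis; ++i) trans.push_back(i);   // dims before `axis`
  trans.push_back(rank - 1);                           // last dim moves to `axis`
  for (int i = axis + 1; i < rank - 1; ++i) trans.push_back(i);
  trans.push_back(axis);                               // `axis` becomes the last dim
  return trans;
}

int main() {
  // For a rank-4 tensor and axis = 1 this prints "0 3 2 1":
  // top-k then runs over the (now last) original axis 1.
  for (int d : build_topk_permutation(4, 1)) std::printf("%d ", d);
  std::printf("\n");
  return 0;
}

Because the permutation simply swaps axis with the last dimension, it is its own inverse, which is why the kernel can reuse the same trans vector for the final TransCompute calls that move the results back.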
6e06f07e3873de62a17c546bd3fef7f8e71e9442.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <type_traits> #include <random> #include <mma.h> #include <wmma_extension/wmma_mma.hpp> #include "common.hpp" #ifndef TEST_ARCH #define TEST_ARCH (-1) #endif template <int M, int N, int K, class AB_T, class C_T, class D_T, class a_layout, class b_layout, nvcuda::wmma::layout_t c_layout, nvcuda::wmma::layout_t d_layout> __global__ void test_kernel( D_T* const d, const typename mtk::wmma::detail::common::storage_t<AB_T>::type* const a, const typename mtk::wmma::detail::common::storage_t<AB_T>::type* const b, const C_T* const c) { using AB_STORAGE_T = typename mtk::wmma::detail::common::storage_t<AB_T>::type; mtk::wmma::mma::fragment<nvcuda::wmma::matrix_a , M, N, K, AB_T, a_layout> frag_a; mtk::wmma::mma::fragment<nvcuda::wmma::matrix_b , M, N, K, AB_T, b_layout> frag_b; mtk::wmma::mma::fragment<nvcuda::wmma::accumulator, M, N, K, C_T> frag_c; mtk::wmma::mma::fragment<nvcuda::wmma::accumulator, M, N, K, D_T> frag_d; const unsigned lda = std::is_same<a_layout, nvcuda::wmma::col_major>::value ? M : K; const unsigned ldb = std::is_same<b_layout, nvcuda::wmma::col_major>::value ? K : N; const unsigned ldc = (c_layout == nvcuda::wmma::mem_col_major) ? M : N; const unsigned ldd = (d_layout == nvcuda::wmma::mem_col_major) ? M : N; mtk::wmma::mma::fill_zero(frag_d); mtk::wmma::mma::load_matrix_sync(frag_a, a, lda); mtk::wmma::mma::load_matrix_sync(frag_b, b, ldb); mtk::wmma::mma::load_matrix_sync(frag_c, c, ldc, c_layout); mtk::wmma::mma::mma_sync(frag_d, frag_a, frag_b, frag_c); mtk::wmma::mma::store_matrix_sync(d, frag_d, ldd, d_layout); } std::string get_layout_name(const nvcuda::wmma::layout_t layout) { if (layout == nvcuda::wmma::mem_col_major) { return mtk::test_utils::get_string<nvcuda::wmma::col_major>(); } else { return mtk::test_utils::get_string<nvcuda::wmma::row_major>(); } } template <int M, int N, int K, class AB_T, class C_T, class D_T, class a_layout, class b_layout, nvcuda::wmma::layout_t c_layout, nvcuda::wmma::layout_t d_layout> void test() { using AB_STORAGE_T = typename mtk::wmma::detail::common::storage_t<AB_T>::type; D_T* d_ptr; C_T* c_ptr; AB_STORAGE_T* a_ptr; AB_STORAGE_T* b_ptr; hipHostMalloc(&a_ptr, M * K * sizeof(AB_STORAGE_T)); hipHostMalloc(&b_ptr, K * N * sizeof(AB_STORAGE_T)); hipHostMalloc(&c_ptr, M * N * sizeof(C_T)); hipHostMalloc(&d_ptr, M * N * sizeof(D_T)); std::mt19937 mt(std::random_device{}()); std::uniform_real_distribution<float> dist(-1.0f, 1.0f); for (std::size_t i = 0; i < M * K; i++) { a_ptr[i] = mtk::wmma::detail::common::cast<AB_STORAGE_T>(dist(mt)); } for (std::size_t i = 0; i < K * N; i++) { b_ptr[i] = mtk::wmma::detail::common::cast<AB_STORAGE_T>(dist(mt)); } for (std::size_t i = 0; i < M * N; i++) { c_ptr[i] = mtk::wmma::detail::common::cast<C_T>(dist(mt)); } hipDeviceSynchronize(); hipLaunchKernelGGL(( test_kernel<M, N, K, AB_T, C_T, D_T, a_layout, b_layout, c_layout, d_layout>), dim3(1), dim3(32), 0, 0, d_ptr, a_ptr, b_ptr, c_ptr); hipDeviceSynchronize(); const auto error = mtk::test_utils::get_max_relative_error<M, N, K, AB_T, C_T, D_T, a_layout, b_layout, c_layout, d_layout>(a_ptr, b_ptr, c_ptr, d_ptr); std::printf("[%s] ARCH=%d, M=%2d, N=%2d, K=%2d, a_%5s_%s, b_%5s_%s, c_%5s_%s, d_%5s_%s : res = %e [%s]\n", __FILE__, TEST_ARCH, M, N, K, mtk::test_utils::get_string<AB_T>().c_str(), mtk::test_utils::get_string<a_layout>().c_str(), mtk::test_utils::get_string<AB_T>().c_str(), mtk::test_utils::get_string<b_layout>().c_str(), 
mtk::test_utils::get_string<C_T >().c_str(), get_layout_name(c_layout).c_str(), mtk::test_utils::get_string<D_T >().c_str(), get_layout_name(d_layout).c_str(), error, mtk::test_utils::get_test_result_string(error < mtk::test_utils::get_machine_eps<AB_T>() * 16) ); } int main() { #if TEST_ARCH >= 80 test<16, 8, 16, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 16, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<16, 8, 16, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 16, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<16, 8, 8, nvcuda::wmma::precision::tf32, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 8, nvcuda::wmma::precision::tf32, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<16, 8, 8, nvcuda::wmma::precision::tf32, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 8, nvcuda::wmma::precision::tf32, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); #endif #if TEST_ARCH >= 75 test<16, 8, 8, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 8, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<16, 8, 8, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 8, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); #endif #if TEST_ARCH >= 70 && TEST_ARCH <= 75 test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, 
float, float, nvcuda::wmma::col_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); #endif }
6e06f07e3873de62a17c546bd3fef7f8e71e9442.cu
#include <iostream> #include <type_traits> #include <random> #include <mma.h> #include <wmma_extension/wmma_mma.hpp> #include "common.hpp" #ifndef TEST_ARCH #define TEST_ARCH (-1) #endif template <int M, int N, int K, class AB_T, class C_T, class D_T, class a_layout, class b_layout, nvcuda::wmma::layout_t c_layout, nvcuda::wmma::layout_t d_layout> __global__ void test_kernel( D_T* const d, const typename mtk::wmma::detail::common::storage_t<AB_T>::type* const a, const typename mtk::wmma::detail::common::storage_t<AB_T>::type* const b, const C_T* const c) { using AB_STORAGE_T = typename mtk::wmma::detail::common::storage_t<AB_T>::type; mtk::wmma::mma::fragment<nvcuda::wmma::matrix_a , M, N, K, AB_T, a_layout> frag_a; mtk::wmma::mma::fragment<nvcuda::wmma::matrix_b , M, N, K, AB_T, b_layout> frag_b; mtk::wmma::mma::fragment<nvcuda::wmma::accumulator, M, N, K, C_T> frag_c; mtk::wmma::mma::fragment<nvcuda::wmma::accumulator, M, N, K, D_T> frag_d; const unsigned lda = std::is_same<a_layout, nvcuda::wmma::col_major>::value ? M : K; const unsigned ldb = std::is_same<b_layout, nvcuda::wmma::col_major>::value ? K : N; const unsigned ldc = (c_layout == nvcuda::wmma::mem_col_major) ? M : N; const unsigned ldd = (d_layout == nvcuda::wmma::mem_col_major) ? M : N; mtk::wmma::mma::fill_zero(frag_d); mtk::wmma::mma::load_matrix_sync(frag_a, a, lda); mtk::wmma::mma::load_matrix_sync(frag_b, b, ldb); mtk::wmma::mma::load_matrix_sync(frag_c, c, ldc, c_layout); mtk::wmma::mma::mma_sync(frag_d, frag_a, frag_b, frag_c); mtk::wmma::mma::store_matrix_sync(d, frag_d, ldd, d_layout); } std::string get_layout_name(const nvcuda::wmma::layout_t layout) { if (layout == nvcuda::wmma::mem_col_major) { return mtk::test_utils::get_string<nvcuda::wmma::col_major>(); } else { return mtk::test_utils::get_string<nvcuda::wmma::row_major>(); } } template <int M, int N, int K, class AB_T, class C_T, class D_T, class a_layout, class b_layout, nvcuda::wmma::layout_t c_layout, nvcuda::wmma::layout_t d_layout> void test() { using AB_STORAGE_T = typename mtk::wmma::detail::common::storage_t<AB_T>::type; D_T* d_ptr; C_T* c_ptr; AB_STORAGE_T* a_ptr; AB_STORAGE_T* b_ptr; cudaMallocHost(&a_ptr, M * K * sizeof(AB_STORAGE_T)); cudaMallocHost(&b_ptr, K * N * sizeof(AB_STORAGE_T)); cudaMallocHost(&c_ptr, M * N * sizeof(C_T)); cudaMallocHost(&d_ptr, M * N * sizeof(D_T)); std::mt19937 mt(std::random_device{}()); std::uniform_real_distribution<float> dist(-1.0f, 1.0f); for (std::size_t i = 0; i < M * K; i++) { a_ptr[i] = mtk::wmma::detail::common::cast<AB_STORAGE_T>(dist(mt)); } for (std::size_t i = 0; i < K * N; i++) { b_ptr[i] = mtk::wmma::detail::common::cast<AB_STORAGE_T>(dist(mt)); } for (std::size_t i = 0; i < M * N; i++) { c_ptr[i] = mtk::wmma::detail::common::cast<C_T>(dist(mt)); } cudaDeviceSynchronize(); test_kernel<M, N, K, AB_T, C_T, D_T, a_layout, b_layout, c_layout, d_layout><<<1, 32>>>(d_ptr, a_ptr, b_ptr, c_ptr); cudaDeviceSynchronize(); const auto error = mtk::test_utils::get_max_relative_error<M, N, K, AB_T, C_T, D_T, a_layout, b_layout, c_layout, d_layout>(a_ptr, b_ptr, c_ptr, d_ptr); std::printf("[%s] ARCH=%d, M=%2d, N=%2d, K=%2d, a_%5s_%s, b_%5s_%s, c_%5s_%s, d_%5s_%s : res = %e [%s]\n", __FILE__, TEST_ARCH, M, N, K, mtk::test_utils::get_string<AB_T>().c_str(), mtk::test_utils::get_string<a_layout>().c_str(), mtk::test_utils::get_string<AB_T>().c_str(), mtk::test_utils::get_string<b_layout>().c_str(), mtk::test_utils::get_string<C_T >().c_str(), get_layout_name(c_layout).c_str(), mtk::test_utils::get_string<D_T >().c_str(), 
get_layout_name(d_layout).c_str(), error, mtk::test_utils::get_test_result_string(error < mtk::test_utils::get_machine_eps<AB_T>() * 16) ); } int main() { #if TEST_ARCH >= 80 test<16, 8, 16, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 16, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<16, 8, 16, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 16, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<16, 8, 8, nvcuda::wmma::precision::tf32, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 8, nvcuda::wmma::precision::tf32, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<16, 8, 8, nvcuda::wmma::precision::tf32, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 8, nvcuda::wmma::precision::tf32, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); #endif #if TEST_ARCH >= 75 test<16, 8, 8, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 8, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<16, 8, 8, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<16, 8, 8, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); #endif #if TEST_ARCH >= 70 && TEST_ARCH <= 75 test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); 
test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::row_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::row_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, float, float, nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_col_major, nvcuda::wmma::mem_row_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_col_major>(); test<8, 8, 4, half, half , half , nvcuda::wmma::col_major, nvcuda::wmma::col_major, nvcuda::wmma::mem_row_major, nvcuda::wmma::mem_row_major>(); #endif }
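The .hip and .cu versions of this test differ only in the host API and the kernel-launch syntax; the device code is identical. The short sketch below (scale_kernel and buf are made-up names, not part of the test) shows the same trivial launch written both ways, roughly the rewrite hipify applies: cudaMallocHost becomes hipHostMalloc, cudaDeviceSynchronize becomes hipDeviceSynchronize, and the triple-chevron launch becomes hipLaunchKernelGGL with explicit grid, block, shared-memory, and stream arguments.

// Illustrative sketch only; kernel and buffer names here are invented.
#include <hip/hip_runtime.h>

__global__ void scale_kernel(float* v, float s) { v[threadIdx.x] *= s; }

int main() {
  float* buf;
  // CUDA version:  cudaMallocHost(&buf, 32 * sizeof(float));
  hipHostMalloc(&buf, 32 * sizeof(float));
  for (int i = 0; i < 32; ++i) buf[i] = 1.0f;

  // CUDA version:  scale_kernel<<<1, 32>>>(buf, 2.0f);
  hipLaunchKernelGGL(scale_kernel, dim3(1), dim3(32), 0 /*shared mem*/, 0 /*stream*/,
                     buf, 2.0f);

  // CUDA version:  cudaDeviceSynchronize();
  hipDeviceSynchronize();

  hipHostFree(buf);
  return 0;
}

As in the test above, the kernel reads pinned host memory directly; the only lines hipify has to touch are the allocation, synchronization, and launch statements.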
fc11951fb987bf5421106bb184abb9eb7fa00821.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <glog/logging.h> #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_decay_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxGaussianWeightForwardGPU(const int nthreads, const Dtype* label_data, const Dtype rate, const int outer_num, const int inner_num, const int dim, Dtype* weight_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / (inner_num * dim); const int j = index % inner_num; const int l = (index / inner_num) % dim; Dtype label_diff = l - label_data[j + inner_num * i]; weight_data[index] = exp(-(label_diff * label_diff) / (rate * rate)); } } template <typename Dtype> __global__ void SoftmaxDecayLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* weight_data, Dtype* loss_data) { CUDA_KERNEL_LOOP(index, nthreads) { loss_data[index] = -log(max(prob_data[index], Dtype(FLT_MIN))) * weight_data[index]; } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> void SoftmaxWithDecayLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // The forward pass computes the softmax prob values. softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label_data = bottom[1]->gpu_data(); Dtype* weight_data = weights_.mutable_gpu_data(); const int count = prob_.count(); // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. 
Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // use label diff to save sum of weights Dtype* weight_sum_data = bottom[1]->mutable_gpu_diff(); // loss Dtype loss; // calculate weight using different method switch (method_) { case SoftmaxWithDecayLossParameter_Decay_GAUSSIAN: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxGaussianWeightForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, label_data, rate_, outer_num_, inner_num_, softmax_dim_, weight_data); break; case SoftmaxWithDecayLossParameter_Decay_POWER: NOT_IMPLEMENTED; break; default: LOG(FATAL) << "Unknown decay method: " << method_; } CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, softmax_dim_, inner_num_, weight_data, weight_sum_data); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, softmax_dim_, inner_num_, weight_sum_data, weight_data); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxDecayLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, prob_data, weight_data, loss_data); CUDA_POST_KERNEL_CHECK; caffe_gpu_asum(count, loss_data, &loss); top[0]->mutable_cpu_data()[0] = loss / (outer_num_ * inner_num_); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> void SoftmaxWithDecayLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* weight_data = weights_.gpu_data(); int count = prob_.count(); // caffe_gpu_mul(count, prob_data, weight_data, bottom_diff); caffe_gpu_sub(count, prob_data, weight_data, bottom_diff); // Scale gradient Dtype loss_weight = top[0]->cpu_diff()[0] / (outer_num_ * inner_num_); caffe_gpu_scal(count, loss_weight, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithDecayLossLayer); } // namespace caffe
fc11951fb987bf5421106bb184abb9eb7fa00821.cu
#include <glog/logging.h> #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_decay_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxGaussianWeightForwardGPU(const int nthreads, const Dtype* label_data, const Dtype rate, const int outer_num, const int inner_num, const int dim, Dtype* weight_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / (inner_num * dim); const int j = index % inner_num; const int l = (index / inner_num) % dim; Dtype label_diff = l - label_data[j + inner_num * i]; weight_data[index] = exp(-(label_diff * label_diff) / (rate * rate)); } } template <typename Dtype> __global__ void SoftmaxDecayLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* weight_data, Dtype* loss_data) { CUDA_KERNEL_LOOP(index, nthreads) { loss_data[index] = -log(max(prob_data[index], Dtype(FLT_MIN))) * weight_data[index]; } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> void SoftmaxWithDecayLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // The forward pass computes the softmax prob values. softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label_data = bottom[1]->gpu_data(); Dtype* weight_data = weights_.mutable_gpu_data(); const int count = prob_.count(); // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. 
Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // use label diff to save sum of weights Dtype* weight_sum_data = bottom[1]->mutable_gpu_diff(); // loss Dtype loss; // calculate weight using different method switch (method_) { case SoftmaxWithDecayLossParameter_Decay_GAUSSIAN: // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxGaussianWeightForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, label_data, rate_, outer_num_, inner_num_, softmax_dim_, weight_data); break; case SoftmaxWithDecayLossParameter_Decay_POWER: NOT_IMPLEMENTED; break; default: LOG(FATAL) << "Unknown decay method: " << method_; } CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS>>>(outer_num_, softmax_dim_, inner_num_, weight_data, weight_sum_data); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, softmax_dim_, inner_num_, weight_sum_data, weight_data); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxDecayLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, prob_data, weight_data, loss_data); CUDA_POST_KERNEL_CHECK; caffe_gpu_asum(count, loss_data, &loss); top[0]->mutable_cpu_data()[0] = loss / (outer_num_ * inner_num_); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> void SoftmaxWithDecayLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* weight_data = weights_.gpu_data(); int count = prob_.count(); // caffe_gpu_mul(count, prob_data, weight_data, bottom_diff); caffe_gpu_sub(count, prob_data, weight_data, bottom_diff); // Scale gradient Dtype loss_weight = top[0]->cpu_diff()[0] / (outer_num_ * inner_num_); caffe_gpu_scal(count, loss_weight, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithDecayLossLayer); } // namespace caffe
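Both versions of this layer compute the same per-pixel weighting: an unnormalized Gaussian centred on the ground-truth label (SoftmaxGaussianWeightForwardGPU), a channel-wise sum (kernel_channel_sum), and a division by that sum (kernel_channel_div) before the weighted log-loss is accumulated. The host-side sketch below is illustrative only; the channel count, label, and rate values are arbitrary stand-ins for softmax_dim_, the label blob, and rate_.

// Host-side sketch (illustrative only) of the weighting the kernels above
// compute on the GPU for a single spatial location.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  const int channels = 8;      // stands in for softmax_dim_
  const float label = 3.0f;    // ground-truth class index for this pixel
  const float rate = 2.0f;     // stands in for rate_ (decay width)

  std::vector<float> w(channels);
  float sum = 0.0f;
  for (int l = 0; l < channels; ++l) {          // SoftmaxGaussianWeightForwardGPU
    float d = l - label;
    w[l] = std::exp(-(d * d) / (rate * rate));
    sum += w[l];                                // kernel_channel_sum
  }
  for (int l = 0; l < channels; ++l) {
    w[l] /= sum;                                // kernel_channel_div
    std::printf("w[%d] = %.4f\n", l, w[l]);
  }
  // The per-class loss is then -log(max(prob, FLT_MIN)) * w[l],
  // as in SoftmaxDecayLossForwardGPU.
  return 0;
}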
71ae699bff622cb57121540971dd092891256582.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathPointwise.cu" #else #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(real* out, real* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(real* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \ if (self_ == src) { \ if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(hipGetLastError()); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<real>::log, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<real>::lgamma, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<real>::log1p, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<real>::exp, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<real>::expm1, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<real>::cos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<real>::sin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<real>::sqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<real>::rsqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<real>::ceil, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<real>::floor, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<real>::trunc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<real>::acos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<real>::cosh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<real>::asin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<real>::sinh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<real>::tan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<real>::atan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<real>::tanh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<real>::erf, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics<real>::erfinv,Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<real>::round, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<real>::frac, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<real>::cinv, Real) #endif IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<real>::neg, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<real>::abs, Real) #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorSignOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorSignOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } 
THCudaCheck(hipGetLastError()); } void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, real min_value, real max_value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorClampOp<real>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorClampOp<real>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(cross)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y)); int i; int nd = THCTensor_(nDimension)(state, x); ptrdiff_t nelem = THCTensor_(nElement)(state, x); THArgCheck(nd == THCTensor_(nDimension)(state, y), 1, "tensors must have same number of dimensions"); for (i = 0; i < nd; i++) { THArgCheck(THCTensor_(size)(state, x, i) == THCTensor_(size)(state, y, i), 1, "dimension %i of x and y does not match", i); if (dimension < 0 && THCTensor_(size)(state, x, i) == 3) { dimension = i; } } THArgCheck(dimension >= 0 && dimension < nd, 3, "dimension %d out of range", dimension+1); THArgCheck(THCTensor_(size)(state, x, dimension) == 3, 3, "dimension %d does not have size 3", dimension+1); THCTensor_(resizeAs)(state, self, x); int64_t sx = THCTensor_(stride)(state, x, dimension); int64_t sy = THCTensor_(stride)(state, y, dimension); int64_t so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3(state, nself, nx, ny, TensorCrossOp<real>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(atan2)(THCState *state, THCTensor *self_, THCTensor *tx, THCTensor *ty) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, tx, ty)); THArgCheck(THCTensor_(nElement)(state, tx) == THCTensor_(nElement)(state, ty), 3, "sizes do not match"); THCTensor_(resizeAs)(state, self_, tx); if (!THC_pointwiseApply3(state, self_, tx, ty, TensorATan2Op<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) { if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) { if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 2>(value))) { THArgCheck(false, 2, 
CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(3))) { if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) { if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) { if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // fallback implementation using pow if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src); if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) { if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) { if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(3))) { if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) { if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) { if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // fallback implementation using pow if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(hipGetLastError()); } void THCTensor_(tpow)(THCState *state, THCTensor *self_, real value, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorTPowOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorTPowOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, real w) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, result, a, b)); THArgCheck(THCTensor_(nElement)(state, a) == THCTensor_(nElement)(state, b), 3, "sizes do not match"); THCTensor_(resizeAs)(state, result, a); if (!THC_pointwiseApply3(state, result, a, b, TensorLerpOp<real>(w))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } #endif THC_API void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { if (value == ScalarConvert<int, real>::to(1)) { // self += src2 if (!THC_pointwiseApply2(state, self_, src2, TensorAddOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self += value * src2 if (!THC_pointwiseApply2(state, self_, src2, 
TensorCAddOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src1); if (value == ScalarConvert<int, real>::to(1)) { // self = src1 + src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self = src1 + value * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { if (value == ScalarConvert<int, real>::to(1)) { // self -= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorSubOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self += -value * src2 if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>( ScalarNegate<real>::to(value)))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src1); if (value == ScalarConvert<int, real>::to(1)) { // self = src1 - src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorSubOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self = src1 - value * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>( ScalarNegate<real>::to(value)))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self *= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorMulOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorMulOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self = pow(self, src2) if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = pow(src1, src2) if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorDivOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, 
TensorDivOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("clshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorLShiftOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorLShiftOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } THC_API void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("crshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorRShiftOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorRShiftOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } THC_API void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorMaxOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorMaxOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorMinOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorMinOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorCRemainderOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorCRemainderOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { 
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorCFmodOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorCFmodOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1(state, self, TensorMaxValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2(state, self, src, TensorMaxValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1(state, self, TensorMinValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2(state, self, src, TensorMinValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2)); if(self_ != t) { THCTensor_(resizeAs)(state, self_, t); THCTensor_(copy)(state, self_, t); } else { THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1), 1, "sizes do not match"); } THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2)); if(self_ != t) { THCTensor_(resizeAs)(state, self_, t); THCTensor_(copy)(state, self_, t); } else { THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1), 1, "sizes do not match"); } THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitand is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorBitAndOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitAndOp<real>())) { 
THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } THC_API void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorBitOrOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitOrOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } THC_API void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorBitXorOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitXorOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } #endif
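Most of the file above is generated by IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_, which emits one device functor per math op and hands it to THC_pointwiseApply1/2 for element-wise application. The self-contained HIP sketch below shows the same functor-plus-generic-apply pattern in isolation; SqrtOp and pointwise_apply1 are made-up names, not THC internals.

// Standalone sketch (not THC code) of the pattern the macro above expands to:
// a device functor applied element-wise by a generic "pointwise apply" kernel.
#include <hip/hip_runtime.h>
#include <cstdio>

struct SqrtOp {  // stands in for e.g. Tensor_sqrt_float_Op
  __device__ void operator()(float* v) const { *v = sqrtf(*v); }
};

template <typename Op>
__global__ void pointwise_apply1(float* data, int n, Op op) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) op(&data[i]);  // in-place, like the self_ == src branch above
}

int main() {
  const int n = 8;
  float h[n] = {1, 4, 9, 16, 25, 36, 49, 64};
  float* d;
  hipMalloc(&d, n * sizeof(float));
  hipMemcpy(d, h, n * sizeof(float), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(pointwise_apply1<SqrtOp>, dim3(1), dim3(n), 0, 0, d, n, SqrtOp());
  hipMemcpy(h, d, n * sizeof(float), hipMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) std::printf("%.1f ", h[i]);
  std::printf("\n");
  hipFree(d);
  return 0;
}

Packaging the operation as a functor lets one templated kernel cover every unary op, which is what allows the macro above to stamp out log, exp, sqrt, and the rest without duplicating any launch code.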
71ae699bff622cb57121540971dd092891256582.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathPointwise.cu" #else #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(real* out, real* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(real* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \ if (self_ == src) { \ if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(cudaGetLastError()); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<real>::log, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<real>::lgamma, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<real>::log1p, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<real>::exp, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<real>::expm1, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<real>::cos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<real>::sin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<real>::sqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<real>::rsqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<real>::ceil, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<real>::floor, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<real>::trunc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<real>::acos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<real>::cosh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<real>::asin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<real>::sinh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<real>::tan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<real>::atan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<real>::tanh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<real>::erf, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics<real>::erfinv,Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<real>::round, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<real>::frac, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<real>::cinv, Real) #endif IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<real>::neg, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<real>::abs, Real) #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorSignOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorSignOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(clamp)(THCState *state, THCTensor 
*self_, THCTensor *src, real min_value, real max_value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorClampOp<real>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorClampOp<real>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(cross)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y)); int i; int nd = THCTensor_(nDimension)(state, x); ptrdiff_t nelem = THCTensor_(nElement)(state, x); THArgCheck(nd == THCTensor_(nDimension)(state, y), 1, "tensors must have same number of dimensions"); for (i = 0; i < nd; i++) { THArgCheck(THCTensor_(size)(state, x, i) == THCTensor_(size)(state, y, i), 1, "dimension %i of x and y does not match", i); if (dimension < 0 && THCTensor_(size)(state, x, i) == 3) { dimension = i; } } THArgCheck(dimension >= 0 && dimension < nd, 3, "dimension %d out of range", dimension+1); THArgCheck(THCTensor_(size)(state, x, dimension) == 3, 3, "dimension %d does not have size 3", dimension+1); THCTensor_(resizeAs)(state, self, x); int64_t sx = THCTensor_(stride)(state, x, dimension); int64_t sy = THCTensor_(stride)(state, y, dimension); int64_t so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3(state, nself, nx, ny, TensorCrossOp<real>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(atan2)(THCState *state, THCTensor *self_, THCTensor *tx, THCTensor *ty) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, tx, ty)); THArgCheck(THCTensor_(nElement)(state, tx) == THCTensor_(nElement)(state, ty), 3, "sizes do not match"); THCTensor_(resizeAs)(state, self_, tx); if (!THC_pointwiseApply3(state, self_, tx, ty, TensorATan2Op<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) { if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) { if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, 
real>::to(3))) { if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) { if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) { if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // fallback implementation using pow if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src); if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) { if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) { if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(3))) { if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) { if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) { if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // fallback implementation using pow if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(cudaGetLastError()); } void THCTensor_(tpow)(THCState *state, THCTensor *self_, real value, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1(state, self_, TensorTPowOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, TensorTPowOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, real w) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, result, a, b)); THArgCheck(THCTensor_(nElement)(state, a) == THCTensor_(nElement)(state, b), 3, "sizes do not match"); THCTensor_(resizeAs)(state, result, a); if (!THC_pointwiseApply3(state, result, a, b, TensorLerpOp<real>(w))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } #endif THC_API void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { if (value == ScalarConvert<int, real>::to(1)) { // self += src2 if (!THC_pointwiseApply2(state, self_, src2, TensorAddOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self += value * src2 if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { 
THCTensor_(resizeAs)(state, self_, src1); if (value == ScalarConvert<int, real>::to(1)) { // self = src1 + src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self = src1 + value * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { if (value == ScalarConvert<int, real>::to(1)) { // self -= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorSubOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self += -value * src2 if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>( ScalarNegate<real>::to(value)))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src1); if (value == ScalarConvert<int, real>::to(1)) { // self = src1 - src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorSubOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { // self = src1 - value * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>( ScalarNegate<real>::to(value)))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self *= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorMulOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 * src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorMulOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self = pow(self, src2) if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = pow(src1, src2) if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorDivOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorDivOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } 
THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("clshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorLShiftOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorLShiftOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } THC_API void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("crshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorRShiftOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorRShiftOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } THC_API void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorMaxOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorMaxOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorMinOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorMinOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorCRemainderOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorCRemainderOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, 
src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2(state, self, src2, TensorCFmodOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3(state, self, src1, src2, TensorCFmodOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1(state, self, TensorMaxValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2(state, self, src, TensorMaxValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1(state, self, TensorMinValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2(state, self, src, TensorMinValueOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THC_API void THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2)); if(self_ != t) { THCTensor_(resizeAs)(state, self_, t); THCTensor_(copy)(state, self_, t); } else { THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1), 1, "sizes do not match"); } THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2)); if(self_ != t) { THCTensor_(resizeAs)(state, self_, t); THCTensor_(copy)(state, self_, t); } else { THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1), 1, "sizes do not match"); } THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitand is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorBitAndOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitAndOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } THC_API void 
THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorBitOrOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitOrOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } THC_API void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2(state, self_, src2, TensorBitXorOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitXorOp<real>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } #endif
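// Illustrative sketch (not part of the original file pair): with real == float and
// Real == Float, where THCTensor_(x) resolves to THCudaTensor_x, an invocation such as
// IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(exp, THCNumerics<real>::exp, Real) expands to roughly
// the following functor/function pair, which is what the macro block above generates for
// every listed math function.
struct Tensor_exp_Float_Op {
  // out-of-place form, used when self_ != src
  __device__ __forceinline__ void operator()(float* out, float* in) const {
    *out = THCNumerics<float>::exp(*in);
  }
  // in-place form, used when self_ == src
  __device__ __forceinline__ void operator()(float* v) const {
    *v = THCNumerics<float>::exp(*v);
  }
};

void THCudaTensor_exp(THCState* state, THCudaTensor* self_, THCudaTensor* src) {
  THCAssertSameGPU(THCudaTensor_checkGPU(state, 2, self_, src));
  if (self_ == src) {
    if (!THC_pointwiseApply1(state, self_, Tensor_exp_Float_Op())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCudaTensor_resizeAs(state, self_, src);
    if (!THC_pointwiseApply2(state, self_, src, Tensor_exp_Float_Op())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
  THCudaCheck(cudaGetLastError());
}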
016dc0130e6ed06beb8cd247db94a494a7197675.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <sys/time.h> #include <pthread.h> #include "consts.h" #include "colors.h" #include "threads.h" #include "times.h" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void prepare_thread_args (thread_args *args, struct s_rect *r, colorpoint *points, color **p_cols) { args->r = r; args->points = points; args->p_cols = p_cols; args->iterations = MIN_ITERATIONS; } void* calculate (thread_args *arg) { thread_args *args = (thread_args *)arg; int max_dimension = HEIGHT > WIDTH ? HEIGHT : WIDTH; int max_blocks = 64; int curr_blocks; curr_blocks = max_blocks; struct timeval stop, start; float elapsed = 0; gettimeofday(&start, NULL); for (int i = 0; i < WIDTH; i += max_blocks) { if (i + max_blocks >= WIDTH) { curr_blocks = WIDTH - i; } // mandelbrot<<<curr_blocks, diff_height>>>(args, max_dimension, i); hipLaunchKernelGGL(( TYPE), dim3(curr_blocks), dim3(HEIGHT), 0, 0, args, max_dimension, i); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); hipDeviceSynchronize(); } gettimeofday(&stop, NULL); elapsed = timedifference_msec(start, stop); printf("calculate took %f milliseconds\n", elapsed); return NULL; } __global__ void mandelbrot (thread_args *args, int max_dimension, int width_offset) { complex c; complex z = {0, 0}; // y = HEIGHT, x = WIDTH int y = threadIdx.x; // comme on ne peut pas lancer tous les calculs d'un coup, on indique à quel offset on est int x = blockIdx.x + width_offset; int curr = y * max_dimension + x; rect *r = args->r; coord_to_complex(&c, x, y, r); unsigned int speed = suite(&z, &c, args->iterations - 1); color *cols = *(args->p_cols); args->points[curr].c = &cols[speed - 1]; args->points[curr].p.x = x; args->points[curr].p.y = y; } __global__ void julia (thread_args *args, int max_dimension, int width_offset) { complex c = JULIA_COORDS; // y = HEIGHT, x = WIDTH int y = threadIdx.x; // comme on ne peut pas lancer tous les calculs d'un coup, on indique à quel offset on est int x = blockIdx.x + width_offset; int curr = y * max_dimension + x; rect *r = args->r; complex z; coord_to_complex(&z, x, y, r); unsigned int speed = suite(&z, &c, args->iterations - 1); color *cols = *(args->p_cols); args->points[curr].c = &cols[speed - 1]; args->points[curr].p.x = x; args->points[curr].p.y = y; } __device__ void coord_to_complex (complex *z, int x, int y, rect *r) { /* ici x c'est notre left ici y c'est notre top re = ((x / largeur) * (droite_precedente - gauche_precedente)) + gauche_precedente */ z->re = (((double)x / (double)WIDTH) * (r->right - r->left)) + r->left; z->im = (((double)y / (double)HEIGHT) * (r->top - r->bottom)) + r->bottom; } void coord_to_complex2 (complex *z, int x, int y, rect *r) { z->re = (((double)x / (double)WIDTH) * (r->right - r->left)) + r->left; z->im = (((double)y / (double)HEIGHT) * (r->top - r->bottom)) + r->bottom; } __device__ unsigned int suite (complex *z, complex *c, unsigned int iterations) { double re; double pow_re = pow(z->re, 2); double pow_im = pow(z->im, 2); unsigned int speed = 0; while (speed < iterations) { re = pow_re - pow_im; z->im = 2 * z->re * z->im; z->re = re; z->re += c->re; z->im += c->im; pow_re = pow(z->re, 2); pow_im = pow(z->im, 2); if (sqrt(pow_im + pow_re) > 4) { break; } speed++; } return
speed; } /* // racine de la partie réelle^2 et de im^2 double module (const complex *z) { return sqrt(pow(z->im, 2) + pow(z->re, 2)); } // re: ab - a'b' (a + ci)(b + di) ab + adi + a'b + a'b' // im: ab' + ba' void times (complex *a, const complex *b) { double re, im; re = pow(a->re, 2) - pow(a->im, 2); im = a->re * b->im + b->re * a->im; a->re = re; a->im = im; } void plus (complex *a, const complex *b) { a->re += b->re; a->im += b->im; } int divergence_speed (const complex *c, int iterations) { complex z = {0, 0}; unsigned int n = 0; // z = 0 + 0i // f(z) = z^2 + c while (n < iterations) { times(&z, &z); plus(&z, c); if (module(&z) > 4) return n; n++; } return iterations; } */ // on calcule la valeur du complexe en fonction de la position du cadre // Si on a 600 PX qu'on veut faire rentrer dans -1 1 // x = 400 // 400 / 600 => 4/6 * (distance -1 1) + décalage
016dc0130e6ed06beb8cd247db94a494a7197675.cu
#include <sys/time.h> #include <pthread.h> #include "consts.h" #include "colors.h" #include "threads.h" #include "times.h" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void prepare_thread_args (thread_args *args, struct s_rect *r, colorpoint *points, color **p_cols) { args->r = r; args->points = points; args->p_cols = p_cols; args->iterations = MIN_ITERATIONS; } void* calculate (thread_args *arg) { thread_args *args = (thread_args *)arg; int max_dimension = HEIGHT > WIDTH ? HEIGHT : WIDTH; int max_blocks = 64; int curr_blocks; curr_blocks = max_blocks; struct timeval stop, start; float elapsed = 0; gettimeofday(&start, NULL); for (int i = 0; i < WIDTH; i += max_blocks) { if (i + max_blocks >= WIDTH) { curr_blocks = WIDTH - i; } // mandelbrot<<<curr_blocks, diff_height>>>(args, max_dimension, i); TYPE<<<curr_blocks, HEIGHT>>>(args, max_dimension, i); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); cudaDeviceSynchronize(); } gettimeofday(&stop, NULL); elapsed = timedifference_msec(start, stop); printf("calculate took %f milliseconds\n", elapsed); return NULL; } __global__ void mandelbrot (thread_args *args, int max_dimension, int width_offset) { complex c; complex z = {0, 0}; // y = HEIGHT, x = WIDTH int y = threadIdx.x; // comme on ne peut pas lancer tous les calculs d'un coup, on indique à quel offset on est int x = blockIdx.x + width_offset; int curr = y * max_dimension + x; rect *r = args->r; coord_to_complex(&c, x, y, r); unsigned int speed = suite(&z, &c, args->iterations - 1); color *cols = *(args->p_cols); args->points[curr].c = &cols[speed - 1]; args->points[curr].p.x = x; args->points[curr].p.y = y; } __global__ void julia (thread_args *args, int max_dimension, int width_offset) { complex c = JULIA_COORDS; // y = HEIGHT, x = WIDTH int y = threadIdx.x; // comme on ne peut pas lancer tous les calculs d'un coup, on indique à quel offset on est int x = blockIdx.x + width_offset; int curr = y * max_dimension + x; rect *r = args->r; complex z; coord_to_complex(&z, x, y, r); unsigned int speed = suite(&z, &c, args->iterations - 1); color *cols = *(args->p_cols); args->points[curr].c = &cols[speed - 1]; args->points[curr].p.x = x; args->points[curr].p.y = y; } __device__ void coord_to_complex (complex *z, int x, int y, rect *r) { /* ici x c'est notre left ici y c'est notre top re = ((x / largeur) * (droite_precedente - gauche_precedente)) + gauche_precedente */ z->re = (((double)x / (double)WIDTH) * (r->right - r->left)) + r->left; z->im = (((double)y / (double)HEIGHT) * (r->top - r->bottom)) + r->bottom; } void coord_to_complex2 (complex *z, int x, int y, rect *r) { z->re = (((double)x / (double)WIDTH) * (r->right - r->left)) + r->left; z->im = (((double)y / (double)HEIGHT) * (r->top - r->bottom)) + r->bottom; } __device__ unsigned int suite (complex *z, complex *c, unsigned int iterations) { double re; double pow_re = pow(z->re, 2); double pow_im = pow(z->im, 2); unsigned int speed = 0; while (speed < iterations) { re = pow_re - pow_im; z->im = 2 * z->re * z->im; z->re = re; z->re += c->re; z->im += c->im; pow_re = pow(z->re, 2); pow_im = pow(z->im, 2); if (sqrt(pow_im + pow_re) > 4) { break; } speed++; } return speed; } /* // racine de la partie réelle^2 et de im^2 double module (const complex *z) { return sqrt(pow(z->im, 2) + 
pow(z->re, 2)); } // re: ab - a'b' (a + ci)(b + di) ab + adi + a'b + a'b' // im: ab' + ba' void times (complex *a, const complex *b) { double re, im; re = pow(a->re, 2) - pow(a->im, 2); im = a->re * b->im + b->re * a->im; a->re = re; a->im = im; } void plus (complex *a, const complex *b) { a->re += b->re; a->im += b->im; } int divergence_speed (const complex *c, int iterations) { complex z = {0, 0}; unsigned int n = 0; // z = 0 + 0i // f(z) = z^2 + c while (n < iterations) { times(&z, &z); plus(&z, c); if (module(&z) > 4) return n; n++; } return iterations; } */ // on calcule la valeur du complexe en fonction de la position du cadre // Si on a 600 PX qu'on veut faire rentrer dans -1 1 // x = 400 // 400 / 600 => 4/6 * (distance -1 1) + décalage
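// Illustrative sketch (not from the original pair): the suite() routine above calls
// pow() and sqrt() on every iteration and bails out when |z| > 4. The same escape test
// can be done on the squared magnitude (threshold 4 * 4 = 16), which removes both the
// sqrt() and the pow() calls while preserving the returned iteration count.
__device__ unsigned int suite_squared(complex *z, complex *c, unsigned int iterations) {
    double re2 = z->re * z->re;                // real part squared
    double im2 = z->im * z->im;                // imaginary part squared
    unsigned int speed = 0;
    while (speed < iterations) {
        z->im = 2 * z->re * z->im + c->im;     // Im(z^2 + c), uses the old z->re
        z->re = re2 - im2 + c->re;             // Re(z^2 + c)
        re2 = z->re * z->re;
        im2 = z->im * z->im;
        if (re2 + im2 > 16.0) {                // same bailout as sqrt(re2 + im2) > 4
            break;
        }
        speed++;
    }
    return speed;
}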
288050d909a5ce22aa7dbb3623ca270ee539cdc9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <float.h> #include "cuda_auxiliary.h" __global__ void gpu_mat_add(double *A, double *B, double *C, const int nx, const int ny) { unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) C[idx] = A[idx] + B[idx]; } void cpu_mat_add(double *A, double *B, double *C, const int nx, const int ny) { for (int iy = 0; iy < ny; ++iy) { for (int ix = 0; ix < nx; ++ix) C[ix] = A[ix] + B[ix]; A += nx; B += nx; C += nx; } } void check_results(double *cpu_array, double *gpu_array, int size) { for (int ix = 0; ix < size; ++ix) if (abs(cpu_array[ix] - gpu_array[ix]) >= DBL_EPSILON) { printf("CPU and GPU results differ at element %d\n", ix); printf("CPU value: %lg\n", cpu_array[ix]); printf("GPU value: %lg\n", gpu_array[ix]); return; } printf("GPU result is correct\n"); } int main(int argc, char **argv) { double *hst_A = NULL; double *hst_B = NULL; double *hst_C = NULL; double *dev_A = NULL; double *dev_B = NULL; double *dev_C = NULL; int nx = 1 << 13; int ny = 1 << 13; double cpu_time = 0.0; double gpu_time = 0.0; dim3 block_size; dim3 grid_size; if (argc != 2) { fprintf(stderr, "usage: %s dimx\n", argv[0]); exit(EXIT_FAILURE); } host_alloc(hst_A, double, nx * ny); host_alloc(hst_B, double, nx * ny); host_alloc(hst_C, double, nx * ny); cuda_exec(hipMalloc(&dev_A, nx * ny * sizeof(double))); cuda_exec(hipMalloc(&dev_B, nx * ny * sizeof(double))); cuda_exec(hipMalloc(&dev_C, nx * ny * sizeof(double))); init_matrix(hst_A, nx, ny, nx); init_matrix(hst_B, nx, ny, nx); cuda_exec(hipMemcpy(dev_A, hst_A, nx * ny * sizeof(double), hipMemcpyHostToDevice)); cuda_exec(hipMemcpy(dev_B, hst_B, nx * ny * sizeof(double), hipMemcpyHostToDevice)); block_size.x = atoi(argv[1]); block_size.y = 1; grid_size.x = min((nx + block_size.x - 1) / block_size.x, 65535); grid_size.y = ny; gpu_time -= timer(); hipLaunchKernelGGL(( gpu_mat_add), dim3(grid_size), dim3(block_size), 0, 0, dev_A, dev_B, dev_C, nx, ny); cuda_exec(hipDeviceSynchronize()); gpu_time += timer(); cpu_time -= timer(); cpu_mat_add(hst_A, hst_B, hst_C, nx, ny); cpu_time += timer(); cuda_exec(hipMemcpy(hst_B, dev_C, nx * ny * sizeof(double), hipMemcpyDeviceToHost)); check_results(hst_C, hst_B, nx * ny); printf("Execution configuration: grid (%d, %d), block (%d, %d)\n", grid_size.x, grid_size.y, block_size.x, block_size.y); printf("CPU time: %.3lgms\n", 1000 * cpu_time); printf("GPU time: %.3lgms\n", 1000 * gpu_time); free(hst_A); free(hst_B); free(hst_C); hipFree(dev_A); hipFree(dev_B); hipFree(dev_C); return 0; }
288050d909a5ce22aa7dbb3623ca270ee539cdc9.cu
#include <stdio.h> #include <float.h> #include "cuda_auxiliary.h" __global__ void gpu_mat_add(double *A, double *B, double *C, const int nx, const int ny) { unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) C[idx] = A[idx] + B[idx]; } void cpu_mat_add(double *A, double *B, double *C, const int nx, const int ny) { for (int iy = 0; iy < ny; ++iy) { for (int ix = 0; ix < nx; ++ix) C[ix] = A[ix] + B[ix]; A += nx; B += nx; C += nx; } } void check_results(double *cpu_array, double *gpu_array, int size) { for (int ix = 0; ix < size; ++ix) if (abs(cpu_array[ix] - gpu_array[ix]) >= DBL_EPSILON) { printf("CPU and GPU results differ at element %d\n", ix); printf("CPU value: %lg\n", cpu_array[ix]); printf("GPU value: %lg\n", gpu_array[ix]); return; } printf("GPU result is correct\n"); } int main(int argc, char **argv) { double *hst_A = NULL; double *hst_B = NULL; double *hst_C = NULL; double *dev_A = NULL; double *dev_B = NULL; double *dev_C = NULL; int nx = 1 << 13; int ny = 1 << 13; double cpu_time = 0.0; double gpu_time = 0.0; dim3 block_size; dim3 grid_size; if (argc != 2) { fprintf(stderr, "usage: %s dimx\n", argv[0]); exit(EXIT_FAILURE); } host_alloc(hst_A, double, nx * ny); host_alloc(hst_B, double, nx * ny); host_alloc(hst_C, double, nx * ny); cuda_exec(cudaMalloc(&dev_A, nx * ny * sizeof(double))); cuda_exec(cudaMalloc(&dev_B, nx * ny * sizeof(double))); cuda_exec(cudaMalloc(&dev_C, nx * ny * sizeof(double))); init_matrix(hst_A, nx, ny, nx); init_matrix(hst_B, nx, ny, nx); cuda_exec(cudaMemcpy(dev_A, hst_A, nx * ny * sizeof(double), cudaMemcpyHostToDevice)); cuda_exec(cudaMemcpy(dev_B, hst_B, nx * ny * sizeof(double), cudaMemcpyHostToDevice)); block_size.x = atoi(argv[1]); block_size.y = 1; grid_size.x = min((nx + block_size.x - 1) / block_size.x, 65535); grid_size.y = ny; gpu_time -= timer(); gpu_mat_add<<<grid_size, block_size>>>(dev_A, dev_B, dev_C, nx, ny); cuda_exec(cudaDeviceSynchronize()); gpu_time += timer(); cpu_time -= timer(); cpu_mat_add(hst_A, hst_B, hst_C, nx, ny); cpu_time += timer(); cuda_exec(cudaMemcpy(hst_B, dev_C, nx * ny * sizeof(double), cudaMemcpyDeviceToHost)); check_results(hst_C, hst_B, nx * ny); printf("Execution configuration: grid (%d, %d), block (%d, %d)\n", grid_size.x, grid_size.y, block_size.x, block_size.y); printf("CPU time: %.3lgms\n", 1000 * cpu_time); printf("GPU time: %.3lgms\n", 1000 * gpu_time); free(hst_A); free(hst_B); free(hst_C); cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C); return 0; }
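// "cuda_auxiliary.h" is not included in this pair, so the definitions of host_alloc(),
// cuda_exec(), init_matrix() and timer() used above are unknown here. As a hedged
// assumption for readers, the first two are most likely thin wrappers along the lines
// below; the names, messages and exact behaviour are guesses, not the original header.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Abort with a readable message if a CUDA runtime call fails.
#define cuda_exec(call)                                                  \
    do {                                                                 \
        cudaError_t err_ = (call);                                       \
        if (err_ != cudaSuccess) {                                       \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                  \
                    cudaGetErrorString(err_), __FILE__, __LINE__);       \
            exit(EXIT_FAILURE);                                          \
        }                                                                \
    } while (0)

// Allocate count elements of the given type on the host, aborting on failure.
#define host_alloc(ptr, type, count)                                     \
    do {                                                                 \
        (ptr) = (type *)malloc((count) * sizeof(type));                  \
        if ((ptr) == NULL) {                                             \
            fprintf(stderr, "host allocation failed at %s:%d\n",         \
                    __FILE__, __LINE__);                                 \
            exit(EXIT_FAILURE);                                          \
        }                                                                \
    } while (0)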
bd7933ddb8b9e3a4a9acf63d126a2ca0cf7ef1ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/beam_search.h" #include "paddle/fluid/platform/cuda_device_function.h" namespace paddle { namespace operators { namespace math { struct Triple { __device__ __forceinline__ Triple() {} __device__ __forceinline__ Triple(int o, int i, float s) : offset(o), id(i), score(s) {} __device__ __forceinline__ void set(int o, int i, float s) { offset = o; id = i; score = s; } __device__ __forceinline__ void operator=(const Triple& in) { offset = in.offset; id = in.id; score = in.score; } __device__ __forceinline__ bool operator<(const float s) const { return score < s; } __device__ __forceinline__ bool operator<(const Triple& in) const { return (score < in.score) || ((score == in.score) && (offset < in.offset)); } int offset; int id; float score; }; __device__ __forceinline__ void Insert(Triple* top_beam, const Triple& p, int beam_size) { if (p < top_beam[beam_size - 1]) { return; } for (int k = beam_size - 2; k >= 0; --k) { if (top_beam[k] < p) { top_beam[k + 1] = top_beam[k]; } else { top_beam[k + 1] = p; return; } } top_beam[0] = p; } template <int MaxThreadsPerSeq, bool IsAccumulated = true> __device__ __forceinline__ int SelectTopBeam( Triple* top_beam, const int64_t* pre_ids, const float* pre_scores, const int64_t* ids, const float* scores, const int seq_offset_start, const int seq_offset_end, const int seq_width, int beam_size, int end_id, int used_threads) { // top_beam is shared memory const int tid = threadIdx.x; const int tid_of_seq = threadIdx.x % MaxThreadsPerSeq; int num_used_threads = used_threads; Triple* top_beam_local = top_beam + tid * beam_size; if (tid_of_seq < num_used_threads) { for (int i = 0; i < beam_size; ++i) { top_beam_local[i].set(-1, -1, -INFINITY); } for (int offset = seq_offset_start; offset < seq_offset_end; ++offset) { int pre_id = static_cast<int>(pre_ids[offset]); if (pre_id == end_id) { if (tid_of_seq == 0) { Triple tmp(offset, end_id, pre_scores[offset]); Insert(top_beam_local, tmp, beam_size); } } else { int index = offset * seq_width + tid_of_seq; if (!IsAccumulated) { float pre_score = pre_scores[offset]; for (int i = tid_of_seq; i < seq_width; i += num_used_threads) { float score = pre_score + __logf(scores[index]); int id = ids ? static_cast<int>(ids[index]) : i; Triple tmp(offset, id, score); Insert(top_beam_local, tmp, beam_size); index += num_used_threads; } } else { for (int i = tid_of_seq; i < seq_width; i += num_used_threads) { int id = ids ? 
static_cast<int>(ids[index]) : i; float score = scores[index]; Triple tmp(offset, id, score); Insert(top_beam_local, tmp, beam_size); index += num_used_threads; } } } } } while (num_used_threads > 1) { if (num_used_threads > 16) { __syncthreads(); } if ((num_used_threads & 0x1) != 0) { // If num_used_threads is a odd number, merge local top_beam of thread 0 // and num_used_threads - 1 if (tid_of_seq == 0) { int index_in_sh = (num_used_threads - 1 + tid) * beam_size; for (int i = 0; i < beam_size; i++) { Insert(top_beam_local, top_beam[index_in_sh], beam_size); index_in_sh++; } } } num_used_threads = num_used_threads >> 1; if (tid_of_seq < num_used_threads) { int index_in_sh = (num_used_threads + tid) * beam_size; for (int i = 0; i < beam_size; i++) { Insert(top_beam_local, top_beam[index_in_sh], beam_size); index_in_sh++; } } } if (tid_of_seq == 0) { int num_items = 0; for (int i = 0; i < beam_size; ++i) { num_items = (top_beam_local[i].score > -INFINITY) ? num_items + 1 : num_items; } return num_items; } return 0; } __device__ __forceinline__ bool PruneEndBeams(Triple* top_beam_local, const int64_t* pre_ids, const int end_id, int num_items) { bool finish_flag = true; for (int i = 0; i < num_items; ++i) { int offset = top_beam_local[i].offset; if (top_beam_local[i].id != end_id || static_cast<int>(pre_ids[offset]) != end_id) { finish_flag = false; break; } } return finish_flag; } template <bool ReturnParentIdx = false> __device__ __forceinline__ void WriteBack( int64_t* selected_ids, float* selected_scores, int* parent_idx, size_t* selected_offsets, Triple* top_beam_local, const int seq_offset_start, const int seq_offset_end, const int selected_seq_start, const int selected_seq_length) { const int tid = threadIdx.x; // use 1 thread only for each sequence int global_index = selected_seq_start; for (int global_offset = seq_offset_start; global_offset < seq_offset_end; ++global_offset) { for (int local_index = 0; local_index < selected_seq_length; ++local_index) { if (top_beam_local[local_index].offset == global_offset) { selected_ids[global_index] = static_cast<int64_t>(top_beam_local[local_index].id); selected_scores[global_index] = top_beam_local[local_index].score; if (ReturnParentIdx) { parent_idx[global_index] = static_cast<int>(global_offset); } global_index++; } } selected_offsets[global_offset + 1] = static_cast<size_t>(global_index); } } template <int MaxLength, int MaxThreadsPerSeq, int MaxSeqs> __device__ void BeamSearchDetails( int64_t* selected_ids, float* selected_scores, int* parent_idx, size_t* selected_offsets, const int64_t* pre_ids, const float* pre_scores, const int64_t* ids, const float* scores, const int seq_offset_start, const int seq_offset_end, const int seq_width, int beam_size, int end_id, bool is_accumulated, int num_used_threads) { __shared__ Triple top_beam[MaxLength]; int num_items = 0; if (is_accumulated) { num_items = SelectTopBeam<MaxThreadsPerSeq, true>( top_beam, pre_ids, pre_scores, ids, scores, seq_offset_start, seq_offset_end, seq_width, beam_size, end_id, num_used_threads); } else { num_items = SelectTopBeam<MaxThreadsPerSeq, false>( top_beam, pre_ids, pre_scores, ids, scores, seq_offset_start, seq_offset_end, seq_width, beam_size, end_id, num_used_threads); } const int tid = threadIdx.x; // use 1 thread only for each sequence const int tid_of_seq = tid % MaxThreadsPerSeq; if (tid_of_seq == 0) { // Use 1 thread for each sequence. 
Triple* top_beam_local = top_beam + tid * beam_size; bool finish_flag = PruneEndBeams(top_beam_local, pre_ids, end_id, num_items); int selected_seq_start = 0; int selected_seq_length = finish_flag ? 0 : num_items; if (MaxSeqs > 1) { const int seq_id = (MaxSeqs > 1) ? tid / MaxThreadsPerSeq : tid; __shared__ int shared_mem[MaxSeqs]; // [0, MaxSeqs - 1], length of each sequences shared_mem[seq_id] = selected_seq_length; __syncthreads(); for (int s = 0; s < seq_id; ++s) { selected_seq_start += shared_mem[s]; } if (seq_id == 0) { selected_offsets[0] = 0; } } else { selected_offsets[0] = 0; } if (parent_idx) { WriteBack<true>(selected_ids, selected_scores, parent_idx, selected_offsets, top_beam_local, seq_offset_start, seq_offset_end, selected_seq_start, selected_seq_length); } else { WriteBack<false>(selected_ids, selected_scores, parent_idx, selected_offsets, top_beam_local, seq_offset_start, seq_offset_end, selected_seq_start, selected_seq_length); } } } template <int MaxLength, int MaxThreadsPerSeq, int MaxSeqs> __global__ void BeamSearchKernel(int64_t* selected_ids, float* selected_scores, int* parent_idx, size_t* selected_offsets, const int64_t* pre_ids, const float* pre_scores, const int64_t* ids, const float* scores, const size_t* seq_offsets, const int num_seqs, const int seq_width, int beam_size, int end_id, bool is_accumulated, int num_used_threads) { const int tid = threadIdx.x; const int seq_id = (MaxSeqs > 1) ? tid / MaxThreadsPerSeq : tid; int seq_offset_start = static_cast<int>(seq_offsets[seq_id]); int seq_offset_end = static_cast<int>(seq_offsets[seq_id + 1]); BeamSearchDetails<MaxLength, MaxThreadsPerSeq, MaxSeqs>( selected_ids, selected_scores, parent_idx, selected_offsets, pre_ids, pre_scores, ids, scores, seq_offset_start, seq_offset_end, seq_width, beam_size, end_id, is_accumulated, num_used_threads); } template <int MaxLength, int MaxThreadsPerSeq> __global__ void BeamSearchKernelSingle( int64_t* selected_ids, float* selected_scores, int* parent_idx, size_t* selected_offsets, const int64_t* pre_ids, const float* pre_scores, const int64_t* ids, const float* scores, const int seq_length, const int seq_width, int beam_size, int end_id, bool is_accumulated, int num_used_threads) { const int seq_offset_start = 0; const int seq_offset_end = seq_length; BeamSearchDetails<MaxLength, MaxThreadsPerSeq, 1>( selected_ids, selected_scores, parent_idx, selected_offsets, pre_ids, pre_scores, ids, scores, seq_offset_start, seq_offset_end, seq_width, beam_size, end_id, is_accumulated, num_used_threads); } static inline int GetNumUsedThreads(const int max_threads_per_seq, const int seq_width, int beam_size) { int num_used_threads = (seq_width + beam_size - 1) / beam_size; num_used_threads = max_threads_per_seq < num_used_threads ? max_threads_per_seq : num_used_threads; num_used_threads = num_used_threads > 32 ? (num_used_threads >> 5) << 5 : (num_used_threads > 16 ? 32 : (num_used_threads > 8 ? 16 : (num_used_threads > 4 ? 8 : (num_used_threads > 2 ? 
4 : num_used_threads)))); return num_used_threads; } template <typename T> class BeamSearchFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::LoDTensor* pre_ids, const framework::LoDTensor* pre_scores, const framework::LoDTensor* ids, const framework::LoDTensor* scores, framework::LoDTensor* selected_ids, framework::LoDTensor* selected_scores, framework::Tensor* parent_idx, size_t level, size_t beam_size, int end_id, bool is_accumulated) { auto abs_lod = framework::ToAbsOffset(scores->lod()); const int64_t* pre_ids_data = pre_ids->data<int64_t>(); const float* pre_scores_data = pre_scores->data<float>(); const int64_t* ids_data = ids ? ids->data<int64_t>() : nullptr; const float* scores_data = scores->data<float>(); const size_t num_seqs = abs_lod[level].size() - 1; size_t seq_width = 1; for (int i = 1; i < scores->dims().size(); i++) { seq_width *= scores->dims()[i]; } // Reserve a big enough memory. auto selected_dims = framework::make_ddim({static_cast<int64_t>(num_seqs * beam_size), 1}); int64_t* selected_ids_data = selected_ids->mutable_data<int64_t>(selected_dims, context.GetPlace()); float* selected_scores_data = selected_scores->mutable_data<float>(selected_dims, context.GetPlace()); int* parent_idx_data = parent_idx ? parent_idx->mutable_data<int>( {static_cast<int64_t>(num_seqs * beam_size)}, context.GetPlace()) : nullptr; framework::LoD selected_lod(2); selected_lod[0].assign(abs_lod[level].begin(), abs_lod[level].end()); selected_lod[1].resize(scores->dims()[0] + 1); size_t* selected_offsets = selected_lod[1].CUDAMutableData(context.GetPlace()); if (num_seqs == 1) { const int seq_length = static_cast<int>(abs_lod[level][1]); const int kMaxThreadsPerSeq = 1024; int num_used_threads = GetNumUsedThreads(kMaxThreadsPerSeq, static_cast<int>(seq_width), static_cast<int>(beam_size)); switch (platform::RoundToPowerOfTwo(beam_size * seq_width)) { CUDA_LAUNCH_KERNEL_HELPER( hipLaunchKernelGGL(( BeamSearchKernelSingle<kPowerOfTwoDim, kMaxThreadsPerSeq>), dim3(1), dim3(kMaxThreadsPerSeq), 0, context.stream(), selected_ids_data, selected_scores_data, parent_idx_data, selected_offsets, pre_ids_data, pre_scores_data, ids_data, scores_data, seq_length, static_cast<int>(seq_width), static_cast<int>(beam_size), static_cast<int>(end_id), is_accumulated, num_used_threads)); } } else if (num_seqs <= 4) { const size_t* seq_offsets = abs_lod[level].CUDAData(context.GetPlace()); // Use only 1 block const int kMaxThreadsPerSeq = 32; const int kMaxSeqs = 4; int num_used_threads = GetNumUsedThreads(kMaxThreadsPerSeq, static_cast<int>(seq_width), static_cast<int>(beam_size)); switch (platform::RoundToPowerOfTwo(beam_size * num_seqs * 32)) { CUDA_LAUNCH_KERNEL_HELPER( hipLaunchKernelGGL(( BeamSearchKernel<kPowerOfTwoDim, kMaxThreadsPerSeq, kMaxSeqs>), dim3(1), dim3(num_seqs * kMaxThreadsPerSeq), 0, context.stream(), selected_ids_data, selected_scores_data, parent_idx_data, selected_offsets, pre_ids_data, pre_scores_data, ids_data, scores_data, seq_offsets, static_cast<int>(num_seqs), static_cast<int>(seq_width), static_cast<int>(beam_size), end_id, is_accumulated, num_used_threads)); } } else { LOG(FATAL) << "Not implemented."; } context.Wait(); if (!framework::CheckLoD(selected_lod)) { PADDLE_THROW("lod %s is not right", framework::LoDToString(selected_lod)); } selected_ids->set_lod(selected_lod); selected_scores->set_lod(selected_lod); if (selected_lod[1].back() < num_seqs * beam_size) { auto final_selected_dims = 
framework::make_ddim( {static_cast<int64_t>(selected_lod[1].back()), 1}); selected_ids->Resize(final_selected_dims); selected_scores->Resize(final_selected_dims); if (parent_idx) { parent_idx->Resize({static_cast<int64_t>(selected_lod[1].back())}); } } } }; template class BeamSearchFunctor<platform::CUDADeviceContext, int>; template class BeamSearchFunctor<platform::CUDADeviceContext, int64_t>; template class BeamSearchFunctor<platform::CUDADeviceContext, float>; template class BeamSearchFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
bd7933ddb8b9e3a4a9acf63d126a2ca0cf7ef1ee.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/beam_search.h" #include "paddle/fluid/platform/cuda_device_function.h" namespace paddle { namespace operators { namespace math { struct Triple { __device__ __forceinline__ Triple() {} __device__ __forceinline__ Triple(int o, int i, float s) : offset(o), id(i), score(s) {} __device__ __forceinline__ void set(int o, int i, float s) { offset = o; id = i; score = s; } __device__ __forceinline__ void operator=(const Triple& in) { offset = in.offset; id = in.id; score = in.score; } __device__ __forceinline__ bool operator<(const float s) const { return score < s; } __device__ __forceinline__ bool operator<(const Triple& in) const { return (score < in.score) || ((score == in.score) && (offset < in.offset)); } int offset; int id; float score; }; __device__ __forceinline__ void Insert(Triple* top_beam, const Triple& p, int beam_size) { if (p < top_beam[beam_size - 1]) { return; } for (int k = beam_size - 2; k >= 0; --k) { if (top_beam[k] < p) { top_beam[k + 1] = top_beam[k]; } else { top_beam[k + 1] = p; return; } } top_beam[0] = p; } template <int MaxThreadsPerSeq, bool IsAccumulated = true> __device__ __forceinline__ int SelectTopBeam( Triple* top_beam, const int64_t* pre_ids, const float* pre_scores, const int64_t* ids, const float* scores, const int seq_offset_start, const int seq_offset_end, const int seq_width, int beam_size, int end_id, int used_threads) { // top_beam is shared memory const int tid = threadIdx.x; const int tid_of_seq = threadIdx.x % MaxThreadsPerSeq; int num_used_threads = used_threads; Triple* top_beam_local = top_beam + tid * beam_size; if (tid_of_seq < num_used_threads) { for (int i = 0; i < beam_size; ++i) { top_beam_local[i].set(-1, -1, -INFINITY); } for (int offset = seq_offset_start; offset < seq_offset_end; ++offset) { int pre_id = static_cast<int>(pre_ids[offset]); if (pre_id == end_id) { if (tid_of_seq == 0) { Triple tmp(offset, end_id, pre_scores[offset]); Insert(top_beam_local, tmp, beam_size); } } else { int index = offset * seq_width + tid_of_seq; if (!IsAccumulated) { float pre_score = pre_scores[offset]; for (int i = tid_of_seq; i < seq_width; i += num_used_threads) { float score = pre_score + __logf(scores[index]); int id = ids ? static_cast<int>(ids[index]) : i; Triple tmp(offset, id, score); Insert(top_beam_local, tmp, beam_size); index += num_used_threads; } } else { for (int i = tid_of_seq; i < seq_width; i += num_used_threads) { int id = ids ? 
static_cast<int>(ids[index]) : i; float score = scores[index]; Triple tmp(offset, id, score); Insert(top_beam_local, tmp, beam_size); index += num_used_threads; } } } } } while (num_used_threads > 1) { if (num_used_threads > 16) { __syncthreads(); } if ((num_used_threads & 0x1) != 0) { // If num_used_threads is a odd number, merge local top_beam of thread 0 // and num_used_threads - 1 if (tid_of_seq == 0) { int index_in_sh = (num_used_threads - 1 + tid) * beam_size; for (int i = 0; i < beam_size; i++) { Insert(top_beam_local, top_beam[index_in_sh], beam_size); index_in_sh++; } } } num_used_threads = num_used_threads >> 1; if (tid_of_seq < num_used_threads) { int index_in_sh = (num_used_threads + tid) * beam_size; for (int i = 0; i < beam_size; i++) { Insert(top_beam_local, top_beam[index_in_sh], beam_size); index_in_sh++; } } } if (tid_of_seq == 0) { int num_items = 0; for (int i = 0; i < beam_size; ++i) { num_items = (top_beam_local[i].score > -INFINITY) ? num_items + 1 : num_items; } return num_items; } return 0; } __device__ __forceinline__ bool PruneEndBeams(Triple* top_beam_local, const int64_t* pre_ids, const int end_id, int num_items) { bool finish_flag = true; for (int i = 0; i < num_items; ++i) { int offset = top_beam_local[i].offset; if (top_beam_local[i].id != end_id || static_cast<int>(pre_ids[offset]) != end_id) { finish_flag = false; break; } } return finish_flag; } template <bool ReturnParentIdx = false> __device__ __forceinline__ void WriteBack( int64_t* selected_ids, float* selected_scores, int* parent_idx, size_t* selected_offsets, Triple* top_beam_local, const int seq_offset_start, const int seq_offset_end, const int selected_seq_start, const int selected_seq_length) { const int tid = threadIdx.x; // use 1 thread only for each sequence int global_index = selected_seq_start; for (int global_offset = seq_offset_start; global_offset < seq_offset_end; ++global_offset) { for (int local_index = 0; local_index < selected_seq_length; ++local_index) { if (top_beam_local[local_index].offset == global_offset) { selected_ids[global_index] = static_cast<int64_t>(top_beam_local[local_index].id); selected_scores[global_index] = top_beam_local[local_index].score; if (ReturnParentIdx) { parent_idx[global_index] = static_cast<int>(global_offset); } global_index++; } } selected_offsets[global_offset + 1] = static_cast<size_t>(global_index); } } template <int MaxLength, int MaxThreadsPerSeq, int MaxSeqs> __device__ void BeamSearchDetails( int64_t* selected_ids, float* selected_scores, int* parent_idx, size_t* selected_offsets, const int64_t* pre_ids, const float* pre_scores, const int64_t* ids, const float* scores, const int seq_offset_start, const int seq_offset_end, const int seq_width, int beam_size, int end_id, bool is_accumulated, int num_used_threads) { __shared__ Triple top_beam[MaxLength]; int num_items = 0; if (is_accumulated) { num_items = SelectTopBeam<MaxThreadsPerSeq, true>( top_beam, pre_ids, pre_scores, ids, scores, seq_offset_start, seq_offset_end, seq_width, beam_size, end_id, num_used_threads); } else { num_items = SelectTopBeam<MaxThreadsPerSeq, false>( top_beam, pre_ids, pre_scores, ids, scores, seq_offset_start, seq_offset_end, seq_width, beam_size, end_id, num_used_threads); } const int tid = threadIdx.x; // use 1 thread only for each sequence const int tid_of_seq = tid % MaxThreadsPerSeq; if (tid_of_seq == 0) { // Use 1 thread for each sequence. 
Triple* top_beam_local = top_beam + tid * beam_size; bool finish_flag = PruneEndBeams(top_beam_local, pre_ids, end_id, num_items); int selected_seq_start = 0; int selected_seq_length = finish_flag ? 0 : num_items; if (MaxSeqs > 1) { const int seq_id = (MaxSeqs > 1) ? tid / MaxThreadsPerSeq : tid; __shared__ int shared_mem[MaxSeqs]; // [0, MaxSeqs - 1], length of each sequences shared_mem[seq_id] = selected_seq_length; __syncthreads(); for (int s = 0; s < seq_id; ++s) { selected_seq_start += shared_mem[s]; } if (seq_id == 0) { selected_offsets[0] = 0; } } else { selected_offsets[0] = 0; } if (parent_idx) { WriteBack<true>(selected_ids, selected_scores, parent_idx, selected_offsets, top_beam_local, seq_offset_start, seq_offset_end, selected_seq_start, selected_seq_length); } else { WriteBack<false>(selected_ids, selected_scores, parent_idx, selected_offsets, top_beam_local, seq_offset_start, seq_offset_end, selected_seq_start, selected_seq_length); } } } template <int MaxLength, int MaxThreadsPerSeq, int MaxSeqs> __global__ void BeamSearchKernel(int64_t* selected_ids, float* selected_scores, int* parent_idx, size_t* selected_offsets, const int64_t* pre_ids, const float* pre_scores, const int64_t* ids, const float* scores, const size_t* seq_offsets, const int num_seqs, const int seq_width, int beam_size, int end_id, bool is_accumulated, int num_used_threads) { const int tid = threadIdx.x; const int seq_id = (MaxSeqs > 1) ? tid / MaxThreadsPerSeq : tid; int seq_offset_start = static_cast<int>(seq_offsets[seq_id]); int seq_offset_end = static_cast<int>(seq_offsets[seq_id + 1]); BeamSearchDetails<MaxLength, MaxThreadsPerSeq, MaxSeqs>( selected_ids, selected_scores, parent_idx, selected_offsets, pre_ids, pre_scores, ids, scores, seq_offset_start, seq_offset_end, seq_width, beam_size, end_id, is_accumulated, num_used_threads); } template <int MaxLength, int MaxThreadsPerSeq> __global__ void BeamSearchKernelSingle( int64_t* selected_ids, float* selected_scores, int* parent_idx, size_t* selected_offsets, const int64_t* pre_ids, const float* pre_scores, const int64_t* ids, const float* scores, const int seq_length, const int seq_width, int beam_size, int end_id, bool is_accumulated, int num_used_threads) { const int seq_offset_start = 0; const int seq_offset_end = seq_length; BeamSearchDetails<MaxLength, MaxThreadsPerSeq, 1>( selected_ids, selected_scores, parent_idx, selected_offsets, pre_ids, pre_scores, ids, scores, seq_offset_start, seq_offset_end, seq_width, beam_size, end_id, is_accumulated, num_used_threads); } static inline int GetNumUsedThreads(const int max_threads_per_seq, const int seq_width, int beam_size) { int num_used_threads = (seq_width + beam_size - 1) / beam_size; num_used_threads = max_threads_per_seq < num_used_threads ? max_threads_per_seq : num_used_threads; num_used_threads = num_used_threads > 32 ? (num_used_threads >> 5) << 5 : (num_used_threads > 16 ? 32 : (num_used_threads > 8 ? 16 : (num_used_threads > 4 ? 8 : (num_used_threads > 2 ? 
4 : num_used_threads)))); return num_used_threads; } template <typename T> class BeamSearchFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::LoDTensor* pre_ids, const framework::LoDTensor* pre_scores, const framework::LoDTensor* ids, const framework::LoDTensor* scores, framework::LoDTensor* selected_ids, framework::LoDTensor* selected_scores, framework::Tensor* parent_idx, size_t level, size_t beam_size, int end_id, bool is_accumulated) { auto abs_lod = framework::ToAbsOffset(scores->lod()); const int64_t* pre_ids_data = pre_ids->data<int64_t>(); const float* pre_scores_data = pre_scores->data<float>(); const int64_t* ids_data = ids ? ids->data<int64_t>() : nullptr; const float* scores_data = scores->data<float>(); const size_t num_seqs = abs_lod[level].size() - 1; size_t seq_width = 1; for (int i = 1; i < scores->dims().size(); i++) { seq_width *= scores->dims()[i]; } // Reserve a big enough memory. auto selected_dims = framework::make_ddim({static_cast<int64_t>(num_seqs * beam_size), 1}); int64_t* selected_ids_data = selected_ids->mutable_data<int64_t>(selected_dims, context.GetPlace()); float* selected_scores_data = selected_scores->mutable_data<float>(selected_dims, context.GetPlace()); int* parent_idx_data = parent_idx ? parent_idx->mutable_data<int>( {static_cast<int64_t>(num_seqs * beam_size)}, context.GetPlace()) : nullptr; framework::LoD selected_lod(2); selected_lod[0].assign(abs_lod[level].begin(), abs_lod[level].end()); selected_lod[1].resize(scores->dims()[0] + 1); size_t* selected_offsets = selected_lod[1].CUDAMutableData(context.GetPlace()); if (num_seqs == 1) { const int seq_length = static_cast<int>(abs_lod[level][1]); const int kMaxThreadsPerSeq = 1024; int num_used_threads = GetNumUsedThreads(kMaxThreadsPerSeq, static_cast<int>(seq_width), static_cast<int>(beam_size)); switch (platform::RoundToPowerOfTwo(beam_size * seq_width)) { CUDA_LAUNCH_KERNEL_HELPER( BeamSearchKernelSingle<kPowerOfTwoDim, kMaxThreadsPerSeq><<< 1, kMaxThreadsPerSeq, 0, context.stream()>>>( selected_ids_data, selected_scores_data, parent_idx_data, selected_offsets, pre_ids_data, pre_scores_data, ids_data, scores_data, seq_length, static_cast<int>(seq_width), static_cast<int>(beam_size), static_cast<int>(end_id), is_accumulated, num_used_threads)); } } else if (num_seqs <= 4) { const size_t* seq_offsets = abs_lod[level].CUDAData(context.GetPlace()); // Use only 1 block const int kMaxThreadsPerSeq = 32; const int kMaxSeqs = 4; int num_used_threads = GetNumUsedThreads(kMaxThreadsPerSeq, static_cast<int>(seq_width), static_cast<int>(beam_size)); switch (platform::RoundToPowerOfTwo(beam_size * num_seqs * 32)) { CUDA_LAUNCH_KERNEL_HELPER( BeamSearchKernel<kPowerOfTwoDim, kMaxThreadsPerSeq, kMaxSeqs><<< 1, num_seqs * kMaxThreadsPerSeq, 0, context.stream()>>>( selected_ids_data, selected_scores_data, parent_idx_data, selected_offsets, pre_ids_data, pre_scores_data, ids_data, scores_data, seq_offsets, static_cast<int>(num_seqs), static_cast<int>(seq_width), static_cast<int>(beam_size), end_id, is_accumulated, num_used_threads)); } } else { LOG(FATAL) << "Not implemented."; } context.Wait(); if (!framework::CheckLoD(selected_lod)) { PADDLE_THROW("lod %s is not right", framework::LoDToString(selected_lod)); } selected_ids->set_lod(selected_lod); selected_scores->set_lod(selected_lod); if (selected_lod[1].back() < num_seqs * beam_size) { auto final_selected_dims = framework::make_ddim( {static_cast<int64_t>(selected_lod[1].back()), 1}); 
selected_ids->Resize(final_selected_dims); selected_scores->Resize(final_selected_dims); if (parent_idx) { parent_idx->Resize({static_cast<int64_t>(selected_lod[1].back())}); } } } }; template class BeamSearchFunctor<platform::CUDADeviceContext, int>; template class BeamSearchFunctor<platform::CUDADeviceContext, int64_t>; template class BeamSearchFunctor<platform::CUDADeviceContext, float>; template class BeamSearchFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
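The beam-search functor above dispatches through platform::RoundToPowerOfTwo plus the CUDA_LAUNCH_KERNEL_HELPER switch so that the rounded problem size reaches the kernel as the compile-time template parameter kPowerOfTwoDim. A standalone sketch of that dispatch pattern follows; the names RoundToPowerOfTwo, DemoKernel and LaunchDemo are illustrative only and not part of the Paddle code above.

// Sketch: round a runtime size up to a power of two, then switch into a
// templated kernel so the size is available as a compile-time constant.
#include <cstdio>
#include <cuda_runtime.h>

static int RoundToPowerOfTwo(int n) {
  int p = 1;
  while (p < n) p <<= 1;
  return p;
}

template <int kPowerOfTwoDim>
__global__ void DemoKernel(int real_dim) {
  if (threadIdx.x == 0) {
    printf("compile-time dim %d covers runtime dim %d\n", kPowerOfTwoDim, real_dim);
  }
}

static void LaunchDemo(int real_dim) {
  // Mirrors the switch generated by CUDA_LAUNCH_KERNEL_HELPER above.
  switch (RoundToPowerOfTwo(real_dim)) {
    case 32:  DemoKernel<32><<<1, 32>>>(real_dim);  break;
    case 64:  DemoKernel<64><<<1, 32>>>(real_dim);  break;
    case 128: DemoKernel<128><<<1, 32>>>(real_dim); break;
    default:  DemoKernel<256><<<1, 32>>>(real_dim); break;  // sketch only: larger sizes not handled
  }
}

int main() {
  LaunchDemo(40);   // rounds to 64
  LaunchDemo(100);  // rounds to 128
  cudaDeviceSynchronize();
  return 0;
}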
d87dfb774c815fd60bd74f29d28cb45d01231582.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/concat_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void Concat(const int nthreads, const Dtype* in_data, const bool forward, const int num_concats, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, Dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; const int top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; if (forward) { out_data[top_index] = in_data[index]; } else { out_data[index] = in_data[top_index]; } } } template <typename Dtype> void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (bottom.size() == 1) { return; } Dtype* top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = true; for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; if (bottom_concat_size == 0) { continue; } const int nthreads = bottom_concat_size * num_concats_; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(Concat<Dtype>, dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); offset_concat_axis += bottom_concat_axis; } } template <typename Dtype> void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (bottom.size() == 1) { return; } const Dtype* top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = false; for (int i = 0; i < bottom.size(); ++i) { const int bottom_concat_axis = bottom[i]->shape(concat_axis_); if (propagate_down[i] && bottom_concat_axis != 0) { Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(Concat<Dtype>, dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_diff, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); } offset_concat_axis += bottom_concat_axis; } } INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); } // namespace caffe
d87dfb774c815fd60bd74f29d28cb45d01231582.cu
#include <vector> #include "caffe/layers/concat_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void Concat(const int nthreads, const Dtype* in_data, const bool forward, const int num_concats, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, Dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; const int top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; if (forward) { out_data[top_index] = in_data[index]; } else { out_data[index] = in_data[top_index]; } } } template <typename Dtype> void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (bottom.size() == 1) { return; } Dtype* top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = true; for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; if (bottom_concat_size == 0) { continue; } const int nthreads = bottom_concat_size * num_concats_; Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, bottom_data, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); offset_concat_axis += bottom_concat_axis; } } template <typename Dtype> void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (bottom.size() == 1) { return; } const Dtype* top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = false; for (int i = 0; i < bottom.size(); ++i) { const int bottom_concat_axis = bottom[i]->shape(concat_axis_); if (propagate_down[i] && bottom_concat_axis != 0) { Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, top_diff, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); } offset_concat_axis += bottom_concat_axis; } } INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); } // namespace caffe
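Side by side, the .hip and .cu versions of this layer differ only in the hip_runtime include and the kernel-launch syntax: CUDA's triple-chevron launch becomes an explicit hipLaunchKernelGGL call with grid, block, shared-memory and stream arguments spelled out (the form the garbled NOLINT comment in the .hip file above should expand to). Below is a minimal, self-contained HIP sketch of that mapping, using a hypothetical AddOne kernel that is not part of Caffe.

// Sketch: the same launch written in CUDA and HIP form.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void AddOne(int n, float* data) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] += 1.0f;
}

int main() {
  const int n = 256;
  float* d = nullptr;
  hipMalloc(&d, n * sizeof(float));
  hipMemset(d, 0, n * sizeof(float));
  // CUDA form:  AddOne<<<dim3(1), dim3(256)>>>(n, d);
  // HIP form (kernel, grid, block, shared-memory bytes, stream, kernel args...):
  hipLaunchKernelGGL(AddOne, dim3(1), dim3(256), 0, 0, n, d);
  hipDeviceSynchronize();
  float h0 = 0.0f;
  hipMemcpy(&h0, d, sizeof(float), hipMemcpyDeviceToHost);
  printf("h[0] = %f\n", h0);  // expect 1.0
  hipFree(d);
  return 0;
}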
3cd3302c0e3e11f61e69448a279f344994523194.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/IndexFlat.h> #include <faiss/gpu/GpuDistance.h> #include <faiss/gpu/StandardGpuResources.h> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/CopyUtils.cuh> #include <faiss/gpu/utils/Transpose.cuh> #include <faiss/gpu/test/TestUtils.h> #include <gtest/gtest.h> #include <sstream> #include <vector> void testTransposition(bool colMajorVecs, bool colMajorQueries, faiss::MetricType metric, float metricArg = 0) { using namespace faiss::gpu; int device = randVal(0, getNumDevices() - 1); StandardGpuResources res; res.noTempMemory(); int dim = randVal(20, 150); int numVecs = randVal(10, 30000); int numQuery = randVal(1, 1024); int k = ::min(numVecs, randVal(20, 70)); // Input data for CPU std::vector<float> vecs = randVecs(numVecs, dim); std::vector<float> queries = randVecs(numQuery, dim); if (metric == faiss::MetricType::METRIC_JensenShannon) { // make values positive for (auto& v : vecs) { v = std::abs(v); if (v == 0) { v = 1e-6; } } for (auto& q : queries) { q = std::abs(q); if (q == 0) { q = 1e-6; } } } // The CPU index is our reference for the results faiss::IndexFlat cpuIndex(dim, metric); cpuIndex.metric_arg = metricArg; cpuIndex.add(numVecs, vecs.data()); std::vector<float> cpuDistance(numQuery * k, 0); std::vector<faiss::Index::idx_t> cpuIndices(numQuery * k, -1); cpuIndex.search(numQuery, queries.data(), k, cpuDistance.data(), cpuIndices.data()); // The transpose and distance code assumes the desired device is already set DeviceScope scope(device); auto stream = res.getDefaultStream(device); // Copy input data to GPU, and pre-transpose both vectors and queries for // passing auto gpuVecs = toDeviceNonTemporary<float, 2>( res.getResources().get(), device, vecs.data(), stream, {numVecs, dim}); auto gpuQueries = toDeviceNonTemporary<float, 2>( res.getResources().get(), device, queries.data(), stream, {numQuery, dim}); DeviceTensor<float, 2, true> vecsT(res.getResources().get(), makeDevAlloc(AllocType::Other, stream), {dim, numVecs}); runTransposeAny(gpuVecs, 0, 1, vecsT, stream); DeviceTensor<float, 2, true> queriesT(res.getResources().get(), makeDevAlloc(AllocType::Other, stream), {dim, numQuery}); runTransposeAny(gpuQueries, 0, 1, queriesT, stream); std::vector<float> gpuDistance(numQuery * k, 0); std::vector<faiss::Index::idx_t> gpuIndices(numQuery * k, -1); GpuDistanceParams args; args.metric = metric; args.metricArg = metricArg; args.k = k; args.dims = dim; args.vectors = colMajorVecs ? vecsT.data() : gpuVecs.data(); args.vectorsRowMajor = !colMajorVecs; args.numVectors = numVecs; args.queries = colMajorQueries ? 
queriesT.data() : gpuQueries.data(); args.queriesRowMajor = !colMajorQueries; args.numQueries = numQuery; args.outDistances = gpuDistance.data(); args.outIndices = gpuIndices.data(); bfKnn(&res, args); std::stringstream str; str << "metric " << metric << " colMajorVecs " << colMajorVecs << " colMajorQueries " << colMajorQueries; compareLists(cpuDistance.data(), cpuIndices.data(), gpuDistance.data(), gpuIndices.data(), numQuery, k, str.str(), false, false, true, 6e-3f, 0.1f, 0.015f); } // Test different memory layouts for brute-force k-NN TEST(TestGpuDistance, Transposition_RR) { testTransposition(false, false, faiss::MetricType::METRIC_L2); testTransposition(false, false, faiss::MetricType::METRIC_INNER_PRODUCT); } TEST(TestGpuDistance, Transposition_RC) { testTransposition(false, true, faiss::MetricType::METRIC_L2); } TEST(TestGpuDistance, Transposition_CR) { testTransposition(true, false, faiss::MetricType::METRIC_L2); } TEST(TestGpuDistance, Transposition_CC) { testTransposition(true, true, faiss::MetricType::METRIC_L2); } TEST(TestGpuDistance, L1) { testTransposition(false, false, faiss::MetricType::METRIC_L1); } // Test other transpositions with the general distance kernel TEST(TestGpuDistance, L1_RC) { testTransposition(false, true, faiss::MetricType::METRIC_L1); } TEST(TestGpuDistance, L1_CR) { testTransposition(true, false, faiss::MetricType::METRIC_L1); } TEST(TestGpuDistance, L1_CC) { testTransposition(true, true, faiss::MetricType::METRIC_L1); } // Test remainder of metric types TEST(TestGpuDistance, Linf) { testTransposition(false, false, faiss::MetricType::METRIC_Linf); } TEST(TestGpuDistance, Lp) { testTransposition(false, false, faiss::MetricType::METRIC_Lp, 3); } TEST(TestGpuDistance, Canberra) { testTransposition(false, false, faiss::MetricType::METRIC_Canberra); } TEST(TestGpuDistance, BrayCurtis) { testTransposition(false, false, faiss::MetricType::METRIC_BrayCurtis); } TEST(TestGpuDistance, JensenShannon) { testTransposition(false, false, faiss::MetricType::METRIC_JensenShannon); } int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); // just run with a fixed test seed faiss::gpu::setTestSeed(100); return RUN_ALL_TESTS(); }
3cd3302c0e3e11f61e69448a279f344994523194.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/IndexFlat.h> #include <faiss/gpu/GpuDistance.h> #include <faiss/gpu/StandardGpuResources.h> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/CopyUtils.cuh> #include <faiss/gpu/utils/Transpose.cuh> #include <faiss/gpu/test/TestUtils.h> #include <gtest/gtest.h> #include <sstream> #include <vector> void testTransposition(bool colMajorVecs, bool colMajorQueries, faiss::MetricType metric, float metricArg = 0) { using namespace faiss::gpu; int device = randVal(0, getNumDevices() - 1); StandardGpuResources res; res.noTempMemory(); int dim = randVal(20, 150); int numVecs = randVal(10, 30000); int numQuery = randVal(1, 1024); int k = std::min(numVecs, randVal(20, 70)); // Input data for CPU std::vector<float> vecs = randVecs(numVecs, dim); std::vector<float> queries = randVecs(numQuery, dim); if (metric == faiss::MetricType::METRIC_JensenShannon) { // make values positive for (auto& v : vecs) { v = std::abs(v); if (v == 0) { v = 1e-6; } } for (auto& q : queries) { q = std::abs(q); if (q == 0) { q = 1e-6; } } } // The CPU index is our reference for the results faiss::IndexFlat cpuIndex(dim, metric); cpuIndex.metric_arg = metricArg; cpuIndex.add(numVecs, vecs.data()); std::vector<float> cpuDistance(numQuery * k, 0); std::vector<faiss::Index::idx_t> cpuIndices(numQuery * k, -1); cpuIndex.search(numQuery, queries.data(), k, cpuDistance.data(), cpuIndices.data()); // The transpose and distance code assumes the desired device is already set DeviceScope scope(device); auto stream = res.getDefaultStream(device); // Copy input data to GPU, and pre-transpose both vectors and queries for // passing auto gpuVecs = toDeviceNonTemporary<float, 2>( res.getResources().get(), device, vecs.data(), stream, {numVecs, dim}); auto gpuQueries = toDeviceNonTemporary<float, 2>( res.getResources().get(), device, queries.data(), stream, {numQuery, dim}); DeviceTensor<float, 2, true> vecsT(res.getResources().get(), makeDevAlloc(AllocType::Other, stream), {dim, numVecs}); runTransposeAny(gpuVecs, 0, 1, vecsT, stream); DeviceTensor<float, 2, true> queriesT(res.getResources().get(), makeDevAlloc(AllocType::Other, stream), {dim, numQuery}); runTransposeAny(gpuQueries, 0, 1, queriesT, stream); std::vector<float> gpuDistance(numQuery * k, 0); std::vector<faiss::Index::idx_t> gpuIndices(numQuery * k, -1); GpuDistanceParams args; args.metric = metric; args.metricArg = metricArg; args.k = k; args.dims = dim; args.vectors = colMajorVecs ? vecsT.data() : gpuVecs.data(); args.vectorsRowMajor = !colMajorVecs; args.numVectors = numVecs; args.queries = colMajorQueries ? 
queriesT.data() : gpuQueries.data(); args.queriesRowMajor = !colMajorQueries; args.numQueries = numQuery; args.outDistances = gpuDistance.data(); args.outIndices = gpuIndices.data(); bfKnn(&res, args); std::stringstream str; str << "metric " << metric << " colMajorVecs " << colMajorVecs << " colMajorQueries " << colMajorQueries; compareLists(cpuDistance.data(), cpuIndices.data(), gpuDistance.data(), gpuIndices.data(), numQuery, k, str.str(), false, false, true, 6e-3f, 0.1f, 0.015f); } // Test different memory layouts for brute-force k-NN TEST(TestGpuDistance, Transposition_RR) { testTransposition(false, false, faiss::MetricType::METRIC_L2); testTransposition(false, false, faiss::MetricType::METRIC_INNER_PRODUCT); } TEST(TestGpuDistance, Transposition_RC) { testTransposition(false, true, faiss::MetricType::METRIC_L2); } TEST(TestGpuDistance, Transposition_CR) { testTransposition(true, false, faiss::MetricType::METRIC_L2); } TEST(TestGpuDistance, Transposition_CC) { testTransposition(true, true, faiss::MetricType::METRIC_L2); } TEST(TestGpuDistance, L1) { testTransposition(false, false, faiss::MetricType::METRIC_L1); } // Test other transpositions with the general distance kernel TEST(TestGpuDistance, L1_RC) { testTransposition(false, true, faiss::MetricType::METRIC_L1); } TEST(TestGpuDistance, L1_CR) { testTransposition(true, false, faiss::MetricType::METRIC_L1); } TEST(TestGpuDistance, L1_CC) { testTransposition(true, true, faiss::MetricType::METRIC_L1); } // Test remainder of metric types TEST(TestGpuDistance, Linf) { testTransposition(false, false, faiss::MetricType::METRIC_Linf); } TEST(TestGpuDistance, Lp) { testTransposition(false, false, faiss::MetricType::METRIC_Lp, 3); } TEST(TestGpuDistance, Canberra) { testTransposition(false, false, faiss::MetricType::METRIC_Canberra); } TEST(TestGpuDistance, BrayCurtis) { testTransposition(false, false, faiss::MetricType::METRIC_BrayCurtis); } TEST(TestGpuDistance, JensenShannon) { testTransposition(false, false, faiss::MetricType::METRIC_JensenShannon); } int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); // just run with a fixed test seed faiss::gpu::setTestSeed(100); return RUN_ALL_TESTS(); }
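The test pair above exercises faiss::gpu::bfKnn through GpuDistanceParams with explicitly transposed (column-major) inputs. A stripped-down usage sketch with row-major data follows; it assumes, beyond what the test itself shows, that bfKnn also accepts host-resident input buffers (the test only demonstrates host-resident output buffers together with device-resident inputs).

// Sketch: brute-force k-NN via the GpuDistanceParams interface used above.
#include <faiss/Index.h>
#include <faiss/gpu/GpuDistance.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <cstdlib>
#include <vector>

int main() {
  const int dim = 64, numVecs = 1000, numQuery = 8, k = 5;
  std::vector<float> vecs(numVecs * dim), queries(numQuery * dim);
  for (auto& v : vecs) v = rand() / float(RAND_MAX);
  for (auto& q : queries) q = rand() / float(RAND_MAX);

  std::vector<float> distances(numQuery * k);
  std::vector<faiss::Index::idx_t> indices(numQuery * k);

  faiss::gpu::StandardGpuResources res;
  faiss::gpu::GpuDistanceParams args;
  args.metric = faiss::MetricType::METRIC_L2;
  args.k = k;
  args.dims = dim;
  args.vectors = vecs.data();        // assumed host-resident input
  args.vectorsRowMajor = true;
  args.numVectors = numVecs;
  args.queries = queries.data();     // assumed host-resident input
  args.queriesRowMajor = true;
  args.numQueries = numQuery;
  args.outDistances = distances.data();
  args.outIndices = indices.data();
  faiss::gpu::bfKnn(&res, args);
  return 0;
}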
a90a3f8992535709c8beec07497e8fea768abb14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "N3LDG_cuda.h" #include <array> #include <boost/format.hpp> #include <cstdlib> #include <cstddef> #include <vector> #include <algorithm> #include <cmath> #include <cstdio> #include <rocblas.h> #include "Printf_cuda.cuh" #include "Printf_cuda.cu" #include "Memory_cuda.h" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "cnmem.h" #include <string> #include <utility> #include <cstring> #include <cstdint> #include <chrono> #include <thread> #include <numeric> #include <memory> #include "profiler.h" #include "MyTensor-def.h" namespace n3ldg_cuda { using namespace std; using boost::format; #if USE_FLOAT #define cuda_sqrt(x) sqrtf(x) #define cuda_pow(x, y) powf(x, y) #define cuda_tanh(x) tanhf(x) #define cuda_exp(x) __expf(x) #define cuda_log(x) logf(x) #else #define cuda_sqrt(x) sqrt(x) #define cuda_pow(x, y) pow(x, y) #define cuda_tanh(x) tanh(x) #define cuda_exp(x) exp(x) #define cuda_log(x) log(x) #endif #define KERNEL_LOG #ifdef KERNEL_LOG #define KernelPrintLine(format, ...)\ {\ cuPrintf("block:x=%d,y=%d thread:x=%d,y=%d "#format"\n", blockIdx.x,\ blockIdx.y, threadIdx.x, threadIdx.y,__VA_ARGS__);\ } #else #define KernelPrintLine(format, ...) #endif constexpr int TPB = 1024; constexpr int BLOCK_COUNT = 56; void CallCuda(hipError_t status) { if (status != hipSuccess) { cerr << "cuda error:" << hipGetErrorString(status) << endl; abort(); } } void CheckCudaError() { //hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if (error != hipSuccess) { std::cerr << "cuda error:" << hipGetErrorName(error) << std::endl; std::cerr << "cuda error:" << hipGetErrorString(error) << std::endl; abort(); } } void CallCnmem(cnmemStatus_t status) { assert(status == CNMEM_STATUS_SUCCESS); } void CallCublas(hipblasStatus_t status) { assert(status == HIPBLAS_STATUS_SUCCESS); } void CallCurand(hiprandStatus_t status) { assert(status == HIPRAND_STATUS_SUCCESS); } hipblasHandle_t& GetCublasHandle() { static hipblasHandle_t handle; static bool init; if (!init) { init = true; CallCublas(hipblasCreate(&handle)); } return handle; } hipError_t MyCudaMemcpy(void *dest, const void *src, size_t count, hipMemcpyKind kind) { hipError_t e; e = hipMemcpyAsync(dest, src, count, kind); CallCuda(e); return e; } int NextTwoIntegerPowerNumber(int number) { int result = 1; while (number > result) { result <<= 1; } return result; } template <> vector<bool> GPUArray<bool>::toCpu() const { bool *cpu_arr = new bool[len]; CallCuda(MyCudaMemcpy(cpu_arr, value, sizeof(bool) * len, hipMemcpyDeviceToHost)); vector<bool> result; result.resize(len); for (int i = 0; i < len; ++i) { result.at(i) = cpu_arr[i]; } delete[] cpu_arr; return result; } void DeviceInt::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceInt::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(&v, value, sizeof(int), hipMemcpyDeviceToHost)); } void DeviceInt::copyFromHostToDevice() { CallCuda(MyCudaMemcpy(value, &v, sizeof(int), hipMemcpyHostToDevice)); } DeviceInt::~DeviceInt() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void DeviceNumber::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceNumber::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(&v, value, sizeof(dtype), hipMemcpyDeviceToHost)); } 
DeviceNumber::~DeviceNumber() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor1D::init(int dim) { initOnDevice(dim); #if TEST_CUDA v = new dtype[dim]; zero(); #endif } void Tensor1D::initOnMemoryAndDevice(int dim) { initOnDevice(dim); v = new dtype[dim]; zero(); } void Tensor1D::initOnDevice(int dim) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, dim * sizeof(dtype))); this->dim = dim; } void Tensor1D::initOnMemory(int len) { v = new dtype[dim]; zero(); } Tensor1D::Tensor1D(const Tensor1D &t) { dim = t.dim; memcpy(v, t.v, dim *sizeof(dtype)); CallCuda(MyCudaMemcpy(value, t.value, dim * sizeof(dtype), hipMemcpyDeviceToDevice)); } Tensor1D::~Tensor1D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor1D::print() const { cout << "dim:" << dim << endl; PrintNums(value, dim); } void Tensor1D::copyFromHostToDevice() { assert(v != NULL); assert(value != NULL); CallCuda(MyCudaMemcpy(value, v, dim * sizeof(dtype), hipMemcpyHostToDevice)); } void Tensor1D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, dim * sizeof(dtype), hipMemcpyDeviceToHost)); } __device__ int DeviceDefaultIndex(); __device__ int DeviceDefaultStep(); int DefaultBlockCount(int len); __global__ void KernelCheckIsNumber(const dtype *v, int dim, int *error) { if (threadIdx.x == 0 && blockIdx.x == 0) { *error = 0; } int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim; i += step) { if (v[i] != v[i]) { *error = 1; return; } } } void CheckIsNumber(const dtype *v, int dim) { int block_count = DefaultBlockCount(dim); DeviceInt error; error.init(); hipLaunchKernelGGL(( KernelCheckIsNumber), dim3(block_count), dim3(TPB), 0, 0, v, dim, error.value); CheckCudaError(); error.copyFromDeviceToHost(); if (error.v != 0) { cerr << "nan checked!" 
<< endl; abort(); } } void Tensor1D::checkIsNumber() const { n3ldg_cuda::CheckIsNumber(value, dim); } void Tensor2D::initOnMemoryAndDevice(int row, int col) { initOnDevice(row, col); v = new dtype[row * col]; zero(); } void Tensor2D::init(int row, int col) { initOnDevice(row, col); #if TEST_CUDA v = new dtype[row * col]; zero(); #endif } void Tensor2D::initOnDevice(int row, int col) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, row * col * sizeof(dtype))); this->row = row; this->col = col; this->size = row * col; } Tensor2D::Tensor2D(const Tensor2D &t) { row = t.row; col = t.col; memcpy(v, t.v, sizeof(dtype) * row * col); CallCuda(MyCudaMemcpy(value, t.value, sizeof(dtype) * row * col, hipMemcpyDeviceToDevice)); } Tensor2D::~Tensor2D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor2D::print() const { cout << "row:" << row << " col:" << col << endl; PrintNums(value, size); } void Tensor2D::copyFromHostToDevice() { CallCuda(MyCudaMemcpy(value, v, size * sizeof(dtype), hipMemcpyHostToDevice)); } void Tensor2D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, size * sizeof(dtype), hipMemcpyDeviceToHost)); } void Assert(bool v, const std::string &message, const function<void(void)> &call) { #if TEST_CUDA if (!v) { std::cerr << message << std::endl; call(); abort(); } #endif } __device__ void DeviceAtomicAdd(dtype* address, dtype value) { float old = value; float new_old; do { new_old = atomicExch(address, 0.0); new_old += old; } while ((old = atomicExch(address, new_old))!=0.0); }; __device__ dtype cuda_dexp(dtype y) { return y; } __device__ dtype cuda_dtanh(dtype y) { return 1.0f - y * y; } __device__ dtype cuda_sigmoid(dtype x) { return 1.0f / (1.0f + cuda_exp(-x)); } __device__ dtype cuda_dsigmoid(dtype y) { return y * (1.0f - y); } __device__ dtype cuda_relu(dtype x) { return x > 0.0f ? x : 0.0f; } __device__ dtype cuda_drelu(dtype x) { return x > 0.0f ? 1 : 0.0f; } __device__ dtype cuda_leaky_relu(dtype x) { return x > 0.0f ? x : -0.1f * x; } __device__ dtype cuda_dleaky_relu(dtype x) { return x > 0.0f ? 1.0f : -0.1f; } __device__ dtype cuda_dsqrt(dtype y) { return 0.5 / y; } const dtype SELU_LAMBDA = 1.0507009873554804934193349852946; const dtype SELU_ALPHA = 1.6732632423543772848170429916717; __device__ dtype cuda_selu(dtype x) { return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA * (cuda_exp(x) - 1.0f) : SELU_LAMBDA * x; } __device__ dtype cuda_dselu(dtype x, dtype y) { return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA + y : SELU_LAMBDA; } void Random(dtype *v, int len, dtype bound) { dtype *mem = (dtype*)malloc(len * sizeof(dtype)); assert(mem != NULL); dtype min = -bound, max = bound; for (int i = 0; i < len; i++) { mem[i] = (dtype(rand()) / RAND_MAX) * (max - min) + min; } CallCuda(MyCudaMemcpy(v, mem, len * sizeof(dtype), hipMemcpyHostToDevice)); free(mem); } __device__ int DeviceDefaultIndex() { return blockIdx.x * blockDim.x + threadIdx.x; } __device__ int DeviceDefaultStep() { return gridDim.x * blockDim.x; } __device__ dtype DeviceAbs(dtype d) { return d > 0 ? 
d : -d; } int DefaultBlockCount(int len) { int block_count = (len - 1 + TPB) / TPB; return ::min(block_count, BLOCK_COUNT); } int DefaultBlockCountWithoutLimit(int len) { return (len - 1 + TPB) / TPB; } __global__ void KernelZero(dtype *v, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) { return; } v[index] = 0; } void Zero(dtype *v, int len) { int block_count = (len - 1 + TPB) / TPB; hipLaunchKernelGGL(( KernelZero), dim3(block_count), dim3(TPB), 0, 0, v, len); CheckCudaError(); } __global__ void PrintPointers(void **p, int len) { for (int i = 0; i < len; ++i) { printf("%p\n", p[i]); } } __global__ void KernelPrintNums(const dtype* p, int len) { for (int i = 0; i < len; ++i) { printf("%d %f\n", i, p[i]); } } void PrintNums(const dtype* p, int len) { hipLaunchKernelGGL(( KernelPrintNums), dim3(1), dim3(1), 0, 0, p, len); hipDeviceSynchronize(); CheckCudaError(); } __global__ void KernelPrintNums(const dtype *const *p, int index, int len) { for (int i = 0; i < len; ++i) { printf("%d %f\n", i, p[index][i]); } } void PrintNums(const dtype *const *p, int count_i, int len) { hipLaunchKernelGGL(( KernelPrintNums), dim3(1), dim3(1), 0, 0, p, count_i, len); hipDeviceSynchronize(); CheckCudaError(); } __global__ void KernelPrintInts(const int* p, int len) { for (int i = 0; i < len; ++i) { printf("%d\n", p[i]); } } void PrintInts(const int* p, int len) { hipLaunchKernelGGL(( KernelPrintInts), dim3(1), dim3(1), 0, 0, p, len); hipDeviceSynchronize(); CheckCudaError(); } void InitCuda(int device_id, float memory_in_gb) { std::cout << "device_id:" << device_id << std::endl; CallCuda(hipSetDeviceFlags(hipDeviceMapHost)); #if DEVICE_MEMORY == 0 cnmemDevice_t device; device.size = 10000000000; device.device = device_id; cnmemInit(1, &device, CNMEM_FLAGS_DEFAULT); #else CallCuda(hipSetDevice(device_id)); #endif CallCuda(hipDeviceSetCacheConfig(hipFuncCachePreferL1)); CallCuda(cudaPrintfInit()); MemoryPool::Ins().Init(memory_in_gb); } void EndCuda() { cudaPrintfEnd(); Profiler::Ins().Print(); } __global__ void KernelCopyFromOneVectorToMultiVectors(const dtype *src, dtype *const *dest, int count, int len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * len; i += step) { int count_i = i / len; int len_i = i % len; dest[count_i][len_i] = src[i]; } } void CopyFromOneVectorToMultiVals(const dtype *src, std::vector<dtype*> &vals, int count, int len) { NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); int block_count = (len * count - 1 + TPB) / TPB; block_count = ::min(block_count, BLOCK_COUNT); hipLaunchKernelGGL(( KernelCopyFromOneVectorToMultiVectors), dim3(block_count), dim3(TPB), 0, 0, src, (dtype *const *)val_arr.value, count, len); CheckCudaError(); } void CopyFromHostToDevice(const std::vector<dtype*> &src, std::vector<dtype*> &dest, int count, int dim) { dtype *long_src = (dtype*)malloc(count * dim * sizeof(dtype)); if (long_src == NULL) { std::cerr << "out of memory!" 
<< std::endl; abort(); } for (int i = 0; i < count; ++i) { memcpy(long_src + i * dim, src.at(i), dim * sizeof(dtype)); } dtype *long_dest = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&long_dest, count * dim * sizeof(dtype*))); CallCuda(hipMemcpy(long_dest, long_src, count * dim * sizeof(dtype*), hipMemcpyHostToDevice)); CopyFromOneVectorToMultiVals(long_dest, dest, count, dim); free(long_src); CallCuda(MemoryPool::Ins().Free(long_dest)); } __global__ void KernelCopyFromMultiVectorsToOneVector(const dtype **src, dtype *dest, int count, int len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * len; i += step) { int count_i = i / len; int len_i = i % len; dest[i] = src[count_i][len_i]; } } void CopyFromMultiVectorsToOneVector(const std::vector<dtype*> &src, dtype *dest, int count, int len) { NumberPointerArray src_arr; src_arr.init((dtype**)src.data(), src.size()); int block_count = DefaultBlockCount(len * count); hipLaunchKernelGGL(( KernelCopyFromMultiVectorsToOneVector), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)src_arr.value, dest, count, len); CheckCudaError(); } void CopyFromDeviceToHost(const std::vector<dtype*> &src, std::vector<dtype*> &dest, int count, int dim) { dtype *long_src = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&long_src, count * dim * sizeof(dtype*))); CopyFromMultiVectorsToOneVector(src, long_src, count, dim); dtype *long_dest = (dtype*)malloc(count * dim * sizeof(dtype)); if (long_dest == NULL) { std::cerr << "out of memory!" << std::endl; abort(); } CallCuda(hipMemcpy(long_dest, long_src, count * dim * sizeof(dtype), hipMemcpyDeviceToHost)); for (int i = 0; i < count; ++i) { memcpy(dest.at(i), long_dest + i * dim, dim * sizeof(dtype)); } CallCuda(MemoryPool::Ins().Free(long_src)); free(long_dest); } __global__ void KernelActivationForward(ActivatedEnum activated, const dtype *const *xs, int count, int *dims, int max_dim, dtype *const *ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < max_dim * count; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { if (activated == ActivatedEnum::TANH) { ys[count_i][dim_i] = cuda_tanh(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { ys[count_i][dim_i] = cuda_sigmoid(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::EXP) { ys[count_i][dim_i] = cuda_exp(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::RELU) { ys[count_i][dim_i] = cuda_relu(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::SQRT) { ys[count_i][dim_i] = cuda_sqrt(xs[count_i][dim_i]); } else { printf("KernelActivationForward - error enum\n"); assert(false); } } } } void ActivationForward(ActivatedEnum activated, const std::vector<const dtype*> &xs, int count, const vector<int> &dims, std::vector<dtype*> &ys) { int max_dim = *max_element(dims.begin(), dims.end()); NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * max_dim); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); hipLaunchKernelGGL(( KernelActivationForward), dim3(block_count), dim3(TPB), 0, 0, activated, (const dtype* const *)x_arr.value, count, dim_arr.value, max_dim, (dtype *const *)y_arr.value); CheckCudaError(); } __global__ void KernelActivationBackward(ActivatedEnum activated, const dtype *const *losses, const dtype *const *vals, int count, int *dims, int max_dim, dtype* const* 
in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < max_dim * count; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { dtype l; if (activated == ActivatedEnum::TANH) { l = cuda_dtanh(vals[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { l = cuda_dsigmoid(vals[count_i][dim_i]); } else if (activated == ActivatedEnum::EXP) { l = cuda_dexp(vals[count_i][dim_i]); } else if (activated == ActivatedEnum::RELU) { l = cuda_drelu(vals[count_i][dim_i]); } else if (activated == ActivatedEnum::SQRT) { l = cuda_dsqrt(vals[count_i][dim_i]); } else { printf("KernelActivationBackward - error enum\n"); assert(false); } dtype v = l * losses[count_i][dim_i]; DeviceAtomicAdd(in_losses[count_i] + dim_i, v); } } } void ActivationBackward(ActivatedEnum activated, const std::vector<const dtype*> &losses, const std::vector<dtype*> &vals, int count, const vector<int> &dims, std::vector<dtype*> &in_losses) { int max_dim = *max_element(dims.begin(), dims.end()); NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = DefaultBlockCount(count * max_dim); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); hipLaunchKernelGGL(( KernelActivationBackward), dim3(block_count), dim3(TPB), 0, 0, activated, loss_arr.value, val_arr.value, count, dim_arr.value, max_dim, (dtype *const *)in_loss_arr.value); CheckCudaError(); } __global__ void KernelDropoutForward(const dtype *const *xs, int count, int dim, bool is_training, const dtype* drop_mask, dtype drop_factor, dtype *const *ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (is_training) { if (drop_mask[i] < drop_factor) { ys[count_i][dim_i] = 0.0f; } else { ys[count_i][dim_i] = xs[count_i][dim_i]; } } else { ys[count_i][dim_i] = (1 - drop_factor) * xs[count_i][dim_i]; } } } void DropoutForward(const std::vector<dtype*> &xs, int count, int dim, bool is_training, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &ys) { if (drop_factor < 0 || drop_factor >= 1.0f) { std::cerr << "drop value is " << drop_factor << std::endl; abort(); } NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelDropoutForward), dim3(block_count), dim3(TPB), 0, 0, x_arr.value, count, dim, is_training, drop_mask, drop_factor, (dtype *const *)y_arr.value); CheckCudaError(); } __global__ void KernelDropoutBackward(const dtype *const *losses, const dtype *const *vals, int count, int dim, bool is_training, const dtype* drop_mask, dtype drop_factor, dtype *const *in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (is_training) { if (drop_mask[i] >= drop_factor) { DeviceAtomicAdd(in_losses[count_i] + dim_i, losses[count_i][dim_i]); } } else { DeviceAtomicAdd(in_losses[count_i] + dim_i, (1 - drop_factor) * losses[count_i][dim_i]); } } } void DropoutBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &vals, int count, int dim, bool is_training, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> 
&in_losses) { if (drop_factor < 0 || drop_factor >= 1) { std::cerr << "drop value is " << drop_factor << std::endl; abort(); } NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelDropoutBackward), dim3(block_count), dim3(TPB), 0, 0, loss_arr.value, val_arr.value, count, dim, is_training, drop_mask, drop_factor, (dtype *const *)in_loss_arr.value); CheckCudaError(); } __global__ void KernelBucketForward(const dtype *input, int count, int dim, dtype *const *ys) { int index = DeviceDefaultIndex(); for (int i = index; i < count * dim; i+= DeviceDefaultStep()) { int count_i = i / dim; int dim_i = i % dim; ys[count_i][dim_i] = input[count_i * dim + dim_i]; } } void BucketForward(const std::vector<dtype> input, int count, int dim, std::vector<dtype*> &ys) { NumberArray input_arr; NumberPointerArray ys_arr; input_arr.init((dtype*)input.data(), input.size()); ys_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelBucketForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype*)input_arr.value, count, dim, (dtype *const *)ys_arr.value); CheckCudaError(); } __global__ void KernelCopyForUniNodeForward(const dtype** xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = gridDim.x * blockDim.x; int x_total_len = count * x_len; int b_total_len = count * b_len; for (int i = index; i < x_total_len + b_total_len; i += step) { if (i < x_total_len) { int count_i = i / x_len; int len_i = i % x_len; xs_dest[i] = xs[count_i][len_i]; } else if (use_b) { int b_i = i - x_total_len; int len_i = b_i % b_len; b_dest[b_i] = b[len_i]; } } } void CopyForUniNodeForward(const std::vector<dtype*> &xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { NumberPointerArray x_arr; x_arr.init((dtype**)xs.data(), xs.size()); int len = x_len + b_len; int block_count = ::min((count * len - 1 + TPB) / TPB, 56); hipLaunchKernelGGL(( KernelCopyForUniNodeForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)x_arr.value, (const dtype*)b, xs_dest, b_dest, count, x_len, b_len, use_b); CheckCudaError(); } void MatrixMultiplyMatrix(dtype *W, dtype *x, dtype *y, int row, int col, int count, bool useb, bool should_x_transpose, bool should_W_transpose) { hipblasHandle_t &handle = GetCublasHandle(); dtype alpha = 1; dtype beta = useb? 1 : 0; hipblasOperation_t x_op = should_x_transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N; int ldx = should_x_transpose ? count : col; hipblasOperation_t W_op = should_W_transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N; int ldw = should_W_transpose ? 
col : row; #if USE_FLOAT CallCublas(hipblasSgemm(handle, W_op, x_op, row, count, col, &alpha, W, ldw, x, ldx, &beta, y, row)); #else CallCublas(hipblasDgemm(handle, W_op, x_op, row, count, col, &alpha, W, ldw, x, ldx, &beta, y, row)); #endif } __global__ void KernelVerify(dtype *host, dtype *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i += step) { dtype loss = host[index] - device[index]; if (DeviceAbs(loss) > 0.001 && DeviceAbs(loss) > 0.001 * DeviceAbs(host[index])) { *success = false; KernelPrintLine("KernelVerify: host:%f device:%f loss:%f", host[index], device[index], loss); } } } bool Verify(dtype *host, dtype *device, int len, const char* message) { NumberArray arr; arr.init(host, len); int block_count = DefaultBlockCount(len); char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( KernelVerify), dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success); CheckCudaError(); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), hipMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); hipDeviceSynchronize(); cudaPrintfDisplay(stdout, true); if (!success) { cerr << message << endl; abort(); } return success; } __global__ void KernelVerify(bool *host, bool *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { if (host[index] != device[index]) { *success = false; printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]); KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]); } } } bool Verify(bool *host, bool *device, int len, const char* message) { BoolArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( KernelVerify), dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success); CheckCudaError(); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), hipMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); hipDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } __global__ void KernelVerify(int *host, int *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { if (host[index] != device[index]) { *success = false; printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]); KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]); } } } bool Verify(int *host, int *device, int len, const char* message) { IntArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 
1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), hipMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( KernelVerify), dim3(block_count), dim3(TPB), 0, 0, arr.value, device, len, m, dev_success); CheckCudaError(); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), hipMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); hipDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } constexpr int MAX_BLOCK_POWER = 100; MemoryPool& MemoryPool::Ins() { static MemoryPool *p; if (p == NULL) { p = new MemoryPool; p->free_blocks_.resize(MAX_BLOCK_POWER + 1); p->busy_blocks_.reserve(10000); } return *p; } void appendFreeBlock(const MemoryBlock &memory_block, vector<map<void*, MemoryBlock>> &free_blocks, int i, const unordered_map<void*, MemoryBlock> &busy_blocks) { if (memory_block.size != (1 << i)) { cerr << boost::format("incorrect block size %1%, but i is %2%") % memory_block.size % i << endl; abort(); } free_blocks.at(i).insert(make_pair(memory_block.p, memory_block)); } hipError_t MemoryPool::Malloc(void **p, int size) { assert(*p == NULL); Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("Malloc"); #if DEVICE_MEMORY == 0 CallCnmem(cnmemMalloc(p, size, NULL)); profiler.EndEvent(); return hipSuccess; #elif DEVICE_MEMORY == 1 hipError_t r = hipMalloc(p, size); profiler.EndEvent(); return r; #else int fit_size = 1; int n = 0; while (fit_size < size) { fit_size <<= 1; ++n; } hipError_t status = hipErrorMemoryAllocation; while (status != hipSuccess) { if (free_blocks_.at(n).empty()) { int higher_power = n + 1; while (higher_power <= MAX_BLOCK_POWER && free_blocks_.at(higher_power).empty()) { ++higher_power; } if (higher_power > MAX_BLOCK_POWER) { while (status != hipSuccess) { status = hipMalloc(p, fit_size); if (status != hipSuccess) { abort(); } } CallCuda(status); MemoryBlock block(*p, fit_size); busy_blocks_.insert(std::make_pair(*p, block)); } else { auto &v = free_blocks_.at(higher_power); MemoryBlock &to_split = v.rbegin()->second; int half_size = to_split.size >> 1; void *half_address = static_cast<void*>(static_cast<char*>(to_split.p) + half_size); MemoryBlock low_block(to_split.p, half_size, to_split.buddy), high_block(half_address, half_size, to_split.p); v.erase(v.rbegin()->first); appendFreeBlock(low_block, free_blocks_, higher_power - 1, busy_blocks_); appendFreeBlock(high_block, free_blocks_, higher_power - 1, busy_blocks_); } } else { status = hipSuccess; int this_size = free_blocks_.at(n).size(); MemoryBlock &block = free_blocks_.at(n).rbegin()->second; *p = block.p; busy_blocks_.insert(std::make_pair(block.p, block)); free_blocks_.at(n).erase(free_blocks_.at(n).rbegin()->first); } } profiler.EndEvent(); return status; #endif } std::pair<const MemoryBlock *, const MemoryBlock *> lowerAndhigherBlocks(const MemoryBlock &a, const MemoryBlock &b) { if (a.size != b.size) { cerr << "a.size is not equal to b.size" << endl; abort(); } int distance = static_cast<char*>(a.p) - static_cast<char*>(b.p); if (distance == 0) { cerr << "block a and b has the same address" << endl; abort(); } const MemoryBlock &low = distance > 0 ? b : a; const MemoryBlock &high = distance > 0 ? 
a : b; return std::make_pair(&low, &high); } bool isBuddies(const MemoryBlock &a, const MemoryBlock &b) { if (a.size != b.size) { return false; } auto pair = lowerAndhigherBlocks(a, b); return pair.second->buddy == pair.first->p && ((char*)pair.second->p - (char*)pair.first->p) == a.size; } MemoryBlock mergeBlocks(const MemoryBlock &a, const MemoryBlock &b) { if (a.size != b.size) { cerr << "sizes of memory blocks to merge not equal" << endl; abort(); } auto pair = lowerAndhigherBlocks(a, b); if ((char*)pair.second->p - (char*)pair.first->p != a.size || (a.p != b.buddy && a.buddy != b.p)) { cerr << "a and b are not buddies" << endl; cerr << boost::format("a:%1%\nb:%2%") % a.toString() % b.toString() << endl; abort(); } MemoryBlock block(pair.first->p, pair.first->size << 1, pair.first->buddy); return block; } void returnFreeBlock(const MemoryBlock &block, vector<map<void*, MemoryBlock>> &free_blocks, int power, const unordered_map<void*, MemoryBlock> &busy_blocks) { Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("returnFreeBlock"); MemoryBlock current_block = block; for (int i = power; i <= MAX_BLOCK_POWER; ++i) { map<void*, MemoryBlock> &v = free_blocks.at(i); void *free_p = (char*)current_block.p - (char*)current_block.buddy == current_block.size ? current_block.buddy : (void*)((char*)current_block.p + current_block.size); auto it = v.find(free_p); if (it == v.end() || (it->second.p != current_block.buddy && it->second.buddy != current_block.p)) { appendFreeBlock(current_block, free_blocks, i, busy_blocks); break; } else { MemoryBlock merged_block = mergeBlocks(it->second, current_block); current_block = merged_block; v.erase(it); } } profiler.EndEvent(); } hipError_t MemoryPool::Free(void *p) { Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("Free"); #if DEVICE_MEMORY == 0 CallCnmem(cnmemFree(p, NULL)); profiler.EndEvent(); #elif DEVICE_MEMORY == 1 hipError_t r = hipFree(p); profiler.EndEvent(); return r; #else auto it = busy_blocks_.find(p); if (it == busy_blocks_.end()) { cerr << "cannot find busy block " << p << endl; abort(); } int size = it->second.size; int n = 0; while (size > 1) { size >>= 1; ++n; } if (it->second.size != (1 << n)) { cerr << boost::format("size:%1% n:%2%") % it->second.size % n << endl; abort(); } auto block = it->second; busy_blocks_.erase(it); returnFreeBlock(block, free_blocks_, n, busy_blocks_); it = busy_blocks_.find(p); if (it != busy_blocks_.end()) { cerr << "can find erased block " << p << endl; abort(); } profiler.EndEvent(); if (busy_blocks_.find(p) != busy_blocks_.end()) { cerr << boost::format("Malloc - find freed p in busy blocks") << endl; } return hipSuccess; #endif } void Profiler::EndCudaEvent() { //hipDeviceSynchronize(); EndEvent(); } __global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward( const dtype *lty, const dtype *lx, dtype *b, dtype *const *losses, int count, int out_dim, int in_dim, volatile dtype *block_sums, int *global_block_count, bool use_b) { __shared__ volatile dtype shared_arr[TPB]; int count_i = blockIdx.y * blockDim.x + threadIdx.x; int dim_i = blockIdx.x; if (dim_i < out_dim) { if (use_b) { if (threadIdx.x == 0 && blockIdx.y == 0) { global_block_count[dim_i] = 0; } int lty_index = count_i * out_dim + dim_i; shared_arr[threadIdx.x] = count_i < count ? 
lty[lty_index] : 0.0f; __syncthreads(); for (int i = (TPB >> 1); i > 0; i>>=1) { if (threadIdx.x < i) { shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0]; if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) { dtype sum = 0.0; for (int i = 0; i < gridDim.y; ++i) { sum += block_sums[gridDim.y * blockIdx.x + i]; } DeviceAtomicAdd(b + dim_i, sum); } } } } else { if (count_i < count) { dim_i -= out_dim; int lx_index = dim_i + count_i * in_dim; DeviceAtomicAdd(losses[count_i] + dim_i, lx[lx_index]); } } } void AddLtyToParamBiasAndAddLxToInputLossesForUniBackward(const dtype *lty, const dtype *lx, dtype *b, std::vector<dtype*> &losses, int count, int out_dim, int in_dim, bool use_b) { int block_y = (count - 1 + TPB) / TPB; dim3 block_dim(out_dim + in_dim, block_y, 1); NumberPointerArray loss_arr; loss_arr.init(losses.data(), count); Tensor1D block_sums; block_sums.init(block_y * out_dim); IntArray global_block_count_arr; global_block_count_arr.init(out_dim); hipLaunchKernelGGL(( KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward), dim3(block_dim), dim3(TPB), 0, 0, lty, lx, b, (dtype *const *)loss_arr.value, count, out_dim, in_dim, block_sums.value, global_block_count_arr.value, use_b); CheckCudaError(); } __global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward( const dtype *lty, const dtype *lx1, const dtype *lx2, dtype *b, dtype *const *losses1, dtype *const *losses2, int count, int out_dim, int in_dim1, int in_dim2, bool use_b, volatile dtype *block_sums, int *global_block_count) { __shared__ volatile dtype shared_arr[TPB]; int count_i = blockIdx.y * blockDim.x + threadIdx.x; int dim_i = blockIdx.x; if (dim_i < out_dim) { if (threadIdx.x == 0 && blockIdx.y == 0) { global_block_count[dim_i] = 0; } //int lty_index = dim_i * count + count_i; int lty_index = dim_i + count_i * out_dim; shared_arr[threadIdx.x] = count_i < count ? 
lty[lty_index] : 0.0f; __syncthreads(); for (int i = (TPB >> 1); i > 0; i>>=1) { if (threadIdx.x < i) { shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0]; if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) { dtype sum = 0.0; for (int i = 0; i < gridDim.y; ++i) { sum += block_sums[gridDim.y * blockIdx.x + i]; } if (use_b) { DeviceAtomicAdd(b + dim_i, sum); } } } } else if (dim_i < out_dim + in_dim1) { if (count_i < count) { dim_i -= out_dim; int lx_index = dim_i + count_i * in_dim1; DeviceAtomicAdd(losses1[count_i] + dim_i, lx1[lx_index]); } } else { if (count_i < count) { dim_i -= (out_dim + in_dim1); int lx_index = dim_i + count_i * in_dim2; DeviceAtomicAdd(losses2[count_i] + dim_i, lx2[lx_index]); } } } void AddLtyToParamBiasAndAddLxToInputLossesForBiBackward(const dtype *lty, const dtype *lx1, const dtype *lx2, dtype *b, std::vector<dtype*> &losses1, std::vector<dtype*> &losses2, int count, int out_dim, int in_dim1, int in_dim2, bool use_b) { int block_y = (count - 1 + TPB) / TPB; dim3 block_dim(out_dim + in_dim1 + in_dim2, block_y, 1); NumberPointerArray loss1_arr; loss1_arr.init(losses1.data(), count); NumberPointerArray loss2_arr; loss2_arr.init(losses2.data(), count); Tensor1D block_sums; block_sums.init(block_y * out_dim); IntArray global_block_count_arr; global_block_count_arr.init(out_dim); hipLaunchKernelGGL(( KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward), dim3(block_dim), dim3(TPB), 0, 0, lty, lx1, lx2, b, (dtype *const *)loss1_arr.value, (dtype *const *)loss2_arr.value, count, out_dim, in_dim1, in_dim2, use_b, block_sums.value, global_block_count_arr.value); CheckCudaError(); } constexpr int MAX_BATCH_COUNT = 1000000; __global__ void KernelInitCurandStates(hiprandState_t *states) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = gridDim.x * blockDim.x; for (int i = index; i < MAX_BATCH_COUNT; i += step) { hiprand_init(0, i, 0, &states[i]); } } hiprandState_t *GetCurandStates() { static hiprandState_t *states; if (states == NULL) { MemoryPool &pool = MemoryPool::Ins(); CallCuda(pool.Malloc((void**)&states, sizeof(hiprandState_t) * MAX_BATCH_COUNT)); hipLaunchKernelGGL(( KernelInitCurandStates), dim3(BLOCK_COUNT), dim3(TPB), 0, 0, states); CheckCudaError(); } return states; } hiprandGenerator_t &GetGenerator() { static hiprandGenerator_t gen; static bool init; if (!init) { CallCurand(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)); CallCurand(hiprandSetPseudoRandomGeneratorSeed(gen, 0)); init = true; } return gen; } void CalculateDropoutMask(dtype drop_factor, int count, int dim, dtype* mask) { hiprandGenerator_t &gen = GetGenerator(); CallCurand(hiprandGenerateUniform(gen, mask, count * dim)); } __global__ void KernelConcatForward(const dtype *const *ins, int *in_dims, dtype *const *outs, int count, int in_count, int out_dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < out_dim * count; i += step) { int out_dim_i = i % out_dim; int count_i = i / out_dim; int in_dim_sum = 0; int last_in_dim_sum; int offset_j = 0; for (int j = 0; j < in_count; ++j) { last_in_dim_sum = in_dim_sum; in_dim_sum += in_dims[j]; offset_j = j; if (out_dim_i < in_dim_sum) { break; } } int in_dim_i = out_dim_i - last_in_dim_sum; dtype v = ins[count_i * in_count + offset_j][in_dim_i]; outs[count_i][out_dim_i] = v; } } void ConcatForward(const std::vector<dtype*> &in_vals, const std::vector<int> 
&in_dims, std::vector<dtype*> &vals, int count, int in_count, int out_dim) { int len = count * out_dim; int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); NumberPointerArray in_val_arr, val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_dim_arr; in_dim_arr.init((int*)in_dims.data(), in_dims.size()); hipLaunchKernelGGL(( KernelConcatForward), dim3(block_count), dim3(TPB), 0, 0, in_val_arr.value, in_dim_arr.value, (dtype *const *)val_arr.value, count, in_count, out_dim); CheckCudaError(); } __global__ void KernelConcatBackward(dtype *const *in_losses, int *in_dims, const dtype *const *out_losses, int count, int in_count, int out_dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < out_dim * count; i += step) { int out_dim_i = i % out_dim; int count_i = i / out_dim; int in_dim_sum = 0; int last_in_dim_sum; int offset_j = 0; for (int j = 0; j < in_count; ++j) { last_in_dim_sum = in_dim_sum; in_dim_sum += in_dims[j]; offset_j = j; if (out_dim_i < in_dim_sum) { break; } } int in_dim_i = out_dim_i - last_in_dim_sum; DeviceAtomicAdd(in_losses[count_i * in_count + offset_j] + in_dim_i, out_losses[count_i][out_dim_i]); } } void ConcatBackward(const std::vector<dtype*> &in_losses, const std::vector<int> &in_dims, std::vector<dtype*> &losses, int count, int in_count, int out_dim) { int len = count * out_dim; int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); NumberPointerArray in_loss_arr, loss_arr; in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); loss_arr.init((dtype**)losses.data(), losses.size()); IntArray in_dim_arr; in_dim_arr.init((int*)in_dims.data(), in_dims.size()); hipLaunchKernelGGL(( KernelConcatBackward), dim3(block_count), dim3(TPB), 0, 0, (dtype *const *)in_loss_arr.value, in_dim_arr.value, loss_arr.value, count, in_count, out_dim); CheckCudaError(); } __global__ void KernelScalarConcatForward(const dtype *const *ins, int count, const int *dims, int max_dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < max_dim * count; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { results[count_i][dim_i] = ins[count_i * max_dim + dim_i][0]; } } } void ScalarConcatForward(const vector<dtype *> &ins, int count, const vector<int> &dims, int max_dim, const vector<dtype *> &results) { NumberPointerArray result_arr; result_arr.init((dtype**)results.data(), results.size()); NumberPointerArray in_arr; in_arr.init((dtype**)ins.data(), ins.size()); IntArray dim_arr; dim_arr.init((int *)dims.data(), dims.size()); int block_count = DefaultBlockCount(count * max_dim); hipLaunchKernelGGL(( KernelScalarConcatForward), dim3(block_count), dim3(TPB), 0, 0, in_arr.value, count, dim_arr.value, max_dim, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelScalarConcatBackward(const dtype *const *losses, int count, const int *dims, int max_dim, dtype *const *input_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < max_dim * count; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { DeviceAtomicAdd(input_losses[count_i * max_dim + dim_i], losses[count_i][dim_i]); } } } void ScalarConcatBackward(const vector<dtype *> &losses, int count, const vector<int> &dims, int max_dim, const vector<dtype *> in_losses) { NumberPointerArray loss_arr, 
in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); in_loss_arr.init((dtype **)in_losses.data(), in_losses.size()); IntArray dim_arr; dim_arr.init((int *)dims.data(), dims.size()); int block_count = DefaultBlockCount(count * max_dim); hipLaunchKernelGGL(( KernelScalarConcatBackward), dim3(block_count), dim3(TPB), 0, 0, loss_arr.value, count, dim_arr.value, max_dim, (dtype *const *)in_loss_arr.value); CheckCudaError(); } __global__ void KernelMemset(dtype *p, int len, dtype value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i+= step) { p[i] = value; } } void Memset(dtype *p, int len, dtype value) { int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); hipLaunchKernelGGL(( KernelMemset), dim3(block_count), dim3(TPB), 0, 0, p, len, value); CheckCudaError(); } __global__ void KernelMemset(bool *p, int len, bool value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i+= step) { p[i] = value; } } void Memset(bool *p, int len, bool value) { int block_count = ::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); hipLaunchKernelGGL(( KernelMemset), dim3(block_count), dim3(TPB), 0, 0, p, len, value); CheckCudaError(); } void *Malloc(int size) { void *p; CallCuda(hipMalloc(&p, size)); return p; } __global__ void KernelBatchMemset(dtype *const *p, int count, int *dims, int max_dim, dtype value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < max_dim * count ; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { p[count_i][dim_i] = value; } } } void BatchMemset(const std::vector<dtype*> &vec, int count, const vector<int> &dims, dtype value) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = (count * max_dim -1 + TPB) / TPB; block_count = ::min(block_count, BLOCK_COUNT); NumberPointerArray vec_arr; vec_arr.init((dtype**)vec.data(), vec.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); hipLaunchKernelGGL(( KernelBatchMemset), dim3(block_count), dim3(TPB), 0, 0, (dtype *const *)vec_arr.value, count, dim_arr.value, max_dim, value); CheckCudaError(); } __global__ void KernelLookupForward(const int *xids, const dtype *vocabulary, int count, int dim, dtype **vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int xid = xids[count_i]; if (xid >= 0) { int voc_i = xid * dim + dim_i; vals[count_i][dim_i] = vocabulary[voc_i]; } else { vals[count_i][dim_i] = 0.0f; } } } void LookupForward(const std::vector<int> &xids, const dtype *vocabulary, int count, int dim, std::vector<dtype*> &vals) { int block_count = ::min(BLOCK_COUNT, (count * dim - 1 + TPB) / TPB); IntArray xid_arr; xid_arr.init((int*)xids.data(), xids.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); hipLaunchKernelGGL(( KernelLookupForward), dim3(block_count), dim3(TPB), 0, 0, xid_arr.value, vocabulary, count, dim, const_cast<dtype**>(val_arr.value)); CheckCudaError(); } __global__ void KernelLookupBackward(const int *xids, const int *should_backward, const dtype** losses, int count, int dim, dtype *grad, bool *indexers) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int xid = xids[count_i]; if (should_backward[count_i]) { if (dim_i == 0) { indexers[xid] = 
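// Memset and BatchMemset fill flat or ragged device buffers with a constant.
// KernelLookupForward gathers embedding rows, vals[i][d] = vocabulary[xid * dim + d],
// and writes a zero vector when xid is negative (padding). The backward kernel below
// accumulates gradients into the embedding table and, in this overload, also marks
// the touched rows in `indexers`, presumably so sparse optimizer updates can skip
// untouched rows.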
true; } DeviceAtomicAdd(grad + xid * dim + dim_i, losses[count_i][dim_i]); } } } void LookupBackward(const std::vector<int> &xids, const std::vector<int> &should_backward, const std::vector<dtype*> &losses, int count, int dim, dtype *grad, bool *indexers) { int block_count = ::min((count * dim - 1 + TPB) / TPB, BLOCK_COUNT); IntArray pl_arr; pl_arr.init((int*)xids.data(), xids.size()); IntArray xid_arr; xid_arr.init((int*)pl_arr.value, xids.size()); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray should_backward_arr; should_backward_arr.init(should_backward.data(), should_backward.size()); hipLaunchKernelGGL(( KernelLookupBackward), dim3(block_count), dim3(TPB), 0, 0, const_cast<const int *>(xid_arr.value), should_backward_arr.value, const_cast<const dtype**>(loss_arr.value), count, dim, grad, indexers); CheckCudaError(); } __global__ void KernelLookupBackward(const int *xids, int *should_backward, const dtype** losses, int count, int dim, dtype *grad) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int xid = xids[count_i]; if (should_backward[count_i]) { DeviceAtomicAdd(grad + xid * dim + dim_i, losses[count_i][dim_i]); } } } void LookupBackward(const std::vector<int> &xids, const vector<int> &should_backward, const std::vector<dtype*> &losses, int count, int dim, dtype *grad) { int block_count = ::min((count * dim - 1 + TPB) / TPB, BLOCK_COUNT); IntArray pl_arr; pl_arr.init((int*)xids.data(), xids.size()); IntArray xid_arr; xid_arr.init((int*)pl_arr.value, xids.size()); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray should_backward_arr; should_backward_arr.init(should_backward.data(), should_backward.size()); hipLaunchKernelGGL(( KernelLookupBackward), dim3(block_count), dim3(TPB), 0, 0, const_cast<const int *>(xid_arr.value), should_backward_arr.value, const_cast<const dtype**>(loss_arr.value), count, dim, grad); CheckCudaError(); } __global__ void KernelParamRowForward(const dtype *param, int row_index, int param_row_count, int count, int dim, dtype *const *vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; int param_offset = dim_i * param_row_count + row_index; vals[count_i][dim_i] = param[param_offset]; } } void ParamRowForward(const dtype *param, int row_index, int param_row_count, int count, int dim, vector<dtype*> &vals) { NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelParamRowForward), dim3(block_count), dim3(TPB), 0, 0, param, row_index, param_row_count, count, dim, (dtype *const *)val_arr.value); CheckCudaError(); } __global__ void KernelPoolForward(PoolingEnum pooling, const dtype *const *ins, int *in_counts, int max_in_count, dtype *const *outs, int count, int dim, int* hit_inputs) { __shared__ volatile extern dtype pool_shared_arr[]; volatile dtype* shared_indexers = pool_shared_arr + blockDim.x; int batch_i = blockIdx.y; int in_count = in_counts[batch_i]; int in_count_i = threadIdx.x; int dim_i = blockIdx.x; if (in_count_i < in_count) { pool_shared_arr[threadIdx.x] = ins[batch_i * max_in_count + in_count_i][dim_i]; } else { pool_shared_arr[threadIdx.x] = pooling == PoolingEnum::MAX ? 
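// KernelPoolForward runs one block per (dim_i, batch_i) pair and reduces over the
// in_count inputs held in dynamic shared memory; threads past the real input count
// are padded with -1e10 for max pooling and 1e10 for min pooling so they never win
// the comparison. shared_indexers tracks which input supplied the winning value and
// is written to hit_inputs for KernelPoolBackward, which routes each output gradient
// back to that input. The host wrapper rounds the thread count up to a power of two
// so the tree reduction is well formed.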
-1e10 : 1e10; } shared_indexers[threadIdx.x] = threadIdx.x; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0;i >>=1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; if (pooling == PoolingEnum::MAX) { if (pool_shared_arr[threadIdx.x] < pool_shared_arr[plus_i]) { pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i]; shared_indexers[threadIdx.x] = shared_indexers[plus_i]; } } else { if (pool_shared_arr[threadIdx.x] > pool_shared_arr[plus_i]) { pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i]; shared_indexers[threadIdx.x] = shared_indexers[plus_i]; } } } __syncthreads(); } if (threadIdx.x == 0) { hit_inputs[batch_i * dim + dim_i] = shared_indexers[0]; outs[batch_i][dim_i] = pool_shared_arr[0]; } } void PoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals, std::vector<dtype*> &vals, int count, const std::vector<int> &in_counts, int dim, int *hit_inputs) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); hipLaunchKernelGGL(( KernelPoolForward), dim3(block_dim), dim3(thread_count), thread_count * 2 * sizeof(dtype), 0, pooling, in_val_arr.value, in_count_arr.value, max_in_count, (dtype *const *)val_arr.value, count, dim, hit_inputs); CheckCudaError(); } __global__ void KernelPoolBackward(const dtype *const * losses, const int *hit_inputs, int max_in_count, int count, int dim, dtype *const *in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; int input_i = hit_inputs[i]; dtype loss = losses[count_i][dim_i]; DeviceAtomicAdd(in_losses[count_i * max_in_count + input_i] + dim_i, loss); } } void PoolBackward(const std::vector<dtype*> &losses, std::vector<dtype*> &in_losses, const std::vector<int> &in_counts, const int *hit_inputs, int count, int dim) { NumberPointerArray loss_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int block_count = (count * dim - 1 + TPB) / TPB; block_count = ::min(block_count, BLOCK_COUNT); hipLaunchKernelGGL(( KernelPoolBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype**)loss_arr.value, hit_inputs, max_in_count, count, dim, (dtype *const *)in_loss_arr.value); CheckCudaError(); } __global__ void KernelSumPoolForward(PoolingEnum pooling, const dtype *const *in_vals, int count, int dim, const int *in_counts, int max_in_count, dtype *const *vals) { __shared__ volatile extern dtype pool_shared_arr[]; int batch_i = blockIdx.y; int in_count = in_counts[batch_i]; int in_count_i = threadIdx.x; int dim_i = blockIdx.x; if (in_count_i < in_count) { pool_shared_arr[threadIdx.x] = in_vals[batch_i * max_in_count + in_count_i][dim_i]; } else { pool_shared_arr[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0;i >>=1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; pool_shared_arr[threadIdx.x] += pool_shared_arr[plus_i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[batch_i][dim_i] = pooling == PoolingEnum::SUM ? 
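// KernelSumPoolForward uses the same one-block-per-(dim, batch) layout but adds the
// inputs instead of comparing them; when pooling is not SUM the total is divided by
// in_counts[batch_i], i.e. average pooling. KernelSumBackward spreads each output
// gradient over the contributing inputs, scaling by 1 / in_count in the average case.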
pool_shared_arr[0] : pool_shared_arr[0] / in_counts[batch_i]; } } void SumPoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals, int count, int dim, const std::vector<int> &in_counts, std::vector<dtype*> &vals) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); hipLaunchKernelGGL(( KernelSumPoolForward), dim3(block_dim), dim3(thread_count), thread_count * sizeof(dtype), 0, pooling, in_val_arr.value, count, dim, in_count_arr.value, max_in_count, (dtype *const *)val_arr.value); CheckCudaError(); } __global__ void KernelSumBackward(PoolingEnum pooling, const dtype *const *losses, const int *in_counts, int max_in_count, int count, int dim, dtype *const *in_losses) { int global_in_count_i = blockIdx.x * max_in_count + blockIdx.y; for (int i = threadIdx.x; i < dim; i += blockDim.x) { if (blockIdx.y < in_counts[blockIdx.x]) { DeviceAtomicAdd(in_losses[global_in_count_i] + i, pooling == PoolingEnum::SUM ? losses[blockIdx.x][i] : losses[blockIdx.x][i] / in_counts[blockIdx.x]); } } } void SumPoolBackward(PoolingEnum pooling, const std::vector<dtype*> &losses, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses) { int thread_count = 8; while (thread_count < dim) { thread_count <<= 1; } thread_count = ::min(TPB, thread_count); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); dim3 block_dim(count, max_in_count, 1); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); NumberPointerArray in_loss_arr; in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); hipLaunchKernelGGL(( KernelSumBackward), dim3(block_dim), dim3(thread_count), 0, 0, pooling, loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, (dtype *const *)in_loss_arr.value); CheckCudaError(); } __global__ void KernelPMultiForward(const dtype **ins1, const dtype **ins2, int count, int dim, dtype *const *vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; vals[count_i][dim_i] = ins1[count_i][dim_i] * ins2[count_i][dim_i]; } } void PMultiForward(const std::vector<dtype*> &ins1, const std::vector<dtype*> &ins2, int count, int dim, std::vector<dtype*> &vals) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray ins1_arr, ins2_arr, vals_arr; ins1_arr.init((dtype**)ins1.data(), count); ins2_arr.init((dtype**)ins2.data(), count); vals_arr.init((dtype**)vals.data(), count); hipLaunchKernelGGL(( KernelPMultiForward), dim3(block_count), dim3(TPB), 0, 0, ins1_arr.value, ins2_arr.value, count, dim, (dtype *const *)vals_arr.value); CheckCudaError(); } __global__ void KernelDivForward(const dtype *const *numerators, const dtype *const *denominators, int count, int *dims, int max_dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { results[count_i][dim_i] = 
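// KernelPMultiForward is a plain elementwise product. KernelDivForward divides every
// component of the numerator vector by the single scalar denominators[count_i][0], so
// the backward pass has two parts: a per-component term for the numerator and a
// block-level reduction (below) for the shared denominator.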
numerators[count_i][dim_i] / denominators[count_i][0]; } } } void DivForward(const vector<const dtype*> numerators, const vector<const dtype*> denominators, int count, const vector<int> &dims, vector<dtype*> &results) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = DefaultBlockCount(count * max_dim); NumberPointerArray numerator_arr, denominator_arr, result_arr; numerator_arr.init((dtype**)numerators.data(), count); denominator_arr.init((dtype**)denominators.data(), count); result_arr.init((dtype**)results.data(), count); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); hipLaunchKernelGGL(( KernelDivForward), dim3(block_count), dim3(TPB), 0, 0, numerator_arr.value, denominator_arr.value, count, dim_arr.value, max_dim, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelDivNumeratorBackward(const dtype *const *losses, const dtype *const *denominator_vals, int count, int *dims, int max_dim, dtype *const *numerator_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { DeviceAtomicAdd(numerator_losses[count_i] + dim_i, losses[count_i][dim_i] / denominator_vals[count_i][0]); } } } __global__ void KernelDivDenominatorBackward(const dtype *const *losses, const dtype *const *numerator_vals, const dtype *const *denominator_vals, int count, int *dims, volatile dtype *block_sums, int *block_counters, dtype *const *denominator_losses) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; __shared__ volatile dtype square; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } int count_i = blockIdx.x; if (threadIdx.x == 0) { is_last_block = false; square = denominator_vals[count_i][0] * denominator_vals[count_i][0]; } __syncthreads(); int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_sum[threadIdx.x] = offset < dims[count_i] ? 
losses[count_i][offset] * numerator_vals[count_i][offset] / square : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_sums[block_sums_offset] = shared_sum[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; sum += block_sums[offset]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { DeviceAtomicAdd(denominator_losses[count_i], -shared_sum[0]); } } } void DivBackward(const vector<const dtype*> &losses, const vector<const dtype*> &denominator_vals, const vector<const dtype*> &numerator_vals, int count, const vector<int> &dims, vector<dtype*> &numerator_losses, vector<dtype*> &denominator_losses) { int max_dim = *max_element(dims.begin(), dims.end()); NumberPointerArray loss_arr, denominator_val_arr, numerator_val_arr, numerator_loss_arr, denominator_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); denominator_val_arr.init((dtype**)denominator_vals.data(), denominator_vals.size()); numerator_val_arr.init((dtype**)numerator_vals.data(), numerator_vals.size()); numerator_loss_arr.init((dtype**)numerator_losses.data(), numerator_losses.size()); denominator_loss_arr.init((dtype**)denominator_losses.data(), denominator_losses.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); int block_count = DefaultBlockCount(count * max_dim); hipLaunchKernelGGL(( KernelDivNumeratorBackward), dim3(block_count), dim3(TPB), 0, 0, loss_arr.value, denominator_val_arr.value, count, dim_arr.value, max_dim, (dtype *const *)numerator_loss_arr.value); CheckCudaError(); int thread_count = min(NextTwoIntegerPowerNumber(max_dim), TPB); int block_y_count = (max_dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_sums; block_sums.init(block_y_count * count); IntArray block_counters; block_counters.init(count); hipLaunchKernelGGL(( KernelDivDenominatorBackward), dim3(block_dim) , dim3(thread_count), 0, 0, loss_arr.value, numerator_val_arr.value, denominator_val_arr.value, count, dim_arr.value, block_sums.value, block_counters.value, (dtype *const *)denominator_loss_arr.value); CheckCudaError(); } __global__ void KernelFullDivForward(const dtype *const *numerators, const dtype *const *denominators, int count, int dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; results[count_i][dim_i] = numerators[count_i][dim_i] / denominators[count_i][dim_i]; } } void FullDivForward(const vector<const dtype*> numerators, const vector<const dtype*> denominators, int count, int dim, vector<dtype*> &results) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray numerator_arr, denominator_arr, result_arr; numerator_arr.init((dtype**)numerators.data(), count); denominator_arr.init((dtype**)denominators.data(), count); result_arr.init((dtype**)results.data(), count); hipLaunchKernelGGL(( KernelFullDivForward), dim3(block_count), 
dim3(TPB), 0, 0, numerator_arr.value, denominator_arr.value, count, dim, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelFullDivBackward(const dtype *const *losses, const dtype *const *numerator_vals, const dtype *const *denominator_vals, int count, int dim, dtype *const *numerator_losses, dtype *const *denominator_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; DeviceAtomicAdd(numerator_losses[count_i] + dim_i, losses[count_i][dim_i] / denominator_vals[count_i][dim_i]); DeviceAtomicAdd(denominator_losses[count_i] + dim_i, -losses[count_i][dim_i] * numerator_vals[count_i][dim_i] / (denominator_vals[count_i][dim_i] * denominator_vals[count_i][dim_i])); } } void FullDivBackward(const vector<const dtype*> &losses, const vector<const dtype*> &denominator_vals, const vector<const dtype*> &numerator_vals, int count, int dim, vector<dtype*> &numerator_losses, vector<dtype*> &denominator_losses) { NumberPointerArray loss_arr, denominator_val_arr, numerator_val_arr, numerator_loss_arr, denominator_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); denominator_val_arr.init((dtype**)denominator_vals.data(), denominator_vals.size()); numerator_val_arr.init((dtype**)numerator_vals.data(), numerator_vals.size()); numerator_loss_arr.init((dtype**)numerator_losses.data(), numerator_losses.size()); denominator_loss_arr.init((dtype**)denominator_losses.data(), denominator_losses.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelFullDivBackward), dim3(block_count), dim3(TPB), 0, 0, loss_arr.value, numerator_val_arr.value, denominator_val_arr.value, count, dim, (dtype *const *)numerator_loss_arr.value, (dtype *const *)denominator_loss_arr.value); CheckCudaError(); } __global__ void KernelSplitForward(const dtype *const *inputs, const int *offsets, int count, int dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int offset = offsets[count_i]; results[count_i][dim_i] = inputs[count_i][offset + dim_i]; } } void SplitForward(const vector<const dtype*> &inputs, const vector<int> &offsets, int count, int dim, vector<dtype*> &results) { NumberPointerArray input_arr, result_arr; input_arr.init((dtype**)inputs.data(), inputs.size()); result_arr.init((dtype**)results.data(), results.size()); IntArray offset_arr; offset_arr.init((int*)offsets.data(), offsets.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelSplitForward), dim3(block_count), dim3(TPB), 0, 0, input_arr.value, offset_arr.value, count, dim, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelSplitBackward(const dtype *const *losses, const int *offsets, int count, int dim, dtype *const *input_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int offset = offsets[count_i]; DeviceAtomicAdd(input_losses[count_i] + offset + dim_i, losses[count_i][dim_i]); } } void SplitBackward(const vector<const dtype*> &losses, const vector<int> offsets, int count, int dim, const vector<dtype*> &input_losses) { NumberPointerArray loss_arr, input_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); input_loss_arr.init((dtype**)input_losses.data(), 
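// FullDivForward/Backward are the elementwise counterparts of DivForward: both
// operands have length dim and the denominator gradient is
// -loss * numerator / denominator^2 per component. KernelSplitForward copies a
// dim-sized window starting at offsets[count_i] out of each input vector, and
// KernelSplitBackward adds the incoming gradient back into that window.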
input_losses.size()); IntArray offset_arr; offset_arr.init((int*)offsets.data(), offsets.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelSplitBackward), dim3(block_count), dim3(TPB), 0, 0, loss_arr.value, offset_arr.value, count, dim, (dtype *const *)input_loss_arr.value); CheckCudaError(); } __global__ void KernelSubForward(const dtype *const *minuend, const dtype *const *subtrahend, int count, int *dims, int max_dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { results[count_i][dim_i] = minuend[count_i][dim_i] - subtrahend[count_i][dim_i]; } } } void SubForward(const std::vector<const dtype*> &minuend, const std::vector<const dtype*> &subtrahend, int count, const vector<int> &dims, std::vector<dtype*> &results) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = DefaultBlockCount(count * max_dim); NumberPointerArray minuend_arr, subtrahend_arr, result_arr; minuend_arr.init((dtype**)minuend.data(), count); subtrahend_arr.init((dtype**)subtrahend.data(), count); result_arr.init((dtype**)results.data(), count); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); hipLaunchKernelGGL(( KernelSubForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype* const*)minuend_arr.value, (const dtype *const *)subtrahend_arr.value, count, dim_arr.value, max_dim, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelSubBackward(const dtype *const *losses, int count, int *dims, int max_dim, dtype *const *minuend_losses, dtype *const *subtrahend_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { DeviceAtomicAdd(minuend_losses[count_i] + dim_i, losses[count_i][dim_i]); DeviceAtomicAdd(subtrahend_losses[count_i] + dim_i, -losses[count_i][dim_i]); } } } void SubBackward(const std::vector<const dtype*> &losses, int count, const vector<int> &dims, std::vector<dtype*> &minuend_losses, std::vector<dtype*> &subtrahend_losses) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = DefaultBlockCount(count * max_dim); NumberPointerArray loss_arr, minuend_loss_arr, subtrahend_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); minuend_loss_arr.init((dtype**)minuend_losses.data(), minuend_losses.size()); subtrahend_loss_arr.init((dtype**)subtrahend_losses.data(), subtrahend_losses.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); hipLaunchKernelGGL(( KernelSubBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype *const *)loss_arr.value, count, dim_arr.value, max_dim, (dtype *const *)minuend_loss_arr.value, (dtype *const *)subtrahend_loss_arr.value); CheckCudaError(); } __global__ void KernelPMultiBackward(const dtype **losses, const dtype *const *in_vals1, const dtype *const *in_vals2, int count, int dim, dtype *const *in_losses1, dtype *const *in_losses2) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; DeviceAtomicAdd(in_losses1[count_i] + dim_i, losses[count_i][dim_i] * in_vals2[count_i][dim_i]); DeviceAtomicAdd(in_losses2[count_i] + dim_i, losses[count_i][dim_i] * in_vals1[count_i][dim_i]); } } void PMultiBackward(const 
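// SubForward computes minuend - subtrahend on the first dims[count_i] components;
// its backward adds the loss to the minuend gradient and subtracts it from the
// subtrahend gradient. KernelPMultiBackward applies the product rule,
// d(in1) += loss * in2 and d(in2) += loss * in1, using DeviceAtomicAdd since inputs
// may be shared between nodes.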
std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals1, const std::vector<dtype*> &in_vals2, int count, int dim, std::vector<dtype*> &in_losses1, std::vector<dtype*> &in_losses2) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray losses_arr, in_vals1_arr, in_vals2_arr, in_losses1_arr, in_losses2_arr; losses_arr.init((dtype**)losses.data(), losses.size()); in_vals1_arr.init((dtype**)in_vals1.data(), in_vals1.size()); in_vals2_arr.init((dtype**)in_vals2.data(), in_vals2.size()); in_losses1_arr.init((dtype**)in_losses1.data(), in_losses1.size()); in_losses2_arr.init((dtype**)in_losses2.data(), in_losses2.size()); hipLaunchKernelGGL(( KernelPMultiBackward), dim3(block_count), dim3(TPB), 0, 0, losses_arr.value, in_vals1_arr.value, in_vals2_arr.value, count, dim, (dtype *const *)in_losses1_arr.value, (dtype *const *)in_losses2_arr.value); CheckCudaError(); } __global__ void KernelPAddForward(const dtype *const *const *ins, int count, int dim, int in_count, dtype *const *vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i+= step) { int count_i = i / dim; int dim_i = i % dim; dtype sum = ins[0][count_i][dim_i]; for (int j = 1; j < in_count; ++j) { sum += ins[j][count_i][dim_i]; } vals[count_i][dim_i] = sum; } } void PAddForward(const std::vector<std::vector<dtype*>> &ins, int count, int dim, int in_count, std::vector<dtype*> &vals) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; gpu_addr.reserve(ins.size()); for (const std::vector<dtype*> &x : ins) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); gpu_addr.push_back(arr); } std::vector<dtype**> ins_gpu; ins_gpu.reserve(ins.size()); for (auto &ptr : gpu_addr) { ins_gpu.push_back((dtype**)ptr->value); } NumberPointerPointerArray in_arr; in_arr.init(ins_gpu.data(), ins_gpu.size()); NumberPointerArray out_arr; out_arr.init(vals.data(), vals.size()); int block_count = DefaultBlockCount(count * dim); hipLaunchKernelGGL(( KernelPAddForward), dim3(block_count), dim3(TPB), 0, 0, in_arr.value, count, dim, in_count, (dtype *const *)out_arr.value); CheckCudaError(); } __global__ void KernelPAddBackward(const dtype **losses, int count, int dim, int in_count, dtype *const *const *in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int dim_mul_count = dim * count; for (int i = index; i < dim_mul_count * in_count; i += step) { int in_count_i = i / dim_mul_count; int dim_mul_count_i = i % dim_mul_count; int count_i = dim_mul_count_i / dim; int dim_i = dim_mul_count_i % dim; DeviceAtomicAdd(in_losses[in_count_i][count_i] + dim_i, losses[count_i][dim_i]); } } void PAddBackward(const std::vector<dtype*> &losses, int count, int dim, int in_count, std::vector<std::vector<dtype*>> &in_losses) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; gpu_addr.reserve(in_losses.size()); for (const std::vector<dtype*> &x : in_losses) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); gpu_addr.push_back(arr); } std::vector<dtype**> in_losses_gpu; in_losses_gpu.reserve(in_losses.size()); for (auto &ptr : gpu_addr) { in_losses_gpu.push_back((dtype **)ptr->value); } NumberPointerPointerArray in_loss_arr; in_loss_arr.init(in_losses_gpu.data(), in_losses_gpu.size()); NumberPointerArray out_loss_arr; out_loss_arr.init((dtype**)losses.data(), losses.size()); int block_count = DefaultBlockCount(in_count * 
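// PAddForward sums in_count input vectors per instance; the host wrapper first copies
// each per-input pointer table to the device and gathers them into a
// NumberPointerPointerArray so the kernel can use nested indexing. PAddBackward
// broadcasts every output gradient to all in_count inputs through the same nested
// table.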
count * dim);
    hipLaunchKernelGGL(( KernelPAddBackward), dim3(block_count), dim3(TPB), 0, 0, out_loss_arr.value,
            count, dim, in_count, (dtype *const *const *)in_loss_arr.value);
    CheckCudaError();
}

__global__ void KernelSoftMaxLoss(const dtype **vals, dtype **losses,
        int *correct_count, int *answers, int batchsize, int count, int dim) {
    volatile __shared__ int opt_label;
    volatile __shared__ dtype shared_val[TPB];
    volatile __shared__ int64_t max_indexes[TPB];
    volatile __shared__ dtype scores_sum[TPB];
    volatile __shared__ dtype scores[TPB];
    int dim_i = threadIdx.x;
    int count_i = blockIdx.x;
    if (count_i == 0 && dim_i == 0) {
        *correct_count = 0;
    }
    shared_val[dim_i] = dim_i < dim ? vals[count_i][dim_i] : -1e10;
    max_indexes[dim_i] = dim_i;
    __syncthreads();
    // Guarded argmax tree reduction: only the lower half of the threads update at
    // each step, so no element is read and written concurrently and no thread
    // indexes past the shared arrays.
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i && shared_val[threadIdx.x + i] > shared_val[threadIdx.x]) {
            shared_val[threadIdx.x] = shared_val[threadIdx.x + i];
            max_indexes[threadIdx.x] = max_indexes[threadIdx.x + i];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        opt_label = max_indexes[0];
        if (answers[count_i] == opt_label) {
            atomicAdd(correct_count, 1);
        }
    }
    __syncthreads();
    dtype max_score = vals[count_i][opt_label];
    dtype score = dim_i < dim ? cuda_exp(vals[count_i][dim_i] - max_score) : 0.0f;
    scores[dim_i] = score;
    scores_sum[dim_i] = score;
    __syncthreads();
    // Sum reduction over the exponentiated scores, guarded like the argmax above.
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            scores_sum[threadIdx.x] = scores_sum[threadIdx.x] + scores_sum[threadIdx.x + i];
        }
        __syncthreads();
    }
    if (dim_i < dim) {
        losses[count_i][dim_i] = (scores[dim_i] / scores_sum[0] -
                (dim_i == answers[count_i] ? 1 : 0)) / batchsize;
    }
}

void SoftMaxLoss(const std::vector<dtype*> &vals, std::vector<dtype*> &losses,
        int *correct_count, const std::vector<int> &answers, int batchsize,
        int count, int dim) {
    if (dim > TPB) {
        abort();
    }
    int thread_count = NextTwoIntegerPowerNumber(dim);
    NumberPointerArray val_arr;
    val_arr.init((dtype**)vals.data(), vals.size());
    NumberPointerArray loss_arr;
    loss_arr.init((dtype**)losses.data(), losses.size());
    IntArray answer_arr;
    answer_arr.init((int*)answers.data(), answers.size());
    hipLaunchKernelGGL(( KernelSoftMaxLoss), dim3(count), dim3(thread_count), 0, 0,
            const_cast<const dtype **>(val_arr.value),
            const_cast<dtype **>(loss_arr.value), correct_count, answer_arr.value,
            batchsize, count, dim);
    CheckCudaError();
}

__global__ void KernelCrossEntropyLoss(const dtype *const *vals, const int *answers,
        int count, dtype factor, dtype *const *losses) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < count; i += step) {
        int answer = answers[i];
        DeviceAtomicAdd(losses[i] + answer, - 1 / vals[i][answer] * factor);
    }
}

__global__ void KernelCrossEntropgyLossValue(const dtype *const *vals,
        const int *answers, int count, volatile dtype *global_sum,
        int *block_counter, dtype *result) {
    __shared__ volatile dtype shared_sum[TPB];
    __shared__ volatile bool is_last_block;
    int index = DeviceDefaultIndex();
    if (index == 0) {
        *block_counter = 0;
    }
    if (threadIdx.x == 0) {
        is_last_block = false;
    }
    shared_sum[threadIdx.x] = 0.0f;
    for (int i = index; i < count; i += blockDim.x * gridDim.x) {
        int answer_offset = answers[i];
        shared_sum[threadIdx.x] -= cuda_log(vals[i][answer_offset]);
    }
    __syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        global_sum[blockIdx.x] = shared_sum[0];
        if (atomicAdd(block_counter, 1) == gridDim.x - 1) {
            is_last_block =
true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype CrossEntropyLoss(const vector<dtype *> &vals, const vector<int> &answers, int count, dtype factor, vector<dtype *> &losses) { NumberPointerArray val_arr, loss_arr; val_arr.init((dtype**)vals.data(), vals.size()); loss_arr.init((dtype**)losses.data(), losses.size()); IntArray answer_arr; answer_arr.init((int*)answers.data(), answers.size()); hipLaunchKernelGGL(( KernelCrossEntropyLoss), dim3(DefaultBlockCount(count)), dim3(TPB), 0, 0, val_arr.value, answer_arr.value, count, factor, (dtype *const *)loss_arr.value); CheckCudaError(); int block_count = DefaultBlockCount(count); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); hipLaunchKernelGGL(( KernelCrossEntropgyLossValue), dim3(block_count), dim3(TPB), 0, 0, val_arr.value, answer_arr.value, count, global_sum.value, block_counter.value, result.value); CheckCudaError(); result.copyFromDeviceToHost(); return result.v * factor; } __global__ void KernelMultiCrossEntropyLoss(const dtype *const *vals, const int *const *answers, int count, int dim, dtype factor, dtype *const *losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype val = vals[count_i][dim_i]; dtype grad = (answers[count_i][dim_i] ? (-1 / val) : (1 / (1 - val))) * factor; DeviceAtomicAdd(losses[count_i] + dim_i, grad); } } __global__ void KernelMultiCrossEntropyLossVector(const dtype *const *in_vals, const int *const *answers, int count, int dim, dtype *const *result) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype in_val = in_vals[count_i][dim_i]; dtype v = answers[count_i][dim_i] ? 
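// CrossEntropyLoss launches two kernels: one accumulates the gradient
// -factor / p(answer) into the answer component, the other reduces -log p(answer)
// over the batch with the usual block-sum / last-block pattern and returns the scalar
// loss. KernelMultiCrossEntropyLoss treats every component as an independent binary
// label, with gradient -1/v for positive labels and 1/(1-v) for negative ones; the
// vector kernel evaluates the matching -log(v) or -log(1-v) terms.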
-cuda_log(in_val) : -cuda_log(1 - in_val); result[count_i][dim_i] = v; } } template<typename T> vector<T *> GPUArrayVectors(vector<shared_ptr<GPUArray<T>>> &ptrs, int count, int dim) { vector<T *> result; for (int i = 0; i < count; ++i) { shared_ptr<GPUArray<T>> e(new GPUArray<T>); e->init(dim); ptrs.push_back(e); result.push_back((T *)e->value); } return result; } dtype MultiCrossEntropyLoss(const vector<dtype*> &vals, const vector<vector<int>> &answers, int count, int dim, dtype factor, const vector<dtype*> &losses) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray val_arr, loss_arr; val_arr.init((dtype**)vals.data(), count); loss_arr.init((dtype**)losses.data(), count); vector<shared_ptr<IntArray>> answer_gpus; vector<int *> answer_gpu_pointers; for (auto &answer : answers) { shared_ptr<IntArray> answer_gpu(new IntArray); answer_gpu->init(answer.data(), answer.size()); answer_gpus.push_back(answer_gpu); answer_gpu_pointers.push_back(answer_gpu->value); } IntPointerArray answer_arr; answer_arr.init((int**)answer_gpu_pointers.data(), count); hipLaunchKernelGGL(( KernelMultiCrossEntropyLoss), dim3(block_count), dim3(TPB), 0, 0, val_arr.value, answer_arr.value, count, dim, factor, (dtype *const *)loss_arr.value); CheckCudaError(); vector<shared_ptr<NumberArray>> nums; vector<dtype *> logged_vec = GPUArrayVectors(nums, count, dim); NumberPointerArray logged_arr; logged_arr.init(logged_vec.data(), count); hipLaunchKernelGGL(( KernelMultiCrossEntropyLossVector), dim3(block_count), dim3(TPB), 0, 0, val_arr.value, answer_arr.value, count, dim, (dtype *const *)logged_arr.value); CheckCudaError(); vector<shared_ptr<NumberArray>> ce_loss_arrs; vector<dtype *> ce_losses = GPUArrayVectors(ce_loss_arrs, count, 1); for (auto &ptr : ce_loss_arrs) { vector<dtype> vec = ptr->toCpu(); } vector<const dtype *> const_logged_arr; auto return_const = [](dtype *v) -> const dtype* { return const_cast<const dtype*>(v); }; transform(logged_vec.begin(), logged_vec.end(), back_inserter(const_logged_arr), return_const); vector<int> dims; for (int i = 0; i < count; ++i) { dims.push_back(dim); } VectorSumForward(const_logged_arr, count, dims, ce_losses); dtype ce_loss_sum = 0.0f; for (auto &ptr : ce_loss_arrs) { vector<dtype> vec = ptr->toCpu(); if (vec.size() != 1) { cerr << "vec size is not 1" << endl; abort(); } dtype l = vec.front() * factor; ce_loss_sum += l; } return ce_loss_sum; } __global__ void KernelKLCrossEntropyLoss(const dtype *const *vals, const dtype *const *answers, int count, int dim, dtype factor, dtype *const *losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype val = vals[count_i][dim_i]; dtype grad = -answers[count_i][dim_i] / val * factor; DeviceAtomicAdd(losses[count_i] + dim_i, grad); } } __global__ void KernelKLCrossEntropyLossVector(const dtype *const *in_vals, const dtype *const *answers, int count, int dim, dtype *const *result) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype in_val = in_vals[count_i][dim_i]; dtype v = -answers[count_i][dim_i] * cuda_log(in_val); result[count_i][dim_i] = v; } } dtype KLCrossEntropyLoss(const vector<dtype*> &vals, const vector<shared_ptr<vector<dtype>>> &answers, int count, int dim, dtype factor, const vector<dtype*> &losses) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray 
val_arr, loss_arr; val_arr.init((dtype**)vals.data(), count); loss_arr.init((dtype**)losses.data(), count); vector<shared_ptr<NumberArray>> answer_gpus; vector<dtype *> answer_gpu_pointers; for (auto &answer : answers) { shared_ptr<NumberArray> answer_gpu(new NumberArray); answer_gpu->init(answer->data(), answer->size()); answer_gpus.push_back(answer_gpu); answer_gpu_pointers.push_back(answer_gpu->value); } NumberPointerArray answer_arr; answer_arr.init((dtype**)answer_gpu_pointers.data(), count); hipLaunchKernelGGL(( KernelKLCrossEntropyLoss), dim3(block_count), dim3(TPB), 0, 0, val_arr.value, answer_arr.value, count, dim, factor, (dtype *const *)loss_arr.value); CheckCudaError(); vector<shared_ptr<NumberArray>> nums; vector<dtype *> logged_vec = GPUArrayVectors(nums, count, dim); NumberPointerArray logged_arr; logged_arr.init(logged_vec.data(), count); hipLaunchKernelGGL(( KernelKLCrossEntropyLossVector), dim3(block_count), dim3(TPB), 0, 0, val_arr.value, answer_arr.value, count, dim, (dtype *const *)logged_arr.value); CheckCudaError(); vector<shared_ptr<NumberArray>> ce_loss_arrs; vector<dtype *> ce_losses = GPUArrayVectors(ce_loss_arrs, count, 1); for (auto &ptr : ce_loss_arrs) { vector<dtype> vec = ptr->toCpu(); } vector<const dtype *> const_logged_arr; auto return_const = [](dtype *v) -> const dtype* { return const_cast<const dtype*>(v); }; transform(logged_vec.begin(), logged_vec.end(), back_inserter(const_logged_arr), return_const); vector<int> dims; for (int i = 0; i < count; ++i) { dims.push_back(dim); } VectorSumForward(const_logged_arr, count, dims, ce_losses); dtype ce_loss_sum = 0.0f; for (auto &ptr : ce_loss_arrs) { vector<dtype> vec = ptr->toCpu(); if (vec.size() != 1) { cerr << "vec size is not 1" << endl; abort(); } dtype l = vec.front() * factor; ce_loss_sum += l; } return ce_loss_sum; } __global__ void KernelMax(const dtype *const *v, int count, int dim, volatile dtype *block_maxes, volatile int *block_max_is, int *block_counters, int *max_indexes, dtype *max_vals) { __shared__ volatile dtype shared_max[TPB]; __shared__ volatile int shared_max_i[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_max[threadIdx.x] = offset < dim ? 
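// KLCrossEntropyLoss follows the same layout as MultiCrossEntropyLoss but with soft
// targets: gradient -answer / v and loss term -answer * log(v), summed per instance
// through VectorSumForward. KernelMax below is a two-level argmax: each
// (count_i, blockIdx.y) block reduces its chunk into block_maxes / block_max_is, and
// the last block of a row combines the chunk maxima into max_vals and max_indexes.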
v[count_i][offset] : -1e10; shared_max_i[threadIdx.x] = offset; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x] < shared_max[threadIdx.x + i]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } int block_maxes_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_maxes[block_maxes_offset] = shared_max[0]; block_max_is[block_maxes_offset] = shared_max_i[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype max = -1e10; int max_i = 100000; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; if (block_maxes[offset] > max) { max = block_maxes[offset]; max_i = block_max_is[offset]; } } shared_max[threadIdx.x] = max; shared_max_i[threadIdx.x] = max_i; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x + i] > shared_max[threadIdx.x]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { max_vals[count_i] = shared_max[0]; max_indexes[count_i] = shared_max_i[0]; } } } __global__ void KernelSingleMax(const dtype *const *v, int count, int dim, int *max_indexes, dtype *max_vals) { for (int count_i = 0; count_i < count; ++count_i) { dtype max_val = -1e10; int max_i; for (int dim_i = 0; dim_i < dim; ++ dim_i) { if (v[count_i][dim_i] > max_val) { max_val = v[count_i][dim_i]; max_i = dim_i; } } max_indexes[count_i] = max_i; max_vals[count_i] = max_val; } } void Max(const dtype *const *v, int count, int dim, int *max_indexes, dtype *max_vals) { int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB); int block_y_count = (dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_maxes; block_maxes.init(block_y_count * count); IntArray block_max_is, block_counters; block_max_is.init(block_y_count * count); block_counters.init(count); hipLaunchKernelGGL(( KernelMax), dim3(block_dim), dim3(thread_count), 0, 0, v, count, dim, block_maxes.value, block_max_is.value, block_counters.value, max_indexes, max_vals); CheckCudaError(); #if TEST_CUDA NumberArray max_val_arr; IntArray max_indexer_arr; max_val_arr.init(count); max_indexer_arr.init(count); hipLaunchKernelGGL(( KernelSingleMax), dim3(1), dim3(1), 0, 0, v, count, dim, max_indexer_arr.value, max_val_arr.value); CheckCudaError(); vector<int> max_indexer_target(count), max_indexer_gold(count); MyCudaMemcpy(max_indexer_target.data(), max_indexes, count * sizeof(int), hipMemcpyDeviceToHost); MyCudaMemcpy(max_indexer_gold.data(), max_indexer_arr.value, count * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < count; ++i) { if (max_indexer_target.at(i) != max_indexer_gold.at(i)) { cerr << format("max_indexer_target:%1% max_indexer_gold:%2%") % max_indexer_target.at(i) % max_indexer_gold.at(i) << endl; PrintNums(v, i, dim); abort(); } } #endif CheckCudaError(); } vector<int> Predict(const vector<dtype*> &vals, int count, int dim) { NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray max_index_arr; max_index_arr.init(vals.size()); NumberArray max_val_arr; max_val_arr.init(vals.size()); Max(val_arr.value, count, dim, max_index_arr.value, max_val_arr.value); return max_index_arr.toCpu(); } __global__ void 
KernelSum(const dtype *const *v, int count, int dim, volatile dtype *block_sums, int *block_counters, dtype *sum_vals) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_sum[threadIdx.x] = offset < dim ? v[count_i][offset] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_sums[block_sums_offset] = shared_sum[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; sum += block_sums[offset]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { sum_vals[count_i] = shared_sum[0]; } } } void Sum(const dtype *const *v, int count, int dim, dtype *sum_vals) { int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB); int block_y_count = (dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_sums; block_sums.init(block_y_count * count); IntArray block_counters; block_counters.init(count); hipLaunchKernelGGL(( KernelSum), dim3(block_dim), dim3(thread_count), 0, 0, v, count, dim, block_sums.value, block_counters.value, sum_vals); CheckCudaError(); } __global__ void KernelSoftMaxLossByExp(const dtype *const *exps, int count, int dim, const dtype *const *vals, const dtype *sums, const dtype *max_vals, const int *answers, dtype reverse_batchsize, dtype *const *grads, dtype *losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype loss = exps[count_i][dim_i] / sums[count_i]; if (dim_i == answers[count_i]) { loss -= 1.0f; } grads[count_i][dim_i] = loss * reverse_batchsize; losses[count_i] = (cuda_log(sums[count_i]) - vals[count_i][answers[count_i]] + max_vals[count_i]) * reverse_batchsize; } } void SoftMaxLossByExp(const dtype *const *exps, int count, int dim, const dtype *const *vals, const dtype *sums, const dtype *max_vals, const int *answers, dtype reverse_batchsize, dtype *const *grads, dtype *losses) { int block_count = DefaultBlockCount(dim * count); hipLaunchKernelGGL(( KernelSoftMaxLossByExp), dim3(block_count), dim3(TPB), 0, 0, exps, count, dim, vals, sums, max_vals, answers, reverse_batchsize, (dtype *const *)grads, losses); CheckCudaError(); } __global__ void KernelMaxScalarForward(const dtype *const *v, int count, int* dims, int max_dim, volatile dtype *block_maxes, volatile int *block_max_is, int *block_counters, int *max_indexes, dtype *const *max_vals) { __shared__ volatile dtype shared_max[TPB]; __shared__ volatile int shared_max_i[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_max[threadIdx.x] = offset < 
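// Sum reduces each row to a scalar with the block-sum pattern. SoftMaxLossByExp
// assumes exps, sums and max_vals were precomputed (exps[i][d] = exp(v[i][d] - max)),
// so the gradient is the softmax minus a one-hot answer vector scaled by
// reverse_batchsize, and the loss is (log(sum) - v[answer] + max) * reverse_batchsize.
// KernelMaxScalarForward is the ragged, per-instance-dims variant of KernelMax; its
// printf/assert blocks are sanity checks on the intermediate argmax indexes.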
dims[count_i] ? v[count_i][offset] : -1e10; shared_max_i[threadIdx.x] = offset; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x] < shared_max[threadIdx.x + i]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { int block_maxes_offset = blockIdx.x * gridDim.y + blockIdx.y; int max_ii = shared_max_i[0]; if (max_ii < 0 || max_ii >= max_dim) { printf("threadIdx.x == 0 after first reduce max_ii:%d v:%f\n", max_ii, shared_max[0]); for (int i = 0; i < TPB; ++i) { printf("shared_max[%d]:%f shared_max_i[%d]:%d\n", i, shared_max[i], i, shared_max_i[i]); } assert(false); } block_maxes[block_maxes_offset] = shared_max[0]; block_max_is[block_maxes_offset] = shared_max_i[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype max = -1e10; int max_i = 100000; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; int max_ii = block_max_is[offset]; if (max_ii < 0 || max_ii >= max_dim) { printf("offset:%d is_last_block block_maxes[offset]:%f block_max_is[offset]:%d\n", offset, block_maxes[offset], block_max_is[offset]); assert(false); } if (block_maxes[offset] > max) { max = block_maxes[offset]; max_i = block_max_is[offset]; } } shared_max[threadIdx.x] = max; shared_max_i[threadIdx.x] = max_i; // printf("max:%f max_i:%d\n", max, max_i); __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x + i] > shared_max[threadIdx.x]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { max_vals[count_i][0] = shared_max[0]; max_indexes[count_i] = shared_max_i[0]; int max_ii = max_indexes[count_i]; if (max_ii < 0 || max_ii >= max_dim) { printf("threadIdx.x == 0 max_i:%d count_i:%d max_val:%f\n", max_indexes[count_i], count_i, max_vals[count_i][0]); assert(false); } } } } void MaxScalarForward(const vector<const dtype*> &inputs, int count, const vector<int> &dims, vector<dtype*> &results, vector<int> &max_indexes) { int max_dim = *max_element(dims.begin(), dims.end()); int thread_count = min(NextTwoIntegerPowerNumber(max_dim), TPB); int block_y_count = (max_dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_maxes; block_maxes.init(block_y_count * count); IntArray block_max_is, block_counters; block_max_is.init(block_y_count * count); block_counters.init(count); NumberPointerArray input_arr; input_arr.init((dtype**)inputs.data(), inputs.size()); NumberPointerArray result_arr; result_arr.init((dtype**)results.data(), results.size()); IntArray max_index_arr; max_index_arr.init(max_indexes.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); hipLaunchKernelGGL(( KernelMaxScalarForward), dim3(block_dim), dim3(thread_count), 0, 0, (const dtype *const *)input_arr.value, count, dim_arr.value, max_dim, block_maxes.value, block_max_is.value, block_counters.value, max_index_arr.value, (dtype *const *)result_arr.value); CheckCudaError(); MyCudaMemcpy(max_indexes.data(), max_index_arr.value, count * sizeof(int), hipMemcpyDeviceToHost); } __global__ void KernelMaxScalarBackward(const dtype *const *losses, const int *indexes, int count, dtype *const *input_losses) { int index = DeviceDefaultIndex(); int step = 
DeviceDefaultStep(); for (int i = index; i < count; i += step) { DeviceAtomicAdd(input_losses[i] + indexes[i], losses[i][0]); } } void MaxScalarBackward(const vector<const dtype *> &losses, const vector<int> &indexes, int count, const vector<dtype*> &input_losses) { int block_count = DefaultBlockCount(count); NumberPointerArray loss_arr, input_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); input_loss_arr.init((dtype**)input_losses.data(), input_losses.size()); IntArray index_arr; index_arr.init((int*)indexes.data(), indexes.size()); hipLaunchKernelGGL(( KernelMaxScalarBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype *const *)loss_arr.value, index_arr.value, count, (dtype *const *)input_loss_arr.value); CheckCudaError(); } __global__ void KernelVectorSumForward(const dtype *const *v, int count, int *dims, volatile dtype *block_sums, int *block_counters, dtype *const *results) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_sum[threadIdx.x] = offset < dims[count_i] ? v[count_i][offset] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_sums[block_sums_offset] = shared_sum[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; sum += block_sums[offset]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { results[count_i][0] = shared_sum[0]; } } } void VectorSumForward(const vector<const dtype *> &inputs, int count, const vector<int> &dims, vector<dtype*> &results) { int max_dim = *max_element(dims.begin(), dims.end()); int thread_count = min(NextTwoIntegerPowerNumber(max_dim), TPB); int block_y_count = (max_dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_sums; block_sums.init(block_y_count * count); IntArray block_counters; block_counters.init(count); NumberPointerArray input_arr; input_arr.init((dtype**)inputs.data(), inputs.size()); NumberPointerArray result_arr; result_arr.init((dtype**)results.data(), results.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); hipLaunchKernelGGL(( KernelVectorSumForward), dim3(block_dim), dim3(thread_count), 0, 0, (const dtype *const *)input_arr.value, count, dim_arr.value, block_sums.value, block_counters.value, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelVectorSumBackward(const dtype *const *losses, int count, int *dims, int max_dim, dtype *const *input_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { DeviceAtomicAdd(input_losses[count_i] + dim_i, losses[count_i][0]); } } } void VectorSumBackward(const vector<const dtype*> &losses, int count, const 
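// MaxScalarBackward routes the scalar gradient to the argmax component recorded in
// the forward pass. KernelVectorSumForward reduces each ragged input vector to a
// single scalar, again with block partial sums plus a last-block pass, and
// VectorSumBackward broadcasts the scalar gradient back to every valid component of
// the input.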
vector<int> &dims, vector<dtype*> &input_losses) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = DefaultBlockCount(count * max_dim); NumberPointerArray loss_arr, input_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); input_loss_arr.init((dtype**)input_losses.data(), input_losses.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); hipLaunchKernelGGL(( KernelVectorSumBackward), dim3(block_count), dim3(TPB), 0, 0, (const dtype *const *)loss_arr.value, count, dim_arr.value, max_dim, (dtype *const *)input_loss_arr.value); CheckCudaError(); } __global__ void KernelScalarToVectorForward(const dtype* const* inputs, int count, int *dims, int max_dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { results[count_i][dim_i] = inputs[count_i][0]; } } } void ScalarToVectorForward(const vector<const dtype*> &inputs, int count, const vector<int> &dims, vector<dtype*> &results) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = DefaultBlockCount(max_dim * count); NumberPointerArray input_arr; input_arr.init((dtype**)inputs.data(), inputs.size()); NumberPointerArray result_arr; result_arr.init((dtype**)results.data(), inputs.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); hipLaunchKernelGGL(( KernelScalarToVectorForward), dim3(block_count), dim3(TPB), 0, 0, (const dtype* const *)input_arr.value, count, dim_arr.value, max_dim, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelScalarToVectorBackward(const dtype *const *losses, int count, int *dims, volatile dtype *block_sums, int *block_counters, dtype *const *input_losses) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_sum[threadIdx.x] = offset < dims[count_i] ? 
losses[count_i][offset] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_sums[block_sums_offset] = shared_sum[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; sum += block_sums[offset]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { DeviceAtomicAdd(input_losses[count_i], shared_sum[0]); } } } void ScalarToVectorBackward(const vector<const dtype*> &losses, int count, const vector<int> &dims, vector<dtype*> &input_losses) { int max_dim = *max_element(dims.begin(), dims.end()); int thread_count = min(NextTwoIntegerPowerNumber(max_dim), TPB); int block_y_count = (max_dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_sums; block_sums.init(block_y_count * count); IntArray block_counters; block_counters.init(count); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); NumberPointerArray input_loss_arr; input_loss_arr.init((dtype**)input_losses.data(), input_losses.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); hipLaunchKernelGGL(( KernelScalarToVectorBackward), dim3(block_dim), dim3(thread_count), 0, 0, (const dtype *const *)loss_arr.value, count, dim_arr.value, block_sums.value, block_counters.value, (dtype *const *)input_loss_arr.value); CheckCudaError(); } __global__ void KernelBiasForward(const dtype *const *in_vals, const dtype *bias, int count, int dim, dtype *const *vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; vals[count_i][dim_i] = in_vals[count_i][dim_i] + bias[dim_i]; } } void BiasForward(const vector<dtype*> &in_vals, const dtype *bias, int count, int dim, const vector<dtype *> &vals) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray in_arr, val_arr; in_arr.init(in_vals.data(), in_vals.size()); val_arr.init(vals.data(), vals.size()); hipLaunchKernelGGL(( KernelBiasForward), dim3(block_count), dim3(TPB), 0, 0, in_arr.value, bias, count, dim, (dtype *const *)val_arr.value); } __global__ void KernelBiasBackward(const dtype *const *losses, int count, int dim, dtype *bias_losses, dtype *const *in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; DeviceAtomicAdd(bias_losses + dim_i, losses[count_i][dim_i]); DeviceAtomicAdd(in_losses[count_i] + dim_i, losses[count_i][dim_i]); } } void BiasBackward(const vector<dtype *> &losses, int count, int dim, dtype *bias_loss, const vector<dtype *> input_losses) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray loss_arr, input_loss_arr; loss_arr.init(losses.data(), losses.size()); input_loss_arr.init(input_losses.data(), input_losses.size()); hipLaunchKernelGGL(( KernelBiasBackward), dim3(block_count), dim3(TPB), 0, 0, loss_arr.value, count, dim, bias_loss, (dtype *const 
*)input_loss_arr.value); } __global__ void KernelSquareSum(const dtype *v, int len, volatile dtype *global_sum, int *block_counter, dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index = DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { is_last_block = false; } shared_sum[threadIdx.x] = 0.0f; for (int i = index; i < len; i += blockDim.x * gridDim.x) { shared_sum[threadIdx.x] += v[i] * v[i]; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, int len) { int block_count = DefaultBlockCount(len); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); hipLaunchKernelGGL(( KernelSquareSum), dim3(block_count), dim3(TPB), 0, 0, v, len, global_sum.value, block_counter.value, result.value); CheckCudaError(); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelSquareSum(const dtype *v, const bool *indexers, int count, int dim, volatile dtype *global_sum, int *block_counter, dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index = DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { global_sum[blockIdx.x] = 0.0f; is_last_block = false; } int count_i = index / dim; if (index < count * dim && indexers[count_i]) { shared_sum[threadIdx.x] = v[index] * v[index]; } else { shared_sum[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { float sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, const bool *indexers, int count, int dim) { int block_count = DefaultBlockCountWithoutLimit(count * dim); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); hipLaunchKernelGGL(( KernelSquareSum), dim3(block_count), dim3(TPB), 0, 0, v, indexers, count, dim, global_sum.value, block_counter.value, result.value); CheckCudaError(); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelRescale(dtype *v, int len, dtype scale) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i += step) { v[i] *= 
scale; } } void Rescale(dtype *v, int len, dtype scale) { int block_count = DefaultBlockCount(len); hipLaunchKernelGGL(( KernelRescale), dim3(block_count), dim3(TPB), 0, 0, v, len, scale); CheckCudaError(); } __global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps, dtype x) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (!is_bias) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x; dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } void UpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); dtype x = 1.0f / (1 - pow(belta1, iter + 1)); hipLaunchKernelGGL(( KernelUpdateAdam), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, is_bias, aux_mean, aux_square, iter, belta1, belta2, alpha, reg, eps, x); CheckCudaError(); } __global__ void KernelUpdateAdamW(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps, dtype x) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x; dtype square_plus_eps = aux_square[i] + eps; val[i] = (1 - (is_bias? 
0.0f : reg)) * val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } void UpdateAdamW(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); dtype x = 1.0f / (1 - pow(belta1, iter + 1)); hipLaunchKernelGGL(( KernelUpdateAdamW), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, is_bias, aux_mean, aux_square, iter, belta1, belta2, alpha, reg, eps, x); CheckCudaError(); } __global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / row; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iters[count_i] + 1)) / (1 - cuda_pow(belta1, iters[count_i] + 1)); dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } } __global__ void KernelSelfPlusIters(const bool *indexers, int *iters, int count) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count; i += step) { if (indexers[i]) { ++iters[i]; } } } void UpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); hipLaunchKernelGGL(( KernelUpdateAdam), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_mean, aux_square, indexers, iters, belta1, belta2, alpha, reg, eps); CheckCudaError(); block_count = DefaultBlockCount(col); hipLaunchKernelGGL(( KernelSelfPlusIters), dim3(block_count), dim3(TPB), 0, 0, indexers, iters, col); CheckCudaError(); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); hipLaunchKernelGGL(( KernelUpdateAdagrad), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_square, alpha, reg, eps); CheckCudaError(); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / col; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, 
dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); hipLaunchKernelGGL(( KernelUpdateAdagrad), dim3(block_count), dim3(TPB), 0, 0, val, grad, row, col, aux_square, indexers, alpha, reg, eps); CheckCudaError(); }
// GraphHostAlloc hands out a fixed 10,000,000-byte (~10 MB) page-locked,
// write-combined host buffer: fast for host writes that feed host-to-device
// copies, slow for host reads.
void *GraphHostAlloc() { void *m; CallCuda(hipHostMalloc(&m, 10000000, hipHostMallocWriteCombined)); if (m == NULL) { abort(); } return m; } }
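// The Adam/AdamW updaters above fold both bias corrections into the step
// size: with m_t = beta1*m_{t-1} + (1-beta1)*g_t and
// s_t = beta2*s_{t-1} + (1-beta2)*g_t*g_t, textbook Adam takes
//   theta -= alpha * (m_t / (1 - beta1^t)) / sqrt(s_t / (1 - beta2^t)),
// which is algebraically the same as the code's
//   lr_t = alpha * sqrt(1 - beta2^t) / (1 - beta1^t);
//   theta -= lr_t * m_t / sqrt(s_t),
// with t = iter + 1, x = 1 / (1 - beta1^t) precomputed on the host, and eps
// added inside the square root rather than outside. AdamW differs only in
// applying weight decay as a separate (1 - reg) shrink on val instead of
// adding reg * val to the gradient.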
a90a3f8992535709c8beec07497e8fea768abb14.cu
#include "N3LDG_cuda.h" #include <array> #include <boost/format.hpp> #include <cstdlib> #include <cstddef> #include <vector> #include <algorithm> #include <cmath> #include <cstdio> #include <cublas_v2.h> #include "Printf_cuda.cuh" #include "Printf_cuda.cu" #include "Memory_cuda.h" #include <curand.h> #include <curand_kernel.h> #include "cnmem.h" #include <string> #include <utility> #include <cstring> #include <cstdint> #include <chrono> #include <thread> #include <numeric> #include <memory> #include "profiler.h" #include "MyTensor-def.h" namespace n3ldg_cuda { using namespace std; using boost::format; #if USE_FLOAT #define cuda_sqrt(x) sqrtf(x) #define cuda_pow(x, y) powf(x, y) #define cuda_tanh(x) tanhf(x) #define cuda_exp(x) __expf(x) #define cuda_log(x) logf(x) #else #define cuda_sqrt(x) sqrt(x) #define cuda_pow(x, y) pow(x, y) #define cuda_tanh(x) tanh(x) #define cuda_exp(x) exp(x) #define cuda_log(x) log(x) #endif #define KERNEL_LOG #ifdef KERNEL_LOG #define KernelPrintLine(format, ...)\ {\ cuPrintf("block:x=%d,y=%d thread:x=%d,y=%d "#format"\n", blockIdx.x,\ blockIdx.y, threadIdx.x, threadIdx.y,__VA_ARGS__);\ } #else #define KernelPrintLine(format, ...) #endif constexpr int TPB = 1024; constexpr int BLOCK_COUNT = 56; void CallCuda(cudaError_t status) { if (status != cudaSuccess) { cerr << "cuda error:" << cudaGetErrorString(status) << endl; abort(); } } void CheckCudaError() { //cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { std::cerr << "cuda error:" << cudaGetErrorName(error) << std::endl; std::cerr << "cuda error:" << cudaGetErrorString(error) << std::endl; abort(); } } void CallCnmem(cnmemStatus_t status) { assert(status == CNMEM_STATUS_SUCCESS); } void CallCublas(cublasStatus_t status) { assert(status == CUBLAS_STATUS_SUCCESS); } void CallCurand(curandStatus status) { assert(status == CURAND_STATUS_SUCCESS); } cublasHandle_t& GetCublasHandle() { static cublasHandle_t handle; static bool init; if (!init) { init = true; CallCublas(cublasCreate(&handle)); } return handle; } cudaError_t MyCudaMemcpy(void *dest, const void *src, size_t count, cudaMemcpyKind kind) { cudaError_t e; e = cudaMemcpyAsync(dest, src, count, kind); CallCuda(e); return e; } int NextTwoIntegerPowerNumber(int number) { int result = 1; while (number > result) { result <<= 1; } return result; } template <> vector<bool> GPUArray<bool>::toCpu() const { bool *cpu_arr = new bool[len]; CallCuda(MyCudaMemcpy(cpu_arr, value, sizeof(bool) * len, cudaMemcpyDeviceToHost)); vector<bool> result; result.resize(len); for (int i = 0; i < len; ++i) { result.at(i) = cpu_arr[i]; } delete[] cpu_arr; return result; } void DeviceInt::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceInt::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(&v, value, sizeof(int), cudaMemcpyDeviceToHost)); } void DeviceInt::copyFromHostToDevice() { CallCuda(MyCudaMemcpy(value, &v, sizeof(int), cudaMemcpyHostToDevice)); } DeviceInt::~DeviceInt() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void DeviceNumber::init() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); value = NULL; } CallCuda(MemoryPool::Ins().Malloc((void**)&value, sizeof(int))); } void DeviceNumber::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(&v, value, sizeof(dtype), cudaMemcpyDeviceToHost)); } DeviceNumber::~DeviceNumber() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void 
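// Tensor1D/Tensor2D pair a plain host array `v` with a device buffer `value`
// allocated from the custom MemoryPool; copyFromHostToDevice() /
// copyFromDeviceToHost() move data between the two, and under TEST_CUDA the
// host mirror is kept allocated so CPU and GPU results can be compared.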
Tensor1D::init(int dim) { initOnDevice(dim); #if TEST_CUDA v = new dtype[dim]; zero(); #endif } void Tensor1D::initOnMemoryAndDevice(int dim) { initOnDevice(dim); v = new dtype[dim]; zero(); } void Tensor1D::initOnDevice(int dim) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, dim * sizeof(dtype))); this->dim = dim; } void Tensor1D::initOnMemory(int len) { v = new dtype[dim]; zero(); } Tensor1D::Tensor1D(const Tensor1D &t) { dim = t.dim; memcpy(v, t.v, dim *sizeof(dtype)); CallCuda(MyCudaMemcpy(value, t.value, dim * sizeof(dtype), cudaMemcpyDeviceToDevice)); } Tensor1D::~Tensor1D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor1D::print() const { cout << "dim:" << dim << endl; PrintNums(value, dim); } void Tensor1D::copyFromHostToDevice() { assert(v != NULL); assert(value != NULL); CallCuda(MyCudaMemcpy(value, v, dim * sizeof(dtype), cudaMemcpyHostToDevice)); } void Tensor1D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, dim * sizeof(dtype), cudaMemcpyDeviceToHost)); } __device__ int DeviceDefaultIndex(); __device__ int DeviceDefaultStep(); int DefaultBlockCount(int len); __global__ void KernelCheckIsNumber(const dtype *v, int dim, int *error) { if (threadIdx.x == 0 && blockIdx.x == 0) { *error = 0; } int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim; i += step) { if (v[i] != v[i]) { *error = 1; return; } } } void CheckIsNumber(const dtype *v, int dim) { int block_count = DefaultBlockCount(dim); DeviceInt error; error.init(); KernelCheckIsNumber<<<block_count, TPB>>>(v, dim, error.value); CheckCudaError(); error.copyFromDeviceToHost(); if (error.v != 0) { cerr << "nan checked!" << endl; abort(); } } void Tensor1D::checkIsNumber() const { n3ldg_cuda::CheckIsNumber(value, dim); } void Tensor2D::initOnMemoryAndDevice(int row, int col) { initOnDevice(row, col); v = new dtype[row * col]; zero(); } void Tensor2D::init(int row, int col) { initOnDevice(row, col); #if TEST_CUDA v = new dtype[row * col]; zero(); #endif } void Tensor2D::initOnDevice(int row, int col) { CallCuda(MemoryPool::Ins().Malloc((void**)&value, row * col * sizeof(dtype))); this->row = row; this->col = col; this->size = row * col; } Tensor2D::Tensor2D(const Tensor2D &t) { row = t.row; col = t.col; memcpy(v, t.v, sizeof(dtype) * row * col); CallCuda(MyCudaMemcpy(value, t.value, sizeof(dtype) * row * col, cudaMemcpyDeviceToDevice)); } Tensor2D::~Tensor2D() { if (value != NULL) { CallCuda(MemoryPool::Ins().Free(value)); } } void Tensor2D::print() const { cout << "row:" << row << " col:" << col << endl; PrintNums(value, size); } void Tensor2D::copyFromHostToDevice() { CallCuda(MyCudaMemcpy(value, v, size * sizeof(dtype), cudaMemcpyHostToDevice)); } void Tensor2D::copyFromDeviceToHost() { CallCuda(MyCudaMemcpy(v, value, size * sizeof(dtype), cudaMemcpyDeviceToHost)); } void Assert(bool v, const std::string &message, const function<void(void)> &call) { #if TEST_CUDA if (!v) { std::cerr << message << std::endl; call(); abort(); } #endif } __device__ void DeviceAtomicAdd(dtype* address, dtype value) { float old = value; float new_old; do { new_old = atomicExch(address, 0.0); new_old += old; } while ((old = atomicExch(address, new_old))!=0.0); }; __device__ dtype cuda_dexp(dtype y) { return y; } __device__ dtype cuda_dtanh(dtype y) { return 1.0f - y * y; } __device__ dtype cuda_sigmoid(dtype x) { return 1.0f / (1.0f + cuda_exp(-x)); } __device__ dtype cuda_dsigmoid(dtype y) { return y * (1.0f - y); } __device__ dtype cuda_relu(dtype x) { return x 
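// DeviceAtomicAdd above emulates a floating-point atomic add with an
// atomicExch retry loop. For reference, a minimal sketch of the more common
// compare-and-swap formulation (the name atomicAddF is hypothetical; on
// compute capability >= 2.0 the built-in atomicAdd(float*, float) can be used
// directly):
//   __device__ void atomicAddF(float *addr, float val) {
//       int *iaddr = (int *)addr;
//       int old = *iaddr, assumed;
//       do {
//           assumed = old;
//           old = atomicCAS(iaddr, assumed,
//                           __float_as_int(val + __int_as_float(assumed)));
//       } while (assumed != old);
//   }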
> 0.0f ? x : 0.0f; } __device__ dtype cuda_drelu(dtype x) { return x > 0.0f ? 1 : 0.0f; } __device__ dtype cuda_leaky_relu(dtype x) { return x > 0.0f ? x : -0.1f * x; } __device__ dtype cuda_dleaky_relu(dtype x) { return x > 0.0f ? 1.0f : -0.1f; } __device__ dtype cuda_dsqrt(dtype y) { return 0.5 / y; } const dtype SELU_LAMBDA = 1.0507009873554804934193349852946; const dtype SELU_ALPHA = 1.6732632423543772848170429916717; __device__ dtype cuda_selu(dtype x) { return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA * (cuda_exp(x) - 1.0f) : SELU_LAMBDA * x; } __device__ dtype cuda_dselu(dtype x, dtype y) { return x <= 0.0f ? SELU_LAMBDA * SELU_ALPHA + y : SELU_LAMBDA; } void Random(dtype *v, int len, dtype bound) { dtype *mem = (dtype*)malloc(len * sizeof(dtype)); assert(mem != NULL); dtype min = -bound, max = bound; for (int i = 0; i < len; i++) { mem[i] = (dtype(rand()) / RAND_MAX) * (max - min) + min; } CallCuda(MyCudaMemcpy(v, mem, len * sizeof(dtype), cudaMemcpyHostToDevice)); free(mem); } __device__ int DeviceDefaultIndex() { return blockIdx.x * blockDim.x + threadIdx.x; } __device__ int DeviceDefaultStep() { return gridDim.x * blockDim.x; } __device__ dtype DeviceAbs(dtype d) { return d > 0 ? d : -d; } int DefaultBlockCount(int len) { int block_count = (len - 1 + TPB) / TPB; return std::min(block_count, BLOCK_COUNT); } int DefaultBlockCountWithoutLimit(int len) { return (len - 1 + TPB) / TPB; } __global__ void KernelZero(dtype *v, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) { return; } v[index] = 0; } void Zero(dtype *v, int len) { int block_count = (len - 1 + TPB) / TPB; KernelZero<<<block_count, TPB>>>(v, len); CheckCudaError(); } __global__ void PrintPointers(void **p, int len) { for (int i = 0; i < len; ++i) { printf("%p\n", p[i]); } } __global__ void KernelPrintNums(const dtype* p, int len) { for (int i = 0; i < len; ++i) { printf("%d %f\n", i, p[i]); } } void PrintNums(const dtype* p, int len) { KernelPrintNums<<<1, 1>>>(p, len); cudaDeviceSynchronize(); CheckCudaError(); } __global__ void KernelPrintNums(const dtype *const *p, int index, int len) { for (int i = 0; i < len; ++i) { printf("%d %f\n", i, p[index][i]); } } void PrintNums(const dtype *const *p, int count_i, int len) { KernelPrintNums<<<1, 1>>>(p, count_i, len); cudaDeviceSynchronize(); CheckCudaError(); } __global__ void KernelPrintInts(const int* p, int len) { for (int i = 0; i < len; ++i) { printf("%d\n", p[i]); } } void PrintInts(const int* p, int len) { KernelPrintInts<<<1, 1>>>(p, len); cudaDeviceSynchronize(); CheckCudaError(); } void InitCuda(int device_id, float memory_in_gb) { std::cout << "device_id:" << device_id << std::endl; CallCuda(cudaSetDeviceFlags(cudaDeviceMapHost)); #if DEVICE_MEMORY == 0 cnmemDevice_t device; device.size = 10000000000; device.device = device_id; cnmemInit(1, &device, CNMEM_FLAGS_DEFAULT); #else CallCuda(cudaSetDevice(device_id)); #endif CallCuda(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1)); CallCuda(cudaPrintfInit()); MemoryPool::Ins().Init(memory_in_gb); } void EndCuda() { cudaPrintfEnd(); Profiler::Ins().Print(); } __global__ void KernelCopyFromOneVectorToMultiVectors(const dtype *src, dtype *const *dest, int count, int len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * len; i += step) { int count_i = i / len; int len_i = i % len; dest[count_i][len_i] = src[i]; } } void CopyFromOneVectorToMultiVals(const dtype *src, std::vector<dtype*> &vals, int count, int len) { NumberPointerArray val_arr; 
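// CopyFromOneVectorToMultiVals scatters one contiguous device buffer of shape
// (count, len), laid out row-major, into `count` separately allocated value
// vectors; NumberPointerArray holds the device-side array of those per-vector
// pointers. KernelCopyFromMultiVectorsToOneVector below is the inverse gather.
// Both use the grid-stride loop idiom (DeviceDefaultIndex/DeviceDefaultStep),
// so the launch only needs enough blocks to saturate the device, not one
// thread per element.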
val_arr.init((dtype**)vals.data(), vals.size()); int block_count = (len * count - 1 + TPB) / TPB; block_count = std::min(block_count, BLOCK_COUNT); KernelCopyFromOneVectorToMultiVectors<<<block_count, TPB>>>(src, (dtype *const *)val_arr.value, count, len); CheckCudaError(); } void CopyFromHostToDevice(const std::vector<dtype*> &src, std::vector<dtype*> &dest, int count, int dim) { dtype *long_src = (dtype*)malloc(count * dim * sizeof(dtype)); if (long_src == NULL) { std::cerr << "out of memory!" << std::endl; abort(); } for (int i = 0; i < count; ++i) { memcpy(long_src + i * dim, src.at(i), dim * sizeof(dtype)); } dtype *long_dest = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&long_dest, count * dim * sizeof(dtype*))); CallCuda(cudaMemcpy(long_dest, long_src, count * dim * sizeof(dtype*), cudaMemcpyHostToDevice)); CopyFromOneVectorToMultiVals(long_dest, dest, count, dim); free(long_src); CallCuda(MemoryPool::Ins().Free(long_dest)); } __global__ void KernelCopyFromMultiVectorsToOneVector(const dtype **src, dtype *dest, int count, int len) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * len; i += step) { int count_i = i / len; int len_i = i % len; dest[i] = src[count_i][len_i]; } } void CopyFromMultiVectorsToOneVector(const std::vector<dtype*> &src, dtype *dest, int count, int len) { NumberPointerArray src_arr; src_arr.init((dtype**)src.data(), src.size()); int block_count = DefaultBlockCount(len * count); KernelCopyFromMultiVectorsToOneVector<<<block_count, TPB>>>( (const dtype**)src_arr.value, dest, count, len); CheckCudaError(); } void CopyFromDeviceToHost(const std::vector<dtype*> &src, std::vector<dtype*> &dest, int count, int dim) { dtype *long_src = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&long_src, count * dim * sizeof(dtype*))); CopyFromMultiVectorsToOneVector(src, long_src, count, dim); dtype *long_dest = (dtype*)malloc(count * dim * sizeof(dtype)); if (long_dest == NULL) { std::cerr << "out of memory!" 
<< std::endl; abort(); } CallCuda(cudaMemcpy(long_dest, long_src, count * dim * sizeof(dtype), cudaMemcpyDeviceToHost)); for (int i = 0; i < count; ++i) { memcpy(dest.at(i), long_dest + i * dim, dim * sizeof(dtype)); } CallCuda(MemoryPool::Ins().Free(long_src)); free(long_dest); } __global__ void KernelActivationForward(ActivatedEnum activated, const dtype *const *xs, int count, int *dims, int max_dim, dtype *const *ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < max_dim * count; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { if (activated == ActivatedEnum::TANH) { ys[count_i][dim_i] = cuda_tanh(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { ys[count_i][dim_i] = cuda_sigmoid(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::EXP) { ys[count_i][dim_i] = cuda_exp(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::RELU) { ys[count_i][dim_i] = cuda_relu(xs[count_i][dim_i]); } else if (activated == ActivatedEnum::SQRT) { ys[count_i][dim_i] = cuda_sqrt(xs[count_i][dim_i]); } else { printf("KernelActivationForward - error enum\n"); assert(false); } } } } void ActivationForward(ActivatedEnum activated, const std::vector<const dtype*> &xs, int count, const vector<int> &dims, std::vector<dtype*> &ys) { int max_dim = *max_element(dims.begin(), dims.end()); NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * max_dim); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); KernelActivationForward<<<block_count, TPB>>>(activated, (const dtype* const *)x_arr.value, count, dim_arr.value, max_dim, (dtype *const *)y_arr.value); CheckCudaError(); } __global__ void KernelActivationBackward(ActivatedEnum activated, const dtype *const *losses, const dtype *const *vals, int count, int *dims, int max_dim, dtype* const* in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < max_dim * count; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { dtype l; if (activated == ActivatedEnum::TANH) { l = cuda_dtanh(vals[count_i][dim_i]); } else if (activated == ActivatedEnum::SIGMOID) { l = cuda_dsigmoid(vals[count_i][dim_i]); } else if (activated == ActivatedEnum::EXP) { l = cuda_dexp(vals[count_i][dim_i]); } else if (activated == ActivatedEnum::RELU) { l = cuda_drelu(vals[count_i][dim_i]); } else if (activated == ActivatedEnum::SQRT) { l = cuda_dsqrt(vals[count_i][dim_i]); } else { printf("KernelActivationBackward - error enum\n"); assert(false); } dtype v = l * losses[count_i][dim_i]; DeviceAtomicAdd(in_losses[count_i] + dim_i, v); } } } void ActivationBackward(ActivatedEnum activated, const std::vector<const dtype*> &losses, const std::vector<dtype*> &vals, int count, const vector<int> &dims, std::vector<dtype*> &in_losses) { int max_dim = *max_element(dims.begin(), dims.end()); NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = DefaultBlockCount(count * max_dim); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); KernelActivationBackward<<<block_count, TPB>>>(activated, loss_arr.value, val_arr.value, count, dim_arr.value, max_dim, (dtype *const *)in_loss_arr.value); CheckCudaError(); } __global__ 
void KernelDropoutForward(const dtype *const *xs, int count, int dim, bool is_training, const dtype* drop_mask, dtype drop_factor, dtype *const *ys) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (is_training) { if (drop_mask[i] < drop_factor) { ys[count_i][dim_i] = 0.0f; } else { ys[count_i][dim_i] = xs[count_i][dim_i]; } } else { ys[count_i][dim_i] = (1 - drop_factor) * xs[count_i][dim_i]; } } } void DropoutForward(const std::vector<dtype*> &xs, int count, int dim, bool is_training, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &ys) { if (drop_factor < 0 || drop_factor >= 1.0f) { std::cerr << "drop value is " << drop_factor << std::endl; abort(); } NumberPointerArray x_arr, y_arr; x_arr.init((dtype**)xs.data(), xs.size()); y_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); KernelDropoutForward<<<block_count, TPB>>>(x_arr.value, count, dim, is_training, drop_mask, drop_factor, (dtype *const *)y_arr.value); CheckCudaError(); } __global__ void KernelDropoutBackward(const dtype *const *losses, const dtype *const *vals, int count, int dim, bool is_training, const dtype* drop_mask, dtype drop_factor, dtype *const *in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; if (is_training) { if (drop_mask[i] >= drop_factor) { DeviceAtomicAdd(in_losses[count_i] + dim_i, losses[count_i][dim_i]); } } else { DeviceAtomicAdd(in_losses[count_i] + dim_i, (1 - drop_factor) * losses[count_i][dim_i]); } } } void DropoutBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &vals, int count, int dim, bool is_training, const dtype *drop_mask, dtype drop_factor, std::vector<dtype*> &in_losses) { if (drop_factor < 0 || drop_factor >= 1) { std::cerr << "drop value is " << drop_factor << std::endl; abort(); } NumberPointerArray loss_arr, val_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); val_arr.init((dtype**)vals.data(), vals.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int block_count = DefaultBlockCount(count * dim); KernelDropoutBackward<<<block_count, TPB>>>(loss_arr.value, val_arr.value, count, dim, is_training, drop_mask, drop_factor, (dtype *const *)in_loss_arr.value); CheckCudaError(); } __global__ void KernelBucketForward(const dtype *input, int count, int dim, dtype *const *ys) { int index = DeviceDefaultIndex(); for (int i = index; i < count * dim; i+= DeviceDefaultStep()) { int count_i = i / dim; int dim_i = i % dim; ys[count_i][dim_i] = input[count_i * dim + dim_i]; } } void BucketForward(const std::vector<dtype> input, int count, int dim, std::vector<dtype*> &ys) { NumberArray input_arr; NumberPointerArray ys_arr; input_arr.init((dtype*)input.data(), input.size()); ys_arr.init((dtype**)ys.data(), ys.size()); int block_count = DefaultBlockCount(count * dim); KernelBucketForward<<<block_count, TPB>>>((const dtype*)input_arr.value, count, dim, (dtype *const *)ys_arr.value); CheckCudaError(); } __global__ void KernelCopyForUniNodeForward(const dtype** xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = gridDim.x * blockDim.x; int x_total_len = count * x_len; int b_total_len = count * b_len; for (int i = index; i < x_total_len + b_total_len; i 
+= step) { if (i < x_total_len) { int count_i = i / x_len; int len_i = i % x_len; xs_dest[i] = xs[count_i][len_i]; } else if (use_b) { int b_i = i - x_total_len; int len_i = b_i % b_len; b_dest[b_i] = b[len_i]; } } } void CopyForUniNodeForward(const std::vector<dtype*> &xs, const dtype* b, dtype* xs_dest, dtype* b_dest, int count, int x_len, int b_len, bool use_b) { NumberPointerArray x_arr; x_arr.init((dtype**)xs.data(), xs.size()); int len = x_len + b_len; int block_count = std::min((count * len - 1 + TPB) / TPB, 56); KernelCopyForUniNodeForward<<<block_count, TPB>>>( (const dtype**)x_arr.value, (const dtype*)b, xs_dest, b_dest, count, x_len, b_len, use_b); CheckCudaError(); } void MatrixMultiplyMatrix(dtype *W, dtype *x, dtype *y, int row, int col, int count, bool useb, bool should_x_transpose, bool should_W_transpose) { cublasHandle_t &handle = GetCublasHandle(); dtype alpha = 1; dtype beta = useb? 1 : 0; cublasOperation_t x_op = should_x_transpose ? CUBLAS_OP_T : CUBLAS_OP_N; int ldx = should_x_transpose ? count : col; cublasOperation_t W_op = should_W_transpose ? CUBLAS_OP_T : CUBLAS_OP_N; int ldw = should_W_transpose ? col : row; #if USE_FLOAT CallCublas(cublasSgemm(handle, W_op, x_op, row, count, col, &alpha, W, ldw, x, ldx, &beta, y, row)); #else CallCublas(cublasDgemm(handle, W_op, x_op, row, count, col, &alpha, W, ldw, x, ldx, &beta, y, row)); #endif } __global__ void KernelVerify(dtype *host, dtype *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i += step) { dtype loss = host[index] - device[index]; if (DeviceAbs(loss) > 0.001 && DeviceAbs(loss) > 0.001 * DeviceAbs(host[index])) { *success = false; KernelPrintLine("KernelVerify: host:%f device:%f loss:%f", host[index], device[index], loss); } } } bool Verify(dtype *host, dtype *device, int len, const char* message) { NumberArray arr; arr.init(host, len); int block_count = DefaultBlockCount(len); char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), cudaMemcpyHostToDevice)); KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success); CheckCudaError(); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), cudaMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); cudaDeviceSynchronize(); cudaPrintfDisplay(stdout, true); if (!success) { cerr << message << endl; abort(); } return success; } __global__ void KernelVerify(bool *host, bool *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { if (host[index] != device[index]) { *success = false; printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]); KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]); } } } bool Verify(bool *host, bool *device, int len, const char* message) { BoolArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; 
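// MatrixMultiplyMatrix above wraps cublasSgemm/cublasDgemm, which are
// column-major: with op(W) of shape (row x col) and op(x) of shape
// (col x count), the result y is a (row x count) column-major matrix (one
// example per column, ldc = row); useb selects beta = 1, so the product is
// accumulated onto whatever y already contains (e.g. a pre-filled bias).
// A minimal usage sketch (out_dim/in_dim/batch are hypothetical sizes; W, x, y
// are assumed to already live on the device):
//   // y[out_dim x batch] = W[out_dim x in_dim] * x[in_dim x batch], no bias:
//   MatrixMultiplyMatrix(W, x, y, /*row=*/out_dim, /*col=*/in_dim,
//       /*count=*/batch, /*useb=*/false,
//       /*should_x_transpose=*/false, /*should_W_transpose=*/false);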
CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, 8 * sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), cudaMemcpyHostToDevice)); KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success); CheckCudaError(); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), cudaMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); cudaDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } __global__ void KernelVerify(int *host, int *device, int len, const char *message, bool *success) { int index = DeviceDefaultIndex(); if (index < len) { if (host[index] != device[index]) { *success = false; printf("KernelVerify %s: host:%d device:%d \n", message, host[index], device[index]); KernelPrintLine("KernelVerify: host:%d device:%d", host[index], device[index]); } } } bool Verify(int *host, int *device, int len, const char* message) { IntArray arr; arr.init(host, len); int block_count = (len + TPB - 1) / TPB; char *m = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&m, (strlen(message) + 1) * sizeof(char))); CallCuda(MyCudaMemcpy(m, message, (strlen(message) + 1) * sizeof(char), cudaMemcpyHostToDevice)); bool success = true; bool *dev_success = NULL; CallCuda(MemoryPool::Ins().Malloc((void**)&dev_success, sizeof(bool))); CallCuda(MyCudaMemcpy(dev_success, &success, sizeof(bool), cudaMemcpyHostToDevice)); KernelVerify<<<block_count, TPB>>>(arr.value, device, len, m, dev_success); CheckCudaError(); CallCuda(MyCudaMemcpy(&success, dev_success, sizeof(bool), cudaMemcpyDeviceToHost)); MemoryPool::Ins().Free(dev_success); MemoryPool::Ins().Free(m); cudaDeviceSynchronize(); cudaPrintfDisplay(stdout, true); return success; } constexpr int MAX_BLOCK_POWER = 100; MemoryPool& MemoryPool::Ins() { static MemoryPool *p; if (p == NULL) { p = new MemoryPool; p->free_blocks_.resize(MAX_BLOCK_POWER + 1); p->busy_blocks_.reserve(10000); } return *p; } void appendFreeBlock(const MemoryBlock &memory_block, vector<map<void*, MemoryBlock>> &free_blocks, int i, const unordered_map<void*, MemoryBlock> &busy_blocks) { if (memory_block.size != (1 << i)) { cerr << boost::format("incorrect block size %1%, but i is %2%") % memory_block.size % i << endl; abort(); } free_blocks.at(i).insert(make_pair(memory_block.p, memory_block)); } cudaError_t MemoryPool::Malloc(void **p, int size) { assert(*p == NULL); Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("Malloc"); #if DEVICE_MEMORY == 0 CallCnmem(cnmemMalloc(p, size, NULL)); profiler.EndEvent(); return cudaSuccess; #elif DEVICE_MEMORY == 1 cudaError_t r = cudaMalloc(p, size); profiler.EndEvent(); return r; #else int fit_size = 1; int n = 0; while (fit_size < size) { fit_size <<= 1; ++n; } cudaError_t status = cudaErrorMemoryAllocation; while (status != cudaSuccess) { if (free_blocks_.at(n).empty()) { int higher_power = n + 1; while (higher_power <= MAX_BLOCK_POWER && free_blocks_.at(higher_power).empty()) { ++higher_power; } if (higher_power > MAX_BLOCK_POWER) { while (status != cudaSuccess) { status = cudaMalloc(p, fit_size); if (status != cudaSuccess) { abort(); } } CallCuda(status); MemoryBlock block(*p, fit_size); busy_blocks_.insert(std::make_pair(*p, block)); } else { auto &v = free_blocks_.at(higher_power); MemoryBlock &to_split = v.rbegin()->second; int half_size = to_split.size >> 1; void *half_address = static_cast<void*>(static_cast<char*>(to_split.p) + half_size); MemoryBlock low_block(to_split.p, half_size, to_split.buddy), high_block(half_address, half_size, 
to_split.p); v.erase(v.rbegin()->first); appendFreeBlock(low_block, free_blocks_, higher_power - 1, busy_blocks_); appendFreeBlock(high_block, free_blocks_, higher_power - 1, busy_blocks_); } } else { status = cudaSuccess; int this_size = free_blocks_.at(n).size(); MemoryBlock &block = free_blocks_.at(n).rbegin()->second; *p = block.p; busy_blocks_.insert(std::make_pair(block.p, block)); free_blocks_.at(n).erase(free_blocks_.at(n).rbegin()->first); } } profiler.EndEvent(); return status; #endif } std::pair<const MemoryBlock *, const MemoryBlock *> lowerAndhigherBlocks(const MemoryBlock &a, const MemoryBlock &b) { if (a.size != b.size) { cerr << "a.size is not equal to b.size" << endl; abort(); } int distance = static_cast<char*>(a.p) - static_cast<char*>(b.p); if (distance == 0) { cerr << "block a and b has the same address" << endl; abort(); } const MemoryBlock &low = distance > 0 ? b : a; const MemoryBlock &high = distance > 0 ? a : b; return std::make_pair(&low, &high); } bool isBuddies(const MemoryBlock &a, const MemoryBlock &b) { if (a.size != b.size) { return false; } auto pair = lowerAndhigherBlocks(a, b); return pair.second->buddy == pair.first->p && ((char*)pair.second->p - (char*)pair.first->p) == a.size; } MemoryBlock mergeBlocks(const MemoryBlock &a, const MemoryBlock &b) { if (a.size != b.size) { cerr << "sizes of memory blocks to merge not equal" << endl; abort(); } auto pair = lowerAndhigherBlocks(a, b); if ((char*)pair.second->p - (char*)pair.first->p != a.size || (a.p != b.buddy && a.buddy != b.p)) { cerr << "a and b are not buddies" << endl; cerr << boost::format("a:%1%\nb:%2%") % a.toString() % b.toString() << endl; abort(); } MemoryBlock block(pair.first->p, pair.first->size << 1, pair.first->buddy); return block; } void returnFreeBlock(const MemoryBlock &block, vector<map<void*, MemoryBlock>> &free_blocks, int power, const unordered_map<void*, MemoryBlock> &busy_blocks) { Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("returnFreeBlock"); MemoryBlock current_block = block; for (int i = power; i <= MAX_BLOCK_POWER; ++i) { map<void*, MemoryBlock> &v = free_blocks.at(i); void *free_p = (char*)current_block.p - (char*)current_block.buddy == current_block.size ? 
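// Buddy-address arithmetic for the pool's free path: a block of size S split
// from a parent of size 2*S is merged back with its buddy, which lives either
// at the recorded `buddy` pointer (when p - buddy == S, i.e. this block is
// the upper half) or at p + S (when it is the lower half). If that buddy is
// free at the same level the two are merged and the scan moves one power of
// two up; otherwise the block is parked in free_blocks at this level.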
current_block.buddy : (void*)((char*)current_block.p + current_block.size); auto it = v.find(free_p); if (it == v.end() || (it->second.p != current_block.buddy && it->second.buddy != current_block.p)) { appendFreeBlock(current_block, free_blocks, i, busy_blocks); break; } else { MemoryBlock merged_block = mergeBlocks(it->second, current_block); current_block = merged_block; v.erase(it); } } profiler.EndEvent(); } cudaError_t MemoryPool::Free(void *p) { Profiler &profiler = Profiler::Ins(); profiler.BeginEvent("Free"); #if DEVICE_MEMORY == 0 CallCnmem(cnmemFree(p, NULL)); profiler.EndEvent(); #elif DEVICE_MEMORY == 1 cudaError_t r = cudaFree(p); profiler.EndEvent(); return r; #else auto it = busy_blocks_.find(p); if (it == busy_blocks_.end()) { cerr << "cannot find busy block " << p << endl; abort(); } int size = it->second.size; int n = 0; while (size > 1) { size >>= 1; ++n; } if (it->second.size != (1 << n)) { cerr << boost::format("size:%1% n:%2%") % it->second.size % n << endl; abort(); } auto block = it->second; busy_blocks_.erase(it); returnFreeBlock(block, free_blocks_, n, busy_blocks_); it = busy_blocks_.find(p); if (it != busy_blocks_.end()) { cerr << "can find erased block " << p << endl; abort(); } profiler.EndEvent(); if (busy_blocks_.find(p) != busy_blocks_.end()) { cerr << boost::format("Malloc - find freed p in busy blocks") << endl; } return cudaSuccess; #endif } void Profiler::EndCudaEvent() { //cudaDeviceSynchronize(); EndEvent(); } __global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward( const dtype *lty, const dtype *lx, dtype *b, dtype *const *losses, int count, int out_dim, int in_dim, volatile dtype *block_sums, int *global_block_count, bool use_b) { __shared__ volatile dtype shared_arr[TPB]; int count_i = blockIdx.y * blockDim.x + threadIdx.x; int dim_i = blockIdx.x; if (dim_i < out_dim) { if (use_b) { if (threadIdx.x == 0 && blockIdx.y == 0) { global_block_count[dim_i] = 0; } int lty_index = count_i * out_dim + dim_i; shared_arr[threadIdx.x] = count_i < count ? 
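// KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward does two jobs,
// split by blockIdx.x: blocks with dim_i < out_dim reduce lty over the batch
// (shared-memory tree reduction plus the block_sums/global_block_count
// handshake) and, when use_b is set, add the result to the bias gradient b,
// while the remaining blocks simply scatter the already-computed lx gradients
// into each node's input loss vector.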
lty[lty_index] : 0.0f; __syncthreads(); for (int i = (TPB >> 1); i > 0; i>>=1) { if (threadIdx.x < i) { shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0]; if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) { dtype sum = 0.0; for (int i = 0; i < gridDim.y; ++i) { sum += block_sums[gridDim.y * blockIdx.x + i]; } DeviceAtomicAdd(b + dim_i, sum); } } } } else { if (count_i < count) { dim_i -= out_dim; int lx_index = dim_i + count_i * in_dim; DeviceAtomicAdd(losses[count_i] + dim_i, lx[lx_index]); } } } void AddLtyToParamBiasAndAddLxToInputLossesForUniBackward(const dtype *lty, const dtype *lx, dtype *b, std::vector<dtype*> &losses, int count, int out_dim, int in_dim, bool use_b) { int block_y = (count - 1 + TPB) / TPB; dim3 block_dim(out_dim + in_dim, block_y, 1); NumberPointerArray loss_arr; loss_arr.init(losses.data(), count); Tensor1D block_sums; block_sums.init(block_y * out_dim); IntArray global_block_count_arr; global_block_count_arr.init(out_dim); KernelAddLtyToParamBiasAndAddLxToInputLossesForUniBackward<<<block_dim, TPB>>>(lty, lx, b, (dtype *const *)loss_arr.value, count, out_dim, in_dim, block_sums.value, global_block_count_arr.value, use_b); CheckCudaError(); } __global__ void KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward( const dtype *lty, const dtype *lx1, const dtype *lx2, dtype *b, dtype *const *losses1, dtype *const *losses2, int count, int out_dim, int in_dim1, int in_dim2, bool use_b, volatile dtype *block_sums, int *global_block_count) { __shared__ volatile dtype shared_arr[TPB]; int count_i = blockIdx.y * blockDim.x + threadIdx.x; int dim_i = blockIdx.x; if (dim_i < out_dim) { if (threadIdx.x == 0 && blockIdx.y == 0) { global_block_count[dim_i] = 0; } //int lty_index = dim_i * count + count_i; int lty_index = dim_i + count_i * out_dim; shared_arr[threadIdx.x] = count_i < count ? 
lty[lty_index] : 0.0f; __syncthreads(); for (int i = (TPB >> 1); i > 0; i>>=1) { if (threadIdx.x < i) { shared_arr[threadIdx.x] += shared_arr[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { block_sums[gridDim.y * blockIdx.x + blockIdx.y] = shared_arr[0]; if (atomicAdd(global_block_count + dim_i, 1) == gridDim.y - 1) { dtype sum = 0.0; for (int i = 0; i < gridDim.y; ++i) { sum += block_sums[gridDim.y * blockIdx.x + i]; } if (use_b) { DeviceAtomicAdd(b + dim_i, sum); } } } } else if (dim_i < out_dim + in_dim1) { if (count_i < count) { dim_i -= out_dim; int lx_index = dim_i + count_i * in_dim1; DeviceAtomicAdd(losses1[count_i] + dim_i, lx1[lx_index]); } } else { if (count_i < count) { dim_i -= (out_dim + in_dim1); int lx_index = dim_i + count_i * in_dim2; DeviceAtomicAdd(losses2[count_i] + dim_i, lx2[lx_index]); } } } void AddLtyToParamBiasAndAddLxToInputLossesForBiBackward(const dtype *lty, const dtype *lx1, const dtype *lx2, dtype *b, std::vector<dtype*> &losses1, std::vector<dtype*> &losses2, int count, int out_dim, int in_dim1, int in_dim2, bool use_b) { int block_y = (count - 1 + TPB) / TPB; dim3 block_dim(out_dim + in_dim1 + in_dim2, block_y, 1); NumberPointerArray loss1_arr; loss1_arr.init(losses1.data(), count); NumberPointerArray loss2_arr; loss2_arr.init(losses2.data(), count); Tensor1D block_sums; block_sums.init(block_y * out_dim); IntArray global_block_count_arr; global_block_count_arr.init(out_dim); KernelAddLtyToParamBiasAndAddLxToInputLossesForBiBackward<<<block_dim, TPB>>>(lty, lx1, lx2, b, (dtype *const *)loss1_arr.value, (dtype *const *)loss2_arr.value, count, out_dim, in_dim1, in_dim2, use_b, block_sums.value, global_block_count_arr.value); CheckCudaError(); } constexpr int MAX_BATCH_COUNT = 1000000; __global__ void KernelInitCurandStates(curandState_t *states) { int index = blockIdx.x * blockDim.x + threadIdx.x; int step = gridDim.x * blockDim.x; for (int i = index; i < MAX_BATCH_COUNT; i += step) { curand_init(0, i, 0, &states[i]); } } curandState_t *GetCurandStates() { static curandState_t *states; if (states == NULL) { MemoryPool &pool = MemoryPool::Ins(); CallCuda(pool.Malloc((void**)&states, sizeof(curandState_t) * MAX_BATCH_COUNT)); KernelInitCurandStates<<<BLOCK_COUNT, TPB>>>( states); CheckCudaError(); } return states; } curandGenerator_t &GetGenerator() { static curandGenerator_t gen; static bool init; if (!init) { CallCurand(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); CallCurand(curandSetPseudoRandomGeneratorSeed(gen, 0)); init = true; } return gen; } void CalculateDropoutMask(dtype drop_factor, int count, int dim, dtype* mask) { curandGenerator_t &gen = GetGenerator(); CallCurand(curandGenerateUniform(gen, mask, count * dim)); } __global__ void KernelConcatForward(const dtype *const *ins, int *in_dims, dtype *const *outs, int count, int in_count, int out_dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < out_dim * count; i += step) { int out_dim_i = i % out_dim; int count_i = i / out_dim; int in_dim_sum = 0; int last_in_dim_sum; int offset_j = 0; for (int j = 0; j < in_count; ++j) { last_in_dim_sum = in_dim_sum; in_dim_sum += in_dims[j]; offset_j = j; if (out_dim_i < in_dim_sum) { break; } } int in_dim_i = out_dim_i - last_in_dim_sum; dtype v = ins[count_i * in_count + offset_j][in_dim_i]; outs[count_i][out_dim_i] = v; } } void ConcatForward(const std::vector<dtype*> &in_vals, const std::vector<int> &in_dims, std::vector<dtype*> &vals, int count, int in_count, int out_dim) { int len = 
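// Worked example for KernelConcatForward/KernelConcatBackward above: with
// in_dims = {3, 2} (so out_dim = 5), output position out_dim_i = 4 scans the
// prefix sums 3, 5 and stops at j = 1, giving in_dim_i = 4 - 3 = 1, i.e. the
// second element of the second input.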
count * out_dim; int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); NumberPointerArray in_val_arr, val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_dim_arr; in_dim_arr.init((int*)in_dims.data(), in_dims.size()); KernelConcatForward<<<block_count, TPB>>>(in_val_arr.value, in_dim_arr.value, (dtype *const *)val_arr.value, count, in_count, out_dim); CheckCudaError(); } __global__ void KernelConcatBackward(dtype *const *in_losses, int *in_dims, const dtype *const *out_losses, int count, int in_count, int out_dim) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < out_dim * count; i += step) { int out_dim_i = i % out_dim; int count_i = i / out_dim; int in_dim_sum = 0; int last_in_dim_sum; int offset_j = 0; for (int j = 0; j < in_count; ++j) { last_in_dim_sum = in_dim_sum; in_dim_sum += in_dims[j]; offset_j = j; if (out_dim_i < in_dim_sum) { break; } } int in_dim_i = out_dim_i - last_in_dim_sum; DeviceAtomicAdd(in_losses[count_i * in_count + offset_j] + in_dim_i, out_losses[count_i][out_dim_i]); } } void ConcatBackward(const std::vector<dtype*> &in_losses, const std::vector<int> &in_dims, std::vector<dtype*> &losses, int count, int in_count, int out_dim) { int len = count * out_dim; int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); NumberPointerArray in_loss_arr, loss_arr; in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); loss_arr.init((dtype**)losses.data(), losses.size()); IntArray in_dim_arr; in_dim_arr.init((int*)in_dims.data(), in_dims.size()); KernelConcatBackward<<<block_count, TPB>>>((dtype *const *)in_loss_arr.value, in_dim_arr.value, loss_arr.value, count, in_count, out_dim); CheckCudaError(); } __global__ void KernelScalarConcatForward(const dtype *const *ins, int count, const int *dims, int max_dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < max_dim * count; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { results[count_i][dim_i] = ins[count_i * max_dim + dim_i][0]; } } } void ScalarConcatForward(const vector<dtype *> &ins, int count, const vector<int> &dims, int max_dim, const vector<dtype *> &results) { NumberPointerArray result_arr; result_arr.init((dtype**)results.data(), results.size()); NumberPointerArray in_arr; in_arr.init((dtype**)ins.data(), ins.size()); IntArray dim_arr; dim_arr.init((int *)dims.data(), dims.size()); int block_count = DefaultBlockCount(count * max_dim); KernelScalarConcatForward<<<block_count, TPB>>>(in_arr.value, count, dim_arr.value, max_dim, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelScalarConcatBackward(const dtype *const *losses, int count, const int *dims, int max_dim, dtype *const *input_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < max_dim * count; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { DeviceAtomicAdd(input_losses[count_i * max_dim + dim_i], losses[count_i][dim_i]); } } } void ScalarConcatBackward(const vector<dtype *> &losses, int count, const vector<int> &dims, int max_dim, const vector<dtype *> in_losses) { NumberPointerArray loss_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); in_loss_arr.init((dtype **)in_losses.data(), in_losses.size()); IntArray dim_arr; dim_arr.init((int *)dims.data(), dims.size()); int 
block_count = DefaultBlockCount(count * max_dim); KernelScalarConcatBackward<<<block_count, TPB>>>(loss_arr.value, count, dim_arr.value, max_dim, (dtype *const *)in_loss_arr.value); CheckCudaError(); } __global__ void KernelMemset(dtype *p, int len, dtype value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i+= step) { p[i] = value; } } void Memset(dtype *p, int len, dtype value) { int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); KernelMemset<<<block_count, TPB>>>(p, len, value); CheckCudaError(); } __global__ void KernelMemset(bool *p, int len, bool value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i+= step) { p[i] = value; } } void Memset(bool *p, int len, bool value) { int block_count = std::min(BLOCK_COUNT, (len - 1 + TPB) / TPB); KernelMemset<<<block_count, TPB>>>(p, len, value); CheckCudaError(); } void *Malloc(int size) { void *p; CallCuda(cudaMalloc(&p, size)); return p; } __global__ void KernelBatchMemset(dtype *const *p, int count, int *dims, int max_dim, dtype value) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < max_dim * count ; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { p[count_i][dim_i] = value; } } } void BatchMemset(const std::vector<dtype*> &vec, int count, const vector<int> &dims, dtype value) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = (count * max_dim -1 + TPB) / TPB; block_count = std::min(block_count, BLOCK_COUNT); NumberPointerArray vec_arr; vec_arr.init((dtype**)vec.data(), vec.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); KernelBatchMemset<<<block_count, TPB>>>((dtype *const *)vec_arr.value, count, dim_arr.value, max_dim, value); CheckCudaError(); } __global__ void KernelLookupForward(const int *xids, const dtype *vocabulary, int count, int dim, dtype **vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int xid = xids[count_i]; if (xid >= 0) { int voc_i = xid * dim + dim_i; vals[count_i][dim_i] = vocabulary[voc_i]; } else { vals[count_i][dim_i] = 0.0f; } } } void LookupForward(const std::vector<int> &xids, const dtype *vocabulary, int count, int dim, std::vector<dtype*> &vals) { int block_count = std::min(BLOCK_COUNT, (count * dim - 1 + TPB) / TPB); IntArray xid_arr; xid_arr.init((int*)xids.data(), xids.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); KernelLookupForward<<<block_count, TPB>>>(xid_arr.value, vocabulary, count, dim, const_cast<dtype**>(val_arr.value)); CheckCudaError(); } __global__ void KernelLookupBackward(const int *xids, const int *should_backward, const dtype** losses, int count, int dim, dtype *grad, bool *indexers) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int xid = xids[count_i]; if (should_backward[count_i]) { if (dim_i == 0) { indexers[xid] = true; } DeviceAtomicAdd(grad + xid * dim + dim_i, losses[count_i][dim_i]); } } } void LookupBackward(const std::vector<int> &xids, const std::vector<int> &should_backward, const std::vector<dtype*> &losses, int count, int dim, dtype *grad, bool *indexers) { int block_count = std::min((count * dim - 1 + TPB) / TPB, BLOCK_COUNT); IntArray pl_arr; 
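// LookupBackward scatter-adds each token's gradient row into the embedding
// table at offset xid * dim. The `indexers` overload additionally flags which
// vocabulary rows were touched, so the sparse UpdateAdam/UpdateAdagrad
// overloads that take `const bool *indexers` only update those rows.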
pl_arr.init((int*)xids.data(), xids.size()); IntArray xid_arr; xid_arr.init((int*)pl_arr.value, xids.size()); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray should_backward_arr; should_backward_arr.init(should_backward.data(), should_backward.size()); KernelLookupBackward<<<block_count, TPB>>>( const_cast<const int *>(xid_arr.value), should_backward_arr.value, const_cast<const dtype**>(loss_arr.value), count, dim, grad, indexers); CheckCudaError(); } __global__ void KernelLookupBackward(const int *xids, int *should_backward, const dtype** losses, int count, int dim, dtype *grad) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int xid = xids[count_i]; if (should_backward[count_i]) { DeviceAtomicAdd(grad + xid * dim + dim_i, losses[count_i][dim_i]); } } } void LookupBackward(const std::vector<int> &xids, const vector<int> &should_backward, const std::vector<dtype*> &losses, int count, int dim, dtype *grad) { int block_count = std::min((count * dim - 1 + TPB) / TPB, BLOCK_COUNT); IntArray pl_arr; pl_arr.init((int*)xids.data(), xids.size()); IntArray xid_arr; xid_arr.init((int*)pl_arr.value, xids.size()); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray should_backward_arr; should_backward_arr.init(should_backward.data(), should_backward.size()); KernelLookupBackward<<<block_count, TPB>>>( const_cast<const int *>(xid_arr.value), should_backward_arr.value, const_cast<const dtype**>(loss_arr.value), count, dim, grad); CheckCudaError(); } __global__ void KernelParamRowForward(const dtype *param, int row_index, int param_row_count, int count, int dim, dtype *const *vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; int param_offset = dim_i * param_row_count + row_index; vals[count_i][dim_i] = param[param_offset]; } } void ParamRowForward(const dtype *param, int row_index, int param_row_count, int count, int dim, vector<dtype*> &vals) { NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); int block_count = DefaultBlockCount(count * dim); KernelParamRowForward<<<block_count, TPB>>>(param, row_index, param_row_count, count, dim, (dtype *const *)val_arr.value); CheckCudaError(); } __global__ void KernelPoolForward(PoolingEnum pooling, const dtype *const *ins, int *in_counts, int max_in_count, dtype *const *outs, int count, int dim, int* hit_inputs) { __shared__ volatile extern dtype pool_shared_arr[]; volatile dtype* shared_indexers = pool_shared_arr + blockDim.x; int batch_i = blockIdx.y; int in_count = in_counts[batch_i]; int in_count_i = threadIdx.x; int dim_i = blockIdx.x; if (in_count_i < in_count) { pool_shared_arr[threadIdx.x] = ins[batch_i * max_in_count + in_count_i][dim_i]; } else { pool_shared_arr[threadIdx.x] = pooling == PoolingEnum::MAX ? 
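// Out-of-range lanes are padded with -1e10 (max pooling) or +1e10 (min
// pooling) so they can never win the tournament reduction below; the winning
// lane's index is carried along in shared_indexers and recorded in
// hit_inputs, which PoolBackward then uses to route each output gradient back
// to the single input element that produced it.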
-1e10 : 1e10; } shared_indexers[threadIdx.x] = threadIdx.x; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0;i >>=1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; if (pooling == PoolingEnum::MAX) { if (pool_shared_arr[threadIdx.x] < pool_shared_arr[plus_i]) { pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i]; shared_indexers[threadIdx.x] = shared_indexers[plus_i]; } } else { if (pool_shared_arr[threadIdx.x] > pool_shared_arr[plus_i]) { pool_shared_arr[threadIdx.x] = pool_shared_arr[plus_i]; shared_indexers[threadIdx.x] = shared_indexers[plus_i]; } } } __syncthreads(); } if (threadIdx.x == 0) { hit_inputs[batch_i * dim + dim_i] = shared_indexers[0]; outs[batch_i][dim_i] = pool_shared_arr[0]; } } void PoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals, std::vector<dtype*> &vals, int count, const std::vector<int> &in_counts, int dim, int *hit_inputs) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); KernelPoolForward<<<block_dim, thread_count, thread_count * 2 * sizeof(dtype)>>>(pooling, in_val_arr.value, in_count_arr.value, max_in_count, (dtype *const *)val_arr.value, count, dim, hit_inputs); CheckCudaError(); } __global__ void KernelPoolBackward(const dtype *const * losses, const int *hit_inputs, int max_in_count, int count, int dim, dtype *const *in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; int input_i = hit_inputs[i]; dtype loss = losses[count_i][dim_i]; DeviceAtomicAdd(in_losses[count_i * max_in_count + input_i] + dim_i, loss); } } void PoolBackward(const std::vector<dtype*> &losses, std::vector<dtype*> &in_losses, const std::vector<int> &in_counts, const int *hit_inputs, int count, int dim) { NumberPointerArray loss_arr, in_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int block_count = (count * dim - 1 + TPB) / TPB; block_count = std::min(block_count, BLOCK_COUNT); KernelPoolBackward<<<block_count, TPB>>>((const dtype**)loss_arr.value, hit_inputs, max_in_count, count, dim, (dtype *const *)in_loss_arr.value); CheckCudaError(); } __global__ void KernelSumPoolForward(PoolingEnum pooling, const dtype *const *in_vals, int count, int dim, const int *in_counts, int max_in_count, dtype *const *vals) { __shared__ volatile extern dtype pool_shared_arr[]; int batch_i = blockIdx.y; int in_count = in_counts[batch_i]; int in_count_i = threadIdx.x; int dim_i = blockIdx.x; if (in_count_i < in_count) { pool_shared_arr[threadIdx.x] = in_vals[batch_i * max_in_count + in_count_i][dim_i]; } else { pool_shared_arr[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0;i >>=1) { if (threadIdx.x < i) { int plus_i = threadIdx.x + i; pool_shared_arr[threadIdx.x] += pool_shared_arr[plus_i]; } __syncthreads(); } if (threadIdx.x == 0) { vals[batch_i][dim_i] = pooling == PoolingEnum::SUM ? 
pool_shared_arr[0] : pool_shared_arr[0] / in_counts[batch_i]; } } void SumPoolForward(PoolingEnum pooling, const std::vector<dtype*> &in_vals, int count, int dim, const std::vector<int> &in_counts, std::vector<dtype*> &vals) { int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); int thread_count = 8; while (max_in_count > thread_count) { thread_count <<= 1; } dim3 block_dim(dim, count, 1); NumberPointerArray in_val_arr; in_val_arr.init((dtype**)in_vals.data(), in_vals.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); KernelSumPoolForward<<<block_dim, thread_count, thread_count * sizeof(dtype)>>>(pooling, in_val_arr.value, count, dim, in_count_arr.value, max_in_count, (dtype *const *)val_arr.value); CheckCudaError(); } __global__ void KernelSumBackward(PoolingEnum pooling, const dtype *const *losses, const int *in_counts, int max_in_count, int count, int dim, dtype *const *in_losses) { int global_in_count_i = blockIdx.x * max_in_count + blockIdx.y; for (int i = threadIdx.x; i < dim; i += blockDim.x) { if (blockIdx.y < in_counts[blockIdx.x]) { DeviceAtomicAdd(in_losses[global_in_count_i] + i, pooling == PoolingEnum::SUM ? losses[blockIdx.x][i] : losses[blockIdx.x][i] / in_counts[blockIdx.x]); } } } void SumPoolBackward(PoolingEnum pooling, const std::vector<dtype*> &losses, const std::vector<int> &in_counts, int count, int dim, std::vector<dtype*> &in_losses) { int thread_count = 8; while (thread_count < dim) { thread_count <<= 1; } thread_count = std::min(TPB, thread_count); int max_in_count = *std::max_element(in_counts.begin(), in_counts.end()); dim3 block_dim(count, max_in_count, 1); NumberPointerArray loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); IntArray in_count_arr; in_count_arr.init((int*)in_counts.data(), in_counts.size()); NumberPointerArray in_loss_arr; in_loss_arr.init((dtype**)in_losses.data(), in_losses.size()); KernelSumBackward<<<block_dim, thread_count>>>(pooling, loss_arr.value, (const int*)in_count_arr.value, max_in_count, count, dim, (dtype *const *)in_loss_arr.value); CheckCudaError(); } __global__ void KernelPMultiForward(const dtype **ins1, const dtype **ins2, int count, int dim, dtype *const *vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; vals[count_i][dim_i] = ins1[count_i][dim_i] * ins2[count_i][dim_i]; } } void PMultiForward(const std::vector<dtype*> &ins1, const std::vector<dtype*> &ins2, int count, int dim, std::vector<dtype*> &vals) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray ins1_arr, ins2_arr, vals_arr; ins1_arr.init((dtype**)ins1.data(), count); ins2_arr.init((dtype**)ins2.data(), count); vals_arr.init((dtype**)vals.data(), count); KernelPMultiForward<<<block_count, TPB>>>(ins1_arr.value, ins2_arr.value, count, dim, (dtype *const *)vals_arr.value); CheckCudaError(); } __global__ void KernelDivForward(const dtype *const *numerators, const dtype *const *denominators, int count, int *dims, int max_dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { results[count_i][dim_i] = numerators[count_i][dim_i] / denominators[count_i][0]; } } } void DivForward(const vector<const dtype*> 
numerators, const vector<const dtype*> denominators, int count, const vector<int> &dims, vector<dtype*> &results) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = DefaultBlockCount(count * max_dim); NumberPointerArray numerator_arr, denominator_arr, result_arr; numerator_arr.init((dtype**)numerators.data(), count); denominator_arr.init((dtype**)denominators.data(), count); result_arr.init((dtype**)results.data(), count); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); KernelDivForward<<<block_count, TPB>>>(numerator_arr.value, denominator_arr.value, count, dim_arr.value, max_dim, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelDivNumeratorBackward(const dtype *const *losses, const dtype *const *denominator_vals, int count, int *dims, int max_dim, dtype *const *numerator_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { DeviceAtomicAdd(numerator_losses[count_i] + dim_i, losses[count_i][dim_i] / denominator_vals[count_i][0]); } } } __global__ void KernelDivDenominatorBackward(const dtype *const *losses, const dtype *const *numerator_vals, const dtype *const *denominator_vals, int count, int *dims, volatile dtype *block_sums, int *block_counters, dtype *const *denominator_losses) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; __shared__ volatile dtype square; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } int count_i = blockIdx.x; if (threadIdx.x == 0) { is_last_block = false; square = denominator_vals[count_i][0] * denominator_vals[count_i][0]; } __syncthreads(); int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_sum[threadIdx.x] = offset < dims[count_i] ? 
losses[count_i][offset] * numerator_vals[count_i][offset] / square : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_sums[block_sums_offset] = shared_sum[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; sum += block_sums[offset]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { DeviceAtomicAdd(denominator_losses[count_i], -shared_sum[0]); } } } void DivBackward(const vector<const dtype*> &losses, const vector<const dtype*> &denominator_vals, const vector<const dtype*> &numerator_vals, int count, const vector<int> &dims, vector<dtype*> &numerator_losses, vector<dtype*> &denominator_losses) { int max_dim = *max_element(dims.begin(), dims.end()); NumberPointerArray loss_arr, denominator_val_arr, numerator_val_arr, numerator_loss_arr, denominator_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); denominator_val_arr.init((dtype**)denominator_vals.data(), denominator_vals.size()); numerator_val_arr.init((dtype**)numerator_vals.data(), numerator_vals.size()); numerator_loss_arr.init((dtype**)numerator_losses.data(), numerator_losses.size()); denominator_loss_arr.init((dtype**)denominator_losses.data(), denominator_losses.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); int block_count = DefaultBlockCount(count * max_dim); KernelDivNumeratorBackward<<<block_count, TPB>>>(loss_arr.value, denominator_val_arr.value, count, dim_arr.value, max_dim, (dtype *const *)numerator_loss_arr.value); CheckCudaError(); int thread_count = min(NextTwoIntegerPowerNumber(max_dim), TPB); int block_y_count = (max_dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_sums; block_sums.init(block_y_count * count); IntArray block_counters; block_counters.init(count); KernelDivDenominatorBackward<<<block_dim , thread_count>>>(loss_arr.value, numerator_val_arr.value, denominator_val_arr.value, count, dim_arr.value, block_sums.value, block_counters.value, (dtype *const *)denominator_loss_arr.value); CheckCudaError(); } __global__ void KernelFullDivForward(const dtype *const *numerators, const dtype *const *denominators, int count, int dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; results[count_i][dim_i] = numerators[count_i][dim_i] / denominators[count_i][dim_i]; } } void FullDivForward(const vector<const dtype*> numerators, const vector<const dtype*> denominators, int count, int dim, vector<dtype*> &results) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray numerator_arr, denominator_arr, result_arr; numerator_arr.init((dtype**)numerators.data(), count); denominator_arr.init((dtype**)denominators.data(), count); result_arr.init((dtype**)results.data(), count); KernelFullDivForward<<<block_count, TPB>>>(numerator_arr.value, denominator_arr.value, count, dim, (dtype *const *)result_arr.value); 
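    // The matching backward pass (KernelFullDivBackward below) applies
    // d(a/b)/da = 1/b and d(a/b)/db = -a/b^2, accumulated with atomic adds.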
CheckCudaError(); } __global__ void KernelFullDivBackward(const dtype *const *losses, const dtype *const *numerator_vals, const dtype *const *denominator_vals, int count, int dim, dtype *const *numerator_losses, dtype *const *denominator_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; DeviceAtomicAdd(numerator_losses[count_i] + dim_i, losses[count_i][dim_i] / denominator_vals[count_i][dim_i]); DeviceAtomicAdd(denominator_losses[count_i] + dim_i, -losses[count_i][dim_i] * numerator_vals[count_i][dim_i] / (denominator_vals[count_i][dim_i] * denominator_vals[count_i][dim_i])); } } void FullDivBackward(const vector<const dtype*> &losses, const vector<const dtype*> &denominator_vals, const vector<const dtype*> &numerator_vals, int count, int dim, vector<dtype*> &numerator_losses, vector<dtype*> &denominator_losses) { NumberPointerArray loss_arr, denominator_val_arr, numerator_val_arr, numerator_loss_arr, denominator_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); denominator_val_arr.init((dtype**)denominator_vals.data(), denominator_vals.size()); numerator_val_arr.init((dtype**)numerator_vals.data(), numerator_vals.size()); numerator_loss_arr.init((dtype**)numerator_losses.data(), numerator_losses.size()); denominator_loss_arr.init((dtype**)denominator_losses.data(), denominator_losses.size()); int block_count = DefaultBlockCount(count * dim); KernelFullDivBackward<<<block_count, TPB>>>(loss_arr.value, numerator_val_arr.value, denominator_val_arr.value, count, dim, (dtype *const *)numerator_loss_arr.value, (dtype *const *)denominator_loss_arr.value); CheckCudaError(); } __global__ void KernelSplitForward(const dtype *const *inputs, const int *offsets, int count, int dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int offset = offsets[count_i]; results[count_i][dim_i] = inputs[count_i][offset + dim_i]; } } void SplitForward(const vector<const dtype*> &inputs, const vector<int> &offsets, int count, int dim, vector<dtype*> &results) { NumberPointerArray input_arr, result_arr; input_arr.init((dtype**)inputs.data(), inputs.size()); result_arr.init((dtype**)results.data(), results.size()); IntArray offset_arr; offset_arr.init((int*)offsets.data(), offsets.size()); int block_count = DefaultBlockCount(count * dim); KernelSplitForward<<<block_count, TPB>>>(input_arr.value, offset_arr.value, count, dim, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelSplitBackward(const dtype *const *losses, const int *offsets, int count, int dim, dtype *const *input_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; int offset = offsets[count_i]; DeviceAtomicAdd(input_losses[count_i] + offset + dim_i, losses[count_i][dim_i]); } } void SplitBackward(const vector<const dtype*> &losses, const vector<int> offsets, int count, int dim, const vector<dtype*> &input_losses) { NumberPointerArray loss_arr, input_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); input_loss_arr.init((dtype**)input_losses.data(), input_losses.size()); IntArray offset_arr; offset_arr.init((int*)offsets.data(), offsets.size()); int block_count = DefaultBlockCount(count * dim); KernelSplitBackward<<<block_count, 
TPB>>>(loss_arr.value, offset_arr.value, count, dim, (dtype *const *)input_loss_arr.value); CheckCudaError(); } __global__ void KernelSubForward(const dtype *const *minuend, const dtype *const *subtrahend, int count, int *dims, int max_dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { results[count_i][dim_i] = minuend[count_i][dim_i] - subtrahend[count_i][dim_i]; } } } void SubForward(const std::vector<const dtype*> &minuend, const std::vector<const dtype*> &subtrahend, int count, const vector<int> &dims, std::vector<dtype*> &results) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = DefaultBlockCount(count * max_dim); NumberPointerArray minuend_arr, subtrahend_arr, result_arr; minuend_arr.init((dtype**)minuend.data(), count); subtrahend_arr.init((dtype**)subtrahend.data(), count); result_arr.init((dtype**)results.data(), count); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); KernelSubForward<<<block_count, TPB>>>((const dtype* const*)minuend_arr.value, (const dtype *const *)subtrahend_arr.value, count, dim_arr.value, max_dim, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelSubBackward(const dtype *const *losses, int count, int *dims, int max_dim, dtype *const *minuend_losses, dtype *const *subtrahend_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { DeviceAtomicAdd(minuend_losses[count_i] + dim_i, losses[count_i][dim_i]); DeviceAtomicAdd(subtrahend_losses[count_i] + dim_i, -losses[count_i][dim_i]); } } } void SubBackward(const std::vector<const dtype*> &losses, int count, const vector<int> &dims, std::vector<dtype*> &minuend_losses, std::vector<dtype*> &subtrahend_losses) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = DefaultBlockCount(count * max_dim); NumberPointerArray loss_arr, minuend_loss_arr, subtrahend_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); minuend_loss_arr.init((dtype**)minuend_losses.data(), minuend_losses.size()); subtrahend_loss_arr.init((dtype**)subtrahend_losses.data(), subtrahend_losses.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); KernelSubBackward<<<block_count, TPB>>>((const dtype *const *)loss_arr.value, count, dim_arr.value, max_dim, (dtype *const *)minuend_loss_arr.value, (dtype *const *)subtrahend_loss_arr.value); CheckCudaError(); } __global__ void KernelPMultiBackward(const dtype **losses, const dtype *const *in_vals1, const dtype *const *in_vals2, int count, int dim, dtype *const *in_losses1, dtype *const *in_losses2) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; DeviceAtomicAdd(in_losses1[count_i] + dim_i, losses[count_i][dim_i] * in_vals2[count_i][dim_i]); DeviceAtomicAdd(in_losses2[count_i] + dim_i, losses[count_i][dim_i] * in_vals1[count_i][dim_i]); } } void PMultiBackward(const std::vector<dtype*> &losses, const std::vector<dtype*> &in_vals1, const std::vector<dtype*> &in_vals2, int count, int dim, std::vector<dtype*> &in_losses1, std::vector<dtype*> &in_losses2) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray losses_arr, in_vals1_arr, in_vals2_arr, 
in_losses1_arr, in_losses2_arr; losses_arr.init((dtype**)losses.data(), losses.size()); in_vals1_arr.init((dtype**)in_vals1.data(), in_vals1.size()); in_vals2_arr.init((dtype**)in_vals2.data(), in_vals2.size()); in_losses1_arr.init((dtype**)in_losses1.data(), in_losses1.size()); in_losses2_arr.init((dtype**)in_losses2.data(), in_losses2.size()); KernelPMultiBackward<<<block_count, TPB>>>(losses_arr.value, in_vals1_arr.value, in_vals2_arr.value, count, dim, (dtype *const *)in_losses1_arr.value, (dtype *const *)in_losses2_arr.value); CheckCudaError(); } __global__ void KernelPAddForward(const dtype *const *const *ins, int count, int dim, int in_count, dtype *const *vals) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i+= step) { int count_i = i / dim; int dim_i = i % dim; dtype sum = ins[0][count_i][dim_i]; for (int j = 1; j < in_count; ++j) { sum += ins[j][count_i][dim_i]; } vals[count_i][dim_i] = sum; } } void PAddForward(const std::vector<std::vector<dtype*>> &ins, int count, int dim, int in_count, std::vector<dtype*> &vals) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; gpu_addr.reserve(ins.size()); for (const std::vector<dtype*> &x : ins) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); gpu_addr.push_back(arr); } std::vector<dtype**> ins_gpu; ins_gpu.reserve(ins.size()); for (auto &ptr : gpu_addr) { ins_gpu.push_back((dtype**)ptr->value); } NumberPointerPointerArray in_arr; in_arr.init(ins_gpu.data(), ins_gpu.size()); NumberPointerArray out_arr; out_arr.init(vals.data(), vals.size()); int block_count = DefaultBlockCount(count * dim); KernelPAddForward<<<block_count, TPB>>>(in_arr.value, count, dim, in_count, (dtype *const *)out_arr.value); CheckCudaError(); } __global__ void KernelPAddBackward(const dtype **losses, int count, int dim, int in_count, dtype *const *const *in_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int dim_mul_count = dim * count; for (int i = index; i < dim_mul_count * in_count; i += step) { int in_count_i = i / dim_mul_count; int dim_mul_count_i = i % dim_mul_count; int count_i = dim_mul_count_i / dim; int dim_i = dim_mul_count_i % dim; DeviceAtomicAdd(in_losses[in_count_i][count_i] + dim_i, losses[count_i][dim_i]); } } void PAddBackward(const std::vector<dtype*> &losses, int count, int dim, int in_count, std::vector<std::vector<dtype*>> &in_losses) { std::vector<std::shared_ptr<NumberPointerArray>> gpu_addr; gpu_addr.reserve(in_losses.size()); for (const std::vector<dtype*> &x : in_losses) { std::shared_ptr<NumberPointerArray> arr = std::make_shared<NumberPointerArray>(); arr->init((dtype**)x.data(), x.size()); gpu_addr.push_back(arr); } std::vector<dtype**> in_losses_gpu; in_losses_gpu.reserve(in_losses.size()); for (auto &ptr : gpu_addr) { in_losses_gpu.push_back((dtype **)ptr->value); } NumberPointerPointerArray in_loss_arr; in_loss_arr.init(in_losses_gpu.data(), in_losses_gpu.size()); NumberPointerArray out_loss_arr; out_loss_arr.init((dtype**)losses.data(), losses.size()); int block_count = DefaultBlockCount(in_count * count * dim); KernelPAddBackward<<<block_count, TPB>>>(out_loss_arr.value, count, dim, in_count, (dtype *const *const *)in_loss_arr.value); CheckCudaError(); } __global__ void KernelSoftMaxLoss(const dtype **vals, dtype **losses, int *correct_count, int *answers, int batchsize, int count, int dim) { volatile __shared__ int opt_label; volatile __shared__ dtype 
shared_val[TPB];
    volatile __shared__ int64_t max_indexes[TPB];
    volatile __shared__ dtype scores_sum[TPB];
    volatile __shared__ dtype scores[TPB];
    int dim_i = threadIdx.x;
    int count_i = blockIdx.x;
    if (count_i == 0 && dim_i == 0) {
        *correct_count = 0;
    }
    shared_val[dim_i] = dim_i < dim ? vals[count_i][dim_i] : -1e10;
    max_indexes[dim_i] = dim_i;
    __syncthreads();

    // Block-wide argmax reduction. Only the lower half of the threads write at
    // each step so no thread reads a slot another thread is updating.
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            if (shared_val[threadIdx.x + i] > shared_val[threadIdx.x]) {
                shared_val[threadIdx.x] = shared_val[threadIdx.x + i];
                max_indexes[threadIdx.x] = max_indexes[threadIdx.x + i];
            }
        }
        __syncthreads();
    }

    if (threadIdx.x == 0) {
        opt_label = max_indexes[0];
        if (answers[count_i] == opt_label) {
            atomicAdd(correct_count, 1);
        }
    }
    __syncthreads();

    // Softmax with the max subtracted for numerical stability.
    dtype max_score = vals[count_i][opt_label];
    dtype score = dim_i < dim ? cuda_exp(vals[count_i][dim_i] - max_score) : 0.0f;
    scores[dim_i] = score;
    scores_sum[dim_i] = score;

    // Block-wide sum for the softmax denominator, guarded like the argmax above.
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            scores_sum[threadIdx.x] = scores_sum[threadIdx.x] +
                scores_sum[threadIdx.x + i];
        }
        __syncthreads();
    }

    if (dim_i < dim) {
        losses[count_i][dim_i] = (scores[dim_i] / scores_sum[0] -
                (dim_i == answers[count_i] ? 1 : 0)) / batchsize;
    }
}

void SoftMaxLoss(const std::vector<dtype*> &vals, std::vector<dtype*> &losses,
        int *correct_count,
        const std::vector<int> &answers,
        int batchsize,
        int count,
        int dim) {
    if (dim > TPB) {
        abort();
    }
    int thread_count = NextTwoIntegerPowerNumber(dim);
    NumberPointerArray val_arr;
    val_arr.init((dtype**)vals.data(), vals.size());
    NumberPointerArray loss_arr;
    loss_arr.init((dtype**)losses.data(), losses.size());
    IntArray answer_arr;
    answer_arr.init((int*)answers.data(), answers.size());
    KernelSoftMaxLoss<<<count, thread_count>>>(
            const_cast<const dtype **>(val_arr.value),
            const_cast<dtype **>(loss_arr.value),
            correct_count,
            answer_arr.value,
            batchsize,
            count,
            dim);
    CheckCudaError();
}

__global__ void KernelCrossEntropyLoss(const dtype *const *vals, const int *answers, int count,
        dtype factor,
        dtype *const *losses) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < count; i += step) {
        int answer = answers[i];
        DeviceAtomicAdd(losses[i] + answer, - 1 / vals[i][answer] * factor);
    }
}

__global__ void KernelCrossEntropgyLossValue(const dtype *const *vals, const int *answers,
        int count,
        volatile dtype *global_sum,
        int *block_counter,
        dtype *result) {
    __shared__ volatile dtype shared_sum[TPB];
    __shared__ volatile bool is_last_block;
    int index = DeviceDefaultIndex();
    if (index == 0) {
        *block_counter = 0;
    }
    if (threadIdx.x == 0) {
        is_last_block = false;
    }
    shared_sum[threadIdx.x] = 0.0f;
    for (int i = index; i < count; i += blockDim.x * gridDim.x) {
        int answer_offset = answers[i];
        shared_sum[threadIdx.x] -= cuda_log(vals[i][answer_offset]);
    }
    __syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        global_sum[blockIdx.x] = shared_sum[0];
        if (atomicAdd(block_counter, 1) == gridDim.x - 1) {
            is_last_block = true;
        }
    }
    __syncthreads();
    if (is_last_block) {
        dtype sum = 0.0f;
        for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
            sum += global_sum[i];
        }
        shared_sum[threadIdx.x] = sum;
        __syncthreads();
        for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
            if (threadIdx.x < i) {
                shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
            }
            __syncthreads();
        }
        if (threadIdx.x == 0) {
            *result = shared_sum[0];
        }
    }
}

dtype CrossEntropyLoss(const
vector<dtype *> &vals, const vector<int> &answers, int count, dtype factor, vector<dtype *> &losses) { NumberPointerArray val_arr, loss_arr; val_arr.init((dtype**)vals.data(), vals.size()); loss_arr.init((dtype**)losses.data(), losses.size()); IntArray answer_arr; answer_arr.init((int*)answers.data(), answers.size()); KernelCrossEntropyLoss<<<DefaultBlockCount(count), TPB>>>(val_arr.value, answer_arr.value, count, factor, (dtype *const *)loss_arr.value); CheckCudaError(); int block_count = DefaultBlockCount(count); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); KernelCrossEntropgyLossValue<<<block_count, TPB>>>(val_arr.value, answer_arr.value, count, global_sum.value, block_counter.value, result.value); CheckCudaError(); result.copyFromDeviceToHost(); return result.v * factor; } __global__ void KernelMultiCrossEntropyLoss(const dtype *const *vals, const int *const *answers, int count, int dim, dtype factor, dtype *const *losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype val = vals[count_i][dim_i]; dtype grad = (answers[count_i][dim_i] ? (-1 / val) : (1 / (1 - val))) * factor; DeviceAtomicAdd(losses[count_i] + dim_i, grad); } } __global__ void KernelMultiCrossEntropyLossVector(const dtype *const *in_vals, const int *const *answers, int count, int dim, dtype *const *result) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype in_val = in_vals[count_i][dim_i]; dtype v = answers[count_i][dim_i] ? -cuda_log(in_val) : -cuda_log(1 - in_val); result[count_i][dim_i] = v; } } template<typename T> vector<T *> GPUArrayVectors(vector<shared_ptr<GPUArray<T>>> &ptrs, int count, int dim) { vector<T *> result; for (int i = 0; i < count; ++i) { shared_ptr<GPUArray<T>> e(new GPUArray<T>); e->init(dim); ptrs.push_back(e); result.push_back((T *)e->value); } return result; } dtype MultiCrossEntropyLoss(const vector<dtype*> &vals, const vector<vector<int>> &answers, int count, int dim, dtype factor, const vector<dtype*> &losses) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray val_arr, loss_arr; val_arr.init((dtype**)vals.data(), count); loss_arr.init((dtype**)losses.data(), count); vector<shared_ptr<IntArray>> answer_gpus; vector<int *> answer_gpu_pointers; for (auto &answer : answers) { shared_ptr<IntArray> answer_gpu(new IntArray); answer_gpu->init(answer.data(), answer.size()); answer_gpus.push_back(answer_gpu); answer_gpu_pointers.push_back(answer_gpu->value); } IntPointerArray answer_arr; answer_arr.init((int**)answer_gpu_pointers.data(), count); KernelMultiCrossEntropyLoss<<<block_count, TPB>>>(val_arr.value, answer_arr.value, count, dim, factor, (dtype *const *)loss_arr.value); CheckCudaError(); vector<shared_ptr<NumberArray>> nums; vector<dtype *> logged_vec = GPUArrayVectors(nums, count, dim); NumberPointerArray logged_arr; logged_arr.init(logged_vec.data(), count); KernelMultiCrossEntropyLossVector<<<block_count, TPB>>>(val_arr.value, answer_arr.value, count, dim, (dtype *const *)logged_arr.value); CheckCudaError(); vector<shared_ptr<NumberArray>> ce_loss_arrs; vector<dtype *> ce_losses = GPUArrayVectors(ce_loss_arrs, count, 1); for (auto &ptr : ce_loss_arrs) { vector<dtype> vec = ptr->toCpu(); } vector<const dtype *> const_logged_arr; auto return_const = 
[](dtype *v) -> const dtype* { return const_cast<const dtype*>(v); }; transform(logged_vec.begin(), logged_vec.end(), back_inserter(const_logged_arr), return_const); vector<int> dims; for (int i = 0; i < count; ++i) { dims.push_back(dim); } VectorSumForward(const_logged_arr, count, dims, ce_losses); dtype ce_loss_sum = 0.0f; for (auto &ptr : ce_loss_arrs) { vector<dtype> vec = ptr->toCpu(); if (vec.size() != 1) { cerr << "vec size is not 1" << endl; abort(); } dtype l = vec.front() * factor; ce_loss_sum += l; } return ce_loss_sum; } __global__ void KernelKLCrossEntropyLoss(const dtype *const *vals, const dtype *const *answers, int count, int dim, dtype factor, dtype *const *losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype val = vals[count_i][dim_i]; dtype grad = -answers[count_i][dim_i] / val * factor; DeviceAtomicAdd(losses[count_i] + dim_i, grad); } } __global__ void KernelKLCrossEntropyLossVector(const dtype *const *in_vals, const dtype *const *answers, int count, int dim, dtype *const *result) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * dim; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype in_val = in_vals[count_i][dim_i]; dtype v = -answers[count_i][dim_i] * cuda_log(in_val); result[count_i][dim_i] = v; } } dtype KLCrossEntropyLoss(const vector<dtype*> &vals, const vector<shared_ptr<vector<dtype>>> &answers, int count, int dim, dtype factor, const vector<dtype*> &losses) { int block_count = DefaultBlockCount(count * dim); NumberPointerArray val_arr, loss_arr; val_arr.init((dtype**)vals.data(), count); loss_arr.init((dtype**)losses.data(), count); vector<shared_ptr<NumberArray>> answer_gpus; vector<dtype *> answer_gpu_pointers; for (auto &answer : answers) { shared_ptr<NumberArray> answer_gpu(new NumberArray); answer_gpu->init(answer->data(), answer->size()); answer_gpus.push_back(answer_gpu); answer_gpu_pointers.push_back(answer_gpu->value); } NumberPointerArray answer_arr; answer_arr.init((dtype**)answer_gpu_pointers.data(), count); KernelKLCrossEntropyLoss<<<block_count, TPB>>>(val_arr.value, answer_arr.value, count, dim, factor, (dtype *const *)loss_arr.value); CheckCudaError(); vector<shared_ptr<NumberArray>> nums; vector<dtype *> logged_vec = GPUArrayVectors(nums, count, dim); NumberPointerArray logged_arr; logged_arr.init(logged_vec.data(), count); KernelKLCrossEntropyLossVector<<<block_count, TPB>>>(val_arr.value, answer_arr.value, count, dim, (dtype *const *)logged_arr.value); CheckCudaError(); vector<shared_ptr<NumberArray>> ce_loss_arrs; vector<dtype *> ce_losses = GPUArrayVectors(ce_loss_arrs, count, 1); for (auto &ptr : ce_loss_arrs) { vector<dtype> vec = ptr->toCpu(); } vector<const dtype *> const_logged_arr; auto return_const = [](dtype *v) -> const dtype* { return const_cast<const dtype*>(v); }; transform(logged_vec.begin(), logged_vec.end(), back_inserter(const_logged_arr), return_const); vector<int> dims; for (int i = 0; i < count; ++i) { dims.push_back(dim); } VectorSumForward(const_logged_arr, count, dims, ce_losses); dtype ce_loss_sum = 0.0f; for (auto &ptr : ce_loss_arrs) { vector<dtype> vec = ptr->toCpu(); if (vec.size() != 1) { cerr << "vec size is not 1" << endl; abort(); } dtype l = vec.front() * factor; ce_loss_sum += l; } return ce_loss_sum; } __global__ void KernelMax(const dtype *const *v, int count, int dim, volatile dtype *block_maxes, volatile int 
*block_max_is, int *block_counters, int *max_indexes, dtype *max_vals) { __shared__ volatile dtype shared_max[TPB]; __shared__ volatile int shared_max_i[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_max[threadIdx.x] = offset < dim ? v[count_i][offset] : -1e10; shared_max_i[threadIdx.x] = offset; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x] < shared_max[threadIdx.x + i]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } int block_maxes_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_maxes[block_maxes_offset] = shared_max[0]; block_max_is[block_maxes_offset] = shared_max_i[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype max = -1e10; int max_i = 100000; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; if (block_maxes[offset] > max) { max = block_maxes[offset]; max_i = block_max_is[offset]; } } shared_max[threadIdx.x] = max; shared_max_i[threadIdx.x] = max_i; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x + i] > shared_max[threadIdx.x]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { max_vals[count_i] = shared_max[0]; max_indexes[count_i] = shared_max_i[0]; } } } __global__ void KernelSingleMax(const dtype *const *v, int count, int dim, int *max_indexes, dtype *max_vals) { for (int count_i = 0; count_i < count; ++count_i) { dtype max_val = -1e10; int max_i; for (int dim_i = 0; dim_i < dim; ++ dim_i) { if (v[count_i][dim_i] > max_val) { max_val = v[count_i][dim_i]; max_i = dim_i; } } max_indexes[count_i] = max_i; max_vals[count_i] = max_val; } } void Max(const dtype *const *v, int count, int dim, int *max_indexes, dtype *max_vals) { int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB); int block_y_count = (dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_maxes; block_maxes.init(block_y_count * count); IntArray block_max_is, block_counters; block_max_is.init(block_y_count * count); block_counters.init(count); KernelMax<<<block_dim, thread_count>>>(v, count, dim, block_maxes.value, block_max_is.value, block_counters.value, max_indexes, max_vals); CheckCudaError(); #if TEST_CUDA NumberArray max_val_arr; IntArray max_indexer_arr; max_val_arr.init(count); max_indexer_arr.init(count); KernelSingleMax<<<1, 1>>>(v, count, dim, max_indexer_arr.value, max_val_arr.value); CheckCudaError(); vector<int> max_indexer_target(count), max_indexer_gold(count); MyCudaMemcpy(max_indexer_target.data(), max_indexes, count * sizeof(int), cudaMemcpyDeviceToHost); MyCudaMemcpy(max_indexer_gold.data(), max_indexer_arr.value, count * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < count; ++i) { if (max_indexer_target.at(i) != max_indexer_gold.at(i)) { cerr << format("max_indexer_target:%1% max_indexer_gold:%2%") % max_indexer_target.at(i) % max_indexer_gold.at(i) << endl; PrintNums(v, i, dim); abort(); } } #endif CheckCudaError(); } vector<int> 
Predict(const vector<dtype*> &vals, int count, int dim) { NumberPointerArray val_arr; val_arr.init((dtype**)vals.data(), vals.size()); IntArray max_index_arr; max_index_arr.init(vals.size()); NumberArray max_val_arr; max_val_arr.init(vals.size()); Max(val_arr.value, count, dim, max_index_arr.value, max_val_arr.value); return max_index_arr.toCpu(); } __global__ void KernelSum(const dtype *const *v, int count, int dim, volatile dtype *block_sums, int *block_counters, dtype *sum_vals) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_sum[threadIdx.x] = offset < dim ? v[count_i][offset] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_sums[block_sums_offset] = shared_sum[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; sum += block_sums[offset]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { sum_vals[count_i] = shared_sum[0]; } } } void Sum(const dtype *const *v, int count, int dim, dtype *sum_vals) { int thread_count = min(NextTwoIntegerPowerNumber(dim), TPB); int block_y_count = (dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_sums; block_sums.init(block_y_count * count); IntArray block_counters; block_counters.init(count); KernelSum<<<block_dim, thread_count>>>(v, count, dim, block_sums.value, block_counters.value, sum_vals); CheckCudaError(); } __global__ void KernelSoftMaxLossByExp(const dtype *const *exps, int count, int dim, const dtype *const *vals, const dtype *sums, const dtype *max_vals, const int *answers, dtype reverse_batchsize, dtype *const *grads, dtype *losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < dim * count; i += step) { int count_i = i / dim; int dim_i = i % dim; dtype loss = exps[count_i][dim_i] / sums[count_i]; if (dim_i == answers[count_i]) { loss -= 1.0f; } grads[count_i][dim_i] = loss * reverse_batchsize; losses[count_i] = (cuda_log(sums[count_i]) - vals[count_i][answers[count_i]] + max_vals[count_i]) * reverse_batchsize; } } void SoftMaxLossByExp(const dtype *const *exps, int count, int dim, const dtype *const *vals, const dtype *sums, const dtype *max_vals, const int *answers, dtype reverse_batchsize, dtype *const *grads, dtype *losses) { int block_count = DefaultBlockCount(dim * count); KernelSoftMaxLossByExp<<<block_count, TPB>>>(exps, count, dim, vals, sums, max_vals, answers, reverse_batchsize, (dtype *const *)grads, losses); CheckCudaError(); } __global__ void KernelMaxScalarForward(const dtype *const *v, int count, int* dims, int max_dim, volatile dtype *block_maxes, volatile int *block_max_is, int *block_counters, int *max_indexes, dtype *const *max_vals) { __shared__ volatile dtype shared_max[TPB]; __shared__ volatile int 
shared_max_i[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_max[threadIdx.x] = offset < dims[count_i] ? v[count_i][offset] : -1e10; shared_max_i[threadIdx.x] = offset; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x] < shared_max[threadIdx.x + i]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { int block_maxes_offset = blockIdx.x * gridDim.y + blockIdx.y; int max_ii = shared_max_i[0]; if (max_ii < 0 || max_ii >= max_dim) { printf("threadIdx.x == 0 after first reduce max_ii:%d v:%f\n", max_ii, shared_max[0]); for (int i = 0; i < TPB; ++i) { printf("shared_max[%d]:%f shared_max_i[%d]:%d\n", i, shared_max[i], i, shared_max_i[i]); } assert(false); } block_maxes[block_maxes_offset] = shared_max[0]; block_max_is[block_maxes_offset] = shared_max_i[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype max = -1e10; int max_i = 100000; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; int max_ii = block_max_is[offset]; if (max_ii < 0 || max_ii >= max_dim) { printf("offset:%d is_last_block block_maxes[offset]:%f block_max_is[offset]:%d\n", offset, block_maxes[offset], block_max_is[offset]); assert(false); } if (block_maxes[offset] > max) { max = block_maxes[offset]; max_i = block_max_is[offset]; } } shared_max[threadIdx.x] = max; shared_max_i[threadIdx.x] = max_i; // printf("max:%f max_i:%d\n", max, max_i); __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i && shared_max[threadIdx.x + i] > shared_max[threadIdx.x]) { shared_max[threadIdx.x] = shared_max[threadIdx.x + i]; shared_max_i[threadIdx.x] = shared_max_i[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { max_vals[count_i][0] = shared_max[0]; max_indexes[count_i] = shared_max_i[0]; int max_ii = max_indexes[count_i]; if (max_ii < 0 || max_ii >= max_dim) { printf("threadIdx.x == 0 max_i:%d count_i:%d max_val:%f\n", max_indexes[count_i], count_i, max_vals[count_i][0]); assert(false); } } } } void MaxScalarForward(const vector<const dtype*> &inputs, int count, const vector<int> &dims, vector<dtype*> &results, vector<int> &max_indexes) { int max_dim = *max_element(dims.begin(), dims.end()); int thread_count = min(NextTwoIntegerPowerNumber(max_dim), TPB); int block_y_count = (max_dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_maxes; block_maxes.init(block_y_count * count); IntArray block_max_is, block_counters; block_max_is.init(block_y_count * count); block_counters.init(count); NumberPointerArray input_arr; input_arr.init((dtype**)inputs.data(), inputs.size()); NumberPointerArray result_arr; result_arr.init((dtype**)results.data(), results.size()); IntArray max_index_arr; max_index_arr.init(max_indexes.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); KernelMaxScalarForward<<<block_dim, thread_count>>>((const dtype *const *)input_arr.value, count, dim_arr.value, max_dim, block_maxes.value, block_max_is.value, block_counters.value, max_index_arr.value, (dtype *const *)result_arr.value); CheckCudaError(); 
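    // Copy the winning indexes back to the host; MaxScalarBackward later routes
    // each scalar gradient to input_losses[i] + max_indexes[i].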
MyCudaMemcpy(max_indexes.data(), max_index_arr.value, count * sizeof(int), cudaMemcpyDeviceToHost); } __global__ void KernelMaxScalarBackward(const dtype *const *losses, const int *indexes, int count, dtype *const *input_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count; i += step) { DeviceAtomicAdd(input_losses[i] + indexes[i], losses[i][0]); } } void MaxScalarBackward(const vector<const dtype *> &losses, const vector<int> &indexes, int count, const vector<dtype*> &input_losses) { int block_count = DefaultBlockCount(count); NumberPointerArray loss_arr, input_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); input_loss_arr.init((dtype**)input_losses.data(), input_losses.size()); IntArray index_arr; index_arr.init((int*)indexes.data(), indexes.size()); KernelMaxScalarBackward<<<block_count, TPB>>>((const dtype *const *)loss_arr.value, index_arr.value, count, (dtype *const *)input_loss_arr.value); CheckCudaError(); } __global__ void KernelVectorSumForward(const dtype *const *v, int count, int *dims, volatile dtype *block_sums, int *block_counters, dtype *const *results) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_sum[threadIdx.x] = offset < dims[count_i] ? v[count_i][offset] : 0.0f; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y; if (threadIdx.x == 0) { block_sums[block_sums_offset] = shared_sum[0]; if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) { int offset = blockIdx.x * gridDim.y + i; sum += block_sums[offset]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { results[count_i][0] = shared_sum[0]; } } } void VectorSumForward(const vector<const dtype *> &inputs, int count, const vector<int> &dims, vector<dtype*> &results) { int max_dim = *max_element(dims.begin(), dims.end()); int thread_count = min(NextTwoIntegerPowerNumber(max_dim), TPB); int block_y_count = (max_dim - 1 + thread_count) / thread_count; dim3 block_dim(count, block_y_count, 1); NumberArray block_sums; block_sums.init(block_y_count * count); IntArray block_counters; block_counters.init(count); NumberPointerArray input_arr; input_arr.init((dtype**)inputs.data(), inputs.size()); NumberPointerArray result_arr; result_arr.init((dtype**)results.data(), results.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); KernelVectorSumForward<<<block_dim, thread_count>>>((const dtype *const *)input_arr.value, count, dim_arr.value, block_sums.value, block_counters.value, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelVectorSumBackward(const dtype *const *losses, int count, int *dims, int max_dim, dtype *const *input_losses) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int 
dim_i = i % max_dim; if (dim_i < dims[count_i]) { DeviceAtomicAdd(input_losses[count_i] + dim_i, losses[count_i][0]); } } } void VectorSumBackward(const vector<const dtype*> &losses, int count, const vector<int> &dims, vector<dtype*> &input_losses) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = DefaultBlockCount(count * max_dim); NumberPointerArray loss_arr, input_loss_arr; loss_arr.init((dtype**)losses.data(), losses.size()); input_loss_arr.init((dtype**)input_losses.data(), input_losses.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); KernelVectorSumBackward<<<block_count, TPB>>>((const dtype *const *)loss_arr.value, count, dim_arr.value, max_dim, (dtype *const *)input_loss_arr.value); CheckCudaError(); } __global__ void KernelScalarToVectorForward(const dtype* const* inputs, int count, int *dims, int max_dim, dtype *const *results) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count * max_dim; i += step) { int count_i = i / max_dim; int dim_i = i % max_dim; if (dim_i < dims[count_i]) { results[count_i][dim_i] = inputs[count_i][0]; } } } void ScalarToVectorForward(const vector<const dtype*> &inputs, int count, const vector<int> &dims, vector<dtype*> &results) { int max_dim = *max_element(dims.begin(), dims.end()); int block_count = DefaultBlockCount(max_dim * count); NumberPointerArray input_arr; input_arr.init((dtype**)inputs.data(), inputs.size()); NumberPointerArray result_arr; result_arr.init((dtype**)results.data(), inputs.size()); IntArray dim_arr; dim_arr.init(dims.data(), dims.size()); KernelScalarToVectorForward<<<block_count, TPB>>>((const dtype* const *)input_arr.value, count, dim_arr.value, max_dim, (dtype *const *)result_arr.value); CheckCudaError(); } __global__ void KernelScalarToVectorBackward(const dtype *const *losses, int count, int *dims, volatile dtype *block_sums, int *block_counters, dtype *const *input_losses) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; if (threadIdx.x == 0 && blockIdx.y == 0) { block_counters[blockIdx.x] = 0; } if (threadIdx.x == 0) { is_last_block = false; } int count_i = blockIdx.x; int offset = blockIdx.y * blockDim.x + threadIdx.x; shared_sum[threadIdx.x] = offset < dims[count_i] ? 
losses[count_i][offset] : 0.0f;
    __syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
        }
        __syncthreads();
    }
    int block_sums_offset = blockIdx.x * gridDim.y + blockIdx.y;
    if (threadIdx.x == 0) {
        block_sums[block_sums_offset] = shared_sum[0];
        if (atomicAdd(block_counters + blockIdx.x, 1) == gridDim.y - 1) {
            is_last_block = true;
        }
    }
    __syncthreads();
    if (is_last_block) {
        dtype sum = 0.0f;
        for (int i = threadIdx.x; i < gridDim.y; i += blockDim.x) {
            int offset = blockIdx.x * gridDim.y + i;
            sum += block_sums[offset];
        }
        shared_sum[threadIdx.x] = sum;
        __syncthreads();
        for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
            if (threadIdx.x < i) {
                shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i];
            }
            __syncthreads();
        }
        if (threadIdx.x == 0) {
            // The whole incoming vector gradient collapses onto the scalar input.
            DeviceAtomicAdd(input_losses[count_i], shared_sum[0]);
        }
    }
}

void ScalarToVectorBackward(const vector<const dtype*> &losses, int count, const vector<int> &dims,
        vector<dtype*> &input_losses) {
    int max_dim = *max_element(dims.begin(), dims.end());
    int thread_count = min(NextTwoIntegerPowerNumber(max_dim), TPB);
    int block_y_count = (max_dim - 1 + thread_count) / thread_count;
    dim3 block_dim(count, block_y_count, 1);
    NumberArray block_sums;
    block_sums.init(block_y_count * count);
    IntArray block_counters;
    block_counters.init(count);
    NumberPointerArray loss_arr;
    loss_arr.init((dtype**)losses.data(), losses.size());
    NumberPointerArray input_loss_arr;
    input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
    IntArray dim_arr;
    dim_arr.init(dims.data(), dims.size());
    KernelScalarToVectorBackward<<<block_dim, thread_count>>>((const dtype *const *)loss_arr.value,
            count, dim_arr.value, block_sums.value, block_counters.value,
            (dtype *const *)input_loss_arr.value);
    CheckCudaError();
}

__global__ void KernelBiasForward(const dtype *const *in_vals, const dtype *bias, int count,
        int dim,
        dtype *const *vals) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < count * dim; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        vals[count_i][dim_i] = in_vals[count_i][dim_i] + bias[dim_i];
    }
}

void BiasForward(const vector<dtype*> &in_vals, const dtype *bias, int count, int dim,
        const vector<dtype *> &vals) {
    int block_count = DefaultBlockCount(count * dim);
    NumberPointerArray in_arr, val_arr;
    in_arr.init((dtype**)in_vals.data(), in_vals.size());
    val_arr.init((dtype**)vals.data(), vals.size());
    KernelBiasForward<<<block_count, TPB>>>(in_arr.value, bias, count, dim,
            (dtype *const *)val_arr.value);
    CheckCudaError();
}

__global__ void KernelBiasBackward(const dtype *const *losses, int count, int dim,
        dtype *bias_losses,
        dtype *const *in_losses) {
    int index = DeviceDefaultIndex();
    int step = DeviceDefaultStep();
    for (int i = index; i < count * dim; i += step) {
        int count_i = i / dim;
        int dim_i = i % dim;
        // The bias gradient is the sum over instances; the input gradient passes through.
        DeviceAtomicAdd(bias_losses + dim_i, losses[count_i][dim_i]);
        DeviceAtomicAdd(in_losses[count_i] + dim_i, losses[count_i][dim_i]);
    }
}

void BiasBackward(const vector<dtype *> &losses, int count, int dim, dtype *bias_loss,
        const vector<dtype *> input_losses) {
    int block_count = DefaultBlockCount(count * dim);
    NumberPointerArray loss_arr, input_loss_arr;
    loss_arr.init((dtype**)losses.data(), losses.size());
    input_loss_arr.init((dtype**)input_losses.data(), input_losses.size());
    KernelBiasBackward<<<block_count, TPB>>>(loss_arr.value, count, dim, bias_loss,
            (dtype *const *)input_loss_arr.value);
    CheckCudaError();
}

__global__ void KernelSquareSum(const dtype *v, int len, volatile dtype *global_sum, int *block_counter,
dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index = DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { is_last_block = false; } shared_sum[threadIdx.x] = 0.0f; for (int i = index; i < len; i += blockDim.x * gridDim.x) { shared_sum[threadIdx.x] += v[i] * v[i]; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { dtype sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, int len) { int block_count = DefaultBlockCount(len); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); KernelSquareSum<<<block_count, TPB>>>(v, len, global_sum.value, block_counter.value, result.value); CheckCudaError(); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelSquareSum(const dtype *v, const bool *indexers, int count, int dim, volatile dtype *global_sum, int *block_counter, dtype *result) { __shared__ volatile dtype shared_sum[TPB]; __shared__ volatile bool is_last_block; int index = DeviceDefaultIndex(); if (index == 0) { *block_counter = 0; } if (threadIdx.x == 0) { global_sum[blockIdx.x] = 0.0f; is_last_block = false; } int count_i = index / dim; if (index < count * dim && indexers[count_i]) { shared_sum[threadIdx.x] = v[index] * v[index]; } else { shared_sum[threadIdx.x] = 0.0f; } __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { global_sum[blockIdx.x] = shared_sum[0]; if (atomicAdd(block_counter, 1) == gridDim.x - 1) { is_last_block = true; } } __syncthreads(); if (is_last_block) { float sum = 0.0f; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sum += global_sum[i]; } shared_sum[threadIdx.x] = sum; __syncthreads(); for (int i = (blockDim.x >> 1); i > 0; i >>= 1) { if (threadIdx.x < i) { shared_sum[threadIdx.x] += shared_sum[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = shared_sum[0]; } } } dtype SquareSum(const dtype *v, const bool *indexers, int count, int dim) { int block_count = DefaultBlockCountWithoutLimit(count * dim); NumberArray global_sum; global_sum.init(block_count); DeviceInt block_counter; block_counter.init(); DeviceNumber result; result.init(); KernelSquareSum<<<block_count, TPB>>>(v, indexers, count, dim, global_sum.value, block_counter.value, result.value); CheckCudaError(); result.copyFromDeviceToHost(); return result.v; } __global__ void KernelRescale(dtype *v, int len, dtype scale) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < len; i += step) { v[i] *= scale; } } void Rescale(dtype *v, int len, dtype scale) { int block_count = DefaultBlockCount(len); KernelRescale<<<block_count, TPB>>>(v, len, scale); CheckCudaError(); } __global__ void 
KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps, dtype x) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (!is_bias) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x; dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } void UpdateAdam(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); dtype x = 1.0f / (1 - pow(belta1, iter + 1)); KernelUpdateAdam<<<block_count, TPB>>>(val, grad, row, col, is_bias, aux_mean, aux_square, iter, belta1, belta2, alpha, reg, eps, x); CheckCudaError(); } __global__ void KernelUpdateAdamW(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps, dtype x) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iter + 1)) * x; dtype square_plus_eps = aux_square[i] + eps; val[i] = (1 - (is_bias? 0.0f : reg)) * val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } void UpdateAdamW(dtype *val, dtype *grad, int row, int col, bool is_bias, dtype *aux_mean, dtype *aux_square, int iter, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); dtype x = 1.0f / (1 - pow(belta1, iter + 1)); KernelUpdateAdamW<<<block_count, TPB>>>(val, grad, row, col, is_bias, aux_mean, aux_square, iter, belta1, belta2, alpha, reg, eps, x); CheckCudaError(); } __global__ void KernelUpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / row; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_mean[i] = belta1 * aux_mean[i] + (1 - belta1) * grad[i]; aux_square[i] = belta2 * aux_square[i] + (1 - belta2) * grad[i] * grad[i]; dtype lr_t = alpha * cuda_sqrt(1 - cuda_pow(belta2, iters[count_i] + 1)) / (1 - cuda_pow(belta1, iters[count_i] + 1)); dtype square_plus_eps = aux_square[i] + eps; val[i] = val[i] - aux_mean[i] * lr_t / cuda_sqrt(square_plus_eps); } } } __global__ void KernelSelfPlusIters(const bool *indexers, int *iters, int count) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); for (int i = index; i < count; i += step) { if (indexers[i]) { ++iters[i]; } } } void UpdateAdam(dtype *val, dtype *grad, int row, int col, dtype *aux_mean, dtype *aux_square, const bool *indexers, int *iters, dtype belta1, dtype belta2, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); 
KernelUpdateAdam<<<block_count, TPB>>>(val, grad, row, col, aux_mean, aux_square, indexers, iters, belta1, belta2, alpha, reg, eps); CheckCudaError(); block_count = DefaultBlockCount(col); KernelSelfPlusIters<<<block_count, TPB>>>(indexers, iters, col); CheckCudaError(); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); KernelUpdateAdagrad<<<block_count, TPB>>>(val, grad, row, col, aux_square, alpha, reg, eps); CheckCudaError(); } __global__ void KernelUpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, dtype alpha, dtype reg, dtype eps) { int index = DeviceDefaultIndex(); int step = DeviceDefaultStep(); int len = row * col; for (int i = index; i < len; i += step) { int count_i = i / col; if (indexers[count_i]) { if (row > 1 && col > 1) { grad[i] += val[i] * reg; } aux_square[i] = aux_square[i] + grad[i] * grad[i]; val[i] = val[i] - grad[i] * alpha / cuda_sqrt(aux_square[i] + eps); } } } void UpdateAdagrad(dtype *val, dtype *grad, int row, int col, dtype *aux_square, const bool *indexers, dtype alpha, dtype reg, dtype eps) { int block_count = DefaultBlockCount(row * col); KernelUpdateAdagrad<<<block_count, TPB>>>(val, grad, row, col, aux_square, indexers, alpha, reg, eps); CheckCudaError(); } void *GraphHostAlloc() { void *m; CallCuda(cudaHostAlloc(&m, 10000000, cudaHostAllocWriteCombined)); if (m == NULL) { abort(); } return m; } }
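The UpdateAdam/UpdateAdamW wrappers above precompute the first-moment bias correction on the host as x = 1 / (1 - belta1^(iter+1)) and fold the second-moment correction sqrt(1 - belta2^(iter+1)) into the per-element step inside the kernel. Below is a minimal self-contained CUDA sketch of that same update rule, assuming plain float buffers and a hypothetical AdamStepSketch kernel instead of the library's dtype, NumberArray and DefaultBlockCount helpers, and omitting the weight-decay (reg) term:

// Illustrative sketch, not part of the original library: bias-corrected Adam step.
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

__global__ void AdamStepSketch(float *val, const float *grad, float *mean, float *square,
        int len, float beta1, float beta2, float alpha, float eps, int iter, float x) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len) return;
    mean[i]   = beta1 * mean[i]   + (1.0f - beta1) * grad[i];            // first moment
    square[i] = beta2 * square[i] + (1.0f - beta2) * grad[i] * grad[i];  // second moment
    float lr_t = alpha * sqrtf(1.0f - powf(beta2, iter + 1)) * x;        // bias-corrected step size
    val[i] -= mean[i] * lr_t / sqrtf(square[i] + eps);
}

int main() {
    const int len = 4, iter = 0;
    const float beta1 = 0.9f, beta2 = 0.999f, alpha = 1e-3f, eps = 1e-8f;
    float h_val[len]  = {1.0f, 2.0f, 3.0f, 4.0f};
    float h_grad[len] = {0.1f, -0.2f, 0.3f, -0.4f};
    float *d_val, *d_grad, *d_mean, *d_square;
    cudaMalloc(&d_val, len * sizeof(float));
    cudaMalloc(&d_grad, len * sizeof(float));
    cudaMalloc(&d_mean, len * sizeof(float));
    cudaMalloc(&d_square, len * sizeof(float));
    cudaMemcpy(d_val, h_val, len * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_grad, h_grad, len * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(d_mean, 0, len * sizeof(float));
    cudaMemset(d_square, 0, len * sizeof(float));
    float x = 1.0f / (1.0f - powf(beta1, iter + 1));  // host-side bias correction, as in UpdateAdam
    AdamStepSketch<<<1, 128>>>(d_val, d_grad, d_mean, d_square, len,
                               beta1, beta2, alpha, eps, iter, x);
    cudaDeviceSynchronize();
    cudaMemcpy(h_val, d_val, len * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < len; ++i) printf("val[%d] = %f\n", i, h_val[i]);
    cudaFree(d_val); cudaFree(d_grad); cudaFree(d_mean); cudaFree(d_square);
    return 0;
}

For the first step (iter = 0) this gives x = 1/(1 - 0.9) = 10 and sqrt(1 - 0.999) ≈ 0.0316, so the effective step size is roughly 0.316 * alpha, which is the usual Adam bias-correction behaviour that the precomputed x reproduces without recomputing pow(belta1, iter + 1) per element.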
01d539248843884bec406a5ca60996b59e7af477.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void inc(int *array, size_t n){
  size_t idx = threadIdx.x+blockDim.x*blockIdx.x;
  while (idx < n){
    array[idx]++;
    idx += blockDim.x*gridDim.x; // grid-stride loop
  }
}
01d539248843884bec406a5ca60996b59e7af477.cu
#include "includes.h"

__global__ void inc(int *array, size_t n){
  size_t idx = threadIdx.x+blockDim.x*blockIdx.x;
  while (idx < n){
    array[idx]++;
    idx += blockDim.x*gridDim.x; // grid-stride loop
  }
}
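The inc kernel above ships without a driver. The sketch below is an assumed, typical host-side launcher (names such as d_array and the 64x256 launch shape are illustrative, not taken from the original repository); it repeats the kernel so the example compiles on its own, zero-fills a device array, launches a fixed-size grid that the stride loop then walks across the full range, and checks the result:

// Illustrative sketch, not part of the original file.
#include <cstdio>
#include <cuda_runtime.h>

// Same grid-stride kernel as above, repeated so this sketch is self-contained.
__global__ void inc(int *array, size_t n){
  size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
  while (idx < n){
    array[idx]++;
    idx += blockDim.x * gridDim.x; // grid-stride loop
  }
}

int main(){
  const size_t n = 1 << 20;
  int *d_array = nullptr;
  cudaMalloc(&d_array, n * sizeof(int));
  cudaMemset(d_array, 0, n * sizeof(int));
  // A fixed-size grid is enough: the stride loop covers the rest of the range.
  inc<<<64, 256>>>(d_array, n);
  cudaDeviceSynchronize();
  int first = -1, last = -1;
  cudaMemcpy(&first, d_array, sizeof(int), cudaMemcpyDeviceToHost);
  cudaMemcpy(&last, d_array + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
  printf("array[0]=%d array[n-1]=%d (both should be 1)\n", first, last);
  cudaFree(d_array);
  return 0;
}

The grid-stride form is what lets one launch configuration handle any n, which is why the kernel loops instead of assuming one thread per element.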
e0674309317f2c3e7a457845743cdd049008ee73.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<time.h> #include"Passenger.h" #include"Aircraft.h" //this structure defines a block (it can be an exit block or a aisle block thats all //it also stores the id of the Passenger residing on that block) typedef struct block { //Now more occ as it is useless int passid; // -1 means unoccupied or Pasengerid int exit; // if this one is exit one }block; // Delta z value is 1 inch so each row has 30 inch of aile length in front of it // void pass_input(Passenger P[],int n); __device__ void get_Aisle_Range(int range[],int i, int N) { if(i>=1 && i<=N/2 - 1) { range[0] = (i-1)*30; range[1] = range[0] +30 - 1; } else if(i == N/2) { range[0] = (i-1)*30; range[1] = range[0] + 50 -1; } else if(i == N/2 + 1) { range[0] = (i-2)*30 + 50; range[1] = range[0] + 50 -1; } else if(i>N/2+1 && i<=N-1) { range[0] = (i-1)*30+40; range[1] = range[0] +30 - 1; } //return range; } /* __device__ int select_exit(Passenger P, int exit[]) { int ans =0; if ((exit[0] == 1 || exit[1] == 1) && (exit[2]==1 || exit[3]==1 || exit[4]==1 || exit[5]==1) && P.x<=470){ if(P.x<470-P.x) { ans = (exit[0] == 1) ? 0:1; } else{ if(exit[2]==1 || exit[3]==1){ ans = (exit[2] == 1) ? 2:3; } else if(exit[4]==1 || exit[5]==1){ ans = (exit[4] == 1) ? 4:5; } } } else if ((exit[6] == 1 || exit[7] == 1) && (exit[2]==1 || exit[3]==1 || exit[4]==1 || exit[5]==1) && P.x>470){ if(940-P.x<P.x-470){ ans = (exit[6] == 1) ? 6:7; } else{ if(exit[4]==1 || exit[5]==1){ ans = (exit[4] == 1) ? 4:5; } else if(exit[2]==1 || exit[3]==1){ ans = (exit[2] == 1) ? 2:3; } } } return ans; } */ __device__ int select_exit(Passenger P, int exit[]) { int i,ans =-1; if(P.x<470) { if(P.x<470-P.x) { if(exit[0]==1||exit[1]==1) ans=0; } else { if(exit[2]==1||exit[3]==1||exit[4]==1||exit[5]==1) ans=3; } } else { if(940-P.x<P.x-470) { if(exit[7]==1||exit[8]==1) ans=7; } else { if(exit[2]==1||exit[3]==1||exit[4]==1||exit[5]==1) ans=3; } } if(ans==-1) { for(i=0;i<6;++i) { if(exit[i]==1) return i; } } else return ans; } __device__ int get_direction(Passenger p, block A[], int exitnum){ if(exitnum == 0 || exitnum == 1){ if(p.x-4 > 0){ return 1; } else if(p.x-4 <= 0){ return 0; } } else if (exitnum == 6 || exitnum == 7) { if(p.y+4 < 940){ return -1; } else if(p.y >= 940){ return 0; } } else if ((exitnum == 2 || exitnum == 3)) { if(p.x < 450){ return -1; } if(p.x > 500){ return 1; } if(p.x >= 450 || p.x <= 500){ return 0; } } else if ((exitnum == 4 || exitnum == 5)) { if(p.x < 500){ return -1; } if(p.x > 550){ return 1; } if(p.x >= 500 || p.x <= 550){ return 0; } } return 0; } /* __global__ void map_Passenger_to_exit(Passenger P[], int seat[100][100], block C[][55],int exit[]){ int k,j,m,i =2, rownm; while(i<6 && exit[i]!=0){ if(i==2) { rownm = 15; j=0; } if(i==3) { rownm = 15; j=5; } if(i==4) { rownm = 16; j=0; } if(i==5){ rownm = 16; j=5; } for(k=0;k<35;){ for( m = k; m < k + (int)P[ seat[rownm][j] ].diameter ; ++m){ C[i-2][m].passid = seat[rownm][j]; } seat[rownm][j] = 0; //C[i-2][p[seat[rownm][j]].diameter-1].passid = -1*seat[rownm][j]; if(i==2 || i==4){ ++j; } else if(i==3 || i==5){ --j; } k=k+17; } ++i; } } */ __global__ void movement_to_exit(block A[],block B[4][55],block C[4][55],Passenger P[] ,int seat[][100],int d_exit[],int numPass) //runs for each Passenger and make his movmenent according to the positions { // Now we have to map the thread id with the passennger id int i=threadIdx.x,k; int range[2]; int j,count=0; int tex,ex,dir; // int 
exit[]={1,1,0,0,1,1,0,0}; if(i < numPass) { switch(P[i].status) { //printf("Hello\n" ); case 0: //the Passenger is in his seat aisle (x=row number y = (1-6)column in seat ) if(P[i].y == 2 || P[i].y==3) { get_Aisle_Range(range, P[i].x , 30); // range[0]=0; // range[1]=50; count=0; for(j=range[0];j<range[1];++j) { if(A[j].passid == -1) { count++; if(count >= P[i].diameter) break; } else count=0; } if(j<range[1]+1) { for(k=j;k>=j-P[i].diameter;--k) A[k].passid=i; seat[P[i].x][P[i].y]=-1; P[i].x=k; P[i].y=j; P[i].status=1; P[i].res=0; } } else { if(P[i].y < 2) { if(seat[P[i].x][P[i].y+1]==-1) { if(P[i].res==60) { P[i].y++; seat[P[i].x][P[i].y-1]=-1; seat[P[i].x][P[i].y] = i;// P[i].res=0; } else P[i].res++; } } else { if(seat[P[i].x][P[i].y-1]==-1) { if(P[i].res==60) { P[i].y--; seat[P[i].x][P[i].y+1]=-1; seat[P[i].x][P[i].y] = i;// P[i].res=0; } else P[i].res++; } } } break; //done till here ;D //comment starts here: case 1: // the Passenger is in aisle and here the x value that tell the starting of the Passenger // y is the ending point of the Passenger // Select the exit and try to move towards the aisle point of that exit ex = select_exit(P[i], d_exit); // Create an exit array that contain 0 if the exit is not open and 1 if it is open dir= get_direction(P[i],A,ex); P[i].ans=ex; P[i].dir=dir; if(dir == 1) { //move up if(A[P[i].x-2].passid == -1) { if(P[i].speed==1.0f) { if(P[i].res==2) { P[i].x-=2; P[i].y-=2; A[P[i].x].passid = i; A[P[i].x+1].passid = i; A[P[i].y+1].passid = -1; A[P[i].y+2].passid = -1; P[i].res=0; } else P[i].res++; } else { if(P[i].speed==1.5f && A[P[i].x-3].passid == -1) { if(P[i].res==2) { P[i].x-=3; P[i].y-=3; A[P[i].x+1].passid = i; A[P[i].x+2].passid = i; A[P[i].x].passid = i; A[P[i].y+1].passid = -1; A[P[i].y+2].passid = -1; A[P[i].y+3].passid = -1; P[i].res=0; } else P[i].res++; } else P[i].res=0; } } else P[i].res=0; //else dont move } else { if(dir==-1) { //move down if(A[P[i].y+2].passid == -1) { if(P[i].speed==1.0f) { if(P[i].res==2) { P[i].x+=2; P[i].y+=2; A[P[i].y].passid = i; A[P[i].y-1].passid = i; A[P[i].x-1].passid = -1; A[P[i].x-2].passid = -1; P[i].res=0; } else P[i].res++; } else { if(A[P[i].y+3].passid == -1) { if(P[i].res==2) { P[i].x+=3; P[i].y+=3; A[P[i].y].passid = i; A[P[i].y-2].passid = i; A[P[i].y-1].passid = i; A[P[i].x-1].passid = -1; A[P[i].x-2].passid = -1; A[P[i].x-3].passid = -1; P[i].res=0; } else P[i].res++; } else P[i].res=0; } } else P[i].res=0; } else { P[i].res=0; //stay and jump to B or C if(ex==0||ex==1||ex==6||ex==7) { //Going to B tex=ex; if(ex==6||ex==7) tex=ex-5; // Going to B[tex] for(j=50;B[tex][j].passid==-1&&j> 50-P[i].diameter ;--j); if(50 - j == P[i].diameter) { for(k=P[i].x;k<=P[i].y;++k) A[k].passid=-1; P[i].x = tex; P[i].y = j; for(;j<=50;++j) B[tex][j].passid=i; P[i].status=2; } } else { P[i].res=0; tex=ex-2; // Going to B[tex] for(j=50;C[tex][j].passid==-1&&j>50-P[i].diameter;--j); if(50 - j == P[i].diameter) { for(k=P[i].x;k<=P[i].y;++k) A[k].passid=-1; P[i].x = tex; P[i].y = j; for(;j<=50;++j) C[tex][j].passid=i; P[i].status=3; } } } } break; case 2: // the Passenger is in midle of the exit front and end exit aisles i.e seat exit y represent the position in the aisle and // x represent which aisle 1 / 2 / 3 / 4 if(P[i].y <= 0||(P[i].speed==1.0f && P[i].y-2 <= 0)||(P[i].speed==1.5f && P[i].y-3 <= 0) ) { for(j=0;j<10; ++j) { if(B[P[i].x][j].passid ==i) B[P[i].x][j].passid=-1; } P[i].status = 4; //Passenger is out of the plane } else { if(P[i].speed == 1.0f) { if(B[P[i].x][P[i].y-2].passid ==-1) { //move closer to 
the exit P[i].y-=2; B[P[i].x][P[i].y].passid = i; B[P[i].x][P[i].y + 1].passid = i; B[P[i].x][(P[i].y + (int)P[i].diameter + 1)].passid =-1; B[P[i].x][(P[i].y + (int)P[i].diameter + 2)].passid =-1; } } else { if(B[P[i].x][P[i].y -3 ].passid ==-1) { //move closer to the exit P[i].y-=3; B[P[i].x][P[i].y].passid = i; B[P[i].x][P[i].y + 2].passid = i; B[P[i].x][P[i].y + 1].passid = i; B[P[i].x][P[i].y + (int)P[i].diameter + 1].passid =-1; B[P[i].x][P[i].y + (int)P[i].diameter + 2].passid =-1; B[P[i].x][P[i].y + (int)P[i].diameter + 3].passid =-1; } } } break; case 3: // the Passenger is in midle of the midle exit aisles i.e seat exit y represent the position in the aisle and // x represent which aisle 1 / 2 / 3 / 4 if(P[i].y <= 0||(P[i].speed==1.0f && P[i].y-2 <= 0)||(P[i].speed==1.5f && P[i].y-3 <= 0) ) { for(j=0;j<10; ++j) { if(C[P[i].x][j].passid ==i) C[P[i].x][j].passid=-1; } P[i].status = 4; //Passenger is out of the plane } else { if(P[i].speed == 1.0f) { if(C[P[i].x][P[i].y-2].passid ==-1) { //move closer to the exit if(P[i].res==1) { P[i].y-=2; C[P[i].x][P[i].y ].passid = i; C[P[i].x][P[i].y + 1].passid = i; C[P[i].x][P[i].y + (int)P[i].diameter + 1].passid =-1; C[P[i].x][P[i].y + (int)P[i].diameter + 2].passid =-1; } else P[i].res++; } else P[i].res=0; } else { if(P[i].speed == 1.5f&&C[P[i].x][P[i].y -3].passid ==-1) { //move closer to the exit if(P[i].res==1) { P[i].y-=3; C[P[i].x][P[i].y ].passid = i; C[P[i].x][P[i].y + 2].passid = i; C[P[i].x][P[i].y + 1].passid = i; C[P[i].x][P[i].y + (int)P[i].diameter + 1].passid =-1; C[P[i].x][P[i].y + (int)P[i].diameter + 2].passid =-1; C[P[i].x][P[i].y + (int)P[i].diameter + 3].passid =-1; } else P[i].res++; } else P[i].res=0; } } break; case 4: P[i].x=-1; P[i].y=-1; }; } } void map_Passenger_to_exit(Passenger P[],int seat[][100], block C[][55],block B[][55],int h_exit[]) { int i,k,j,l; for(l=2;l<6;++l) { if(h_exit[l]==1)//the middle exit 2,3,4,5 { if(l%2==0) { for(i=0;i<3;i++) { k=seat[15+(l-2)/2][i]; if(k!=-1) { for(j=0;j<P[k].diameter;++j) C[l-2][i*17 + j].passid=k; P[k].x=l-2; P[k].status = 3; P[k].y= i*17; P[k].res=0; } } } else { for(i=3;i<6;i++) { k=seat[15+(l-2)/2][i]; if(k!=-1) { for(j=0;j<P[k].diameter;++j) C[l-2][(5-i)*17 + j].passid=k; P[k].x=l-2; P[k].status = 3; P[k].y= (5-i)*17; P[k].res=0; } } } } } } //Main int main() { srand(time((0))); //Aircraft* air= input(); int numPass,i,j; int count1=0; scanf("%d",&numPass); Passenger *h_P =(Passenger *)malloc(sizeof(Passenger)*numPass); Passenger *P; pass_input(h_P,numPass); char aircraftName[30]; scanf("%s",aircraftName); Aircraft A; aircraftInput(A,aircraftName); // Seating Arrangement Assigning Each Passenger location to sit randomely // Think something to make sure the random function does not send it to infinite loop int h_seat[A->row][A->column]; // initialise exact array (compile with check bound) check bound int (*seat)[100]; for(i=0;i<A->row;++i) { for(j=0;j<A->column;++j) h_seat[i][j]=-1; } //all seats are vacant right now //TODO start id with 1 int r_row,r_col; for(i=0;i<numPass;++i) { r_row=rand()%30; //should be defined in the header file TODO r_col=rand()%6; //should be defined in the header file TODO if(h_seat[r_row][r_col]==-1) { h_P[i].x=r_row; h_P[i].y=r_col; if(r_row<0||r_col<0) { printf("Olala\n"); return 0; } else h_seat[r_row][r_col]=i; printf("%d %d\n",r_row,r_col); } else { i--; } } // for(i=0;i<30;++i) //{ // for(j=0;j<10;++j) // printf("%d ",h_seat[i][j]+1); //printf("\n"); // } // Now each row is occupied by some Passengers // Each Passenger is sitting in a 
row and each row is having corresponding aisle array portion in front of it. // The Passenger can move to the aisle A[] in front of its row if it is unoccupied printf("Seating Done\n"); int aisleLength = ((A->row - 2) * 30 ) + 100); block h_A[aisleLength]; //length of aisle should be general block* A; // A is the aisle //Each Element of the for(j=0;j<aisleLength;++j) { h_A[j].passid=-1; h_A[j].exit=0; } // the aisle is empty right now //Now there are 4 Normal Gate Exits and //Exit Paths are of 2 types 1 end and other in the middle each one will have different speeds block h_B[4][55]; // Nornal Exit Paths 2 on each ends of the plane block h_C[4][55]; // Seat Exit 2 in the middle of the plane block (*B)[55]; // Nornal Exit Paths 2 on each ends of the plane block (*C)[55]; // Seat Exit 2 in the middle of the plane for(i=0;i<4;++i) { for(j=0;j<55;++j) { h_B[i][j].passid=-1; h_C[i][j].passid=-1; h_B[i][j].exit=0; h_C[i][j].exit=0; } } // set up the exits all the B exits are empty // C exits or the middle exits are occupied by people //int h_exit[6] = {1,1,1,1,1,1}; //exit is 1 for those wxits which are open and 0 for those which are close //block A[],block B[4][55],block C[4][55],Passenger P[] ,int seat[][100],int numPass //hipMalloc((void **) &array1_d , WIDTH*WIDTH*sizeof (int) ) ; // Here the game starts //Emergency! Emergency! Emergency! Run all of you Out of the plane printf("Enter 1 if the exit is open and 0 if the exit is close for all the 8 exits"); int h_exit[8]; int *d_exit; for(i=0;i<8;++i) scanf("%d",&h_exit[i]); //__global__ void movement_to_exit(block A[],block B[4][55],block C[4][55],Passenger P[] ,int seat[][100],int numPass) //runs for each Passenger and make his movmenent according to the positions int numout=0,numprev=0; j=0; //change the variable name to some specific time var hipMalloc((void **) &P , numPass*sizeof (Passenger) ) ; hipMalloc((void **) &B , (55*4)*sizeof (block) ) ; hipMalloc((void **) &C , (55*4)*sizeof (block) ) ; hipMalloc((void **) &A , 1000*sizeof (block) ) ; hipMalloc((void **) &seat , (100*100)*sizeof (int) ) ; hipMalloc((void **) &d_exit , (8)*sizeof (int) ) ; // select exit map_Passenger_to_exit(h_P,h_seat, h_C,h_B,h_exit); for(j=0;j<4;++j) { for(i=0;i<55;++i) { printf("%d",h_C[j][i]); } printf("\n"); } count1=0; int filecounter = 0; while(numout<numPass) { ++filecounter; // if(j==1000) // break; numprev=numout; numout=0; /* for(i=0;i<30;++i) { for(j=0;j<10;++j) printf("%d ",h_seat[i][j]+1); printf("\n"); } */ /* for(i=0;i<numPass;++i) { if(P[i].status!=4) printf("Passengr %d : (%d,%d) : %d : ans : %d : dir : %d\n",i,h_P[i].x,h_P[i].y,h_P[i].status,h_P[i].ans,h_P[i].dir); } */ //printf("Passengr %d : (%d,%d) : %d : ans : %d : dir : %d\n",3,h_P[3].x,h_P[3].y,h_P[3].status,h_P[3].ans,h_P[3].dir); hipMemcpy ( P , h_P , numPass*sizeof (Passenger) , hipMemcpyHostToDevice); hipMemcpy ( seat , h_seat , 100*100*sizeof (int) , hipMemcpyHostToDevice); hipMemcpy ( C , h_C , 4*55*sizeof (block) , hipMemcpyHostToDevice); hipMemcpy ( B , h_B , 4*55*sizeof (block) , hipMemcpyHostToDevice); hipMemcpy ( A , h_A , 1000*sizeof (block) , hipMemcpyHostToDevice); hipMemcpy ( d_exit , h_exit , 8*sizeof (int) , hipMemcpyHostToDevice); hipLaunchKernelGGL(( movement_to_exit), dim3(1),dim3(numPass) , 0, 0, A,B,C,P,seat,d_exit,numPass); hipError_t err1 = hipPeekAtLastError(); hipDeviceSynchronize(); //printf( "Got CUDA error ... 
%s \n", hipGetErrorString(err1)); hipMemcpy ( h_P , P , numPass*sizeof (Passenger) , hipMemcpyDeviceToHost); hipMemcpy ( h_seat , seat , 100*100*sizeof (int) , hipMemcpyDeviceToHost); hipMemcpy ( h_C , C , 4*55*sizeof (block) , hipMemcpyDeviceToHost); hipMemcpy ( h_B , B , 4*55*sizeof (block) , hipMemcpyDeviceToHost); hipMemcpy ( h_A , A , 1000*sizeof (block) , hipMemcpyDeviceToHost); // creating file FILE *fp; char filename[] = "output"; char str[100]; sprintf(str, "%d", filecounter); strcat(filename,str); strcat(filename,".txt"); fp = fopen(filename, "w"); int global[1000][150]; for (int i = 0; i <1000; ++i) { for (int j = 0; j < 150; ++j) { fprintf(fp, "%d", global[i][j]); } fprintf(fp, "\n", ); } fprintf(fp, "This is testing...\n"); fclose(fp); //file creation complete for(i=0;i<numPass;i++) { if(h_P[i].status == 4) numout++; // printf("%d\n",h_P[i].status); } if(numprev==numout) { printf("*"); count1++; } else { count1=0; printf("%d %d\n",numout,j); } if(numout==numPass) break; //printf("%d %d\n",numout,j); if(count1>100) { for(i=0;i<numPass;i++) { if(h_P[i].status!=4) { printf("Passengr %d : (%d,%d) : %d : ans : %d : dir : %d\n",i,h_P[i].x,h_P[i].y,h_P[i].status,h_P[i].ans,h_P[i].dir); } } // break; } // printf("%d\t %d\n",numout,j); j++; } float timeSteps = 40.6; printf("%f\n",j*timeSteps + 7000.0); // printf("%f\n",j*timeSteps); return 0; } int random(int min,int max); void pass_input(Passenger *P,int n) { int i;//r; srand(time(0)); Passenger *tp=P; for(i=0;i<n;++i,tp++) { //r=rand(); // printf("%d\n",r); tp->id=i; // tp->x= // tp->sex=random(0,1); // Male or female(random 0-1) tp->status = 0; tp->Mtime=tp->sex?random(875,1750):random(920,1950); tp->Wtime=50; tp->Rtime=random(400,700); // Random (500-1000)ms tp->fear=-1; //fear value 0 tp->agility=-1; // agility value tp->diameter=random(9,15);//(Random ) // diameter occupied by passenger tp->totaltime=0; //total time to evacuate tp->totalDist=0; //total distance to exit if(tp->sex==0){ float t = (float)random(10,15); tp->speed=t/10; //Random (1-1.5 ) speed of passenger } else{ float t = (float)random(9,12); tp->speed=t/10; } tp->grpstatus=-1; // Not in this paper in group or not tp->timeSteps=178; // minimum unit of time = 178 miliseconds tp->res=0; printf("id : %d , sex : %d , Mtime : %d, Rtime : %d\n",tp->id,tp->sex,tp->Mtime,tp->Rtime); } } Aircraft aircraftInput(Aircraft *A,char name[]){ char filename1[100]; strcat(filename1,".txt"); FILE *fp; char buff[255]; int res[10]; int k =0; fp = fopen(filename1, "r"); while(fgets(buff, 80, fp) != NULL) { //fscanf(fp, "%s", buff); //printf("%s\n", buff ); int result = atoi(buff); //printf("%d\n", result); res[k] = result; ++k; } fclose(fp); A->row = res[0]; A->column = res[1]; A->numOfExitPassage = res[2]; A->maxNumPassenger = res[3]; } void createGlobalMatrix(int global[1000][150], block h_seat, block h_A, block h_B, block h_C){ //main exit 1 int k =0, i=0; for(k =0;k<50;++k){ for (i = 0; i < 55; ++i) { if(h_B[0][i].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } } int temp = i; if(h_A[k].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } for (i = 54; i >=0; --i) { if(h_B[1][i].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } } } // 1-14 seats int p = 0; for (i = 50; i < (14*30)+50; ++i) { int u=0; for (int j = 0; j < 3; ++j) { if (h_seat[p][j] == -1) { for (; u < 18*(j+1); ++u) { global[i][u] = 0; } } else{ for (; u < 18*(j+1); ++u) { global[i][u] = 1; } } } global[i][u] = h_A[i]; int u=0; for (int j = 3; j < 6; ++j) { if (h_seat[p][j] == -1) { for 
(; u < 18*(j+1); ++u) { global[i][u] = 0; } } else{ for (; u < 18*(j+1); ++u) { global[i][u] = 1; } } } if ((i+1-50)%30 == 0) { ++p; } } //middle exits for(k =i;k<i+50;++k){ int i =0; for (j = 0; j < 55; ++j) { if(h_C[0][j].passid == -1){ global[k][j] = 0; } else{ global[k][j] = 1; } } int temp = i; if(h_A[k].passid == -1){ global[k][j] = 0; } else{ global[k][j] = 1; } for (j = 54; j >=0; --j) { if(h_C[1][j].passid == -1){ global[k][j+1] = 0; } else{ global[k][j+1] = 1; } } } // middle exits correct the second loop for j.. j is not in the sync for(;k<i+100;++k){ int i =0; for (j = 0; j < 55; ++j) { if(h_C[2][j].passid == -1){ global[k][j] = 0; } else{ global[k][j] = 1; } } int temp = i; if(h_A[k].passid == -1){ global[k][j] = 0; } else{ global[k][j] = 1; } for (j = 54; j >=0; --j) { if(h_C[3][j].passid == -1){ global[k][j+1] = 0; } else{ global[k][j+1] = 1; } } } // 17 - 30 seats for (i = k; i < (28*30)+150; ++i) { int u=0; for (int j = 0; j < 3; ++j) { if (h_seat[p][j] == -1) { for (; u < 18*(j+1); ++u) { global[i][u] = 0; } } else{ for (; u < 18*(j+1); ++u) { global[i][u] = 1; } } } global[i][u] = h_A[i]; int u=0; for (int j = 3; j < 6; ++j) { if (h_seat[p][j] == -1) { for (; u < 18*(j+1); ++u) { global[i][u] = 0; } } else{ for (; u < 18*(j+1); ++u) { global[i][u] = 1; } } } if ((i+1-150)%30 == 0) { ++p; } } //end exits int q = i+50; for(k =i;k<q;++k){ int i =0; for (i = 0; i < 55; ++i) { if(h_B[2][i].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } } int temp = i; if(h_A[k].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } for (i = 54; i >=0; --i) { if(h_B[3][i].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } } } } int random(int min,int max) { int r = rand(); r=r%(max-min+1); r=r+min; return r; }
e0674309317f2c3e7a457845743cdd049008ee73.cu
#include<stdio.h> #include<stdlib.h> #include<time.h> #include"Passenger.h" #include"Aircraft.h" //this structure defines a block (it can be an exit block or a aisle block thats all //it also stores the id of the Passenger residing on that block) typedef struct block { //Now more occ as it is useless int passid; // -1 means unoccupied or Pasengerid int exit; // if this one is exit one }block; // Delta z value is 1 inch so each row has 30 inch of aile length in front of it // void pass_input(Passenger P[],int n); __device__ void get_Aisle_Range(int range[],int i, int N) { if(i>=1 && i<=N/2 - 1) { range[0] = (i-1)*30; range[1] = range[0] +30 - 1; } else if(i == N/2) { range[0] = (i-1)*30; range[1] = range[0] + 50 -1; } else if(i == N/2 + 1) { range[0] = (i-2)*30 + 50; range[1] = range[0] + 50 -1; } else if(i>N/2+1 && i<=N-1) { range[0] = (i-1)*30+40; range[1] = range[0] +30 - 1; } //return range; } /* __device__ int select_exit(Passenger P, int exit[]) { int ans =0; if ((exit[0] == 1 || exit[1] == 1) && (exit[2]==1 || exit[3]==1 || exit[4]==1 || exit[5]==1) && P.x<=470){ if(P.x<470-P.x) { ans = (exit[0] == 1) ? 0:1; } else{ if(exit[2]==1 || exit[3]==1){ ans = (exit[2] == 1) ? 2:3; } else if(exit[4]==1 || exit[5]==1){ ans = (exit[4] == 1) ? 4:5; } } } else if ((exit[6] == 1 || exit[7] == 1) && (exit[2]==1 || exit[3]==1 || exit[4]==1 || exit[5]==1) && P.x>470){ if(940-P.x<P.x-470){ ans = (exit[6] == 1) ? 6:7; } else{ if(exit[4]==1 || exit[5]==1){ ans = (exit[4] == 1) ? 4:5; } else if(exit[2]==1 || exit[3]==1){ ans = (exit[2] == 1) ? 2:3; } } } return ans; } */ __device__ int select_exit(Passenger P, int exit[]) { int i,ans =-1; if(P.x<470) { if(P.x<470-P.x) { if(exit[0]==1||exit[1]==1) ans=0; } else { if(exit[2]==1||exit[3]==1||exit[4]==1||exit[5]==1) ans=3; } } else { if(940-P.x<P.x-470) { if(exit[7]==1||exit[8]==1) ans=7; } else { if(exit[2]==1||exit[3]==1||exit[4]==1||exit[5]==1) ans=3; } } if(ans==-1) { for(i=0;i<6;++i) { if(exit[i]==1) return i; } } else return ans; } __device__ int get_direction(Passenger p, block A[], int exitnum){ if(exitnum == 0 || exitnum == 1){ if(p.x-4 > 0){ return 1; } else if(p.x-4 <= 0){ return 0; } } else if (exitnum == 6 || exitnum == 7) { if(p.y+4 < 940){ return -1; } else if(p.y >= 940){ return 0; } } else if ((exitnum == 2 || exitnum == 3)) { if(p.x < 450){ return -1; } if(p.x > 500){ return 1; } if(p.x >= 450 || p.x <= 500){ return 0; } } else if ((exitnum == 4 || exitnum == 5)) { if(p.x < 500){ return -1; } if(p.x > 550){ return 1; } if(p.x >= 500 || p.x <= 550){ return 0; } } return 0; } /* __global__ void map_Passenger_to_exit(Passenger P[], int seat[100][100], block C[][55],int exit[]){ int k,j,m,i =2, rownm; while(i<6 && exit[i]!=0){ if(i==2) { rownm = 15; j=0; } if(i==3) { rownm = 15; j=5; } if(i==4) { rownm = 16; j=0; } if(i==5){ rownm = 16; j=5; } for(k=0;k<35;){ for( m = k; m < k + (int)P[ seat[rownm][j] ].diameter ; ++m){ C[i-2][m].passid = seat[rownm][j]; } seat[rownm][j] = 0; //C[i-2][p[seat[rownm][j]].diameter-1].passid = -1*seat[rownm][j]; if(i==2 || i==4){ ++j; } else if(i==3 || i==5){ --j; } k=k+17; } ++i; } } */ __global__ void movement_to_exit(block A[],block B[4][55],block C[4][55],Passenger P[] ,int seat[][100],int d_exit[],int numPass) //runs for each Passenger and make his movmenent according to the positions { // Now we have to map the thread id with the passennger id int i=threadIdx.x,k; int range[2]; int j,count=0; int tex,ex,dir; // int exit[]={1,1,0,0,1,1,0,0}; if(i < numPass) { switch(P[i].status) { //printf("Hello\n" ); case 0: 
//the Passenger is in his seat aisle (x=row number y = (1-6)column in seat ) if(P[i].y == 2 || P[i].y==3) { get_Aisle_Range(range, P[i].x , 30); // range[0]=0; // range[1]=50; count=0; for(j=range[0];j<range[1];++j) { if(A[j].passid == -1) { count++; if(count >= P[i].diameter) break; } else count=0; } if(j<range[1]+1) { for(k=j;k>=j-P[i].diameter;--k) A[k].passid=i; seat[P[i].x][P[i].y]=-1; P[i].x=k; P[i].y=j; P[i].status=1; P[i].res=0; } } else { if(P[i].y < 2) { if(seat[P[i].x][P[i].y+1]==-1) { if(P[i].res==60) { P[i].y++; seat[P[i].x][P[i].y-1]=-1; seat[P[i].x][P[i].y] = i;// P[i].res=0; } else P[i].res++; } } else { if(seat[P[i].x][P[i].y-1]==-1) { if(P[i].res==60) { P[i].y--; seat[P[i].x][P[i].y+1]=-1; seat[P[i].x][P[i].y] = i;// P[i].res=0; } else P[i].res++; } } } break; //done till here ;D //comment starts here: case 1: // the Passenger is in aisle and here the x value that tell the starting of the Passenger // y is the ending point of the Passenger // Select the exit and try to move towards the aisle point of that exit ex = select_exit(P[i], d_exit); // Create an exit array that contain 0 if the exit is not open and 1 if it is open dir= get_direction(P[i],A,ex); P[i].ans=ex; P[i].dir=dir; if(dir == 1) { //move up if(A[P[i].x-2].passid == -1) { if(P[i].speed==1.0f) { if(P[i].res==2) { P[i].x-=2; P[i].y-=2; A[P[i].x].passid = i; A[P[i].x+1].passid = i; A[P[i].y+1].passid = -1; A[P[i].y+2].passid = -1; P[i].res=0; } else P[i].res++; } else { if(P[i].speed==1.5f && A[P[i].x-3].passid == -1) { if(P[i].res==2) { P[i].x-=3; P[i].y-=3; A[P[i].x+1].passid = i; A[P[i].x+2].passid = i; A[P[i].x].passid = i; A[P[i].y+1].passid = -1; A[P[i].y+2].passid = -1; A[P[i].y+3].passid = -1; P[i].res=0; } else P[i].res++; } else P[i].res=0; } } else P[i].res=0; //else dont move } else { if(dir==-1) { //move down if(A[P[i].y+2].passid == -1) { if(P[i].speed==1.0f) { if(P[i].res==2) { P[i].x+=2; P[i].y+=2; A[P[i].y].passid = i; A[P[i].y-1].passid = i; A[P[i].x-1].passid = -1; A[P[i].x-2].passid = -1; P[i].res=0; } else P[i].res++; } else { if(A[P[i].y+3].passid == -1) { if(P[i].res==2) { P[i].x+=3; P[i].y+=3; A[P[i].y].passid = i; A[P[i].y-2].passid = i; A[P[i].y-1].passid = i; A[P[i].x-1].passid = -1; A[P[i].x-2].passid = -1; A[P[i].x-3].passid = -1; P[i].res=0; } else P[i].res++; } else P[i].res=0; } } else P[i].res=0; } else { P[i].res=0; //stay and jump to B or C if(ex==0||ex==1||ex==6||ex==7) { //Going to B tex=ex; if(ex==6||ex==7) tex=ex-5; // Going to B[tex] for(j=50;B[tex][j].passid==-1&&j> 50-P[i].diameter ;--j); if(50 - j == P[i].diameter) { for(k=P[i].x;k<=P[i].y;++k) A[k].passid=-1; P[i].x = tex; P[i].y = j; for(;j<=50;++j) B[tex][j].passid=i; P[i].status=2; } } else { P[i].res=0; tex=ex-2; // Going to B[tex] for(j=50;C[tex][j].passid==-1&&j>50-P[i].diameter;--j); if(50 - j == P[i].diameter) { for(k=P[i].x;k<=P[i].y;++k) A[k].passid=-1; P[i].x = tex; P[i].y = j; for(;j<=50;++j) C[tex][j].passid=i; P[i].status=3; } } } } break; case 2: // the Passenger is in midle of the exit front and end exit aisles i.e seat exit y represent the position in the aisle and // x represent which aisle 1 / 2 / 3 / 4 if(P[i].y <= 0||(P[i].speed==1.0f && P[i].y-2 <= 0)||(P[i].speed==1.5f && P[i].y-3 <= 0) ) { for(j=0;j<10; ++j) { if(B[P[i].x][j].passid ==i) B[P[i].x][j].passid=-1; } P[i].status = 4; //Passenger is out of the plane } else { if(P[i].speed == 1.0f) { if(B[P[i].x][P[i].y-2].passid ==-1) { //move closer to the exit P[i].y-=2; B[P[i].x][P[i].y].passid = i; B[P[i].x][P[i].y + 1].passid = i; 
B[P[i].x][(P[i].y + (int)P[i].diameter + 1)].passid =-1; B[P[i].x][(P[i].y + (int)P[i].diameter + 2)].passid =-1; } } else { if(B[P[i].x][P[i].y -3 ].passid ==-1) { //move closer to the exit P[i].y-=3; B[P[i].x][P[i].y].passid = i; B[P[i].x][P[i].y + 2].passid = i; B[P[i].x][P[i].y + 1].passid = i; B[P[i].x][P[i].y + (int)P[i].diameter + 1].passid =-1; B[P[i].x][P[i].y + (int)P[i].diameter + 2].passid =-1; B[P[i].x][P[i].y + (int)P[i].diameter + 3].passid =-1; } } } break; case 3: // the Passenger is in midle of the midle exit aisles i.e seat exit y represent the position in the aisle and // x represent which aisle 1 / 2 / 3 / 4 if(P[i].y <= 0||(P[i].speed==1.0f && P[i].y-2 <= 0)||(P[i].speed==1.5f && P[i].y-3 <= 0) ) { for(j=0;j<10; ++j) { if(C[P[i].x][j].passid ==i) C[P[i].x][j].passid=-1; } P[i].status = 4; //Passenger is out of the plane } else { if(P[i].speed == 1.0f) { if(C[P[i].x][P[i].y-2].passid ==-1) { //move closer to the exit if(P[i].res==1) { P[i].y-=2; C[P[i].x][P[i].y ].passid = i; C[P[i].x][P[i].y + 1].passid = i; C[P[i].x][P[i].y + (int)P[i].diameter + 1].passid =-1; C[P[i].x][P[i].y + (int)P[i].diameter + 2].passid =-1; } else P[i].res++; } else P[i].res=0; } else { if(P[i].speed == 1.5f&&C[P[i].x][P[i].y -3].passid ==-1) { //move closer to the exit if(P[i].res==1) { P[i].y-=3; C[P[i].x][P[i].y ].passid = i; C[P[i].x][P[i].y + 2].passid = i; C[P[i].x][P[i].y + 1].passid = i; C[P[i].x][P[i].y + (int)P[i].diameter + 1].passid =-1; C[P[i].x][P[i].y + (int)P[i].diameter + 2].passid =-1; C[P[i].x][P[i].y + (int)P[i].diameter + 3].passid =-1; } else P[i].res++; } else P[i].res=0; } } break; case 4: P[i].x=-1; P[i].y=-1; }; } } void map_Passenger_to_exit(Passenger P[],int seat[][100], block C[][55],block B[][55],int h_exit[]) { int i,k,j,l; for(l=2;l<6;++l) { if(h_exit[l]==1)//the middle exit 2,3,4,5 { if(l%2==0) { for(i=0;i<3;i++) { k=seat[15+(l-2)/2][i]; if(k!=-1) { for(j=0;j<P[k].diameter;++j) C[l-2][i*17 + j].passid=k; P[k].x=l-2; P[k].status = 3; P[k].y= i*17; P[k].res=0; } } } else { for(i=3;i<6;i++) { k=seat[15+(l-2)/2][i]; if(k!=-1) { for(j=0;j<P[k].diameter;++j) C[l-2][(5-i)*17 + j].passid=k; P[k].x=l-2; P[k].status = 3; P[k].y= (5-i)*17; P[k].res=0; } } } } } } //Main int main() { srand(time((0))); //Aircraft* air= input(); int numPass,i,j; int count1=0; scanf("%d",&numPass); Passenger *h_P =(Passenger *)malloc(sizeof(Passenger)*numPass); Passenger *P; pass_input(h_P,numPass); char aircraftName[30]; scanf("%s",aircraftName); Aircraft A; aircraftInput(A,aircraftName); // Seating Arrangement Assigning Each Passenger location to sit randomely // Think something to make sure the random function does not send it to infinite loop int h_seat[A->row][A->column]; // initialise exact array (compile with check bound) check bound int (*seat)[100]; for(i=0;i<A->row;++i) { for(j=0;j<A->column;++j) h_seat[i][j]=-1; } //all seats are vacant right now //TODO start id with 1 int r_row,r_col; for(i=0;i<numPass;++i) { r_row=rand()%30; //should be defined in the header file TODO r_col=rand()%6; //should be defined in the header file TODO if(h_seat[r_row][r_col]==-1) { h_P[i].x=r_row; h_P[i].y=r_col; if(r_row<0||r_col<0) { printf("Olala\n"); return 0; } else h_seat[r_row][r_col]=i; printf("%d %d\n",r_row,r_col); } else { i--; } } // for(i=0;i<30;++i) //{ // for(j=0;j<10;++j) // printf("%d ",h_seat[i][j]+1); //printf("\n"); // } // Now each row is occupied by some Passengers // Each Passenger is sitting in a row and each row is having corresponding aisle array portion in front of it. 
// The Passenger can move to the aisle A[] in front of its row if it is unoccupied printf("Seating Done\n"); int aisleLength = ((A->row - 2) * 30 ) + 100); block h_A[aisleLength]; //length of aisle should be general block* A; // A is the aisle //Each Element of the for(j=0;j<aisleLength;++j) { h_A[j].passid=-1; h_A[j].exit=0; } // the aisle is empty right now //Now there are 4 Normal Gate Exits and //Exit Paths are of 2 types 1 end and other in the middle each one will have different speeds block h_B[4][55]; // Nornal Exit Paths 2 on each ends of the plane block h_C[4][55]; // Seat Exit 2 in the middle of the plane block (*B)[55]; // Nornal Exit Paths 2 on each ends of the plane block (*C)[55]; // Seat Exit 2 in the middle of the plane for(i=0;i<4;++i) { for(j=0;j<55;++j) { h_B[i][j].passid=-1; h_C[i][j].passid=-1; h_B[i][j].exit=0; h_C[i][j].exit=0; } } // set up the exits all the B exits are empty // C exits or the middle exits are occupied by people //int h_exit[6] = {1,1,1,1,1,1}; //exit is 1 for those wxits which are open and 0 for those which are close //block A[],block B[4][55],block C[4][55],Passenger P[] ,int seat[][100],int numPass //cudaMalloc((void **) &array1_d , WIDTH*WIDTH*sizeof (int) ) ; // Here the game starts //Emergency! Emergency! Emergency! Run all of you Out of the plane printf("Enter 1 if the exit is open and 0 if the exit is close for all the 8 exits"); int h_exit[8]; int *d_exit; for(i=0;i<8;++i) scanf("%d",&h_exit[i]); //__global__ void movement_to_exit(block A[],block B[4][55],block C[4][55],Passenger P[] ,int seat[][100],int numPass) //runs for each Passenger and make his movmenent according to the positions int numout=0,numprev=0; j=0; //change the variable name to some specific time var cudaMalloc((void **) &P , numPass*sizeof (Passenger) ) ; cudaMalloc((void **) &B , (55*4)*sizeof (block) ) ; cudaMalloc((void **) &C , (55*4)*sizeof (block) ) ; cudaMalloc((void **) &A , 1000*sizeof (block) ) ; cudaMalloc((void **) &seat , (100*100)*sizeof (int) ) ; cudaMalloc((void **) &d_exit , (8)*sizeof (int) ) ; // select exit map_Passenger_to_exit(h_P,h_seat, h_C,h_B,h_exit); for(j=0;j<4;++j) { for(i=0;i<55;++i) { printf("%d",h_C[j][i]); } printf("\n"); } count1=0; int filecounter = 0; while(numout<numPass) { ++filecounter; // if(j==1000) // break; numprev=numout; numout=0; /* for(i=0;i<30;++i) { for(j=0;j<10;++j) printf("%d ",h_seat[i][j]+1); printf("\n"); } */ /* for(i=0;i<numPass;++i) { if(P[i].status!=4) printf("Passengr %d : (%d,%d) : %d : ans : %d : dir : %d\n",i,h_P[i].x,h_P[i].y,h_P[i].status,h_P[i].ans,h_P[i].dir); } */ //printf("Passengr %d : (%d,%d) : %d : ans : %d : dir : %d\n",3,h_P[3].x,h_P[3].y,h_P[3].status,h_P[3].ans,h_P[3].dir); cudaMemcpy ( P , h_P , numPass*sizeof (Passenger) , cudaMemcpyHostToDevice); cudaMemcpy ( seat , h_seat , 100*100*sizeof (int) , cudaMemcpyHostToDevice); cudaMemcpy ( C , h_C , 4*55*sizeof (block) , cudaMemcpyHostToDevice); cudaMemcpy ( B , h_B , 4*55*sizeof (block) , cudaMemcpyHostToDevice); cudaMemcpy ( A , h_A , 1000*sizeof (block) , cudaMemcpyHostToDevice); cudaMemcpy ( d_exit , h_exit , 8*sizeof (int) , cudaMemcpyHostToDevice); movement_to_exit<<< 1,numPass >>>(A,B,C,P,seat,d_exit,numPass); cudaError_t err1 = cudaPeekAtLastError(); cudaDeviceSynchronize(); //printf( "Got CUDA error ... 
%s \n", cudaGetErrorString(err1)); cudaMemcpy ( h_P , P , numPass*sizeof (Passenger) , cudaMemcpyDeviceToHost); cudaMemcpy ( h_seat , seat , 100*100*sizeof (int) , cudaMemcpyDeviceToHost); cudaMemcpy ( h_C , C , 4*55*sizeof (block) , cudaMemcpyDeviceToHost); cudaMemcpy ( h_B , B , 4*55*sizeof (block) , cudaMemcpyDeviceToHost); cudaMemcpy ( h_A , A , 1000*sizeof (block) , cudaMemcpyDeviceToHost); // creating file FILE *fp; char filename[] = "output"; char str[100]; sprintf(str, "%d", filecounter); strcat(filename,str); strcat(filename,".txt"); fp = fopen(filename, "w"); int global[1000][150]; for (int i = 0; i <1000; ++i) { for (int j = 0; j < 150; ++j) { fprintf(fp, "%d", global[i][j]); } fprintf(fp, "\n", ); } fprintf(fp, "This is testing...\n"); fclose(fp); //file creation complete for(i=0;i<numPass;i++) { if(h_P[i].status == 4) numout++; // printf("%d\n",h_P[i].status); } if(numprev==numout) { printf("*"); count1++; } else { count1=0; printf("%d %d\n",numout,j); } if(numout==numPass) break; //printf("%d %d\n",numout,j); if(count1>100) { for(i=0;i<numPass;i++) { if(h_P[i].status!=4) { printf("Passengr %d : (%d,%d) : %d : ans : %d : dir : %d\n",i,h_P[i].x,h_P[i].y,h_P[i].status,h_P[i].ans,h_P[i].dir); } } // break; } // printf("%d\t %d\n",numout,j); j++; } float timeSteps = 40.6; printf("%f\n",j*timeSteps + 7000.0); // printf("%f\n",j*timeSteps); return 0; } int random(int min,int max); void pass_input(Passenger *P,int n) { int i;//r; srand(time(0)); Passenger *tp=P; for(i=0;i<n;++i,tp++) { //r=rand(); // printf("%d\n",r); tp->id=i; // tp->x= // tp->sex=random(0,1); // Male or female(random 0-1) tp->status = 0; tp->Mtime=tp->sex?random(875,1750):random(920,1950); tp->Wtime=50; tp->Rtime=random(400,700); // Random (500-1000)ms tp->fear=-1; //fear value 0 tp->agility=-1; // agility value tp->diameter=random(9,15);//(Random ) // diameter occupied by passenger tp->totaltime=0; //total time to evacuate tp->totalDist=0; //total distance to exit if(tp->sex==0){ float t = (float)random(10,15); tp->speed=t/10; //Random (1-1.5 ) speed of passenger } else{ float t = (float)random(9,12); tp->speed=t/10; } tp->grpstatus=-1; // Not in this paper in group or not tp->timeSteps=178; // minimum unit of time = 178 miliseconds tp->res=0; printf("id : %d , sex : %d , Mtime : %d, Rtime : %d\n",tp->id,tp->sex,tp->Mtime,tp->Rtime); } } Aircraft aircraftInput(Aircraft *A,char name[]){ char filename1[100]; strcat(filename1,".txt"); FILE *fp; char buff[255]; int res[10]; int k =0; fp = fopen(filename1, "r"); while(fgets(buff, 80, fp) != NULL) { //fscanf(fp, "%s", buff); //printf("%s\n", buff ); int result = atoi(buff); //printf("%d\n", result); res[k] = result; ++k; } fclose(fp); A->row = res[0]; A->column = res[1]; A->numOfExitPassage = res[2]; A->maxNumPassenger = res[3]; } void createGlobalMatrix(int global[1000][150], block h_seat, block h_A, block h_B, block h_C){ //main exit 1 int k =0, i=0; for(k =0;k<50;++k){ for (i = 0; i < 55; ++i) { if(h_B[0][i].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } } int temp = i; if(h_A[k].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } for (i = 54; i >=0; --i) { if(h_B[1][i].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } } } // 1-14 seats int p = 0; for (i = 50; i < (14*30)+50; ++i) { int u=0; for (int j = 0; j < 3; ++j) { if (h_seat[p][j] == -1) { for (; u < 18*(j+1); ++u) { global[i][u] = 0; } } else{ for (; u < 18*(j+1); ++u) { global[i][u] = 1; } } } global[i][u] = h_A[i]; int u=0; for (int j = 3; j < 6; ++j) { if (h_seat[p][j] == 
-1) { for (; u < 18*(j+1); ++u) { global[i][u] = 0; } } else{ for (; u < 18*(j+1); ++u) { global[i][u] = 1; } } } if ((i+1-50)%30 == 0) { ++p; } } //middle exits for(k =i;k<i+50;++k){ int i =0; for (j = 0; j < 55; ++j) { if(h_C[0][j].passid == -1){ global[k][j] = 0; } else{ global[k][j] = 1; } } int temp = i; if(h_A[k].passid == -1){ global[k][j] = 0; } else{ global[k][j] = 1; } for (j = 54; j >=0; --j) { if(h_C[1][j].passid == -1){ global[k][j+1] = 0; } else{ global[k][j+1] = 1; } } } // middle exits correct the second loop for j.. j is not in the sync for(;k<i+100;++k){ int i =0; for (j = 0; j < 55; ++j) { if(h_C[2][j].passid == -1){ global[k][j] = 0; } else{ global[k][j] = 1; } } int temp = i; if(h_A[k].passid == -1){ global[k][j] = 0; } else{ global[k][j] = 1; } for (j = 54; j >=0; --j) { if(h_C[3][j].passid == -1){ global[k][j+1] = 0; } else{ global[k][j+1] = 1; } } } // 17 - 30 seats for (i = k; i < (28*30)+150; ++i) { int u=0; for (int j = 0; j < 3; ++j) { if (h_seat[p][j] == -1) { for (; u < 18*(j+1); ++u) { global[i][u] = 0; } } else{ for (; u < 18*(j+1); ++u) { global[i][u] = 1; } } } global[i][u] = h_A[i]; int u=0; for (int j = 3; j < 6; ++j) { if (h_seat[p][j] == -1) { for (; u < 18*(j+1); ++u) { global[i][u] = 0; } } else{ for (; u < 18*(j+1); ++u) { global[i][u] = 1; } } } if ((i+1-150)%30 == 0) { ++p; } } //end exits int q = i+50; for(k =i;k<q;++k){ int i =0; for (i = 0; i < 55; ++i) { if(h_B[2][i].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } } int temp = i; if(h_A[k].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } for (i = 54; i >=0; --i) { if(h_B[3][i].passid == -1){ global[k][i] = 0; } else{ global[k][i] = 1; } } } } int random(int min,int max) { int r = rand(); r=r%(max-min+1); r=r+min; return r; }
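The evacuation loop above stores cudaPeekAtLastError() into err1 after each movement_to_exit launch but leaves the report commented out, and the cudaMalloc/cudaMemcpy return codes are never inspected. A small checking helper of the kind usually wrapped around such calls, shown here as an illustrative sketch rather than code from this file, would be:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Illustrative sketch, not part of the original file: abort with a readable
// message when a CUDA API call or a preceding kernel launch has failed.
static void checkCuda(cudaError_t err, const char *what) {
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error in %s: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Assumed usage around the launch in the main loop:
//   movement_to_exit<<<1, numPass>>>(A, B, C, P, seat, d_exit, numPass);
//   checkCuda(cudaPeekAtLastError(), "movement_to_exit launch");
//   checkCuda(cudaDeviceSynchronize(), "movement_to_exit execution");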
95c84d62a2e010622df134d6471e562c6f3c6d3a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <exceptions/cuda_exception.h> #include <rocblas.h> #include "../MmulHelper.h" #include <specials_cuda.h> #include <ShapeUtils.h> #include <PointersManager.h> #include <numeric> namespace nd4j { ////////////////////////////////////////////////////////////////////////////// // MXK x KxN = MxN -> actual sequence of axes doesn't matter template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemm(const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis, const double alpha, const double beta) { const T1* A = reinterpret_cast<const T1*>(vA); const T2* B = reinterpret_cast<const T2*>(vB); T3* C = reinterpret_cast< T3*>(vC); __shared__ int K; __shared__ bool betaPresent; __shared__ Nd4jLong cLen, totalThreads, *coords; __shared__ T3 alphaZ, betaZ; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; coords = reinterpret_cast<Nd4jLong*>(shmem); cLen = shape::length(cShapeInfo); K = shape::shapeOf(const_cast<Nd4jLong*>(aShapeInfo))[aKaxis]; betaPresent = beta; totalThreads = gridDim.x * blockDim.x; alphaZ = alpha; betaZ = beta; } __syncthreads(); auto aCoords = coords + threadIdx.x * 6; // 6 = (aRank + bRank + cRank) auto bCoords = aCoords + 2; auto cCoords = bCoords + 2; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < cLen; i += totalThreads) { // evaluate C coordinates shape::index2coords(i, cShapeInfo, cCoords); // evaluate A coordinates aCoords[aMaxis] = cCoords[cMaxis]; aCoords[aKaxis] = 0; // evaluate B coordinates bCoords[bKaxis] = 0; bCoords[bNaxis] = cCoords[cNaxis]; auto aOffset = shape::getOffset(aShapeInfo, aCoords); auto bOffset = shape::getOffset(bShapeInfo, bCoords); T3 val = A[aOffset] * B[bOffset]; // first iteration for (uint j = 1; j < K; ++j) { // rest iterations aOffset += shape::stride(aShapeInfo)[aKaxis]; bOffset += shape::stride(bShapeInfo)[bKaxis]; val = val + A[aOffset] * B[bOffset]; } auto cOffset = shape::getOffset(cShapeInfo, cCoords); if(betaPresent) C[cOffset] = alphaZ * val + betaZ * C[cOffset]; else C[cOffset] = alphaZ * val; } } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, hipStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* 
cShapeInfo, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis, const double alpha, const double beta) { hipLaunchKernelGGL(( usualCudaGemm<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta); } //////////////////////////////////////////////////////////////////////// // MXN x N = M -> actual sequence of {M,N} axes doesn't matter template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemv(const void* vA, const Nd4jLong* aShapeInfo, const void* vX, const Nd4jLong* xShapeInfo, void* vY, const Nd4jLong* yShapeInfo, const int incx, const int incy, const int aMaxis, const double alpha, const double beta) { const T1* A = reinterpret_cast<const T1*>(vA); const T2* X = reinterpret_cast<const T2*>(vX); T3* Y = reinterpret_cast< T3*>(vY); __shared__ int M, N; __shared__ bool betaPresent; __shared__ Nd4jLong cLen, totalThreads, aNstride, aMstride; __shared__ T3 alphaZ, betaZ; if (threadIdx.x == 0) { N = shape::length(xShapeInfo); M = shape::length(yShapeInfo); aMstride = shape::stride(aShapeInfo)[aMaxis]; aNstride = shape::stride(aShapeInfo)[aMaxis == 0 ? 1 : 0]; totalThreads = gridDim.x * blockDim.x; betaPresent = beta; alphaZ = alpha; betaZ = beta; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < M; i += totalThreads) { // evaluate offsets auto aOffset = i * aMstride; auto xOffset = 0; T3 val = A[aOffset] * X[xOffset]; // first iteration for (uint j = 1; j < N; ++j) { // rest iterations aOffset += aNstride; xOffset += incx; val = val + A[aOffset] * X[xOffset]; } auto yOffset = i * incy; if(betaPresent) Y[yOffset] = alphaZ * val + betaZ * Y[yOffset]; else Y[yOffset] = alphaZ * val; } } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemv(const int blocksPerGrid, const int threadsPerBlock, hipStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vX, const Nd4jLong* xShapeInfo, void* vY, const Nd4jLong* yShapeInfo, const int incx, const int incy, const int aMaxis, const double alpha, const double beta) { hipLaunchKernelGGL(( usualCudaGemv<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), 512, *stream, vA, aShapeInfo, vX, xShapeInfo, vY, yShapeInfo, incx, incy, aMaxis, alpha, beta); } ////////////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> static __global__ void usualCudaDot(const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) { T1* X = reinterpret_cast<T1*>(const_cast<void*>(vX)); T2* Y = reinterpret_cast<T2*>(const_cast<void*>(vY)); T3* Z = reinterpret_cast<T3*>(vZ); extern __shared__ unsigned char shmem[]; auto pairwiseMul = reinterpret_cast<T3*>(shmem); const int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < length) pairwiseMul[tid] = X[tid * incx] * Y[tid * incy]; __syncthreads(); if(tid == 0) { T3 sum = 0; for(Nd4jLong i = 0; i < length; ++i) sum = sum + pairwiseMul[i]; if(beta) *Z = (T3)alpha * sum + (T3)beta * *Z; else *Z = (T3)alpha * sum; } } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualDot(const dim3 &blocksPerGrid, const dim3 
&threadsPerBlock, hipStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) { hipLaunchKernelGGL(( usualCudaDot<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), length*sizeof(T3) + 128, *stream, length, alpha, vX, incx, vY, incy, beta, vZ); } ////////////////////////////////////////////////////////////////////////////// // MXK x KxN = MxN NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, double alpha, double beta, const char outOrder) { if(A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of A array is not equal 2 !"); if(B->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of B array is not equal 2 !"); const auto M = A->sizeAt(0); const auto K = A->sizeAt(1); const auto N = B->sizeAt(1); if(C != nullptr && C->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of C array is not equal 2 !"); if(B->sizeAt(0) != K) throw std::runtime_error("MmulHelper::mmulMxM cuda: B array has wrong number of rows !"); if(C != nullptr && C->sizeAt(0) != M) throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of rows !"); if(C != nullptr && C->sizeAt(1) != N) throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of columns !"); if(C == nullptr) C = new NDArray(outOrder, {M,N}, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext()); const int major = Environment::getInstance()->capabilities()[AffinityManager::currentDeviceId()].first(); const auto aType = A->dataType(); const auto bType = B->dataType(); const auto cType = C->dataType(); const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC); const bool typeDouble = ABC && aType == DataType::DOUBLE; const bool typeFloat = ABC && aType == DataType::FLOAT32; const bool typeHalf = ABC && aType == DataType::HALF && major >= 6; const bool typeIntFloat = AB && aType == DataType::INT8 && cType == DataType::FLOAT32 && major >= 6; const bool typeHalfFloat = AB && aType == DataType::HALF && cType == DataType::FLOAT32 && major >= 6; auto handle = reinterpret_cast<hipblasHandle_t *>(A->getContext()->getCublasHandle()); auto stream = A->getContext()->getCudaStream(); auto status = hipblasSetStream(*handle, *stream); if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status); if(!typeDouble && !typeFloat && !typeHalf && !typeIntFloat && !typeHalfFloat) { const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * 6 + 128; // 6 = aRank + bRank + cRank NDArray::prepareSpecialUse({C}, {A, B}); // BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (blocksPerGrid, threadsPerBlock, sharedMem, stream, A->getSpecialBuffer(), A->getSpecialShapeInfo(), B->getSpecialBuffer(), B->getSpecialShapeInfo(), C->getSpecialBuffer(), C->getSpecialShapeInfo(), 0, 1, 0, 1, 0, 1, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm, (blocksPerGrid, threadsPerBlock, sharedMem, stream, A->getSpecialBuffer(), A->getSpecialShapeInfo(), B->getSpecialBuffer(), B->getSpecialShapeInfo(), C->getSpecialBuffer(), C->getSpecialShapeInfo(), 0, 1, 0, 1, 0, 1, alpha, beta), NUMERIC_TYPES) NDArray::registerSpecialUse({C}, {A, B}); auto cudaResult = hipStreamSynchronize(*stream); if 
(cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult); } else { std::vector<NDArray*> toDelete; NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C)); bool aMcont = M == 1 || A->strideAt(0) == 1; bool aKcont = K == 1 || A->strideAt(1) == 1; bool bKcont = K == 1 || B->strideAt(0) == 1; bool bNcont = N == 1 || B->strideAt(1) == 1; bool cMcont = M == 1 || C->strideAt(0) == 1; bool cNcont = N == 1 || C->strideAt(1) == 1; if(!aMcont && !aKcont) { pA = A->dup('f'); toDelete.push_back(pA); aMcont = true; } if(!bKcont && !bNcont) { pB = B->dup('f'); toDelete.push_back(pB); bKcont = true; } if(!cMcont) { pC = C->dup('f'); toDelete.push_back(pC); cMcont = true; } const bool transA = !aMcont; const bool transB = !bKcont; const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1); const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(0) : pB->strideAt(1); const int ldc = (cMcont && cNcont) ? M : pC->strideAt(1); const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N; const hipblasOperation_t transBblas = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N; NDArray::prepareSpecialUse({pC}, {pA, pB}); // choose appropriate cuda gemm api depending on data types if(typeDouble) { status = hipblasDgemm(*handle, transAblas, transBblas, M, N, K, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)pB->getSpecialBuffer(), ldb, &beta, (double*)pC->getSpecialBuffer(), ldc); } else if(typeFloat) { float alphaF(alpha), betaF(beta); status = hipblasSgemm(*handle, transAblas, transBblas, M, N, K, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)pB->getSpecialBuffer(), ldb, &betaF, (float*)pC->getSpecialBuffer(), ldc); } else if(typeHalf) { float16 alphaH(alpha), betaH(beta); status = hipblasHgemm(*handle, transAblas, transBblas, M, N, K, &alphaH.data, (__half*)pA->getSpecialBuffer(), lda, (__half*)pB->getSpecialBuffer(), ldb, &betaH.data, (__half*)pC->getSpecialBuffer(), ldc); } else if(typeIntFloat) { float alphaF(alpha), betaF(beta); status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), HIP_R_8I, lda, pB->getSpecialBuffer(), HIP_R_8I, ldb, &betaF, pC->getSpecialBuffer(), HIP_R_32F, ldc); } else if(typeHalfFloat) { float alphaF(alpha), betaF(beta); status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), HIP_R_16F, lda, pB->getSpecialBuffer(), HIP_R_16F, ldb, &betaF, pC->getSpecialBuffer(), HIP_R_32F, ldc); } if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status); NDArray::registerSpecialUse({pC}, {pA, pB}); auto cudaResult = hipStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult); if(C != pC) C->assign(pC); for(int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; } return C; } //////////////////////////////////////////////////////////////////////////// // MXN x N = M NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, nd4j::NDArray* Y, const double alpha, const double beta, const char outOrder) { int xLenDim, yLenDim(0); if(A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxV cuda: rank of A array is not equal 2 !"); if(!shape::isCommonVector(X->getShapeInfo(), xLenDim)) throw std::runtime_error("MmulHelper::mmulMxV cuda: X array must be vector !"); const auto M = A->sizeAt(0); const auto N = A->sizeAt(1); if(Y != nullptr && 
!shape::isCommonVector(Y->getShapeInfo(), yLenDim)) throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array must be vector !"); if(X->lengthOf() != N) throw std::runtime_error("MmulHelper::mmulMxV cuda: X vector has wrong length !"); if(Y != nullptr && Y->lengthOf() != M) throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array has wrong length !"); if(Y == nullptr) Y = new NDArray(outOrder, {M}, DataTypeUtils::pickPairwiseResultType(A->dataType(), X->dataType()), A->getContext()); const int incx = X->strideAt(xLenDim); const int incy = Y->strideAt(yLenDim); const auto aType = A->dataType(); const auto xType = X->dataType(); const auto yType = Y->dataType(); const bool AX(aType == xType), AY(aType == yType), AXY(AX && AY); const bool typeDouble = AXY && aType == DataType::DOUBLE; const bool typeFloat = AXY && aType == DataType::FLOAT32; auto handle = reinterpret_cast<hipblasHandle_t *>(A->getContext()->getCublasHandle()); auto stream = A->getContext()->getCudaStream(); auto status = hipblasSetStream(*handle, *stream); if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status); if(!typeDouble && !typeFloat) { const int threadsPerBlock = MAX_NUM_THREADS; const int blocksPerGrid = (M + threadsPerBlock - 1) / threadsPerBlock; NDArray::prepareSpecialUse({Y}, {A, X}); // BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, A->getSpecialBuffer(), A->getSpecialShapeInfo(), X->getSpecialBuffer(), X->getSpecialShapeInfo(), Y->getSpecialBuffer(), Y->getSpecialShapeInfo(), incx, incy, 0, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(xType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, A->getSpecialBuffer(), A->getSpecialShapeInfo(), X->getSpecialBuffer(), X->getSpecialShapeInfo(), Y->getSpecialBuffer(), Y->getSpecialShapeInfo(), incx, incy, 0, alpha, beta), NUMERIC_TYPES) NDArray::registerSpecialUse({Y}, {A, X}); auto cudaResult = hipStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult); } else { NDArray *pA(const_cast<NDArray*>(A)); bool aMcont = M == 1 || A->strideAt(0) == 1; bool aNcont = N == 1 || A->strideAt(1) == 1; if(!aMcont && !aNcont) { pA = A->dup('f'); aMcont = true; } const bool transA = !aMcont; const int lda = (aMcont && aNcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1); const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N; NDArray::prepareSpecialUse({Y}, {pA, X}); // choose appropriate cuda gemm api depending on data types if(typeDouble) { status = hipblasDgemv(*handle, transAblas, transA ? N : M, transA ? M : N, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)X->getSpecialBuffer(), incx, &beta, (double*)Y->getSpecialBuffer(), incy); } else if(typeFloat) { float alphaF(alpha), betaF(beta); status = hipblasSgemv(*handle, transAblas, transA ? N : M, transA ? 
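// gemv takes the dimensions of the matrix as it is stored in column-major order, hence the swap to (N, M) when transA is set (a row-major A is handed over as its transpose);
// the op flag restores the logical MxN product, and incx/incy come straight from the vector strides.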
M : N, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)X->getSpecialBuffer(), incx, &betaF, (float*)Y->getSpecialBuffer(), incy); } if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status); auto cudaResult = hipStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult); NDArray::registerSpecialUse({Y}, {pA, X}); if(pA != A) delete pA; } return Y; } //////////////////////////////////////////////////////////////////////////// // (X * Y) = Z[0] NDArray* MmulHelper::dot(const NDArray* X, const NDArray* Y, nd4j::NDArray* Z, const double alpha, const double beta) { int xLenDim(0), yLenDim(0); if(!shape::isCommonVector(X->getShapeInfo(), xLenDim)) throw std::runtime_error("MmulHelper::dot cuda: X array must be vector !"); if(!shape::isCommonVector(Y->getShapeInfo(), yLenDim)) throw std::runtime_error("MmulHelper::dot cuda: Y array must be vector !"); if(Z != nullptr && !Z->isScalar()) throw std::runtime_error("MmulHelper::dot cuda: Z array must be scalar !"); const auto length = X->lengthOf(); if(Y->lengthOf() != length) throw std::runtime_error("MmulHelper::dot cuda: lengths of input vectors are different !"); if(Z == nullptr) Z = new NDArray(DataTypeUtils::pickPairwiseResultType(X->dataType(), Y->dataType()), X->getContext()); const Nd4jLong incx = X->strideAt(xLenDim); const Nd4jLong incy = Y->strideAt(yLenDim); const auto xType = X->dataType(); const auto yType = Y->dataType(); const auto zType = Z->dataType(); if(!X->isActualOnDeviceSide()) X->syncToDevice(); if(!Y->isActualOnDeviceSide()) Y->syncToDevice(); if(!Z->isActualOnDeviceSide()) Z->syncToDevice(); hipStream_t* stream = X->getContext()->getCudaStream(); dim3 threadsPerBlock(512); dim3 blocksPerGrid(1); if (length > 512) threadsPerBlock.x = math::nd4j_ceil<double, int>(static_cast<double>(length) / 512); NDArray::prepareSpecialUse({Z}, {X, Y}); //BUILD_TRIPLE_SELECTOR(xType, yType, zType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(xType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), NUMERIC_TYPES) auto cudaResult = hipStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::dot cuda failed !", cudaResult); NDArray::registerSpecialUse({Z}, {X, Y}); return Z; } ////////////////////////////////////////////////////////////////////////////// // [bS,M,K] x [bS,K,N] = [bS,M,N] // [bS,M,K] x [K,N] = [bS,M,N] // [M,K] x [bS,K,N] = [bS,M,N] // bS could stand for several axes template <typename T1, typename T2, typename T3> static __global__ void batchedCudaGemm(const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo, const int* aBatchDims, const int* bBatchDims, const int* cBatchDims, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis, const double alpha, const double beta) { const T1* A = reinterpret_cast<const T1*>(vA); const T2* B = reinterpret_cast<const T2*>(vB); T3* C = reinterpret_cast< T3*>(vC); __shared__ bool betaPresent; __shared__ int aRank, bRank, cRank, K; __shared__ Nd4jLong cLen, totalThreads, *coords; __shared__ T3 alphaZ, betaZ; if (threadIdx.x == 0) { extern 
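// The dynamic shared-memory buffer declared below is carved into per-thread coordinate scratch: each thread owns (aRank + bRank + cRank) Nd4jLong slots (aCoords/bCoords/cCoords),
// which is why mmulNxN requests sharedMem = threadsPerBlock * sizeof(Nd4jLong) * (aRank + bRank + cRank) + 128 at launch.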
__shared__ unsigned char shmem[]; coords = reinterpret_cast<Nd4jLong*>(shmem); cLen = shape::length(cShapeInfo); K = shape::shapeOf(const_cast<Nd4jLong*>(aShapeInfo))[aKaxis]; totalThreads = gridDim.x * blockDim.x; aRank = shape::rank(aShapeInfo); bRank = shape::rank(bShapeInfo); cRank = shape::rank(cShapeInfo); betaPresent = beta; alphaZ = alpha; betaZ = beta; } __syncthreads(); auto aCoords = coords + threadIdx.x * (aRank + bRank + cRank); auto bCoords = aCoords + aRank; auto cCoords = bCoords + bRank; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < cLen; i += totalThreads) { // evaluate C coordinates shape::index2coords(i, cShapeInfo, cCoords); // calculate index of current batch Nd4jLong batchInd; if(cBatchDims != nullptr) batchInd = shape::coords2index(cShapeInfo, cCoords, cRank - 2, cBatchDims); // evaluate A coordinates if(aBatchDims != nullptr) shape::index2coords(batchInd, aShapeInfo, aCoords, aRank - 2, aBatchDims); aCoords[aMaxis] = cCoords[cMaxis]; aCoords[aKaxis] = 0; // evaluate B coordinates if(bBatchDims != nullptr) shape::index2coords(batchInd, bShapeInfo, bCoords, bRank - 2, bBatchDims); bCoords[bKaxis] = 0; bCoords[bNaxis] = cCoords[cNaxis]; auto aOffset = shape::getOffset(aShapeInfo, aCoords); auto bOffset = shape::getOffset(bShapeInfo, bCoords); T3 val = A[aOffset] * B[bOffset]; // first iteration for (uint j = 1; j < K; ++j) { // rest iterations aOffset += shape::stride(aShapeInfo)[aKaxis]; bOffset += shape::stride(bShapeInfo)[bKaxis]; val = val + A[aOffset] * B[bOffset]; } auto cOffset = shape::getOffset(cShapeInfo, cCoords); if(betaPresent) C[cOffset] = alphaZ * val + betaZ * C[cOffset]; else C[cOffset] = alphaZ * val; } } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void batchedGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, hipStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo, const int* aBatchDims, const int* bBatchDims, const int* cBatchDims, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis, const double alpha, const double beta) { hipLaunchKernelGGL(( batchedCudaGemm<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta); } /////////////////////////////////////////////////////////////////// NDArray* MmulHelper::mmulNxN(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) { const int aRank = A->rankOf(); const int bRank = B->rankOf(); // input ranks validation if(aRank > bRank && bRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !"); else if(bRank > aRank && aRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !"); else if (aRank == bRank ) { for(int i = 0; i < aRank - 2; ++i) if(A->sizeAt(i) != B->sizeAt(i)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); } if(A->sizeAt(-1) != B->sizeAt(-2)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); // validation of C array std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? 
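// Worked example for the expected shape being built here: A of shape [2,3,4] times B of shape [4,5] takes the batch dims from the higher-rank operand and replaces the last two
// with M = A.sizeAt(-2) and N = B.sizeAt(-1), so the expected C shape is [2,3,5]; each thread of batchedCudaGemm then accumulates one C element over K = 4.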
A->getShapeAsVector() : B->getShapeAsVector(); cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2); cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1); if(C != nullptr ) { if(!C->isSameShape(cExpectedShape)) throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !"); } else C = new NDArray(outOrder, cExpectedShape, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext()); const int cRank = C->rankOf(); const int aMaxis(aRank-2), aKaxis(aRank-1), bKaxis(bRank-2), bNaxis(bRank-1), cMaxis(cRank-2), cNaxis(cRank-1); const int threadsPerBlock = MAX_NUM_THREADS / 8; const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * (aRank + bRank + cRank) + 128; PointersManager manager(A->getContext(), "MmulHelper::mmulNxN"); const int *aBatchDims(nullptr), *bBatchDims(nullptr), *cBatchDims(nullptr); if(aRank > 2) aBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(aRank, {aMaxis, aKaxis}).data(), (aRank - 2) * sizeof(int))); if(bRank > 2) bBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(bRank, {bKaxis, bNaxis}).data(), (bRank - 2) * sizeof(int))); if(cRank > 2) cBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(cRank, {cMaxis, cNaxis}).data(), (cRank - 2) * sizeof(int))); NDArray::prepareSpecialUse({C}, {A, B}); // BUILD_TRIPLE_SELECTOR(A->dataType(), b->dataType(), C->dataType(), batchedGemm, (blocksPerGrid, threadsPerBlock, A->getContext()->getCudaStream(), A->getSpecialBuffer(), A->getSpecialShapeInfo(), B->getSpecialBuffer(), B->getSpecialShapeInfo(), C->getSpecialBuffer(), C->getSpecialShapeInfo(), aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(A->dataType(), batchedGemm, (blocksPerGrid, threadsPerBlock, sharedMem, A->getContext()->getCudaStream(), A->getSpecialBuffer(), A->getSpecialShapeInfo(), B->getSpecialBuffer(), B->getSpecialShapeInfo(), C->getSpecialBuffer(), C->getSpecialShapeInfo(), aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta), NUMERIC_TYPES) NDArray::registerSpecialUse({C}, {A, B}); manager.synchronize(); return C; } /* ////////////////////////////////////////////////////////////////////////////// // MXN x N = M template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemv(const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) { T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA)); T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX)); T3* Y = reinterpret_cast<T3*>(vY); __shared__ T3 alphaZ, betaZ; __shared__ Nd4jLong strideArow, strideAcol; const int row = blockIdx.x * blockDim.x + threadIdx.x; if(row == 0) { alphaZ = alpha; betaZ = beta; if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; } } __syncthreads(); T3 val = 0; if (row < M) for (int i = 0; i < N; i++) val = val + A[row * strideArow + i * strideAcol] * X[i * incx]; Y[row * incy] = alphaZ * val + betaZ * Y[row * incy]; } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemv(const dim3 &blocksPerGrid, const dim3 
&threadsPerBlock, hipStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) { usualCudaGemv<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, M, N, alpha, vA, lda, vX, incx, beta, vY, incy); } */ /* ////////////////////////////////////////////////////////////////////////////// MXK x KxN = MxN C array must be in f order template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemm(const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) { T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA)); T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB)); T3* C = reinterpret_cast<T3*>(vC); __shared__ T3 alphaZ, betaZ; __shared__ Nd4jLong strideArow, strideAcol, strideBrow, strideBcol; const int row = blockIdx.y * blockDim.y + threadIdx.y; const int col = blockIdx.x * blockDim.x + threadIdx.x; if(row == 0 && col == 0) { alphaZ = alpha; betaZ = beta; if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; } if(transB) { strideBrow = ldb; strideBcol = 1; } else { strideBrow = 1; strideBcol = ldb; } } __syncthreads(); T3 val = 0; if (row < M && col < N) for (int i = 0; i < K; i++) val = val + A[row * strideArow + i * strideAcol] * B[i * strideBrow + col * strideBcol]; C[row + col * ldc] = alphaZ * val + betaZ * C[row + col * ldc]; } ////////////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemm(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) { usualCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, transB, M, N, K, alpha, vA, lda, vB, ldb, beta, vC, ldc); } */ ////////////////////////////////////////////////////////////////////////// /* NDArray* MmulHelper::mmulNxNold1(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) { const int aRank = A->rankOf(); const int bRank = B->rankOf(); // input ranks validation if(aRank > bRank && bRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !"); else if(bRank > aRank && aRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !"); else if (aRank == bRank ) { for(int i = 0; i < aRank - 2; ++i) if(A->sizeAt(i) != B->sizeAt(i)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); } if(A->sizeAt(-1) != B->sizeAt(-2)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); // validation of C array std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? 
A->getShapeAsVector() : B->getShapeAsVector(); cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2); cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1); if(C != nullptr ) { if(!C->isSameShape(cExpectedShape)) throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !"); } else { C = new NDArray(outOrder, cExpectedShape, B->dataType()); } // multiplication const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(C->rankOf(), {-2, -1}); const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(C->getShapeInfo(), dimsToExclude); std::vector<Nd4jLong> idxRanges(2 * C->rankOf()); // #pragma omp parallel for schedule(guided) firstprivate(idxRanges) for(Nd4jLong i = 0; i < numOfSubArrs; ++i) { ShapeUtils::evalIdxRangesForSubArr(i, C->getShapeInfo(), dimsToExclude, idxRanges.data()); NDArray cSubArr = (*C)(idxRanges); if(aRank > bRank) { NDArray aSubArr = (*A)(idxRanges); mmulMxM(&aSubArr, B, &cSubArr, 1., 0., outOrder); } else if(bRank > aRank) { NDArray bSubArr = (*B)(idxRanges); mmulMxM(A, &bSubArr, &cSubArr, 1., 0, outOrder); } else { NDArray aSubArr = (*A)(idxRanges); NDArray bSubArr = (*B)(idxRanges); mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., outOrder); } } return C; } */ ////////////////////////////////////////////////////////////////////////// // [bS,M,K] x [bS,K,N] = [bS,M,N] // [bS,M,K] x [K,N] = [bS,M,N] // [M,K] x [bS,K,N] = [bS,M,N] // bS could stand for several axes /* NDArray* MmulHelper::mmulNxNold2(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) { const int aRank = A->rankOf(); const int bRank = B->rankOf(); // input ranks validation if(aRank > bRank && bRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !"); else if(bRank > aRank && aRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !"); else if (aRank == bRank ) { for(int i = 0; i < aRank - 2; ++i) if(A->sizeAt(i) != B->sizeAt(i)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); } if(A->sizeAt(-1) != B->sizeAt(-2)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); // validation of C array std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? 
A->getShapeAsVector() : B->getShapeAsVector(); cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2); cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1); if(C != nullptr ) { if(!C->isSameShape(cExpectedShape)) throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !"); } else C = new NDArray(outOrder, cExpectedShape, B->dataType()); const int cRank = C->rankOf(); const auto M = A->sizeAt(-2); const auto K = A->sizeAt(-1); const auto N = B->sizeAt(-1); NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C)); std::vector<NDArray*> toDelete; bool aMcont = M == 1 || A->strideAt(-2) == 1; bool aKcont = K == 1 || A->strideAt(-1) == 1; bool bKcont = K == 1 || B->strideAt(-2) == 1; bool bNcont = N == 1 || B->strideAt(-1) == 1; bool cMcont = M == 1 || C->strideAt(-2) == 1; bool cNcont = N == 1 || C->strideAt(-1) == 1; if(!aMcont && !aKcont) { pA = A->dup('c'); toDelete.push_back(pA); aKcont = true; } if(!bKcont && !bNcont) { pB = B->dup('c'); toDelete.push_back(pB); bNcont = true; } std::vector<int> permut(cRank); if(!cMcont) { std::iota(permut.begin(), permut.end(), 0); permut[cRank - 2] = cRank - 1; permut[cRank - 1] = cRank - 2; // swap two last dimensions [..., M,N] -> [..., N,M] auto Cpermut = C->permute(permut); pC = new NDArray('c', Cpermut.getShapeAsVector(), Cpermut.dataType(), A->getContext()); pC->assign(Cpermut); toDelete.push_back(pC); cMcont = true; } const auto aType = pA->dataType(); const auto bType = pB->dataType(); const auto cType = pC->dataType(); const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC); bool badTypes = false; hipDataType cudaType, cudaAType, cudaBType, cudaCType; if(ABC && aType == DataType::HALF) { cudaType = cudaAType = cudaBType = cudaCType = HIP_R_16F; } else if(ABC && aType == DataType::FLOAT32) { cudaType = cudaAType = cudaBType = cudaCType = HIP_R_32F; } else if(ABC && aType == DataType::DOUBLE) { cudaType = cudaAType = cudaBType = cudaCType = HIP_R_64F; } else if(AB && cType == DataType::FLOAT32 && aType == DataType::INT8) { cudaType = cudaCType = HIP_R_32F; cudaAType = cudaBType = HIP_R_8I; } else if(AB && cType == DataType::FLOAT32 && aType == DataType::HALF) { cudaType = cudaCType = HIP_R_32F; cudaAType = cudaBType = HIP_R_16F; } else badTypes = true; const int bS = pC->lengthOf() / (M*N); const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(cRank, {-2, -1}); NDArray::prepareSpecialUse({pC}, {pA, pB}); if(!badTypes) { std::vector<Nd4jLong> subArrOffsets(bS); std::vector<Nd4jLong> subArrShapeInfo(shape::shapeInfoLength(2)); // all sub-arrays have rank = 2 std::vector<void*> aSubArrs(bS), bSubArrs(bS), cSubArrs(bS); if(aRank > 2) shape::calcSubArrShapeAndOffsets(pA->getShapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data()); for (int i = 0; i < bS; ++i) aSubArrs[i] = aRank == 2 ? pA->getSpecialBuffer() : pA->getSpecialBuffer() + subArrOffsets[i] * pA->sizeOfT(); if(bRank > 2) shape::calcSubArrShapeAndOffsets(pB->getShapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data()); for (int i = 0; i < bS; ++i) bSubArrs[i] = bRank == 2 ? 
pB->getSpecialBuffer() : pB->getSpecialBuffer() + subArrOffsets[i] * pB->sizeOfT(); shape::calcSubArrShapeAndOffsets(pC->getShapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data()); for (int i = 0; i < bS; ++i) cSubArrs[i] = pC->getSpecialBuffer() + subArrOffsets[i] * pC->sizeOfT(); PointersManager manager(A->getContext(), "mmulNxN"); const void** aSubArrsCuda = reinterpret_cast<const void **>(manager.replicatePointer(aSubArrs.data(), aSubArrs.size() * sizeof(void*))); const void** bSubArrsCuda = reinterpret_cast<const void **>(manager.replicatePointer(bSubArrs.data(), bSubArrs.size() * sizeof(void*))); void** cSubArrsCuda = reinterpret_cast< void **>(manager.replicatePointer(cSubArrs.data(), cSubArrs.size() * sizeof(void*))); const bool transA = !aMcont; const bool transB = !bKcont; const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(-2) : pA->strideAt(-1); const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(-2) : pB->strideAt(-1); const int ldc = (cMcont && cNcont) ? M : C != pC ? pC->strideAt(-2) : pC->strideAt(-1); const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N; const hipblasOperation_t transBblas = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N; union Coeff {__half _h; float _f; double _d; }; Coeff uAlpha, uBeta; if(cudaType == HIP_R_16F) { uAlpha._h = alpha; uBeta._h = beta; } else if(cudaType == HIP_R_32F) { uAlpha._f = alpha; uBeta._f = beta; } else if(cudaType == HIP_R_64F) { uAlpha._d = alpha; uBeta._d = beta; } auto handle = reinterpret_cast<hipblasHandle_t *>(A->getContext()->getCublasHandle()); auto stream = A->getContext()->getCudaStream(); auto status = hipblasSetStream(*handle, *stream); if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status); status = hipblasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &uAlpha, aSubArrsCuda, cudaAType, lda, bSubArrsCuda, cudaBType, ldb, &uBeta, cSubArrsCuda, cudaCType, ldc, bS, cudaType, HIPBLAS_GEMM_DEFAULT); if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status); auto cudaResult = hipStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", cudaResult); } else { std::vector<Nd4jLong> idxRanges(2 * pC->rankOf()); for(Nd4jLong i = 0; i < bS; ++i) { ShapeUtils::evalIdxRangesForSubArr(i, pC->getShapeInfo(), dimsToExclude, idxRanges.data()); NDArray cSubArr = (*pC)(idxRanges); if(aRank > bRank) { NDArray aSubArr = (*pA)(idxRanges); mmulMxM(&aSubArr, pB, &cSubArr, 1., 0., pC->ordering()); } else if(bRank > aRank) { NDArray bSubArr = (*pB)(idxRanges); mmulMxM(pA, &bSubArr, &cSubArr, 1., 0, pC->ordering()); } else { NDArray aSubArr = (*pA)(idxRanges); NDArray bSubArr = (*pB)(idxRanges); mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., pC->ordering()); } } } NDArray::registerSpecialUse({pC}, {pA, pB}); if(C != pC) C->assign(pC->permute(permut)); for(int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; return C; } */ //BUILD_TRIPLE_TEMPLATE(template void usualGemm, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); //BUILD_TRIPLE_TEMPLATE(template void usualGemv, (const dim3 &blocksPerGrid, const dim3 
&threadsPerBlock, hipStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vB, const int incx, const double beta, void* vC, const int incy), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); //BUILD_TRIPLE_TEMPLATE(template void usualDot, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); }
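// Illustrative sketch (editorial addition, not part of the original file above): a minimal standalone HIP program showing the column-major convention that
// MmulHelper::mmulMxM relies on when it passes row-major ('c'-ordered) buffers to hipblasSgemm with HIPBLAS_OP_T and the leading dimension taken from the
// non-unit stride. The header paths and the bare-bones error handling are assumptions here; the real helper works through NDArray buffers and the
// context-owned hipblasHandle_t instead.
#include <hip/hip_runtime.h>
#include <hipblas.h>
#include <cstdio>
#include <vector>

int main() {
    const int M = 2, K = 3, N = 2;
    // Row-major A (MxK) and B (KxN), exactly how 'c'-ordered NDArrays store them.
    std::vector<float> hA = {1, 2, 3,
                             4, 5, 6};      // strides {K, 1} -> aMcont == false, aKcont == true
    std::vector<float> hB = {1, 0,
                             0, 1,
                             1, 1};         // strides {N, 1} -> bKcont == false, bNcont == true
    std::vector<float> hC(M * N, 0.f);      // result is written in column-major ('f') order

    float *dA, *dB, *dC;
    hipMalloc(reinterpret_cast<void**>(&dA), hA.size() * sizeof(float));
    hipMalloc(reinterpret_cast<void**>(&dB), hB.size() * sizeof(float));
    hipMalloc(reinterpret_cast<void**>(&dC), hC.size() * sizeof(float));
    hipMemcpy(dA, hA.data(), hA.size() * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dB, hB.data(), hB.size() * sizeof(float), hipMemcpyHostToDevice);

    hipblasHandle_t handle;
    hipblasCreate(&handle);

    const float alpha = 1.f, beta = 0.f;
    // Same choice mmulMxM makes for 'c'-ordered inputs: transA = transB = HIPBLAS_OP_T,
    // lda = A->strideAt(0) = K, ldb = B->strideAt(0) = N, ldc = M (C kept 'f'-ordered).
    hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, M, N, K,
                 &alpha, dA, K, dB, N, &beta, dC, M);

    hipMemcpy(hC.data(), dC, hC.size() * sizeof(float), hipMemcpyDeviceToHost);
    for (int i = 0; i < M; ++i)             // print row i of the column-major result
        printf("%.1f %.1f\n", hC[i + 0 * M], hC[i + 1 * M]);   // expected: 4.0 5.0 / 10.0 11.0

    hipblasDestroy(handle);
    hipFree(dA); hipFree(dB); hipFree(dC);
    return 0;
}
// Design note: reading a row-major buffer as the transpose of a column-major one avoids any copy;
// mmulMxM only falls back to dup('f') when neither stride of an operand equals 1.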
95c84d62a2e010622df134d6471e562c6f3c6d3a.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <exceptions/cuda_exception.h> #include <cublas_v2.h> #include "../MmulHelper.h" #include <specials_cuda.h> #include <ShapeUtils.h> #include <PointersManager.h> #include <numeric> namespace nd4j { ////////////////////////////////////////////////////////////////////////////// // MXK x KxN = MxN -> actual sequence of axes doesn't matter template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemm(const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis, const double alpha, const double beta) { const T1* A = reinterpret_cast<const T1*>(vA); const T2* B = reinterpret_cast<const T2*>(vB); T3* C = reinterpret_cast< T3*>(vC); __shared__ int K; __shared__ bool betaPresent; __shared__ Nd4jLong cLen, totalThreads, *coords; __shared__ T3 alphaZ, betaZ; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; coords = reinterpret_cast<Nd4jLong*>(shmem); cLen = shape::length(cShapeInfo); K = shape::shapeOf(const_cast<Nd4jLong*>(aShapeInfo))[aKaxis]; betaPresent = beta; totalThreads = gridDim.x * blockDim.x; alphaZ = alpha; betaZ = beta; } __syncthreads(); auto aCoords = coords + threadIdx.x * 6; // 6 = (aRank + bRank + cRank) auto bCoords = aCoords + 2; auto cCoords = bCoords + 2; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < cLen; i += totalThreads) { // evaluate C coordinates shape::index2coords(i, cShapeInfo, cCoords); // evaluate A coordinates aCoords[aMaxis] = cCoords[cMaxis]; aCoords[aKaxis] = 0; // evaluate B coordinates bCoords[bKaxis] = 0; bCoords[bNaxis] = cCoords[cNaxis]; auto aOffset = shape::getOffset(aShapeInfo, aCoords); auto bOffset = shape::getOffset(bShapeInfo, bCoords); T3 val = A[aOffset] * B[bOffset]; // first iteration for (uint j = 1; j < K; ++j) { // rest iterations aOffset += shape::stride(aShapeInfo)[aKaxis]; bOffset += shape::stride(bShapeInfo)[bKaxis]; val = val + A[aOffset] * B[bOffset]; } auto cOffset = shape::getOffset(cShapeInfo, cCoords); if(betaPresent) C[cOffset] = alphaZ * val + betaZ * C[cOffset]; else C[cOffset] = alphaZ * val; } } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, cudaStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, 
const int cMaxis, const int cNaxis, const double alpha, const double beta) { usualCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta); } //////////////////////////////////////////////////////////////////////// // MXN x N = M -> actual sequence of {M,N} axes doesn't matter template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemv(const void* vA, const Nd4jLong* aShapeInfo, const void* vX, const Nd4jLong* xShapeInfo, void* vY, const Nd4jLong* yShapeInfo, const int incx, const int incy, const int aMaxis, const double alpha, const double beta) { const T1* A = reinterpret_cast<const T1*>(vA); const T2* X = reinterpret_cast<const T2*>(vX); T3* Y = reinterpret_cast< T3*>(vY); __shared__ int M, N; __shared__ bool betaPresent; __shared__ Nd4jLong cLen, totalThreads, aNstride, aMstride; __shared__ T3 alphaZ, betaZ; if (threadIdx.x == 0) { N = shape::length(xShapeInfo); M = shape::length(yShapeInfo); aMstride = shape::stride(aShapeInfo)[aMaxis]; aNstride = shape::stride(aShapeInfo)[aMaxis == 0 ? 1 : 0]; totalThreads = gridDim.x * blockDim.x; betaPresent = beta; alphaZ = alpha; betaZ = beta; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < M; i += totalThreads) { // evaluate offsets auto aOffset = i * aMstride; auto xOffset = 0; T3 val = A[aOffset] * X[xOffset]; // first iteration for (uint j = 1; j < N; ++j) { // rest iterations aOffset += aNstride; xOffset += incx; val = val + A[aOffset] * X[xOffset]; } auto yOffset = i * incy; if(betaPresent) Y[yOffset] = alphaZ * val + betaZ * Y[yOffset]; else Y[yOffset] = alphaZ * val; } } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemv(const int blocksPerGrid, const int threadsPerBlock, cudaStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vX, const Nd4jLong* xShapeInfo, void* vY, const Nd4jLong* yShapeInfo, const int incx, const int incy, const int aMaxis, const double alpha, const double beta) { usualCudaGemv<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 512, *stream>>>(vA, aShapeInfo, vX, xShapeInfo, vY, yShapeInfo, incx, incy, aMaxis, alpha, beta); } ////////////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> static __global__ void usualCudaDot(const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) { T1* X = reinterpret_cast<T1*>(const_cast<void*>(vX)); T2* Y = reinterpret_cast<T2*>(const_cast<void*>(vY)); T3* Z = reinterpret_cast<T3*>(vZ); extern __shared__ unsigned char shmem[]; auto pairwiseMul = reinterpret_cast<T3*>(shmem); const int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < length) pairwiseMul[tid] = X[tid * incx] * Y[tid * incy]; __syncthreads(); if(tid == 0) { T3 sum = 0; for(Nd4jLong i = 0; i < length; ++i) sum = sum + pairwiseMul[i]; if(beta) *Z = (T3)alpha * sum + (T3)beta * *Z; else *Z = (T3)alpha * sum; } } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualDot(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong 
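// usualCudaDot above stages every pairwise product in dynamic shared memory (length * sizeof(T3) bytes are requested at launch) and lets thread 0 of the single launched block
// do the final serial sum, so it is only intended for vectors small enough to fit one block's shared memory.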
incy, const double beta, void* vZ) { usualCudaDot<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, length*sizeof(T3) + 128, *stream>>>(length, alpha, vX, incx, vY, incy, beta, vZ); } ////////////////////////////////////////////////////////////////////////////// // MXK x KxN = MxN NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, double alpha, double beta, const char outOrder) { if(A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of A array is not equal 2 !"); if(B->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of B array is not equal 2 !"); const auto M = A->sizeAt(0); const auto K = A->sizeAt(1); const auto N = B->sizeAt(1); if(C != nullptr && C->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of C array is not equal 2 !"); if(B->sizeAt(0) != K) throw std::runtime_error("MmulHelper::mmulMxM cuda: B array has wrong number of rows !"); if(C != nullptr && C->sizeAt(0) != M) throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of rows !"); if(C != nullptr && C->sizeAt(1) != N) throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of columns !"); if(C == nullptr) C = new NDArray(outOrder, {M,N}, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext()); const int major = Environment::getInstance()->capabilities()[AffinityManager::currentDeviceId()].first(); const auto aType = A->dataType(); const auto bType = B->dataType(); const auto cType = C->dataType(); const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC); const bool typeDouble = ABC && aType == DataType::DOUBLE; const bool typeFloat = ABC && aType == DataType::FLOAT32; const bool typeHalf = ABC && aType == DataType::HALF && major >= 6; const bool typeIntFloat = AB && aType == DataType::INT8 && cType == DataType::FLOAT32 && major >= 6; const bool typeHalfFloat = AB && aType == DataType::HALF && cType == DataType::FLOAT32 && major >= 6; auto handle = reinterpret_cast<cublasHandle_t *>(A->getContext()->getCublasHandle()); auto stream = A->getContext()->getCudaStream(); auto status = cublasSetStream_v2(*handle, *stream); if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status); if(!typeDouble && !typeFloat && !typeHalf && !typeIntFloat && !typeHalfFloat) { const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * 6 + 128; // 6 = aRank + bRank + cRank NDArray::prepareSpecialUse({C}, {A, B}); // BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (blocksPerGrid, threadsPerBlock, sharedMem, stream, A->getSpecialBuffer(), A->getSpecialShapeInfo(), B->getSpecialBuffer(), B->getSpecialShapeInfo(), C->getSpecialBuffer(), C->getSpecialShapeInfo(), 0, 1, 0, 1, 0, 1, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm, (blocksPerGrid, threadsPerBlock, sharedMem, stream, A->getSpecialBuffer(), A->getSpecialShapeInfo(), B->getSpecialBuffer(), B->getSpecialShapeInfo(), C->getSpecialBuffer(), C->getSpecialShapeInfo(), 0, 1, 0, 1, 0, 1, alpha, beta), NUMERIC_TYPES) NDArray::registerSpecialUse({C}, {A, B}); auto cudaResult = cudaStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult); } else { std::vector<NDArray*> toDelete; NDArray *pA(const_cast<NDArray*>(A)), 
*pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C)); bool aMcont = M == 1 || A->strideAt(0) == 1; bool aKcont = K == 1 || A->strideAt(1) == 1; bool bKcont = K == 1 || B->strideAt(0) == 1; bool bNcont = N == 1 || B->strideAt(1) == 1; bool cMcont = M == 1 || C->strideAt(0) == 1; bool cNcont = N == 1 || C->strideAt(1) == 1; if(!aMcont && !aKcont) { pA = A->dup('f'); toDelete.push_back(pA); aMcont = true; } if(!bKcont && !bNcont) { pB = B->dup('f'); toDelete.push_back(pB); bKcont = true; } if(!cMcont) { pC = C->dup('f'); toDelete.push_back(pC); cMcont = true; } const bool transA = !aMcont; const bool transB = !bKcont; const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1); const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(0) : pB->strideAt(1); const int ldc = (cMcont && cNcont) ? M : pC->strideAt(1); const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N; const cublasOperation_t transBblas = transB ? CUBLAS_OP_T : CUBLAS_OP_N; NDArray::prepareSpecialUse({pC}, {pA, pB}); // choose appropriate cuda gemm api depending on data types if(typeDouble) { status = cublasDgemm(*handle, transAblas, transBblas, M, N, K, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)pB->getSpecialBuffer(), ldb, &beta, (double*)pC->getSpecialBuffer(), ldc); } else if(typeFloat) { float alphaF(alpha), betaF(beta); status = cublasSgemm(*handle, transAblas, transBblas, M, N, K, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)pB->getSpecialBuffer(), ldb, &betaF, (float*)pC->getSpecialBuffer(), ldc); } else if(typeHalf) { float16 alphaH(alpha), betaH(beta); status = cublasHgemm(*handle, transAblas, transBblas, M, N, K, &alphaH.data, (__half*)pA->getSpecialBuffer(), lda, (__half*)pB->getSpecialBuffer(), ldb, &betaH.data, (__half*)pC->getSpecialBuffer(), ldc); } else if(typeIntFloat) { float alphaF(alpha), betaF(beta); status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), CUDA_R_8I, lda, pB->getSpecialBuffer(), CUDA_R_8I, ldb, &betaF, pC->getSpecialBuffer(), CUDA_R_32F, ldc); } else if(typeHalfFloat) { float alphaF(alpha), betaF(beta); status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), CUDA_R_16F, lda, pB->getSpecialBuffer(), CUDA_R_16F, ldb, &betaF, pC->getSpecialBuffer(), CUDA_R_32F, ldc); } if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status); NDArray::registerSpecialUse({pC}, {pA, pB}); auto cudaResult = cudaStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult); if(C != pC) C->assign(pC); for(int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; } return C; } //////////////////////////////////////////////////////////////////////////// // MXN x N = M NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, nd4j::NDArray* Y, const double alpha, const double beta, const char outOrder) { int xLenDim, yLenDim(0); if(A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxV cuda: rank of A array is not equal 2 !"); if(!shape::isCommonVector(X->getShapeInfo(), xLenDim)) throw std::runtime_error("MmulHelper::mmulMxV cuda: X array must be vector !"); const auto M = A->sizeAt(0); const auto N = A->sizeAt(1); if(Y != nullptr && !shape::isCommonVector(Y->getShapeInfo(), yLenDim)) throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array must be vector !"); if(X->lengthOf() != N) throw 
std::runtime_error("MmulHelper::mmulMxV cuda: X vector has wrong length !"); if(Y != nullptr && Y->lengthOf() != M) throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array has wrong length !"); if(Y == nullptr) Y = new NDArray(outOrder, {M}, DataTypeUtils::pickPairwiseResultType(A->dataType(), X->dataType()), A->getContext()); const int incx = X->strideAt(xLenDim); const int incy = Y->strideAt(yLenDim); const auto aType = A->dataType(); const auto xType = X->dataType(); const auto yType = Y->dataType(); const bool AX(aType == xType), AY(aType == yType), AXY(AX && AY); const bool typeDouble = AXY && aType == DataType::DOUBLE; const bool typeFloat = AXY && aType == DataType::FLOAT32; auto handle = reinterpret_cast<cublasHandle_t *>(A->getContext()->getCublasHandle()); auto stream = A->getContext()->getCudaStream(); auto status = cublasSetStream_v2(*handle, *stream); if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status); if(!typeDouble && !typeFloat) { const int threadsPerBlock = MAX_NUM_THREADS; const int blocksPerGrid = (M + threadsPerBlock - 1) / threadsPerBlock; NDArray::prepareSpecialUse({Y}, {A, X}); // BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, A->getSpecialBuffer(), A->getSpecialShapeInfo(), X->getSpecialBuffer(), X->getSpecialShapeInfo(), Y->getSpecialBuffer(), Y->getSpecialShapeInfo(), incx, incy, 0, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(xType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, A->getSpecialBuffer(), A->getSpecialShapeInfo(), X->getSpecialBuffer(), X->getSpecialShapeInfo(), Y->getSpecialBuffer(), Y->getSpecialShapeInfo(), incx, incy, 0, alpha, beta), NUMERIC_TYPES) NDArray::registerSpecialUse({Y}, {A, X}); auto cudaResult = cudaStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult); } else { NDArray *pA(const_cast<NDArray*>(A)); bool aMcont = M == 1 || A->strideAt(0) == 1; bool aNcont = N == 1 || A->strideAt(1) == 1; if(!aMcont && !aNcont) { pA = A->dup('f'); aMcont = true; } const bool transA = !aMcont; const int lda = (aMcont && aNcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1); const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N; NDArray::prepareSpecialUse({Y}, {pA, X}); // choose appropriate cuda gemm api depending on data types if(typeDouble) { status = cublasDgemv(*handle, transAblas, transA ? N : M, transA ? M : N, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)X->getSpecialBuffer(), incx, &beta, (double*)Y->getSpecialBuffer(), incy); } else if(typeFloat) { float alphaF(alpha), betaF(beta); status = cublasSgemv(*handle, transAblas, transA ? N : M, transA ? 
M : N, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)X->getSpecialBuffer(), incx, &betaF, (float*)Y->getSpecialBuffer(), incy); } if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status); auto cudaResult = cudaStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult); NDArray::registerSpecialUse({Y}, {pA, X}); if(pA != A) delete pA; } return Y; } //////////////////////////////////////////////////////////////////////////// // (X * Y) = Z[0] NDArray* MmulHelper::dot(const NDArray* X, const NDArray* Y, nd4j::NDArray* Z, const double alpha, const double beta) { int xLenDim(0), yLenDim(0); if(!shape::isCommonVector(X->getShapeInfo(), xLenDim)) throw std::runtime_error("MmulHelper::dot cuda: X array must be vector !"); if(!shape::isCommonVector(Y->getShapeInfo(), yLenDim)) throw std::runtime_error("MmulHelper::dot cuda: Y array must be vector !"); if(Z != nullptr && !Z->isScalar()) throw std::runtime_error("MmulHelper::dot cuda: Z array must be scalar !"); const auto length = X->lengthOf(); if(Y->lengthOf() != length) throw std::runtime_error("MmulHelper::dot cuda: lengths of input vectors are different !"); if(Z == nullptr) Z = new NDArray(DataTypeUtils::pickPairwiseResultType(X->dataType(), Y->dataType()), X->getContext()); const Nd4jLong incx = X->strideAt(xLenDim); const Nd4jLong incy = Y->strideAt(yLenDim); const auto xType = X->dataType(); const auto yType = Y->dataType(); const auto zType = Z->dataType(); if(!X->isActualOnDeviceSide()) X->syncToDevice(); if(!Y->isActualOnDeviceSide()) Y->syncToDevice(); if(!Z->isActualOnDeviceSide()) Z->syncToDevice(); cudaStream_t* stream = X->getContext()->getCudaStream(); dim3 threadsPerBlock(512); dim3 blocksPerGrid(1); if (length > 512) threadsPerBlock.x = math::nd4j_ceil<double, int>(static_cast<double>(length) / 512); NDArray::prepareSpecialUse({Z}, {X, Y}); //BUILD_TRIPLE_SELECTOR(xType, yType, zType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(xType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), NUMERIC_TYPES) auto cudaResult = cudaStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::dot cuda failed !", cudaResult); NDArray::registerSpecialUse({Z}, {X, Y}); return Z; } ////////////////////////////////////////////////////////////////////////////// // [bS,M,K] x [bS,K,N] = [bS,M,N] // [bS,M,K] x [K,N] = [bS,M,N] // [M,K] x [bS,K,N] = [bS,M,N] // bS could stand for several axes template <typename T1, typename T2, typename T3> static __global__ void batchedCudaGemm(const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo, const int* aBatchDims, const int* bBatchDims, const int* cBatchDims, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis, const double alpha, const double beta) { const T1* A = reinterpret_cast<const T1*>(vA); const T2* B = reinterpret_cast<const T2*>(vB); T3* C = reinterpret_cast< T3*>(vC); __shared__ bool betaPresent; __shared__ int aRank, bRank, cRank, K; __shared__ Nd4jLong cLen, totalThreads, *coords; __shared__ T3 alphaZ, betaZ; if (threadIdx.x == 0) { 
extern __shared__ unsigned char shmem[]; coords = reinterpret_cast<Nd4jLong*>(shmem); cLen = shape::length(cShapeInfo); K = shape::shapeOf(const_cast<Nd4jLong*>(aShapeInfo))[aKaxis]; totalThreads = gridDim.x * blockDim.x; aRank = shape::rank(aShapeInfo); bRank = shape::rank(bShapeInfo); cRank = shape::rank(cShapeInfo); betaPresent = beta; alphaZ = alpha; betaZ = beta; } __syncthreads(); auto aCoords = coords + threadIdx.x * (aRank + bRank + cRank); auto bCoords = aCoords + aRank; auto cCoords = bCoords + bRank; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < cLen; i += totalThreads) { // evaluate C coordinates shape::index2coords(i, cShapeInfo, cCoords); // calculate index of current batch Nd4jLong batchInd; if(cBatchDims != nullptr) batchInd = shape::coords2index(cShapeInfo, cCoords, cRank - 2, cBatchDims); // evaluate A coordinates if(aBatchDims != nullptr) shape::index2coords(batchInd, aShapeInfo, aCoords, aRank - 2, aBatchDims); aCoords[aMaxis] = cCoords[cMaxis]; aCoords[aKaxis] = 0; // evaluate B coordinates if(bBatchDims != nullptr) shape::index2coords(batchInd, bShapeInfo, bCoords, bRank - 2, bBatchDims); bCoords[bKaxis] = 0; bCoords[bNaxis] = cCoords[cNaxis]; auto aOffset = shape::getOffset(aShapeInfo, aCoords); auto bOffset = shape::getOffset(bShapeInfo, bCoords); T3 val = A[aOffset] * B[bOffset]; // first iteration for (uint j = 1; j < K; ++j) { // rest iterations aOffset += shape::stride(aShapeInfo)[aKaxis]; bOffset += shape::stride(bShapeInfo)[bKaxis]; val = val + A[aOffset] * B[bOffset]; } auto cOffset = shape::getOffset(cShapeInfo, cCoords); if(betaPresent) C[cOffset] = alphaZ * val + betaZ * C[cOffset]; else C[cOffset] = alphaZ * val; } } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void batchedGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, cudaStream_t *stream, const void* vA, const Nd4jLong* aShapeInfo, const void* vB, const Nd4jLong* bShapeInfo, void* vC, const Nd4jLong* cShapeInfo, const int* aBatchDims, const int* bBatchDims, const int* cBatchDims, const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis, const double alpha, const double beta) { batchedCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta); } /////////////////////////////////////////////////////////////////// NDArray* MmulHelper::mmulNxN(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) { const int aRank = A->rankOf(); const int bRank = B->rankOf(); // input ranks validation if(aRank > bRank && bRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !"); else if(bRank > aRank && aRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !"); else if (aRank == bRank ) { for(int i = 0; i < aRank - 2; ++i) if(A->sizeAt(i) != B->sizeAt(i)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); } if(A->sizeAt(-1) != B->sizeAt(-2)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); // validation of C array std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? 
A->getShapeAsVector() : B->getShapeAsVector(); cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2); cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1); if(C != nullptr ) { if(!C->isSameShape(cExpectedShape)) throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !"); } else C = new NDArray(outOrder, cExpectedShape, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext()); const int cRank = C->rankOf(); const int aMaxis(aRank-2), aKaxis(aRank-1), bKaxis(bRank-2), bNaxis(bRank-1), cMaxis(cRank-2), cNaxis(cRank-1); const int threadsPerBlock = MAX_NUM_THREADS / 8; const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * (aRank + bRank + cRank) + 128; PointersManager manager(A->getContext(), "MmulHelper::mmulNxN"); const int *aBatchDims(nullptr), *bBatchDims(nullptr), *cBatchDims(nullptr); if(aRank > 2) aBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(aRank, {aMaxis, aKaxis}).data(), (aRank - 2) * sizeof(int))); if(bRank > 2) bBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(bRank, {bKaxis, bNaxis}).data(), (bRank - 2) * sizeof(int))); if(cRank > 2) cBatchDims = reinterpret_cast<int*>(manager.replicatePointer(ShapeUtils::evalDimsToExclude(cRank, {cMaxis, cNaxis}).data(), (cRank - 2) * sizeof(int))); NDArray::prepareSpecialUse({C}, {A, B}); // BUILD_TRIPLE_SELECTOR(A->dataType(), b->dataType(), C->dataType(), batchedGemm, (blocksPerGrid, threadsPerBlock, A->getContext()->getCudaStream(), A->getSpecialBuffer(), A->getSpecialShapeInfo(), B->getSpecialBuffer(), B->getSpecialShapeInfo(), C->getSpecialBuffer(), C->getSpecialShapeInfo(), aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(A->dataType(), batchedGemm, (blocksPerGrid, threadsPerBlock, sharedMem, A->getContext()->getCudaStream(), A->getSpecialBuffer(), A->getSpecialShapeInfo(), B->getSpecialBuffer(), B->getSpecialShapeInfo(), C->getSpecialBuffer(), C->getSpecialShapeInfo(), aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta), NUMERIC_TYPES) NDArray::registerSpecialUse({C}, {A, B}); manager.synchronize(); return C; } /* ////////////////////////////////////////////////////////////////////////////// // MXN x N = M template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemv(const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) { T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA)); T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX)); T3* Y = reinterpret_cast<T3*>(vY); __shared__ T3 alphaZ, betaZ; __shared__ Nd4jLong strideArow, strideAcol; const int row = blockIdx.x * blockDim.x + threadIdx.x; if(row == 0) { alphaZ = alpha; betaZ = beta; if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; } } __syncthreads(); T3 val = 0; if (row < M) for (int i = 0; i < N; i++) val = val + A[row * strideArow + i * strideAcol] * X[i * incx]; Y[row * incy] = alphaZ * val + betaZ * Y[row * incy]; } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemv(const dim3 &blocksPerGrid, const dim3 
&threadsPerBlock, cudaStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) { usualCudaGemv<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, M, N, alpha, vA, lda, vX, incx, beta, vY, incy); } */ /* ////////////////////////////////////////////////////////////////////////////// MXK x KxN = MxN C array must be in f order template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemm(const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) { T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA)); T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB)); T3* C = reinterpret_cast<T3*>(vC); __shared__ T3 alphaZ, betaZ; __shared__ Nd4jLong strideArow, strideAcol, strideBrow, strideBcol; const int row = blockIdx.y * blockDim.y + threadIdx.y; const int col = blockIdx.x * blockDim.x + threadIdx.x; if(row == 0 && col == 0) { alphaZ = alpha; betaZ = beta; if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; } if(transB) { strideBrow = ldb; strideBcol = 1; } else { strideBrow = 1; strideBcol = ldb; } } __syncthreads(); T3 val = 0; if (row < M && col < N) for (int i = 0; i < K; i++) val = val + A[row * strideArow + i * strideAcol] * B[i * strideBrow + col * strideBcol]; C[row + col * ldc] = alphaZ * val + betaZ * C[row + col * ldc]; } ////////////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemm(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) { usualCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, transB, M, N, K, alpha, vA, lda, vB, ldb, beta, vC, ldc); } */ ////////////////////////////////////////////////////////////////////////// /* NDArray* MmulHelper::mmulNxNold1(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) { const int aRank = A->rankOf(); const int bRank = B->rankOf(); // input ranks validation if(aRank > bRank && bRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !"); else if(bRank > aRank && aRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !"); else if (aRank == bRank ) { for(int i = 0; i < aRank - 2; ++i) if(A->sizeAt(i) != B->sizeAt(i)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); } if(A->sizeAt(-1) != B->sizeAt(-2)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); // validation of C array std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? 
A->getShapeAsVector() : B->getShapeAsVector(); cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2); cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1); if(C != nullptr ) { if(!C->isSameShape(cExpectedShape)) throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !"); } else { C = new NDArray(outOrder, cExpectedShape, B->dataType()); } // multiplication const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(C->rankOf(), {-2, -1}); const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(C->getShapeInfo(), dimsToExclude); std::vector<Nd4jLong> idxRanges(2 * C->rankOf()); // #pragma omp parallel for schedule(guided) firstprivate(idxRanges) for(Nd4jLong i = 0; i < numOfSubArrs; ++i) { ShapeUtils::evalIdxRangesForSubArr(i, C->getShapeInfo(), dimsToExclude, idxRanges.data()); NDArray cSubArr = (*C)(idxRanges); if(aRank > bRank) { NDArray aSubArr = (*A)(idxRanges); mmulMxM(&aSubArr, B, &cSubArr, 1., 0., outOrder); } else if(bRank > aRank) { NDArray bSubArr = (*B)(idxRanges); mmulMxM(A, &bSubArr, &cSubArr, 1., 0, outOrder); } else { NDArray aSubArr = (*A)(idxRanges); NDArray bSubArr = (*B)(idxRanges); mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., outOrder); } } return C; } */ ////////////////////////////////////////////////////////////////////////// // [bS,M,K] x [bS,K,N] = [bS,M,N] // [bS,M,K] x [K,N] = [bS,M,N] // [M,K] x [bS,K,N] = [bS,M,N] // bS could stand for several axes /* NDArray* MmulHelper::mmulNxNold2(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) { const int aRank = A->rankOf(); const int bRank = B->rankOf(); // input ranks validation if(aRank > bRank && bRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !"); else if(bRank > aRank && aRank != 2) throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !"); else if (aRank == bRank ) { for(int i = 0; i < aRank - 2; ++i) if(A->sizeAt(i) != B->sizeAt(i)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); } if(A->sizeAt(-1) != B->sizeAt(-2)) throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !"); // validation of C array std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? 
A->getShapeAsVector() : B->getShapeAsVector(); cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2); cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1); if(C != nullptr ) { if(!C->isSameShape(cExpectedShape)) throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !"); } else C = new NDArray(outOrder, cExpectedShape, B->dataType()); const int cRank = C->rankOf(); const auto M = A->sizeAt(-2); const auto K = A->sizeAt(-1); const auto N = B->sizeAt(-1); NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C)); std::vector<NDArray*> toDelete; bool aMcont = M == 1 || A->strideAt(-2) == 1; bool aKcont = K == 1 || A->strideAt(-1) == 1; bool bKcont = K == 1 || B->strideAt(-2) == 1; bool bNcont = N == 1 || B->strideAt(-1) == 1; bool cMcont = M == 1 || C->strideAt(-2) == 1; bool cNcont = N == 1 || C->strideAt(-1) == 1; if(!aMcont && !aKcont) { pA = A->dup('c'); toDelete.push_back(pA); aKcont = true; } if(!bKcont && !bNcont) { pB = B->dup('c'); toDelete.push_back(pB); bNcont = true; } std::vector<int> permut(cRank); if(!cMcont) { std::iota(permut.begin(), permut.end(), 0); permut[cRank - 2] = cRank - 1; permut[cRank - 1] = cRank - 2; // swap two last dimensions [..., M,N] -> [..., N,M] auto Cpermut = C->permute(permut); pC = new NDArray('c', Cpermut.getShapeAsVector(), Cpermut.dataType(), A->getContext()); pC->assign(Cpermut); toDelete.push_back(pC); cMcont = true; } const auto aType = pA->dataType(); const auto bType = pB->dataType(); const auto cType = pC->dataType(); const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC); bool badTypes = false; cudaDataType_t cudaType, cudaAType, cudaBType, cudaCType; if(ABC && aType == DataType::HALF) { cudaType = cudaAType = cudaBType = cudaCType = CUDA_R_16F; } else if(ABC && aType == DataType::FLOAT32) { cudaType = cudaAType = cudaBType = cudaCType = CUDA_R_32F; } else if(ABC && aType == DataType::DOUBLE) { cudaType = cudaAType = cudaBType = cudaCType = CUDA_R_64F; } else if(AB && cType == DataType::FLOAT32 && aType == DataType::INT8) { cudaType = cudaCType = CUDA_R_32F; cudaAType = cudaBType = CUDA_R_8I; } else if(AB && cType == DataType::FLOAT32 && aType == DataType::HALF) { cudaType = cudaCType = CUDA_R_32F; cudaAType = cudaBType = CUDA_R_16F; } else badTypes = true; const int bS = pC->lengthOf() / (M*N); const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(cRank, {-2, -1}); NDArray::prepareSpecialUse({pC}, {pA, pB}); if(!badTypes) { std::vector<Nd4jLong> subArrOffsets(bS); std::vector<Nd4jLong> subArrShapeInfo(shape::shapeInfoLength(2)); // all sub-arrays have rank = 2 std::vector<void*> aSubArrs(bS), bSubArrs(bS), cSubArrs(bS); if(aRank > 2) shape::calcSubArrShapeAndOffsets(pA->getShapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data()); for (int i = 0; i < bS; ++i) aSubArrs[i] = aRank == 2 ? pA->getSpecialBuffer() : pA->getSpecialBuffer() + subArrOffsets[i] * pA->sizeOfT(); if(bRank > 2) shape::calcSubArrShapeAndOffsets(pB->getShapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data()); for (int i = 0; i < bS; ++i) bSubArrs[i] = bRank == 2 ? 
pB->getSpecialBuffer() : pB->getSpecialBuffer() + subArrOffsets[i] * pB->sizeOfT(); shape::calcSubArrShapeAndOffsets(pC->getShapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data()); for (int i = 0; i < bS; ++i) cSubArrs[i] = pC->getSpecialBuffer() + subArrOffsets[i] * pC->sizeOfT(); PointersManager manager(A->getContext(), "mmulNxN"); const void** aSubArrsCuda = reinterpret_cast<const void **>(manager.replicatePointer(aSubArrs.data(), aSubArrs.size() * sizeof(void*))); const void** bSubArrsCuda = reinterpret_cast<const void **>(manager.replicatePointer(bSubArrs.data(), bSubArrs.size() * sizeof(void*))); void** cSubArrsCuda = reinterpret_cast< void **>(manager.replicatePointer(cSubArrs.data(), cSubArrs.size() * sizeof(void*))); const bool transA = !aMcont; const bool transB = !bKcont; const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(-2) : pA->strideAt(-1); const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(-2) : pB->strideAt(-1); const int ldc = (cMcont && cNcont) ? M : C != pC ? pC->strideAt(-2) : pC->strideAt(-1); const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N; const cublasOperation_t transBblas = transB ? CUBLAS_OP_T : CUBLAS_OP_N; union Coeff {__half _h; float _f; double _d; }; Coeff uAlpha, uBeta; if(cudaType == CUDA_R_16F) { uAlpha._h = alpha; uBeta._h = beta; } else if(cudaType == CUDA_R_32F) { uAlpha._f = alpha; uBeta._f = beta; } else if(cudaType == CUDA_R_64F) { uAlpha._d = alpha; uBeta._d = beta; } auto handle = reinterpret_cast<cublasHandle_t *>(A->getContext()->getCublasHandle()); auto stream = A->getContext()->getCudaStream(); auto status = cublasSetStream_v2(*handle, *stream); if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status); status = cublasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &uAlpha, aSubArrsCuda, cudaAType, lda, bSubArrsCuda, cudaBType, ldb, &uBeta, cSubArrsCuda, cudaCType, ldc, bS, cudaType, CUBLAS_GEMM_DEFAULT); if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status); auto cudaResult = cudaStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", cudaResult); } else { std::vector<Nd4jLong> idxRanges(2 * pC->rankOf()); for(Nd4jLong i = 0; i < bS; ++i) { ShapeUtils::evalIdxRangesForSubArr(i, pC->getShapeInfo(), dimsToExclude, idxRanges.data()); NDArray cSubArr = (*pC)(idxRanges); if(aRank > bRank) { NDArray aSubArr = (*pA)(idxRanges); mmulMxM(&aSubArr, pB, &cSubArr, 1., 0., pC->ordering()); } else if(bRank > aRank) { NDArray bSubArr = (*pB)(idxRanges); mmulMxM(pA, &bSubArr, &cSubArr, 1., 0, pC->ordering()); } else { NDArray aSubArr = (*pA)(idxRanges); NDArray bSubArr = (*pB)(idxRanges); mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., pC->ordering()); } } } NDArray::registerSpecialUse({pC}, {pA, pB}); if(C != pC) C->assign(pC->permute(permut)); for(int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; return C; } */ //BUILD_TRIPLE_TEMPLATE(template void usualGemm, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); //BUILD_TRIPLE_TEMPLATE(template void usualGemv, (const dim3 &blocksPerGrid, const dim3 
&threadsPerBlock, cudaStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vB, const int incx, const double beta, void* vC, const int incy), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); //BUILD_TRIPLE_TEMPLATE(template void usualDot, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); }
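Editorial note (not part of the original source): the usualGemm / usualGemv kernels preserved in the comments above implement the standard BLAS updates on column-major ("f order") data. As a restatement of their semantics,

\[
C_{rc} \;\leftarrow\; \alpha \sum_{i=0}^{K-1} \operatorname{op}(A)_{ri}\,\operatorname{op}(B)_{ic} \;+\; \beta\, C_{rc},
\qquad
\operatorname{op}(X) = \begin{cases} X, & \text{trans flag false} \\ X^{\mathsf T}, & \text{trans flag true,} \end{cases}
\]

where element (r, c) of a column-major matrix with leading dimension ld sits at offset r + c*ld; this is exactly what the per-block strideArow/strideAcol (and strideBrow/strideBcol) selection in shared memory encodes. usualGemv is the matrix-vector special case, with x and y accessed through the strides incx and incy.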
d9e4e0b2d463d372db9e67741866aaa030521904.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <iostream> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <thrust/host_vector.h> #include <thrust/sequence.h> #include <thrust/transform.h> #define SIZE 10 #define V 0.1 #define T 1 using namespace std; __global__ void kernel(float *f, float *res) { int cur = threadIdx.x + blockDim.x * blockIdx.x; int prev = cur - 1; if (prev == -1) prev = SIZE - 1; if (cur >= 0 && cur < SIZE) { res[cur] = f[cur] + (V * T) * (f[prev] - f[cur]); } } struct saxpy_functor { const float a; saxpy_functor(float _a) : a(_a) {} __host__ __device__ float operator()(float x, float y) { return a * x + y; } }; void saxpy(float a, thrust::device_vector<float> &x, thrust::device_vector<float> &y) { saxpy_functor func(a); thrust::transform(x.begin(), x.end(), y.begin(), y.begin(), func); } int main() { hipEvent_t start, stop; float elapsedTime; hipEventCreate(&start); hipEventCreate(&stop); float F[SIZE]; float *frez; float *tempa; float DATA[SIZE]; for (int i = 0; i < SIZE; i++) { DATA[i] = rand() % 10; F[i] = DATA[i]; } hipMalloc((void **)&frez, sizeof(float) * SIZE); hipMalloc((void **)&tempa, sizeof(float) * SIZE); hipMemcpy(tempa, F, sizeof(float) * SIZE, hipMemcpyHostToDevice); hipEventRecord(start, 0); for (int i = 0; i < 100; i++) { hipLaunchKernelGGL(( kernel), dim3(1), dim3(SIZE), 0, 0, tempa, frez); hipMemcpy(F, frez, sizeof(float) * SIZE, hipMemcpyDeviceToHost); hipMemcpy(tempa, frez, sizeof(float) * SIZE, hipMemcpyHostToDevice); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); fprintf(stderr, "Time (Raw CUDA C) %g\n", elapsedTime); thrust::host_vector<float> h1(SIZE); thrust::host_vector<float> h2(SIZE); for (int i = 0; i < SIZE; i++) { h1[i] = DATA[i]; if ((i - 1) >= 0) h2[i] = DATA[i - 1]; else h2[i] = DATA[SIZE - 1]; h2[i] = h2[i] * V * T; } thrust::device_vector<float> d1 = h1; thrust::device_vector<float> d2 = h2; hipEventRecord(start, 0); for (int j = 0; j < 100; j++) { saxpy(1 - V * T, d1, d2); h2 = d2; d1 = h2; for (int i = 0; i < SIZE; i++) { if ((i - 1) >= 0) h1[i] = h2[i - 1]; else h1[i] = h2[SIZE - 1]; h1[i] = h1[i] * V * T; } d2 = h1; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); fprintf(stderr, "Time (Trust SAXPY) %g\n", elapsedTime); }
d9e4e0b2d463d372db9e67741866aaa030521904.cu
#include <cstdio> #include <cuda.h> #include <iostream> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <thrust/host_vector.h> #include <thrust/sequence.h> #include <thrust/transform.h> #define SIZE 10 #define V 0.1 #define T 1 using namespace std; __global__ void kernel(float *f, float *res) { int cur = threadIdx.x + blockDim.x * blockIdx.x; int prev = cur - 1; if (prev == -1) prev = SIZE - 1; if (cur >= 0 && cur < SIZE) { res[cur] = f[cur] + (V * T) * (f[prev] - f[cur]); } } struct saxpy_functor { const float a; saxpy_functor(float _a) : a(_a) {} __host__ __device__ float operator()(float x, float y) { return a * x + y; } }; void saxpy(float a, thrust::device_vector<float> &x, thrust::device_vector<float> &y) { saxpy_functor func(a); thrust::transform(x.begin(), x.end(), y.begin(), y.begin(), func); } int main() { cudaEvent_t start, stop; float elapsedTime; cudaEventCreate(&start); cudaEventCreate(&stop); float F[SIZE]; float *frez; float *tempa; float DATA[SIZE]; for (int i = 0; i < SIZE; i++) { DATA[i] = rand() % 10; F[i] = DATA[i]; } cudaMalloc((void **)&frez, sizeof(float) * SIZE); cudaMalloc((void **)&tempa, sizeof(float) * SIZE); cudaMemcpy(tempa, F, sizeof(float) * SIZE, cudaMemcpyHostToDevice); cudaEventRecord(start, 0); for (int i = 0; i < 100; i++) { kernel<<<1, SIZE>>>(tempa, frez); cudaMemcpy(F, frez, sizeof(float) * SIZE, cudaMemcpyDeviceToHost); cudaMemcpy(tempa, frez, sizeof(float) * SIZE, cudaMemcpyHostToDevice); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); fprintf(stderr, "Time (Raw CUDA C) %g\n", elapsedTime); thrust::host_vector<float> h1(SIZE); thrust::host_vector<float> h2(SIZE); for (int i = 0; i < SIZE; i++) { h1[i] = DATA[i]; if ((i - 1) >= 0) h2[i] = DATA[i - 1]; else h2[i] = DATA[SIZE - 1]; h2[i] = h2[i] * V * T; } thrust::device_vector<float> d1 = h1; thrust::device_vector<float> d2 = h2; cudaEventRecord(start, 0); for (int j = 0; j < 100; j++) { saxpy(1 - V * T, d1, d2); h2 = d2; d1 = h2; for (int i = 0; i < SIZE; i++) { if ((i - 1) >= 0) h1[i] = h2[i - 1]; else h1[i] = h2[SIZE - 1]; h1[i] = h1[i] * V * T; } d2 = h1; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); fprintf(stderr, "Time (Trust SAXPY) %g\n", elapsedTime); }
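Editorial note (not part of the original source): both the raw kernel and the Thrust path in this file pair compute one explicit upwind step of 1-D advection on a periodic grid of SIZE points, repeated 100 times. Written out,

\[
f_i^{\text{new}} \;=\; f_i + V\,T\,(f_{i-1} - f_i) \;=\; (1 - V T)\, f_i \;+\; V T\, f_{i-1},
\qquad f_{-1} \equiv f_{N-1},
\]

and the second form is what the Thrust version evaluates: d1 holds f, d2 is pre-filled on the host with V*T*f_{i-1}, and saxpy(1 - V*T, d1, d2) leaves the updated field in d2 before the shifted copy is rebuilt for the next step.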
d3518a2560ce2d94543d5e8ad3dd72a00f5ad4f2.hip
// !!! This is a file automatically generated by hipify!!! /*******************\ The Task List: 1. Author: Vu Duc Thai \*******************/ #include "ReadingImage.hpp" #include "MedianFilter.hpp" #include <cstdio> // #include #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> int main() { Matrix *input_mat = new Matrix("/home/thaivu/Projects/CUDA-NVIDIA_Learning/Photo/sp_noise.jpg", KERNEL_SIZE); // std::cout << *input_mat << std::endl; Matrix *output_mat = new Matrix(input_mat->rows, input_mat->cols); // std::cout << *output_mat << std::endl; //the number of elements for padding matrix int new_rows = input_mat->rows + (int)(KERNEL_SIZE/2) * 2; int new_cols = input_mat->cols + (int)(KERNEL_SIZE/2) * 2; // int true_size = input_mat->rows * input_mat->cols; // Set our CTA and Grid dimensions dim3 dimBlock(TILE_SIZE, TILE_SIZE); dim3 dimGrid((int)ceil((float)new_cols / (float)TILE_SIZE), (int)ceil((float)new_rows / (float)TILE_SIZE)); hipLaunchKernelGGL(( actPaddingMedianFilter) , dim3(dimGrid), dim3(dimBlock), 0, 0, input_mat->d_elements, output_mat->d_elements, input_mat->rows, input_mat->cols); // size_t shmem_size = (16+2) * (16+2) * blocks * blocks * sizeof(u_char); // hipFuncSetCacheConfig(Optimized_Kernel_Function_shared, hipFuncCachePreferShared); // Optimized_Kernel_Function_shared <<<NUM_BLOCKS, NUM_THREADS, shmem_size>>> (input_mat->d_elements, output_mat->d_elements, // input_mat->rows, input_mat->cols); // medianFilterSharedKernel <<<NUM_BLOCKS, NUM_THREADS>>> (input_mat->d_elements, output_mat->d_elements, // input_mat->rows, input_mat->cols); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); // copy data v host memory output_mat->copyCudaMemoryD2H(); // save output image output_mat->saveImage("Filted_Image_v3"); delete input_mat, output_mat; return 0; }
d3518a2560ce2d94543d5e8ad3dd72a00f5ad4f2.cu
/*******************\ The Task List: 1. Author: Vu Duc Thai \*******************/ #include "ReadingImage.hpp" #include "MedianFilter.hpp" #include <cstdio> // #include #include <cuda.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> int main() { Matrix *input_mat = new Matrix("/home/thaivu/Projects/CUDA-NVIDIA_Learning/Photo/sp_noise.jpg", KERNEL_SIZE); // std::cout << *input_mat << std::endl; Matrix *output_mat = new Matrix(input_mat->rows, input_mat->cols); // std::cout << *output_mat << std::endl; //the number of elements for padding matrix int new_rows = input_mat->rows + (int)(KERNEL_SIZE/2) * 2; int new_cols = input_mat->cols + (int)(KERNEL_SIZE/2) * 2; // int true_size = input_mat->rows * input_mat->cols; // Set our CTA and Grid dimensions dim3 dimBlock(TILE_SIZE, TILE_SIZE); dim3 dimGrid((int)ceil((float)new_cols / (float)TILE_SIZE), (int)ceil((float)new_rows / (float)TILE_SIZE)); actPaddingMedianFilter <<<dimGrid, dimBlock>>> (input_mat->d_elements, output_mat->d_elements, input_mat->rows, input_mat->cols); // size_t shmem_size = (16+2) * (16+2) * blocks * blocks * sizeof(u_char); // cudaFuncSetCacheConfig(Optimized_Kernel_Function_shared, cudaFuncCachePreferShared); // Optimized_Kernel_Function_shared <<<NUM_BLOCKS, NUM_THREADS, shmem_size>>> (input_mat->d_elements, output_mat->d_elements, // input_mat->rows, input_mat->cols); // medianFilterSharedKernel <<<NUM_BLOCKS, NUM_THREADS>>> (input_mat->d_elements, output_mat->d_elements, // input_mat->rows, input_mat->cols); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // copy data về host memory output_mat->copyCudaMemoryD2H(); // save output image output_mat->saveImage("Filted_Image_v3"); delete input_mat, output_mat; return 0; }
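Editorial note (not part of the original source; the padding itself happens inside actPaddingMedianFilter, which is not shown here): the launch geometry above is sized for the padded image rather than the original one,

\[
\text{new\_rows} = \text{rows} + 2\lfloor K/2 \rfloor,\qquad
\text{new\_cols} = \text{cols} + 2\lfloor K/2 \rfloor,\qquad
\text{dimGrid} = \Big(\big\lceil \tfrac{\text{new\_cols}}{\text{TILE\_SIZE}} \big\rceil,\ \big\lceil \tfrac{\text{new\_rows}}{\text{TILE\_SIZE}} \big\rceil\Big),
\]

with K = KERNEL_SIZE, so each TILE_SIZE x TILE_SIZE thread block covers one tile of the padded matrix.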
725569df8b0f528c33beb174dceb8bc639cf8106.hip
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=64 --gridDim=1 --no-inline
#include "hip/hip_runtime.h"

__global__ void foo(int* glob) {
  int a;
  int* p;
  a = 0;
  p = &a;
  *p = threadIdx.x;
  glob[*p] = threadIdx.x;
}
725569df8b0f528c33beb174dceb8bc639cf8106.cu
//pass
//--blockDim=64 --gridDim=1 --no-inline
#include "cuda.h"

__global__ void foo(int* glob) {
  int a;
  int* p;
  a = 0;
  p = &a;
  *p = threadIdx.x;
  glob[*p] = threadIdx.x;
}
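Editorial note (not part of the original source): this pair is a tiny verifier test case (the //pass and --blockDim=64 --gridDim=1 --no-inline pragmas suggest a GPUVerify-style harness), so it ships with no host code. The sketch below is a hypothetical driver, added only to illustrate that each thread writes glob[threadIdx.x] through its private pointer, so a single 64-thread block is race-free.

#include <cstdio>
#include <cuda_runtime.h>

// Kernel repeated from the test case above so this sketch compiles standalone.
__global__ void foo(int* glob) {
  int a = 0;
  int* p = &a;             // p aliases the thread-local variable a
  *p = threadIdx.x;        // a now holds this thread's index
  glob[*p] = threadIdx.x;  // each thread writes a distinct element
}

int main() {
  const int n = 64;  // mirrors --blockDim=64 --gridDim=1
  int* d_glob = nullptr;
  cudaMalloc((void**)&d_glob, n * sizeof(int));
  foo<<<1, n>>>(d_glob);
  int h_glob[n];
  cudaMemcpy(h_glob, d_glob, n * sizeof(int), cudaMemcpyDeviceToHost);
  printf("glob[5] = %d\n", h_glob[5]);  // expected: 5
  cudaFree(d_glob);
  return 0;
}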
99d39e1a34eb5842af33d96679faef56f1f659ff.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zmgesellcmmv.cu normal z -> c, Tue Sep 2 12:38:33 2014 */ #include "hip/hip_runtime.h" #include <stdio.h> #include "common_magma.h" #include <rocblas.h> #define PRECISION_c #define TEXTURE // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_1_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // T threads assigned to each row int idx = threadIdx.x; // local row int idy = threadIdx.y; // vector int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idx; // global row index if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int max_ = (d_rowptr[ bdx+1 ]-offset)/blocksize; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + idx + blocksize*k ]; int col = d_colind[ offset + idx + blocksize*k ] ; dot += val * d_x[ col*num_vecs+idy ]; } d_y[ row+idy*num_rows ] = dot*alpha + beta*d_y [ row+idy*num_rows ]; } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_4_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ] ; dot += val * d_x[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row+vec]; } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_8_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, const magmaFloatComplex* __restrict__ d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ] ; dot += val * d_x[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row+vec]; } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_16_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if( idx < 8 ){ shared[ldz]+=shared[ldz+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row+vec]; } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_32_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if( idx < 16 ){ shared[ldz]+=shared[ldz+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row+vec]; } } } } /************************* same but using texture mem *************************/ // SELLP SpMV kernel 2D grid - for large number of vectors // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_1_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaFloatComplex beta, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) int idx = threadIdx.x; // local row int idy = threadIdx.y; // vector int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idx; // global row index if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int max_ = (d_rowptr[ bdx+1 ]-offset)/blocksize; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + idx + blocksize*k ]; int col = num_vecs * d_colind[ offset + idx + blocksize*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idy ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } d_y[row+num_rows*idy*2] = dot1*alpha + beta*d_y [row*num_vecs+idy*2]; d_y[row+num_rows*idy*2+num_rows] = dot2*alpha + beta*d_y [row*num_vecs+idy*2+1]; } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_4_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaFloatComplex beta, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2]; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2+1]; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_8_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaFloatComplex beta, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; __syncthreads(); if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2]; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2+1]; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_16_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaFloatComplex beta, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 8 ){ shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; __syncthreads(); if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2]; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2+1]; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_32_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaFloatComplex beta, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 16 ){ shared[ldz]+=shared[ldz+blocksize*16]; shared[ldz+sv]+=shared[ldz+sv+blocksize*16]; __syncthreads(); if( idx < 8 ){ shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; } if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2]; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2+1]; } } } #endif } //***************** routines for beta = 0 ************************************// // SELLP SpMV kernel 2D grid - for large number of vectors // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_1_3D_texb( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) int idx = threadIdx.x; // local row int idy = threadIdx.y; // vector int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idx; // global row index if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + idx + blocksize*k ]; int col = num_vecs * d_colind[ offset + idx + blocksize*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idy ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } d_y[row+num_rows*idy*2] = dot1*alpha; d_y[row+num_rows*idy*2+num_rows] = dot2*alpha; } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_4_3D_texb( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_8_3D_texb( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; __syncthreads(); if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_16_3D_texb( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 8 ){ shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; __syncthreads(); if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_32_3D_texb( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 16 ){ shared[ldz]+=shared[ldz+blocksize*16]; shared[ldz+sv]+=shared[ldz+sv+blocksize*16]; __syncthreads(); if( idx < 8 ){ shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; } if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } } } #endif } //*************************** end kernels using texture ********************// /** Purpose ------- This routine computes Y = alpha * A^t * X + beta * Y on the GPU. Input format is SELLP. Note, that the input format for X is row-major while the output format for Y is column major! Arguments --------- @param transA magma_trans_t transpose A? 
@param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param num_vecs magma_int_t number of columns in X and Y @param blocksize magma_int_t number of rows in one ELL-slice @param slices magma_int_t number of slices in matrix @param alignment magma_int_t number of threads assigned to one row @param alpha magmaFloatComplex scalar multiplier @param d_val magmaFloatComplex* array containing values of A in SELLP @param d_colind magma_int_t* columnindices of A in SELLP @param d_rowptr magma_int_t* rowpointer of SELLP @param d_x magmaFloatComplex* input vector x @param beta magmaFloatComplex scalar multiplier @param d_y magmaFloatComplex* input/output vector y @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cmgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y ){ // using a 3D thread grid for small num_vecs, a 2D grid otherwise int texture=0, kepler=0, precision=0; magma_int_t arch = magma_getdevice_arch(); if ( arch > 300 ) kepler = 1; #if defined(PRECISION_d) precision = 1; #endif #if defined(TEXTURE) texture = 1; #endif if( (texture==1) && (precision==1) && (kepler==1) ){ // Create channel. hipChannelFormatDesc channel_desc; channel_desc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindSigned); // Create resource descriptor. struct hipResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = hipResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)d_x; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(float); // Specify texture object parameters. struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.filterMode = hipFilterModePoint; texDesc.readMode = hipReadModeElementType; // Create texture object. 
hipTextureObject_t texdx = 0; hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); if( num_vecs%2 ==1 ){ // only multiple of 2 can be processed printf("error: number of vectors has to be multiple of 2.\n"); exit(-1); } if( num_vecs > 8 ) // avoid running into memory problems alignment = 1; int num_threads = (num_vecs/2) * blocksize*alignment; // every thread handles two vectors if ( num_threads > 1024 ) printf("error: too many threads requested.\n"); dim3 block( blocksize, alignment, num_vecs/2 ); int dimgrid1 = sqrt(slices); int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_vecs * blocksize*alignment * sizeof( magmaFloatComplex ); if( alignment == 1){ dim3 block( blocksize, num_vecs/2, 1 ); if( beta == MAGMA_C_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_texb), dim3(grid), dim3(block), 0, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, d_y ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_tex), dim3(grid), dim3(block), 0, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); } else if( alignment == 4){ dim3 block( blocksize, alignment, num_vecs/2 ); if( beta == MAGMA_C_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_texb), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, d_y ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); } else if( alignment == 8){ dim3 block( blocksize, alignment, num_vecs/2 ); if( beta == MAGMA_C_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_texb), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, d_y ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); } else if( alignment == 16){ dim3 block( blocksize, alignment, num_vecs/2 ); if( beta == MAGMA_C_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_texb), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, d_y ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); } else if( alignment == 32){ dim3 block( blocksize, alignment, num_vecs/2 ); if( beta == MAGMA_C_MAKE( 0.0, 0.0 ) ) hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_texb), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, d_y ); else hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); } else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } }else{ if( num_vecs%2 ==1 ){ // only multiple of 2 can be processed printf("error: number of vectors has to be multiple of 2.\n"); exit(-1); } if( num_vecs > 8 ) // avoid running into memory problems alignment = 1; int num_threads = num_vecs * blocksize*alignment; // every 
thread handles two vectors if ( num_threads > 1024 ) printf("error: too many threads requested.\n"); int dimgrid1 = sqrt(slices); int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_threads * sizeof( magmaFloatComplex ); if( alignment == 1){ dim3 block( blocksize, num_vecs, 1 ); hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D), dim3(grid), dim3(block), 0, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); } else if( alignment == 4){ dim3 block( blocksize, alignment, num_vecs ); hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); } else if( alignment == 8){ dim3 block( blocksize, alignment, num_vecs ); hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); } else if( alignment == 16){ dim3 block( blocksize, alignment, num_vecs ); hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); } else if( alignment == 32){ dim3 block( blocksize, alignment, num_vecs ); hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D), dim3(grid), dim3(block), Ms, magma_stream , m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); } else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } } return MAGMA_SUCCESS; }
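Editorial note (not part of the original source): all of the SELL-P kernels above share the same slice-local addressing, summarized here from zmgesellptmv_kernel_1_3D. For slice b and local row i (global row r = b*blocksize + i), and for each right-hand side,

\[
y_{r} \;\leftarrow\; \alpha \sum_{k=0}^{m_b - 1} \mathrm{d\_val}\big[o_b + i + k\,\mathrm{blocksize}\big]\; x_{\,\mathrm{d\_colind}[o_b + i + k\,\mathrm{blocksize}]} \;+\; \beta\, y_{r},
\qquad
o_b = \mathrm{d\_rowptr}[b],\quad
m_b = \frac{\mathrm{d\_rowptr}[b+1] - o_b}{\mathrm{blocksize}},
\]

with X read row-major and Y written column-major, as the doc comment of magma_cmgesellpmv states. The _4/_8/_16/_32 variants split this k-loop over T = alignment threads per row and reduce the partial sums through shared memory, and the _tex variants additionally fetch two right-hand sides per int4 texture read.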
99d39e1a34eb5842af33d96679faef56f1f659ff.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zmgesellcmmv.cu normal z -> c, Tue Sep 2 12:38:33 2014 */ #include "cuda_runtime.h" #include <stdio.h> #include "common_magma.h" #include <cublas_v2.h> #define PRECISION_c #define TEXTURE // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_1_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // T threads assigned to each row int idx = threadIdx.x; // local row int idy = threadIdx.y; // vector int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idx; // global row index if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int max_ = (d_rowptr[ bdx+1 ]-offset)/blocksize; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + idx + blocksize*k ]; int col = d_colind[ offset + idx + blocksize*k ] ; dot += val * d_x[ col*num_vecs+idy ]; } d_y[ row+idy*num_rows ] = dot*alpha + beta*d_y [ row+idy*num_rows ]; } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_4_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ] ; dot += val * d_x[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row+vec]; } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_8_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, const magmaFloatComplex* __restrict__ d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ] ; dot += val * d_x[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row+vec]; } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_16_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if( idx < 8 ){ shared[ldz]+=shared[ldz+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row+vec]; } } } } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_32_3D( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int vec = idz*num_rows; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col+vec ]; } shared[ldz] = dot; __syncthreads(); if( idx < 16 ){ shared[ldz]+=shared[ldz+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row+vec]; } } } } /************************* same but using texture mem *************************/ // SELLP SpMV kernel 2D grid - for large number of vectors // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_1_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaFloatComplex beta, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) int idx = threadIdx.x; // local row int idy = threadIdx.y; // vector int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idx; // global row index if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int max_ = (d_rowptr[ bdx+1 ]-offset)/blocksize; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + idx + blocksize*k ]; int col = num_vecs * d_colind[ offset + idx + blocksize*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idy ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } d_y[row+num_rows*idy*2] = dot1*alpha + beta*d_y [row*num_vecs+idy*2]; d_y[row+num_rows*idy*2+num_rows] = dot2*alpha + beta*d_y [row*num_vecs+idy*2+1]; } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_4_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaFloatComplex beta, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2]; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2+1]; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_8_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaFloatComplex beta, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; __syncthreads(); if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2]; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2+1]; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_16_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaFloatComplex beta, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 8 ){ shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; __syncthreads(); if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2]; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2+1]; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_32_3D_tex( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaFloatComplex beta, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 16 ){ shared[ldz]+=shared[ldz+blocksize*16]; shared[ldz+sv]+=shared[ldz+sv+blocksize*16]; __syncthreads(); if( idx < 8 ){ shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; } if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2]; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha + beta*d_y [row*num_vecs+idz*2+1]; } } } #endif } //***************** routines for beta = 0 ************************************// // SELLP SpMV kernel 2D grid - for large number of vectors // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_1_3D_texb( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) int idx = threadIdx.x; // local row int idy = threadIdx.y; // vector int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idx; // global row index if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + idx + blocksize*k ]; int col = num_vecs * d_colind[ offset + idx + blocksize*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idy ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } d_y[row+num_rows*idy*2] = dot1*alpha; d_y[row+num_rows*idy*2+num_rows] = dot2*alpha; } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_4_3D_texb( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_8_3D_texb( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; __syncthreads(); if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_16_3D_texb( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 8 ){ shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; __syncthreads(); if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } } } #endif } // SELLP SpMV kernel 3D grid // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zmgesellptmv_kernel_32_3D_texb( int num_rows, int num_cols, int num_vecs, int blocksize, int T, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, magmaFloatComplex *d_y) { #if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int idz = threadIdx.z; // vector int ldx = idx * blocksize + idy; int ldz = idz * blocksize * T + idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index int sv = num_vecs/2 * blocksize * T; extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = num_vecs * d_colind[ offset + ldx + block*k ] ; int4 v = tex1Dfetch<int4>(texdx, col/2 + idz ); dot1 += val * __hiloint2float(v.y, v.x); dot2 += val * __hiloint2float(v.w, v.z); } shared[ldz] = dot1; shared[ldz+sv] = dot2; __syncthreads(); if( idx < 16 ){ shared[ldz]+=shared[ldz+blocksize*16]; shared[ldz+sv]+=shared[ldz+sv+blocksize*16]; __syncthreads(); if( idx < 8 ){ shared[ldz]+=shared[ldz+blocksize*8]; shared[ldz+sv]+=shared[ldz+sv+blocksize*8]; } if( idx < 4 ){ shared[ldz]+=shared[ldz+blocksize*4]; shared[ldz+sv]+=shared[ldz+sv+blocksize*4]; } if( idx < 2 ){ shared[ldz]+=shared[ldz+blocksize*2]; shared[ldz+sv]+=shared[ldz+sv+blocksize*2]; } __syncthreads(); if( idx == 0 ) { d_y[row+num_rows*idz*2] = (shared[ldz]+shared[ldz+blocksize*1])*alpha; d_y[row+num_rows*idz*2+num_rows] = (shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha; } } } #endif } //*************************** end kernels using texture ********************// /** Purpose ------- This routine computes Y = alpha * A^t * X + beta * Y on the GPU. Input format is SELLP. Note, that the input format for X is row-major while the output format for Y is column major! Arguments --------- @param transA magma_trans_t transpose A? 
@param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param num_vecs magma_int_t number of columns in X and Y @param blocksize magma_int_t number of rows in one ELL-slice @param slices magma_int_t number of slices in matrix @param alignment magma_int_t number of threads assigned to one row @param alpha magmaFloatComplex scalar multiplier @param d_val magmaFloatComplex* array containing values of A in SELLP @param d_colind magma_int_t* columnindices of A in SELLP @param d_rowptr magma_int_t* rowpointer of SELLP @param d_x magmaFloatComplex* input vector x @param beta magmaFloatComplex scalar multiplier @param d_y magmaFloatComplex* input/output vector y @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cmgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y ){ // using a 3D thread grid for small num_vecs, a 2D grid otherwise int texture=0, kepler=0, precision=0; magma_int_t arch = magma_getdevice_arch(); if ( arch > 300 ) kepler = 1; #if defined(PRECISION_d) precision = 1; #endif #if defined(TEXTURE) texture = 1; #endif if( (texture==1) && (precision==1) && (kepler==1) ){ // Create channel. cudaChannelFormatDesc channel_desc; channel_desc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindSigned); // Create resource descriptor. struct cudaResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = cudaResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)d_x; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(float); // Specify texture object parameters. struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; // Create texture object. 
cudaTextureObject_t texdx = 0; cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); if( num_vecs%2 ==1 ){ // only multiple of 2 can be processed printf("error: number of vectors has to be multiple of 2.\n"); exit(-1); } if( num_vecs > 8 ) // avoid running into memory problems alignment = 1; int num_threads = (num_vecs/2) * blocksize*alignment; // every thread handles two vectors if ( num_threads > 1024 ) printf("error: too many threads requested.\n"); dim3 block( blocksize, alignment, num_vecs/2 ); int dimgrid1 = sqrt(slices); int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_vecs * blocksize*alignment * sizeof( magmaFloatComplex ); if( alignment == 1){ dim3 block( blocksize, num_vecs/2, 1 ); if( beta == MAGMA_C_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_1_3D_texb<<< grid, block, 0, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, d_y ); else zmgesellptmv_kernel_1_3D_tex<<< grid, block, 0, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); } else if( alignment == 4){ dim3 block( blocksize, alignment, num_vecs/2 ); if( beta == MAGMA_C_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_4_3D_texb<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, d_y ); else zmgesellptmv_kernel_4_3D_tex<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); } else if( alignment == 8){ dim3 block( blocksize, alignment, num_vecs/2 ); if( beta == MAGMA_C_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_8_3D_texb<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, d_y ); else zmgesellptmv_kernel_8_3D_tex<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); } else if( alignment == 16){ dim3 block( blocksize, alignment, num_vecs/2 ); if( beta == MAGMA_C_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_16_3D_texb<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, d_y ); else zmgesellptmv_kernel_16_3D_tex<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); } else if( alignment == 32){ dim3 block( blocksize, alignment, num_vecs/2 ); if( beta == MAGMA_C_MAKE( 0.0, 0.0 ) ) zmgesellptmv_kernel_32_3D_texb<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, d_y ); else zmgesellptmv_kernel_32_3D_tex<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); } else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } }else{ if( num_vecs%2 ==1 ){ // only multiple of 2 can be processed printf("error: number of vectors has to be multiple of 2.\n"); exit(-1); } if( num_vecs > 8 ) // avoid running into memory problems alignment = 1; int num_threads = num_vecs * blocksize*alignment; // every thread handles two vectors if ( num_threads > 1024 ) printf("error: too many threads requested.\n"); int dimgrid1 = sqrt(slices); int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_threads * sizeof( magmaFloatComplex ); if( 
alignment == 1){ dim3 block( blocksize, num_vecs, 1 ); zmgesellptmv_kernel_1_3D<<< grid, block, 0, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); } else if( alignment == 4){ dim3 block( blocksize, alignment, num_vecs ); zmgesellptmv_kernel_4_3D<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); } else if( alignment == 8){ dim3 block( blocksize, alignment, num_vecs ); zmgesellptmv_kernel_8_3D<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); } else if( alignment == 16){ dim3 block( blocksize, alignment, num_vecs ); zmgesellptmv_kernel_16_3D<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); } else if( alignment == 32){ dim3 block( blocksize, alignment, num_vecs ); zmgesellptmv_kernel_32_3D<<< grid, block, Ms, magma_stream >>> ( m, n, num_vecs, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); } else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } } return MAGMA_SUCCESS; }
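The zmgesellptmv_kernel_{4,8,16,32}_3D kernels above all finish with the same shared-memory fold: T partial sums per row, halved log2(T)-1 times and then combined by thread 0. A minimal standalone CUDA sketch of just that pattern follows (illustrative only, not MAGMA code; the kernel name, the layout of the partials input, and the launch configuration are assumptions). It assumes T is a power of two >= 2 and that the grid exactly covers num_rows, so no boundary guard is needed around __syncthreads(). Launch with block = dim3(blocksize, T), grid = dim3(num_rows / blocksize), shared memory = blocksize * T * sizeof(float).

// Sketch of the per-row shared-memory reduction used by the SELL-P kernels above.
__global__ void rowsum_reduction_sketch(int blocksize, int T,
                                        const float *partials,  // [num_rows][T], hypothetical layout
                                        float *out)             // [num_rows]
{
    int idx = threadIdx.y;                    // thread within the row, 0..T-1
    int idy = threadIdx.x;                    // local row within the slice
    int row = blockIdx.x * blocksize + idy;   // global row index
    int ldz = idx * blocksize + idy;          // same shared[] layout as the kernels above
    extern __shared__ float shared[];

    shared[ldz] = partials[row * T + idx];    // each thread contributes one partial sum
    __syncthreads();

    // Mirror of the idx<16 / idx<8 / idx<4 / idx<2 cascade in the 32-thread kernel,
    // with a barrier after every step for clarity.
    for (int stride = T / 2; stride > 1; stride /= 2) {
        if (idx < stride)
            shared[ldz] += shared[ldz + blocksize * stride];
        __syncthreads();
    }
    if (idx == 0)
        out[row] = shared[ldz] + shared[ldz + blocksize * 1];
}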
b680f8bc7b2586258bf3568641ae9d3805d34645.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduction.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *g_data = NULL;
            hipMalloc(&g_data, XSIZE*YSIZE);
            int n = XSIZE*YSIZE;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( reduction), dim3(gridBlock), dim3(threadBlock), 0, 0, g_data, n);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( reduction), dim3(gridBlock), dim3(threadBlock), 0, 0, g_data, n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( reduction), dim3(gridBlock), dim3(threadBlock), 0, 0, g_data, n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
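One caveat about the timing loop above: the second steady_clock::now() is taken without a device synchronization, so the reported figure mostly reflects the host-side enqueue cost of the asynchronous hipLaunchKernelGGL calls rather than kernel execution time. A hypothetical helper (not part of the generated file) that times the same launches with HIP events is sketched below; it reuses the reduction kernel from reduction.cu and could be called with the gridBlock/threadBlock/g_data/n values computed in the loop.

#include <hip/hip_runtime.h>
// Hypothetical helper: returns elapsed device time in milliseconds for `iters`
// launches of reduction(), measured with HIP events instead of steady_clock.
static float time_reduction_launches(dim3 grid, dim3 block, float *g_data, int n, int iters)
{
    hipEvent_t ev_start, ev_stop;
    hipEventCreate(&ev_start);
    hipEventCreate(&ev_stop);
    hipEventRecord(ev_start, 0);
    for (int i = 0; i < iters; ++i) {
        hipLaunchKernelGGL(reduction, grid, block, 0, 0, g_data, n);
    }
    hipEventRecord(ev_stop, 0);
    hipEventSynchronize(ev_stop);            // wait until all launches have finished
    float ms = 0.0f;
    hipEventElapsedTime(&ms, ev_start, ev_stop);
    hipEventDestroy(ev_start);
    hipEventDestroy(ev_stop);
    return ms;                               // milliseconds for all `iters` launches
}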
b680f8bc7b2586258bf3568641ae9d3805d34645.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduction.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *g_data = NULL;
            cudaMalloc(&g_data, XSIZE*YSIZE);
            int n = XSIZE*YSIZE;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            reduction<<<gridBlock,threadBlock>>>(g_data, n);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                reduction<<<gridBlock,threadBlock>>>(g_data, n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                reduction<<<gridBlock,threadBlock>>>(g_data, n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
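Comparing this .cu original with the hipify-generated .hip version earlier in the pair, the translation reduces to a handful of mechanical substitutions, summarized here for reference (taken directly from the two listings above):

//   CUDA original                              hipify output
//   #include <cuda.h>                          #include <hip/hip_runtime.h>
//   #include <curand_kernel.h>                 #include <hiprand/hiprand_kernel.h>
//   cudaSetDevice(0);                          hipSetDevice(0);
//   cudaMalloc(&g_data, XSIZE*YSIZE);          hipMalloc(&g_data, XSIZE*YSIZE);
//   cudaFree(0);                               hipFree(0);
//   reduction<<<gridBlock,threadBlock>>>(      hipLaunchKernelGGL(( reduction), dim3(gridBlock),
//       g_data, n);                                dim3(threadBlock), 0, 0, g_data, n);
//   cudaDeviceSynchronize();                   hipDeviceSynchronize();
//   (plus the "automatically generated by hipify" header comment added at the top)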
4c9f58dd7663922704ed74efb2c4eef92abd3f2d.hip
// !!! This is a file automatically generated by hipify!!! // #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/Resize.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Resize.h> #include <ATen/native/hip/Normalization.cuh> #include <c10/hip/HIPMathCompat.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/batch_norm_backward_elemt_native.h> #include <ATen/ops/batch_norm_backward_reduce_native.h> #include <ATen/ops/batch_norm_elemt_native.h> #include <ATen/ops/batch_norm_gather_stats_native.h> #include <ATen/ops/batch_norm_gather_stats_with_counts_native.h> #include <ATen/ops/batch_norm_stats_native.h> #include <ATen/ops/batch_norm_update_stats_native.h> #include <ATen/ops/empty_like.h> #include <ATen/ops/native_batch_norm_backward_native.h> #include <ATen/ops/native_batch_norm_native.h> #include <ATen/ops/scalar_tensor.h> #endif // TODO: Doesn't exist in this branch #if 0 #include <ATen/ops/from_blob.h> #else #include <ATen/Functions.h> #endif namespace at { namespace native { namespace { ScalarType first_type() { return ScalarType::Undefined; } template <typename... Args> ScalarType first_type(const Tensor& arg, const Args&... parameters) { return arg.defined() ? arg.scalar_type() : first_type(parameters...); } // A transform is mixed type if the parameters are higher precision than the input template <typename... Args> bool is_mixed_type(const Tensor& input, const Args&... parameters) { const auto parameter_type = first_type(parameters...); return ((parameter_type != ScalarType::Undefined) && (parameter_type != input.scalar_type())); } inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) { return (self.is_contiguous(at::MemoryFormat::ChannelsLast) || (self.is_contiguous() && self.strides()[1] == 1)); } enum class Impl { Contiguous, ChannelsLast, General, }; inline Impl batch_norm_choose_impl(const Tensor& self) { if (!at::cuda::detail::canUse32BitIndexMath(self)) { return Impl::General; } if (self.is_contiguous()) { return self.strides()[1] == 1 ? Impl::ChannelsLast : Impl::Contiguous; } if (self.is_contiguous(at::MemoryFormat::ChannelsLast)) { return Impl::ChannelsLast; } return Impl::General; } inline Impl batch_norm_choose_impl(const Tensor& in1, const Tensor& in2) { auto imp1 = batch_norm_choose_impl(in1); if (imp1 == Impl::General) { return imp1; } auto imp2 = batch_norm_choose_impl(in2); return imp1 == imp2 ? 
imp1 : Impl::General; } void batch_norm_elementwise( const Tensor& out, const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean_, const Tensor& invstd_) { switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(bias_opt); resize_output(out, self.sizes()); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const bool mixed_type = is_mixed_type(self, *weight, *bias); if (mixed_type) { batch_norm_elemt_cuda_template<scalar_t, accscalar_t, int32_t>( out, self, *weight, *bias, mean_, invstd_); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>( out, self, *weight, *bias, mean_, invstd_); } }); return; } case Impl::ChannelsLast: { auto weight = at::borrow_from_optional_tensor(weight_opt); auto bias = at::borrow_from_optional_tensor(bias_opt); if (resize_output_check(out, self.sizes())) { resize_impl_cuda_(out.unsafeGetTensorImpl(), self.sizes(), self.strides()); } if ((out.strides() == self.strides()) && (!weight->defined() || weight->is_contiguous()) && (!bias->defined() || bias->is_contiguous()) && (!mean_.defined() || mean_.is_contiguous()) && (!invstd_.defined() || invstd_.is_contiguous())) { batch_norm_elemt_channels_last_cuda_template( out, self, *weight, *bias, mean_, invstd_); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector sizes(ndim, 1), strides(ndim, 0); // Helper to convert 1d tensors to an nd tensor that broadcasts with input // All elements go into the channel dimension auto as_nd = [&](const Tensor& t) { TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); sizes[1] = t.sizes()[0]; strides[1] = t.strides()[0]; return t.as_strided(sizes, strides); }; auto weight = weight_opt.has_value() && weight_opt->defined() ? as_nd(*weight_opt) : at::scalar_tensor(1, mean_.options()); auto bias = bias_opt.has_value() && bias_opt->defined() ? 
as_nd(*bias_opt) : at::scalar_tensor(0, mean_.options()); auto mean = as_nd(mean_); auto invstd = as_nd(invstd_); auto iter = TensorIteratorConfig() .add_output(out) .add_input(self) .add_input(weight) .add_input(bias) .add_input(mean) .add_input(invstd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input, acc_t weight, acc_t bias, acc_t mean, acc_t invstd) -> scalar_t { return ((input - mean) * invstd) * weight + bias; }); }); return; } } } Tensor batch_norm_elementwise_backward_train( const Tensor& grad_out, const Tensor& input, const Tensor& mean, const Tensor& invstd, const Tensor& weight, const Tensor& sum_dy, const Tensor& sum_dy_xmu) { switch (batch_norm_choose_impl(input, grad_out)) { case Impl::Contiguous: { return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batch_norm_backward_elemt", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const bool mixed_type = is_mixed_type(input, weight); if (mixed_type) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } }); } case Impl::ChannelsLast: { if ((!weight.defined() || weight.is_contiguous()) && mean.is_contiguous() && invstd.is_contiguous()) { return batch_norm_backward_elemt_channels_last_cuda_template( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } C10_FALLTHROUGH; } case Impl::General: { const auto ndim = input.dim(); DimVector sizes(ndim, 1), strides(ndim, 0); auto as_nd = [&](const Tensor& t) { TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); sizes[1] = t.sizes()[0]; strides[1] = t.strides()[0]; return t.as_strided(sizes, strides); }; auto invstd_nd = as_nd(invstd); auto mean_nd = as_nd(mean); auto sum_dy_nd = as_nd(sum_dy); auto sum_dy_xmu_nd = as_nd(sum_dy_xmu); auto weight_nd = weight.defined() ? 
as_nd(weight) : at::scalar_tensor(1.0, input.options().dtype(mean.scalar_type())); Tensor grad_input = at::empty(input.sizes(), grad_out.options()); auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(input) .add_input(weight_nd) .add_input(mean_nd) .add_input(invstd_nd) .add_input(sum_dy_xmu_nd) .add_input(sum_dy_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; auto norm_fct = static_cast<accscalar_t>(1.0 / (input.numel() /input.size(1)) ); gpu_kernel(iter, [norm_fct] GPU_LAMBDA (scalar_t gO, scalar_t input, accscalar_t weight, accscalar_t mean, accscalar_t invstd, accscalar_t xmu, accscalar_t dy) -> scalar_t { auto factor_1_c = invstd * invstd * xmu * norm_fct; auto factor_2_c = weight * invstd; auto m_dy_c = dy * norm_fct; return (gO - m_dy_c - (input - mean) * factor_1_c) * factor_2_c; }); }); return grad_input; } } TORCH_INTERNAL_ASSERT(false); } Tensor batch_norm_elementwise_backward_eval( const Tensor& grad_out, const Tensor& input, const Tensor& invstd, const Tensor& weight) { const auto ndim = input.dim(); DimVector shape(ndim, 1), strides(ndim, 0); shape[1] = invstd.sizes()[0]; strides[1] = invstd.strides()[0]; auto invstd_nd = invstd.as_strided(shape, strides); Tensor grad_input = at::empty(input.sizes(), grad_out.options()); if (weight.defined()) { strides[1] = weight.strides()[0]; auto weight_nd = weight.as_strided(shape, strides); auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(invstd_nd) .add_input(weight_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd, accscalar_t weight) -> scalar_t { return gO * weight * invstd; }); }); } else { auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(invstd_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd) -> scalar_t { return gO * invstd; }); }); } return grad_input; } void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) { // NOTE: Epsilon is only used for InvStd, not Var. The value here is ignored. 
const double dummy_epsilon = 1e-5; switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_cuda_template<scalar_t, int32_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } case Impl::ChannelsLast: { if ((!save_mean.defined() || save_mean.is_contiguous()) && (!save_var.defined() || save_var.is_contiguous())) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_channels_last_cuda_template<scalar_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector reduce_dims(ndim - 1); reduce_dims[0] = 0; for (int64_t i = 2; i < ndim; ++i) { reduce_dims[i - 1] = i; } // For some reason this isn't an actual operator but it exists anyway... at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims, /*unbiased=*/false, /*keepdim=*/false); return; } } } void batch_norm_update_stats( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, int64_t N) { auto iter = TensorIteratorConfig() .add_output(running_mean) .add_output(running_var) .add_input(save_mean) .add_input(save_var) .add_input(running_mean) .add_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, }; }); }); } void batch_norm_update_stats_and_invert( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, double epsilon, int64_t N) { auto iter = TensorIteratorConfig() .add_output(running_mean) .add_output(running_var) .add_output(save_var) .add_input(save_mean) .add_input(save_var) .add_input(running_mean) .add_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto eps = static_cast<acc_t>(epsilon); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t, acc_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t, acc_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, c10::hip::compat::rsqrt(var + eps) }; }); }); } void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, double epsilon) { 
auto iter = TensorIteratorConfig() .add_output(out_invstd) .add_input(running_var) .check_all_same_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_var.scalar_type(), "batch_norm_invert_std_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; auto eps = static_cast<acc_t>(epsilon); gpu_kernel(iter, [eps] GPU_LAMBDA (scalar_t var) -> acc_t { return c10::hip::compat::rsqrt(var + eps); }); }); } } std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined()); const bool has_running_var = (running_var_opt.has_value() && running_var_opt->defined()); TORCH_CHECK(has_running_mean == has_running_var); if (train) { batch_norm_mean_var(self, save_mean, save_invstd); if (has_running_mean) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats_and_invert( save_mean, save_invstd, *running_mean_opt, *running_var_opt, momentum, epsilon, N); } else { batch_norm_calc_invstd(save_invstd, save_invstd, epsilon); } } else { TORCH_CHECK(has_running_mean); at::native::resize_output(save_mean, running_mean_opt->sizes()); save_mean.copy_(*running_mean_opt, /*non_blocking=*/true); batch_norm_calc_invstd(save_invstd, running_var_opt.value(), epsilon); } batch_norm_elementwise(output, self, weight_opt, bias_opt, save_mean, save_invstd); return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) { auto output = at::empty_like(self); int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_invstd = at::empty({n_input}, options); at::native::batch_norm_cuda_out( self, weight_opt, bias_opt, running_mean_opt, running_var_opt, train, momentum, epsilon, output, save_mean, save_invstd); return std::make_tuple(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> save_mean = at::borrow_from_optional_tensor(save_mean_opt); c10::MaybeOwned<Tensor> save_invstd = at::borrow_from_optional_tensor(save_invstd_opt); c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt); c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt); const bool needs_reduction = train || grad_input_mask[1] || grad_input_mask[2]; // Fused reducion & elementwise kernel if (needs_reduction && 
grad_input_mask[0] && !batch_norm_use_channels_last_kernels(input) && cuda::detail::canUse32BitIndexMath(input) && cuda::detail::canUse32BitIndexMath(grad_out)) { return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batch_norm_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const bool mixed_type = is_mixed_type(input, *weight, *running_mean, *running_var); if (mixed_type) { return batch_norm_backward_cuda_template<scalar_t, accscalar_t, int32_t>( grad_out, input, *weight, *running_mean, *running_var, *save_mean, *save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>( grad_out, input, *weight, *running_mean, *running_var, *save_mean, *save_invstd, train, epsilon, grad_input_mask); } }); } // NOTE: native_batch_norm always returns save_mean and save_invstd to be reused in backward. // However, this is also called from cudnn_batch_norm in eval mode which doesn't give // save_mean and save_invstd, so it needs recalculated. const auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true); Tensor mean; TORCH_INTERNAL_ASSERT(save_mean->defined(), "save_mean should always be defined\n"); if (save_mean->numel() != 0) { mean = *save_mean; } else if (needs_reduction) { TORCH_CHECK(!train && running_mean->defined()); mean = (running_mean->scalar_type() == acc_type) ? *running_mean : running_mean->to(acc_type); } Tensor invstd; TORCH_INTERNAL_ASSERT(save_invstd->defined(), "save_invstd should always be defined\n"); if (save_invstd->numel() != 0) { invstd = *save_invstd; } else { TORCH_CHECK(!train && running_var->defined()); auto n_channels = input.sizes()[1]; invstd = at::empty({n_channels}, input.options().dtype(acc_type)); batch_norm_calc_invstd(invstd, *running_var, epsilon); } Tensor sum_dy, sum_dy_xmu, grad_weight, grad_bias; if (needs_reduction) { std::tie(sum_dy, sum_dy_xmu, grad_weight, grad_bias) = batch_norm_backward_reduce_cuda( grad_out, input, mean, invstd, *weight, grad_input_mask[0], grad_input_mask[1], grad_input_mask[2]); } Tensor grad_input; if (grad_input_mask[0]) { if (train) { // NOTE: sum_dy and sum_dy_xmy are defined, as train implies needs_reduction grad_input = batch_norm_elementwise_backward_train( grad_out, input, mean, invstd, *weight, sum_dy, sum_dy_xmu); } else { grad_input = batch_norm_elementwise_backward_eval( grad_out, input, invstd, *weight); } } return std::make_tuple(grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) { auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto n_channels = self.size(1); auto save_mean = at::empty({n_channels}, options); auto save_invstd = at::empty({n_channels}, options); bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { if (cuda::detail::canUse32BitIndexMath(self)) { if (use_channels_last_kernel) { batch_norm_stats_channels_last_cuda_template<scalar_t, InvStd>( save_mean, save_invstd, self, epsilon); } else { batch_norm_stats_cuda_template<scalar_t, int32_t, InvStd>( save_mean, save_invstd, self, epsilon); } } else { batch_norm_stats_cuda_template<scalar_t, int64_t, InvStd>( save_mean, save_invstd, self, epsilon); } }); return std::tuple<Tensor, Tensor>(save_mean, save_invstd); } Tensor batch_norm_elemt_cuda( const Tensor& 
self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon) { auto output = at::empty_like(self); // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) { // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } // accepting input(self) here to determine template data types, since running_mean/running_var are optional std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); std::vector<int64_t> counts(mean.size(0), count); Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU)); counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype()); return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_); } std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda( const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); auto scalar_type = running_mean.defined() ? 
running_mean.scalar_type() : self.scalar_type(); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } else { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } }); } std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; if (at::cuda::detail::canUse32BitIndexMath(grad_output) && batch_norm_use_channels_last_kernels(grad_output) && batch_norm_use_channels_last_kernels(input) && (!weight.defined() || weight.is_contiguous()) && mean.is_contiguous() && invstd.is_contiguous()){ return batch_norm_backward_reduce_cuda_channels_last_template( grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "batch_norm_backward_reduce", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); const bool mixed_type = is_mixed_type(input, weight); using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(grad_output)) { if (mixed_type) { return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } } else { if (mixed_type) { return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } } }); } Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self) && batch_norm_use_channels_last_kernels(input)) { return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, 
at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } }); } std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda( const Tensor& self, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum) { c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt); c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt); const int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_var = at::empty({n_input}, options); batch_norm_mean_var(self, save_mean, save_var); TORCH_CHECK(running_mean->defined() == running_var->defined()); if (running_mean->defined()) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats(save_mean, save_var, *running_mean, *running_var, momentum, N); } return std::tuple<Tensor, Tensor>(save_mean, save_var); } } } // namespace at::native
4c9f58dd7663922704ed74efb2c4eef92abd3f2d.cu
// #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/Resize.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Resize.h> #include <ATen/native/cuda/Normalization.cuh> #include <c10/cuda/CUDAMathCompat.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/batch_norm_backward_elemt_native.h> #include <ATen/ops/batch_norm_backward_reduce_native.h> #include <ATen/ops/batch_norm_elemt_native.h> #include <ATen/ops/batch_norm_gather_stats_native.h> #include <ATen/ops/batch_norm_gather_stats_with_counts_native.h> #include <ATen/ops/batch_norm_stats_native.h> #include <ATen/ops/batch_norm_update_stats_native.h> #include <ATen/ops/empty_like.h> #include <ATen/ops/native_batch_norm_backward_native.h> #include <ATen/ops/native_batch_norm_native.h> #include <ATen/ops/scalar_tensor.h> #endif // TODO: Doesn't exist in this branch #if 0 #include <ATen/ops/from_blob.h> #else #include <ATen/Functions.h> #endif namespace at { namespace native { namespace { ScalarType first_type() { return ScalarType::Undefined; } template <typename... Args> ScalarType first_type(const Tensor& arg, const Args&... parameters) { return arg.defined() ? arg.scalar_type() : first_type(parameters...); } // A transform is mixed type if the parameters are higher precision than the input template <typename... Args> bool is_mixed_type(const Tensor& input, const Args&... parameters) { const auto parameter_type = first_type(parameters...); return ((parameter_type != ScalarType::Undefined) && (parameter_type != input.scalar_type())); } inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) { return (self.is_contiguous(at::MemoryFormat::ChannelsLast) || (self.is_contiguous() && self.strides()[1] == 1)); } enum class Impl { Contiguous, ChannelsLast, General, }; inline Impl batch_norm_choose_impl(const Tensor& self) { if (!at::cuda::detail::canUse32BitIndexMath(self)) { return Impl::General; } if (self.is_contiguous()) { return self.strides()[1] == 1 ? Impl::ChannelsLast : Impl::Contiguous; } if (self.is_contiguous(at::MemoryFormat::ChannelsLast)) { return Impl::ChannelsLast; } return Impl::General; } inline Impl batch_norm_choose_impl(const Tensor& in1, const Tensor& in2) { auto imp1 = batch_norm_choose_impl(in1); if (imp1 == Impl::General) { return imp1; } auto imp2 = batch_norm_choose_impl(in2); return imp1 == imp2 ? 
imp1 : Impl::General; } void batch_norm_elementwise( const Tensor& out, const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean_, const Tensor& invstd_) { switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(bias_opt); resize_output(out, self.sizes()); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const bool mixed_type = is_mixed_type(self, *weight, *bias); if (mixed_type) { batch_norm_elemt_cuda_template<scalar_t, accscalar_t, int32_t>( out, self, *weight, *bias, mean_, invstd_); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>( out, self, *weight, *bias, mean_, invstd_); } }); return; } case Impl::ChannelsLast: { auto weight = at::borrow_from_optional_tensor(weight_opt); auto bias = at::borrow_from_optional_tensor(bias_opt); if (resize_output_check(out, self.sizes())) { resize_impl_cuda_(out.unsafeGetTensorImpl(), self.sizes(), self.strides()); } if ((out.strides() == self.strides()) && (!weight->defined() || weight->is_contiguous()) && (!bias->defined() || bias->is_contiguous()) && (!mean_.defined() || mean_.is_contiguous()) && (!invstd_.defined() || invstd_.is_contiguous())) { batch_norm_elemt_channels_last_cuda_template( out, self, *weight, *bias, mean_, invstd_); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector sizes(ndim, 1), strides(ndim, 0); // Helper to convert 1d tensors to an nd tensor that broadcasts with input // All elements go into the channel dimension auto as_nd = [&](const Tensor& t) { TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); sizes[1] = t.sizes()[0]; strides[1] = t.strides()[0]; return t.as_strided(sizes, strides); }; auto weight = weight_opt.has_value() && weight_opt->defined() ? as_nd(*weight_opt) : at::scalar_tensor(1, mean_.options()); auto bias = bias_opt.has_value() && bias_opt->defined() ? 
as_nd(*bias_opt) : at::scalar_tensor(0, mean_.options()); auto mean = as_nd(mean_); auto invstd = as_nd(invstd_); auto iter = TensorIteratorConfig() .add_output(out) .add_input(self) .add_input(weight) .add_input(bias) .add_input(mean) .add_input(invstd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input, acc_t weight, acc_t bias, acc_t mean, acc_t invstd) -> scalar_t { return ((input - mean) * invstd) * weight + bias; }); }); return; } } } Tensor batch_norm_elementwise_backward_train( const Tensor& grad_out, const Tensor& input, const Tensor& mean, const Tensor& invstd, const Tensor& weight, const Tensor& sum_dy, const Tensor& sum_dy_xmu) { switch (batch_norm_choose_impl(input, grad_out)) { case Impl::Contiguous: { return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batch_norm_backward_elemt", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const bool mixed_type = is_mixed_type(input, weight); if (mixed_type) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } }); } case Impl::ChannelsLast: { if ((!weight.defined() || weight.is_contiguous()) && mean.is_contiguous() && invstd.is_contiguous()) { return batch_norm_backward_elemt_channels_last_cuda_template( grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); } C10_FALLTHROUGH; } case Impl::General: { const auto ndim = input.dim(); DimVector sizes(ndim, 1), strides(ndim, 0); auto as_nd = [&](const Tensor& t) { TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); sizes[1] = t.sizes()[0]; strides[1] = t.strides()[0]; return t.as_strided(sizes, strides); }; auto invstd_nd = as_nd(invstd); auto mean_nd = as_nd(mean); auto sum_dy_nd = as_nd(sum_dy); auto sum_dy_xmu_nd = as_nd(sum_dy_xmu); auto weight_nd = weight.defined() ? 
as_nd(weight) : at::scalar_tensor(1.0, input.options().dtype(mean.scalar_type())); Tensor grad_input = at::empty(input.sizes(), grad_out.options()); auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(input) .add_input(weight_nd) .add_input(mean_nd) .add_input(invstd_nd) .add_input(sum_dy_xmu_nd) .add_input(sum_dy_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; auto norm_fct = static_cast<accscalar_t>(1.0 / (input.numel() /input.size(1)) ); gpu_kernel(iter, [norm_fct] GPU_LAMBDA (scalar_t gO, scalar_t input, accscalar_t weight, accscalar_t mean, accscalar_t invstd, accscalar_t xmu, accscalar_t dy) -> scalar_t { auto factor_1_c = invstd * invstd * xmu * norm_fct; auto factor_2_c = weight * invstd; auto m_dy_c = dy * norm_fct; return (gO - m_dy_c - (input - mean) * factor_1_c) * factor_2_c; }); }); return grad_input; } } TORCH_INTERNAL_ASSERT(false); } Tensor batch_norm_elementwise_backward_eval( const Tensor& grad_out, const Tensor& input, const Tensor& invstd, const Tensor& weight) { const auto ndim = input.dim(); DimVector shape(ndim, 1), strides(ndim, 0); shape[1] = invstd.sizes()[0]; strides[1] = invstd.strides()[0]; auto invstd_nd = invstd.as_strided(shape, strides); Tensor grad_input = at::empty(input.sizes(), grad_out.options()); if (weight.defined()) { strides[1] = weight.strides()[0]; auto weight_nd = weight.as_strided(shape, strides); auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(invstd_nd) .add_input(weight_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd, accscalar_t weight) -> scalar_t { return gO * weight * invstd; }); }); } else { auto iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad_out) .add_input(invstd_nd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), "batch_norm_eval_backward", [&]{ using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd) -> scalar_t { return gO * invstd; }); }); } return grad_input; } void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) { // NOTE: Epsilon is only used for InvStd, not Var. The value here is ignored. 
const double dummy_epsilon = 1e-5; switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_cuda_template<scalar_t, int32_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } case Impl::ChannelsLast: { if ((!save_mean.defined() || save_mean.is_contiguous()) && (!save_var.defined() || save_var.is_contiguous())) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_channels_last_cuda_template<scalar_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector reduce_dims(ndim - 1); reduce_dims[0] = 0; for (int64_t i = 2; i < ndim; ++i) { reduce_dims[i - 1] = i; } // For some reason this isn't an actual operator but it exists anyway... at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims, /*unbiased=*/false, /*keepdim=*/false); return; } } } void batch_norm_update_stats( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, int64_t N) { auto iter = TensorIteratorConfig() .add_output(running_mean) .add_output(running_var) .add_input(save_mean) .add_input(save_var) .add_input(running_mean) .add_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, }; }); }); } void batch_norm_update_stats_and_invert( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, double epsilon, int64_t N) { auto iter = TensorIteratorConfig() .add_output(running_mean) .add_output(running_var) .add_output(save_var) .add_input(save_mean) .add_input(save_var) .add_input(running_mean) .add_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto eps = static_cast<acc_t>(epsilon); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t, acc_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t, acc_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, c10::cuda::compat::rsqrt(var + eps) }; }); }); } void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, double epsilon) { 
auto iter = TensorIteratorConfig() .add_output(out_invstd) .add_input(running_var) .check_all_same_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_var.scalar_type(), "batch_norm_invert_std_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; auto eps = static_cast<acc_t>(epsilon); gpu_kernel(iter, [eps] GPU_LAMBDA (scalar_t var) -> acc_t { return c10::cuda::compat::rsqrt(var + eps); }); }); } } std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined()); const bool has_running_var = (running_var_opt.has_value() && running_var_opt->defined()); TORCH_CHECK(has_running_mean == has_running_var); if (train) { batch_norm_mean_var(self, save_mean, save_invstd); if (has_running_mean) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats_and_invert( save_mean, save_invstd, *running_mean_opt, *running_var_opt, momentum, epsilon, N); } else { batch_norm_calc_invstd(save_invstd, save_invstd, epsilon); } } else { TORCH_CHECK(has_running_mean); at::native::resize_output(save_mean, running_mean_opt->sizes()); save_mean.copy_(*running_mean_opt, /*non_blocking=*/true); batch_norm_calc_invstd(save_invstd, running_var_opt.value(), epsilon); } batch_norm_elementwise(output, self, weight_opt, bias_opt, save_mean, save_invstd); return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) { auto output = at::empty_like(self); int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_invstd = at::empty({n_input}, options); at::native::batch_norm_cuda_out( self, weight_opt, bias_opt, running_mean_opt, running_var_opt, train, momentum, epsilon, output, save_mean, save_invstd); return std::make_tuple(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> save_mean = at::borrow_from_optional_tensor(save_mean_opt); c10::MaybeOwned<Tensor> save_invstd = at::borrow_from_optional_tensor(save_invstd_opt); c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt); c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt); const bool needs_reduction = train || grad_input_mask[1] || grad_input_mask[2]; // Fused reducion & elementwise kernel if (needs_reduction && 
grad_input_mask[0] && !batch_norm_use_channels_last_kernels(input) && cuda::detail::canUse32BitIndexMath(input) && cuda::detail::canUse32BitIndexMath(grad_out)) { return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batch_norm_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const bool mixed_type = is_mixed_type(input, *weight, *running_mean, *running_var); if (mixed_type) { return batch_norm_backward_cuda_template<scalar_t, accscalar_t, int32_t>( grad_out, input, *weight, *running_mean, *running_var, *save_mean, *save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>( grad_out, input, *weight, *running_mean, *running_var, *save_mean, *save_invstd, train, epsilon, grad_input_mask); } }); } // NOTE: native_batch_norm always returns save_mean and save_invstd to be reused in backward. // However, this is also called from cudnn_batch_norm in eval mode which doesn't give // save_mean and save_invstd, so it needs recalculated. const auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true); Tensor mean; TORCH_INTERNAL_ASSERT(save_mean->defined(), "save_mean should always be defined\n"); if (save_mean->numel() != 0) { mean = *save_mean; } else if (needs_reduction) { TORCH_CHECK(!train && running_mean->defined()); mean = (running_mean->scalar_type() == acc_type) ? *running_mean : running_mean->to(acc_type); } Tensor invstd; TORCH_INTERNAL_ASSERT(save_invstd->defined(), "save_invstd should always be defined\n"); if (save_invstd->numel() != 0) { invstd = *save_invstd; } else { TORCH_CHECK(!train && running_var->defined()); auto n_channels = input.sizes()[1]; invstd = at::empty({n_channels}, input.options().dtype(acc_type)); batch_norm_calc_invstd(invstd, *running_var, epsilon); } Tensor sum_dy, sum_dy_xmu, grad_weight, grad_bias; if (needs_reduction) { std::tie(sum_dy, sum_dy_xmu, grad_weight, grad_bias) = batch_norm_backward_reduce_cuda( grad_out, input, mean, invstd, *weight, grad_input_mask[0], grad_input_mask[1], grad_input_mask[2]); } Tensor grad_input; if (grad_input_mask[0]) { if (train) { // NOTE: sum_dy and sum_dy_xmy are defined, as train implies needs_reduction grad_input = batch_norm_elementwise_backward_train( grad_out, input, mean, invstd, *weight, sum_dy, sum_dy_xmu); } else { grad_input = batch_norm_elementwise_backward_eval( grad_out, input, invstd, *weight); } } return std::make_tuple(grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) { auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto n_channels = self.size(1); auto save_mean = at::empty({n_channels}, options); auto save_invstd = at::empty({n_channels}, options); bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { if (cuda::detail::canUse32BitIndexMath(self)) { if (use_channels_last_kernel) { batch_norm_stats_channels_last_cuda_template<scalar_t, InvStd>( save_mean, save_invstd, self, epsilon); } else { batch_norm_stats_cuda_template<scalar_t, int32_t, InvStd>( save_mean, save_invstd, self, epsilon); } } else { batch_norm_stats_cuda_template<scalar_t, int64_t, InvStd>( save_mean, save_invstd, self, epsilon); } }); return std::tuple<Tensor, Tensor>(save_mean, save_invstd); } Tensor batch_norm_elemt_cuda( const Tensor& 
self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon) { auto output = at::empty_like(self); // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) { // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } // accepting input(self) here to determine template data types, since running_mean/running_var are optional std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); std::vector<int64_t> counts(mean.size(0), count); Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU)); counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype()); return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_); } std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda( const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); auto scalar_type = running_mean.defined() ? 
running_mean.scalar_type() : self.scalar_type(); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } else { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } }); } std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; if (at::cuda::detail::canUse32BitIndexMath(grad_output) && batch_norm_use_channels_last_kernels(grad_output) && batch_norm_use_channels_last_kernels(input) && (!weight.defined() || weight.is_contiguous()) && mean.is_contiguous() && invstd.is_contiguous()){ return batch_norm_backward_reduce_cuda_channels_last_template( grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "batch_norm_backward_reduce", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); const bool mixed_type = is_mixed_type(input, weight); using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(grad_output)) { if (mixed_type) { return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } } else { if (mixed_type) { return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); } } }); } Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self) && batch_norm_use_channels_last_kernels(input)) { return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, 
at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } }); } std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda( const Tensor& self, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum) { c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt); c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt); const int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_var = at::empty({n_input}, options); batch_norm_mean_var(self, save_mean, save_var); TORCH_CHECK(running_mean->defined() == running_var->defined()); if (running_mean->defined()) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats(save_mean, save_var, *running_mean, *running_var, momentum, N); } return std::tuple<Tensor, Tensor>(save_mean, save_var); } } } // namespace at::native
a0b95e7902174c1fdfbcf3e3760c6800e523f5be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "nearest_neighbour_hip.cuh" #include <cmath> #define NREPS 10 // number of repetations for time calculations #define THREADS_PER_BLOCK 1024 __global__ void NearestNeighbourKernel(Point *train, Point *test, int *result, int *trainSize, int *testSize) { //int blockId = blockIdx.x + blockIdx.y * gridDim.x; //unsigned int i = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < *testSize) { __shared__ int minDist; __shared__ int minID; __shared__ int dist; __shared__ uint4s minMax; minDist = INT32_MAX; minID = -1; for (int j = 0; j < *trainSize; j++) { dist = 0; // Calculate distance between points for (int k = 0; k < 8; k++) { // calculate max-min of 2 numbers without branching (hack) minMax.first = train[j][k].first ^ ((test[i][k].first ^ train[j][k].first) & -(test[i][k].first < train[j][k].first)); // min(x, y) minMax.second = test[i][k].first ^ ((test[i][k].first ^ train[j][k].first) & -(test[i][k].first < train[j][k].first)); // max(x, y) dist += (minMax.second - minMax.first) * (minMax.second - minMax.first); // (max(x,y)-min(x,y))^2 minMax.first = train[j][k].second ^ ((test[i][k].second ^ train[j][k].second) & -(test[i][k].second < train[j][k].second)); // min(x, y) minMax.second = test[i][k].second ^ ((test[i][k].second ^ train[j][k].second) & -(test[i][k].second < train[j][k].second)); // max(x, y) dist += (minMax.second - minMax.first) * (minMax.second - minMax.first); // (max(x,y)-min(x,y))^2 } if (dist < minDist) { minDist = dist; minID = j; } } result[i] = minID; } } // Helper function for using CUDA to add vectors in parallel. hipError_t CudaNearestNeighbour(Point *train, Point *test, int *result, int trainSize, int testSize) { Point *dev_train, *dev_test; int *dev_result; int *dev_trainSize, *dev_testSize; hipError_t cudaStatus; int numThreads = (int)sqrt(THREADS_PER_BLOCK); dim3 dimBlock(numThreads, numThreads, 1); //=========================================================================================================================== // Allocate GPU buffers for three vectors (two input, one output) // cudaStatus = hipMalloc((void**)&dev_result, testSize * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_train, trainSize * sizeof(Point)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_test, testSize * sizeof(Point)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_testSize, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_trainSize, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } //=========================================================================================================================== // Copy input vectors from host memory to GPU buffers. 
// cudaStatus = hipMemcpy(dev_train, train, trainSize * sizeof(Point), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_test, test, testSize * sizeof(Point), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_testSize, &testSize, sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_trainSize, &trainSize, sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } //=========================================================================================================================== // Launch a kernel on the GPU with one thread for each element, and check for errors. // hipLaunchKernelGGL(( NearestNeighbourKernel), dim3((testSize+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK), dim3(dimBlock), 0, 0, dev_train, dev_test, dev_result, dev_trainSize, dev_testSize); //=========================================================================================================================== // Check for any errors launching the kernel // cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(result, dev_result, testSize * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_result); hipFree(dev_train); hipFree(dev_test); hipFree(dev_testSize); hipFree(dev_trainSize); return cudaStatus; }
a0b95e7902174c1fdfbcf3e3760c6800e523f5be.cu
#include "nearest_neighbour.cuh" #include <cmath> #define NREPS 10 // number of repetations for time calculations #define THREADS_PER_BLOCK 1024 __global__ void NearestNeighbourKernel(Point *train, Point *test, int *result, int *trainSize, int *testSize) { //int blockId = blockIdx.x + blockIdx.y * gridDim.x; //unsigned int i = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < *testSize) { __shared__ int minDist; __shared__ int minID; __shared__ int dist; __shared__ uint4s minMax; minDist = INT32_MAX; minID = -1; for (int j = 0; j < *trainSize; j++) { dist = 0; // Calculate distance between points for (int k = 0; k < 8; k++) { // calculate max-min of 2 numbers without branching (hack) minMax.first = train[j][k].first ^ ((test[i][k].first ^ train[j][k].first) & -(test[i][k].first < train[j][k].first)); // min(x, y) minMax.second = test[i][k].first ^ ((test[i][k].first ^ train[j][k].first) & -(test[i][k].first < train[j][k].first)); // max(x, y) dist += (minMax.second - minMax.first) * (minMax.second - minMax.first); // (max(x,y)-min(x,y))^2 minMax.first = train[j][k].second ^ ((test[i][k].second ^ train[j][k].second) & -(test[i][k].second < train[j][k].second)); // min(x, y) minMax.second = test[i][k].second ^ ((test[i][k].second ^ train[j][k].second) & -(test[i][k].second < train[j][k].second)); // max(x, y) dist += (minMax.second - minMax.first) * (minMax.second - minMax.first); // (max(x,y)-min(x,y))^2 } if (dist < minDist) { minDist = dist; minID = j; } } result[i] = minID; } } // Helper function for using CUDA to add vectors in parallel. cudaError_t CudaNearestNeighbour(Point *train, Point *test, int *result, int trainSize, int testSize) { Point *dev_train, *dev_test; int *dev_result; int *dev_trainSize, *dev_testSize; cudaError_t cudaStatus; int numThreads = (int)sqrt(THREADS_PER_BLOCK); dim3 dimBlock(numThreads, numThreads, 1); //=========================================================================================================================== // Allocate GPU buffers for three vectors (two input, one output) // cudaStatus = cudaMalloc((void**)&dev_result, testSize * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_train, trainSize * sizeof(Point)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_test, testSize * sizeof(Point)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_testSize, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_trainSize, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } //=========================================================================================================================== // Copy input vectors from host memory to GPU buffers. 
// cudaStatus = cudaMemcpy(dev_train, train, trainSize * sizeof(Point), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_test, test, testSize * sizeof(Point), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_testSize, &testSize, sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_trainSize, &trainSize, sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } //=========================================================================================================================== // Launch a kernel on the GPU with one thread for each element, and check for errors. // NearestNeighbourKernel<<<(testSize+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, dimBlock>>>(dev_train, dev_test, dev_result, dev_trainSize, dev_testSize); //=========================================================================================================================== // Check for any errors launching the kernel // cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(result, dev_result, testSize * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_result); cudaFree(dev_train); cudaFree(dev_test); cudaFree(dev_testSize); cudaFree(dev_trainSize); return cudaStatus; }
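//===========================================================================
// Illustrative sketch, not part of the original .cu/.hip pair above.
// nearest_neighbour.cuh is not included in this dump, so Point and uint4s are
// known only from how the kernel indexes them (p[k].first / p[k].second for
// k in 0..7). The guarded block below is one hypothetical reconstruction plus
// a minimal host-side call of CudaNearestNeighbour; the real header may differ.
#if 0
struct uint4s { unsigned int first, second; };  // assumed coordinate-pair type
typedef uint4s Point[8];                        // assumed: 8 pairs per sample

static void nearest_neighbour_example(Point *train, Point *test,
                                      int trainSize, int testSize) {
    // One result slot per test point; each slot receives the index of the
    // nearest training point, as written by the kernel above.
    int *result = (int *)malloc(testSize * sizeof(int));
    cudaError_t status = CudaNearestNeighbour(train, test, result, trainSize, testSize);
    if (status != cudaSuccess) {
        fprintf(stderr, "CudaNearestNeighbour failed: %s\n", cudaGetErrorString(status));
    }
    free(result);
}
#endif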
dc69b2a78d6639d9015f26b331e66481ea887f85.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "checkpoint.h" #include <assert.h> #include <stdio.h> #define CHECK(call) { \ hipError_t err = (call); \ if (err != hipSuccess) { \ fprintf(stderr, "CUDA Error at %s:%d - %s\n", __FILE__, __LINE__, \ hipGetErrorString(err)); \ exit(1); \ } \ } __global__ void kernel(int *arr) { int tid = blockIdx.x * blockDim.x + threadIdx.x; arr[tid] = tid; } int main(int argc, char **argv) { int *d_arr; int *h_arr = (int *)malloc(sizeof(int) * 100); CHECK(hipMalloc((void **)&d_arr, sizeof(int) * 100)); CHECK(hipMemcpy(d_arr, h_arr, sizeof(int) * 100, hipMemcpyHostToDevice)); h_arr[0] = 0; h_arr[1] = 1; h_arr[2] = 2; checkpoint(); hipLaunchKernelGGL(( kernel), dim3(10), dim3(128), 0, 0, d_arr); assert(h_arr != NULL); assert(d_arr != NULL); assert(h_arr[0] == 0); assert(h_arr[1] == 1); assert(h_arr[2] == 2); CHECK(hipMemcpy(h_arr, d_arr, sizeof(int) * 100, hipMemcpyDeviceToHost)); assert(h_arr[0] == 0); assert(h_arr[1] == 1); assert(h_arr[2] == 2); CHECK(hipFree(d_arr)); free(h_arr); return 0; }
dc69b2a78d6639d9015f26b331e66481ea887f85.cu
#include "checkpoint.h" #include <assert.h> #include <stdio.h> #define CHECK(call) { \ cudaError_t err = (call); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA Error at %s:%d - %s\n", __FILE__, __LINE__, \ cudaGetErrorString(err)); \ exit(1); \ } \ } __global__ void kernel(int *arr) { int tid = blockIdx.x * blockDim.x + threadIdx.x; arr[tid] = tid; } int main(int argc, char **argv) { int *d_arr; int *h_arr = (int *)malloc(sizeof(int) * 100); CHECK(cudaMalloc((void **)&d_arr, sizeof(int) * 100)); CHECK(cudaMemcpy(d_arr, h_arr, sizeof(int) * 100, cudaMemcpyHostToDevice)); h_arr[0] = 0; h_arr[1] = 1; h_arr[2] = 2; checkpoint(); kernel<<<10, 128>>>(d_arr); assert(h_arr != NULL); assert(d_arr != NULL); assert(h_arr[0] == 0); assert(h_arr[1] == 1); assert(h_arr[2] == 2); CHECK(cudaMemcpy(h_arr, d_arr, sizeof(int) * 100, cudaMemcpyDeviceToHost)); assert(h_arr[0] == 0); assert(h_arr[1] == 1); assert(h_arr[2] == 2); CHECK(cudaFree(d_arr)); free(h_arr); return 0; }
9dab76577a784cd8c887f04c9e3215ca09492d59.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Vector addition on the GPU: C = A + B */ #include <stdio.h> #include <stdlib.h> #include <string.h> #define SIZE 1024 #define BLOCKSIZE 32 // Device function (i.e. kernel) __global__ void VecAdd(float * A, float * B, float * C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if ( i < N ) { C[i] = A[i] + B[i]; } } // CPU version of the vector addition function void vecAddCPU(float * A, float * B, float * C, int N) { int i; for (i=0; i<N; i++) { C[i] = A[i] + B[i]; } } // Function compares two 1d arrays void compareVecs( float * vec1, float * vec2, int N ) { int i; int vecsEqual = 1; for (i=0; i<N; i++) { if ( abs (vec1[i] - vec2[i]) > 0.00001 ) { printf("vectors not equal! i: %d vec1[i]: %f vec2[i]: %f\n",i,vec1[i],vec2[i]); vecsEqual = 0; } } if ( vecsEqual ) printf("GPU vector addition agrees with CPU version!\n"); } /* Host function for filling vector (1d array) with random numbers between -20.0 and 20.0 */ void fillOutVector( float * vec, int vec_length ) { time_t t; srand((unsigned) time(&t)); // initialize random number generator int i; for (i=0; i<vec_length; i++) { vec[i] = ( (float)rand() / (float)(RAND_MAX) ) * 40.0; vec[i] -= 20.0; } } // Host function for printing a vector (1d array) void printVector( float * vec, int vec_length ) { int i; for (i=0; i<vec_length; i++) { printf("i: %d vec[i]: %f\n",i,vec[i]); } } // program execution begins here int main( int argc, char ** argv ) { int cnt = 1; int gpu_index = 0; if ( argc == 1 ) { printf("Usage: ./vec_add --device <GPU_INDEX>\n"); printf("where GPU_INDEX is the GPU you want to use.\n"); printf("On the ACCRE GPU cluster valid values are 0, 1, 2, and 3\n"); exit(-1); } while ( cnt < argc ) { if ( !strcmp("--device",argv[cnt]) ) { cnt++; gpu_index = atoi( argv[cnt] ); } else { printf("Usage: ./vec_add --device <GPU_INDEX>\n"); printf("where GPU_INDEX is the GPU you want to use.\n"); printf("On the ACCRE GPU cluster valid values are 0, 1, 2, and 3\n\n"); printf("Unrecognized option: %s\n",argv[cnt]); exit(-1); } cnt++; } // it's important to set the device before initializing events! Otherwise // the events will assume gpu=0 hipSetDevice(gpu_index); // get number of SMs on this GPU int devID; hipDeviceProp_t props; hipGetDevice(&devID); hipGetDeviceProperties(&props, devID); printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major,props.minor); // initialize timer events hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); size_t vec_bytes = SIZE * sizeof(float); // host arrays float * h_A = (float *)malloc( vec_bytes ); float * h_B = (float *)malloc( vec_bytes ); float * h_C = (float *)malloc( vec_bytes ); // fill array with random floats fillOutVector( h_A, SIZE ); fillOutVector( h_B, SIZE ); // device arrays float * d_A, * d_B, * d_C; hipError_t rc; // return code from cuda functions rc = hipMalloc(&d_A, vec_bytes); if ( rc ) printf("Error from hipMalloc: %s\n",hipGetErrorString(rc)); hipMalloc(&d_B, vec_bytes); hipMalloc(&d_C, vec_bytes); // copy A and B to the device hipMemcpy(d_A, h_A, vec_bytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, vec_bytes, hipMemcpyHostToDevice); // dim3 is a 3-element struct with elements x, y, z (all ints) dim3 threadsPerBlock(BLOCKSIZE); dim3 blocksPerGrid( (SIZE + BLOCKSIZE - 1) / BLOCKSIZE ); // launch vector addition kernel! 
hipEventRecord(start); hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_A, d_B, d_C, SIZE); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("kernel time (ms) : %7.5f\n",milliseconds); // copy results to host hipMemcpy(h_C, d_C, vec_bytes, hipMemcpyDeviceToHost); printVector( h_C, SIZE ); // verify that we got correct results float * gold_C = (float *)malloc( vec_bytes ); hipEventRecord(start); vecAddCPU( h_A, h_B, gold_C, SIZE ); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("cpu function time (ms) : %7.5f\n",milliseconds); compareVecs( gold_C, h_C, SIZE ); // clean up timer variables hipEventDestroy(start); hipEventDestroy(stop); // free memory on device hipFree(d_A); hipFree(d_B); hipFree(d_C); // free memory on host free(h_A); free(h_B); free(h_C); free(gold_C); return 0; }
9dab76577a784cd8c887f04c9e3215ca09492d59.cu
/* Vector addition on the GPU: C = A + B */ #include <stdio.h> #include <stdlib.h> #include <string.h> #define SIZE 1024 #define BLOCKSIZE 32 // Device function (i.e. kernel) __global__ void VecAdd(float * A, float * B, float * C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if ( i < N ) { C[i] = A[i] + B[i]; } } // CPU version of the vector addition function void vecAddCPU(float * A, float * B, float * C, int N) { int i; for (i=0; i<N; i++) { C[i] = A[i] + B[i]; } } // Function compares two 1d arrays void compareVecs( float * vec1, float * vec2, int N ) { int i; int vecsEqual = 1; for (i=0; i<N; i++) { if ( abs (vec1[i] - vec2[i]) > 0.00001 ) { printf("vectors not equal! i: %d vec1[i]: %f vec2[i]: %f\n",i,vec1[i],vec2[i]); vecsEqual = 0; } } if ( vecsEqual ) printf("GPU vector addition agrees with CPU version!\n"); } /* Host function for filling vector (1d array) with random numbers between -20.0 and 20.0 */ void fillOutVector( float * vec, int vec_length ) { time_t t; srand((unsigned) time(&t)); // initialize random number generator int i; for (i=0; i<vec_length; i++) { vec[i] = ( (float)rand() / (float)(RAND_MAX) ) * 40.0; vec[i] -= 20.0; } } // Host function for printing a vector (1d array) void printVector( float * vec, int vec_length ) { int i; for (i=0; i<vec_length; i++) { printf("i: %d vec[i]: %f\n",i,vec[i]); } } // program execution begins here int main( int argc, char ** argv ) { int cnt = 1; int gpu_index = 0; if ( argc == 1 ) { printf("Usage: ./vec_add --device <GPU_INDEX>\n"); printf("where GPU_INDEX is the GPU you want to use.\n"); printf("On the ACCRE GPU cluster valid values are 0, 1, 2, and 3\n"); exit(-1); } while ( cnt < argc ) { if ( !strcmp("--device",argv[cnt]) ) { cnt++; gpu_index = atoi( argv[cnt] ); } else { printf("Usage: ./vec_add --device <GPU_INDEX>\n"); printf("where GPU_INDEX is the GPU you want to use.\n"); printf("On the ACCRE GPU cluster valid values are 0, 1, 2, and 3\n\n"); printf("Unrecognized option: %s\n",argv[cnt]); exit(-1); } cnt++; } // it's important to set the device before initializing events! Otherwise // the events will assume gpu=0 cudaSetDevice(gpu_index); // get number of SMs on this GPU int devID; cudaDeviceProp props; cudaGetDevice(&devID); cudaGetDeviceProperties(&props, devID); printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major,props.minor); // initialize timer events cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); size_t vec_bytes = SIZE * sizeof(float); // host arrays float * h_A = (float *)malloc( vec_bytes ); float * h_B = (float *)malloc( vec_bytes ); float * h_C = (float *)malloc( vec_bytes ); // fill array with random floats fillOutVector( h_A, SIZE ); fillOutVector( h_B, SIZE ); // device arrays float * d_A, * d_B, * d_C; cudaError_t rc; // return code from cuda functions rc = cudaMalloc(&d_A, vec_bytes); if ( rc ) printf("Error from cudaMalloc: %s\n",cudaGetErrorString(rc)); cudaMalloc(&d_B, vec_bytes); cudaMalloc(&d_C, vec_bytes); // copy A and B to the device cudaMemcpy(d_A, h_A, vec_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, vec_bytes, cudaMemcpyHostToDevice); // dim3 is a 3-element struct with elements x, y, z (all ints) dim3 threadsPerBlock(BLOCKSIZE); dim3 blocksPerGrid( (SIZE + BLOCKSIZE - 1) / BLOCKSIZE ); // launch vector addition kernel! 
cudaEventRecord(start); VecAdd<<< blocksPerGrid, threadsPerBlock >>>(d_A, d_B, d_C, SIZE); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("kernel time (ms) : %7.5f\n",milliseconds); // copy results to host cudaMemcpy(h_C, d_C, vec_bytes, cudaMemcpyDeviceToHost); printVector( h_C, SIZE ); // verify that we got correct results float * gold_C = (float *)malloc( vec_bytes ); cudaEventRecord(start); vecAddCPU( h_A, h_B, gold_C, SIZE ); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("cpu function time (ms) : %7.5f\n",milliseconds); compareVecs( gold_C, h_C, SIZE ); // clean up timer variables cudaEventDestroy(start); cudaEventDestroy(stop); // free memory on device cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // free memory on host free(h_A); free(h_B); free(h_C); free(gold_C); return 0; }
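// Illustrative note, not part of the original .cu/.hip pair above: hypothetical
// build-and-run commands for the two variants, using the file names from this
// dump; exact compiler flags depend on the local toolchain and target GPU.
//
//   nvcc  9dab76577a784cd8c887f04c9e3215ca09492d59.cu  -o vec_add_cuda
//   hipcc 9dab76577a784cd8c887f04c9e3215ca09492d59.hip -o vec_add_hip
//   ./vec_add_cuda --device 0    (--device selects the GPU index parsed in main)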
11339522f91df429650bdee6b2f236639e92f999.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu< float, 1, int32_t, float, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 4, true, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
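/*
 * Aside from the hipify banner, the only substantive difference between this
 * .hip wrapper and its original .cu version (the next entry) is the stream type
 * in the explicit template instantiation: hipStream_t instead of cudaStream_t.
 * A minimal sketch of a backend-neutral alias that captures that one-line
 * mapping is shown below; neither file actually defines such an alias, so the
 * name gpuStream_t is purely illustrative.
 */
#ifdef __HIPCC__
#include <hip/hip_runtime.h>
typedef hipStream_t gpuStream_t;   // HIP build: stream handle from the HIP runtime
#else
#include <cuda_runtime.h>
typedef cudaStream_t gpuStream_t;  // CUDA build: stream handle from the CUDA runtime
#endif

/* A portable wrapper declaration could then be written once, e.g.
 *   void cutlass_convolution_wrapper_portable(..., gpuStream_t stream);
 * instead of maintaining a cudaStream_t and a hipStream_t variant.
 */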
11339522f91df429650bdee6b2f236639e92f999.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu< float, 1, int32_t, float, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 4, true, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
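/*
 * The EpilogueOp chosen in this pair of files,
 * BiasAddLinearCombinationRelu<float, 1, int32_t, float, float>, fuses the bias
 * add, a linear combination with the optional residual input d_z, and a ReLU
 * into the epilogue of the implicit-GEMM convolution. The scalar sketch below
 * only illustrates that fused computation; the names alpha, beta and gamma are
 * assumptions and do not mirror the exact cutlass parameterization.
 */
#include <cuda_runtime.h>

__host__ __device__ inline float bias_add_linear_combination_relu_sketch(
    float acc, float bias, float z, float alpha, float beta, float gamma)
{
  float v = alpha * acc + beta * bias + gamma * z;  // linear combination of conv result, bias and z
  return v > 0.0f ? v : 0.0f;                       // ReLU activation
}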
a600970587d5ce1521443940c5a0cd4dde45c064.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zlobpcg_maxpy.cu, normal z -> d, Mon Jun 25 18:24:24 2018 */ #include "magmasparse_internal.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 __global__ void magma_dlobpcg_maxpy_kernel( magma_int_t num_rows, magma_int_t num_vecs, double * X, double * Y) { int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index if ( row < num_rows ) { for( int i=0; i < num_vecs; i++ ) { Y[ row + i*num_rows ] += X[ row + i*num_rows ]; } } } /** Purpose ------- This routine computes a axpy for a mxn matrix: Y = X + Y It replaces: magma_daxpy(m*n, c_one, Y, 1, X, 1); / x1[0] x2[0] x3[0] \ | x1[1] x2[1] x3[1] | X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] . | x1[3] x2[3] x3[3] | \ x1[4] x2[4] x3[4] / Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] num_vecs magma_int_t number of vectors @param[in] X magmaDouble_ptr input vector X @param[in,out] Y magmaDouble_ptr input/output vector Y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dlobpcg_maxpy( magma_int_t num_rows, magma_int_t num_vecs, magmaDouble_ptr X, magmaDouble_ptr Y, magma_queue_t queue ) { // every thread handles one row magma_int_t block_size = BLOCK_SIZE; magma_int_t threads = BLOCK_SIZE; dim3 block( block_size ); dim3 grid( magma_ceildiv( num_rows, block_size ) ); hipLaunchKernelGGL(( magma_dlobpcg_maxpy_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , num_rows, num_vecs, X, Y ); return MAGMA_SUCCESS; }
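/*
 * CPU reference for the magma_dlobpcg_maxpy kernel above: X and Y each hold
 * num_vecs vectors stored back to back with leading dimension num_rows
 * (column-major), and every row thread accumulates Y += X across all vectors.
 * This helper is an illustrative sketch only; it does not exist in MAGMA.
 */
static void lobpcg_maxpy_cpu_sketch(int num_rows, int num_vecs,
                                    const double * X, double * Y)
{
  for (int i = 0; i < num_vecs; ++i) {            // one column vector at a time
    for (int row = 0; row < num_rows; ++row) {    // Y(:,i) += X(:,i)
      Y[row + i * num_rows] += X[row + i * num_rows];
    }
  }
}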
a600970587d5ce1521443940c5a0cd4dde45c064.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zlobpcg_maxpy.cu, normal z -> d, Mon Jun 25 18:24:24 2018 */ #include "magmasparse_internal.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 __global__ void magma_dlobpcg_maxpy_kernel( magma_int_t num_rows, magma_int_t num_vecs, double * X, double * Y) { int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index if ( row < num_rows ) { for( int i=0; i < num_vecs; i++ ) { Y[ row + i*num_rows ] += X[ row + i*num_rows ]; } } } /** Purpose ------- This routine computes a axpy for a mxn matrix: Y = X + Y It replaces: magma_daxpy(m*n, c_one, Y, 1, X, 1); / x1[0] x2[0] x3[0] \ | x1[1] x2[1] x3[1] | X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] . | x1[3] x2[3] x3[3] | \ x1[4] x2[4] x3[4] / Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] num_vecs magma_int_t number of vectors @param[in] X magmaDouble_ptr input vector X @param[in,out] Y magmaDouble_ptr input/output vector Y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dlobpcg_maxpy( magma_int_t num_rows, magma_int_t num_vecs, magmaDouble_ptr X, magmaDouble_ptr Y, magma_queue_t queue ) { // every thread handles one row magma_int_t block_size = BLOCK_SIZE; magma_int_t threads = BLOCK_SIZE; dim3 block( block_size ); dim3 grid( magma_ceildiv( num_rows, block_size ) ); magma_dlobpcg_maxpy_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( num_rows, num_vecs, X, Y ); return MAGMA_SUCCESS; }
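/*
 * The launch configuration in the .cu file above assigns one thread per matrix
 * row: a 1-D grid of magma_ceildiv(num_rows, BLOCK_SIZE) blocks of BLOCK_SIZE
 * threads, with the `row < num_rows` guard discarding the excess threads of the
 * last block. The equivalent ceiling division is sketched below (illustrative
 * helper; MAGMA provides magma_ceildiv itself).
 */
static inline int ceildiv_sketch(int num_rows, int block_size)
{
  return (num_rows + block_size - 1) / block_size;  // ceil(num_rows / block_size) in integer arithmetic
}

/* Example: num_rows = 1000, block_size = 512 gives a grid of 2 blocks
 * (1024 threads); the final 24 threads fail the row < num_rows test and return.
 */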
0dbf8d5efdf476a1e79c766f17a3c01b3abfc3f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "config.h" template <typename scalar_t> __device__ scalar_t deform_conv3d_im2col_trilinear( const scalar_t *bottom_data, const int data_width,const int data_length, const int height, const int width, const int length,scalar_t h, scalar_t w,scalar_t l) { int h_low = floor(h); int w_low = floor(w); int l_low = floor(l); int h_high = h_low + 1; int w_high = w_low + 1; int l_high = l_low + 1; scalar_t lh = h - h_low;//dh scalar_t lw = w - w_low;//dw scalar_t ll = l - l_low;//dl scalar_t hh = 1 - lh, hw = 1 - lw, hl = 1 - ll; //1-dh 1-dw 1-dl scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0 && l_low >= 0) v1 = bottom_data[h_low * data_width*data_length + w_low*data_length+ l_low]; scalar_t v2 = 0; if (h_low >= 0 && w_low >=0 && l_high<= length -1) v2 = bottom_data[h_low * data_width*data_length + w_low*data_length+ l_high]; scalar_t v3 = 0; if (h_low >= 0 && w_high <= width - 1 && l_low >= 0) v3 = bottom_data[h_low * data_width*data_length + w_high*data_length+ l_low]; scalar_t v4 = 0; if (h_low >= 0 && w_high <= width - 1 && l_high<= length -1) v4 = bottom_data[h_low * data_width*data_length + w_high*data_length+ l_high]; scalar_t v5 = 0; if (h_high <= height -1 && w_low >= 0 && l_low >= 0) v5 = bottom_data[h_high * data_width*data_length + w_low*data_length+ l_low]; scalar_t v6 = 0; if (h_high <= height -1 && w_low >= 0 && l_high<= length -1) v6 = bottom_data[h_high * data_width*data_length + w_low*data_length+ l_high]; scalar_t v7 = 0; if (h_high <= height -1 && w_high <= width - 1 && l_low >= 0) v7 = bottom_data[h_high * data_width*data_length + w_high*data_length+ l_low]; scalar_t v8 = 0; if (h_high <= height -1 && w_high <= width - 1 && l_high<= length -1) v8 = bottom_data[h_high * data_width*data_length + w_high*data_length+ l_high]; scalar_t w1 = hh * hw *hl, w2 = hh *hw *ll, w3 = hh * lw*hl, w4 = hh * lw* ll; scalar_t w5 = lh * hw *hl, w6 = lh *hw *ll, w7 = lh * lw*hl, w8 = lh * lw* ll; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4+w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8); return val; } template <typename scalar_t> __global__ void deform_conv3d_im2col_gpu_kernel( const int n, const scalar_t *data_im, const scalar_t *data_offset, const int height, const int width, const int length, const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, const int length_col, scalar_t *data_col) { // input = {step, channels,height, width,length} // offset={step,deformable_group*3* kernel_h * kernel_w * kernel_l,height_out, width_out,length_out} CUDA_KERNEL_LOOP(index, n)//num_kernels = channels * batch_size * height_col * width_col *length_col; { // columns = {channels * kernel_h * kernel_w * kernel_l,step * height_out * width_out * length_out} const int l_col= index % length_col; const int w_col = (index / length_col) % width_col; const int h_col = (index / length_col / width_col ) % height_col; const int b_col = (index / length_col / width_col / height_col) % batch_size; const int c_im = (index / length_col / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w * kernel_l; // compute deformable group index const int deformable_group_index = 
c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; const int l_in = l_col * stride_l - pad_l; scalar_t *data_col_ptr = data_col+ (c_col* batch_size + b_col)* height_col* width_col * length_col + h_col * width_col * length_col + w_col * length_col + l_col; //27 10000 const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width * length; const scalar_t *data_offset_ptr = data_offset +(b_col * deformable_group + deformable_group_index) * 3 * kernel_h * kernel_w * kernel_l * height_col * width_col * length_col;// 1 18 100 100 for (int i = 0; i < kernel_h; ++i) for (int j = 0; j < kernel_w; ++j) for (int k = 0; k < kernel_l; ++k) { int f=i*kernel_w*kernel_l + j*kernel_l+k; const int data_offset_h_ptr = (3*f) * height_col * width_col * length_col+ h_col* width_col * length_col+ w_col* length_col + l_col; const int data_offset_w_ptr = (3*f+1) * height_col * width_col* length_col + h_col* width_col* length_col + w_col* length_col + l_col; const int data_offset_l_ptr = (3*f+2) * height_col * width_col* length_col + h_col* width_col* length_col + w_col* length_col + l_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t offset_l = data_offset_ptr[data_offset_l_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; const scalar_t l_im = l_in + k * dilation_l + offset_l; if (h_im > -1 && w_im > -1 && l_im > -1 && h_im < height && w_im < width && l_im < length) { val = deform_conv3d_im2col_trilinear(data_im_ptr, width, length, height, width, length, h_im, w_im,l_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col* length_col; } } } void deform_conv3d_im2col_cuda( at::Tensor data_im, at::Tensor data_offset, const int batch_size, const int channels, const int height_im, const int width_im, const int length_im, const int height_col, const int width_col,const int length_col, const int kernel_h,const int kenerl_w,const int kenerl_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col *length_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "deform_conv3d_im2col_gpu_kernel", ([&] { const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deform_conv3d_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_im_, data_offset_, height_im, width_im, length_im, kernel_h, kenerl_w,kenerl_l, pad_h, pad_w, pad_l, stride_h, stride_w,stride_l, dilation_h, dilation_w, dilation_l, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, length_col,data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deform_conv3d_im2col: %s\n", hipGetErrorString(err)); } } int deform_conv3d_forward_cuda( at::Tensor input, at::Tensor weight,at::Tensor bias, at::Tensor offset, at::Tensor output, 
const int kernel_h,const int kernel_w,const int kernel_l, const int stride_h,const int stride_w,const int stride_l, const int pad_h,const int pad_w,const int pad_l, const int dilation_h,const int dilation_w,const int dilation_l, const int group,const int deformable_group, const int in_step,const bool with_bias){ //#define DEBUG TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); TORCH_CHECK(bias.is_contiguous(), "bias tensor has to be contiguous"); TORCH_CHECK(offset.is_contiguous(), "offset tensor has to be contiguous"); TORCH_CHECK(output.is_contiguous(), "output tensor has to be contiguous"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int length = input.size(4); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); const int kernel_l_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_l_ != kernel_l) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h_, kernel_w,kernel_l, kernel_h_, kernel_w_,kernel_l_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; const int length_out = (length + 2 * pad_l - (dilation_l * (kernel_l - 1) + 1)) / stride_l + 1; const int step=GET_STEP(batch,in_step); #ifdef DEBUG print_tensor_size("deform_conv3d_forward_cuda---output size",output); print_tensor_size("deform_conv3d_forward_cuda---input size",input); print_tensor_size("deform_conv3d_forward_cuda---offset size",offset); #endif output = output.view({batch, channels_out,height_out, width_out,length_out}); output.zero_(); at::Tensor columns = at::zeros({channels * kernel_h * kernel_w * kernel_l, step * height_out * width_out * length_out},input.options()); input = input.view({batch / step, step, channels,height, width,length}); offset =offset.view({batch / step, step,deformable_group*3* kernel_h * kernel_w * kernel_l, height_out, width_out,length_out}); //divide into group output = output.view({batch/step, group, output.size(1) / group,step, output.size(2), output.size(3),output.size(4)}); weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3),weight.size(4)}); for (int b = 0; b < batch / step; b++) { columns.fill_(0); deform_conv3d_im2col_cuda( input[b], offset[b], step,channels, height,width,length, height_out, width_out, length_out, kernel_h, kernel_w,kernel_l, pad_h, pad_w,pad_l, stride_h, stride_w,stride_l, dilation_h, dilation_w, dilation_l, deformable_group, columns); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); for (int g = 0; g < group; g++) { output[b][g] = output[b][g].flatten(1) .addmm_(weight[g].flatten(1), columns[g]).view_as(output[b][g]); } columns = columns.view({columns.size(0)*columns.size(1), columns.size(2)}); } weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4),weight.size(5)}); output = output.view({output.size(0), output.size(1) * output.size(2), output.size(3), output.size(4),output.size(5),output.size(6)}); 
output = output.view({batch / step, channels_out, step, height_out, width_out,length_out}); output.transpose_(1, 2); output = output.contiguous().view({batch , channels_out, height_out, width_out,length_out}); if (with_bias) output += bias.view({1, bias.size(0), 1, 1, 1}); input=input.view({batch,channels,height,width,length}); offset=offset.view({batch,deformable_group * 3 *kernel_h*kernel_w*kernel_l,height_out,width_out,length_out}); return 0; } template <typename scalar_t> __global__ void deform_conv3d_gradient_gpu_kernel( const int n,const scalar_t *grad_col, const scalar_t *data_input, const scalar_t *data_offset,scalar_t * columns, const int channels_input, const int height_input, const int width_input, const int length_input, const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int channel_per_deformable_group,const int step, const int offset_channels, const int deformable_group, const int height_col, const int width_col, const int length_col, scalar_t * grad_input,scalar_t *grad_offset){ // columns = {channels * kernel_h * kernel_w * kernel_l,step*height_out * width_out * length_out} // grad_columns = {channels * kernel_h * kernel_w * kernel_l,step*height_out * width_out * length_out} // grad_output = {step,channels_out,height_out,width_out , length_out} // input = {step, channels, height, width, length} // offset = {step,deformable_group * 3 * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out} // grad_offset = {step,deformable_group * 3 * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out} CUDA_KERNEL_LOOP(index, n)//channels*kernel_h * kernel_w * kernel_l* step * height_col * width_col * length_col; { int single_col_len=length_col * width_col * height_col; int f = (index /step/ single_col_len )%(kernel_h * kernel_w * kernel_l); int i=(f / kernel_l / kernel_w) % kernel_h; int j=(f / kernel_l) %kernel_w; int k=f % kernel_l; int bpos=(index%(step*single_col_len))/(single_col_len); int lpos_col = (index % (single_col_len)) % length_col; int wpos_col = ((index % (single_col_len)) / length_col) % width_col; int hpos_col = ((index % (single_col_len)) / length_col / width_col) % height_col; int cpos_col = (index / step / single_col_len); int cpos_in=cpos_col/kernel_h/kernel_w/kernel_l; int offset_group_index=cpos_in/(channels_input/deformable_group); // printf("index %d cpos_col %d hpos_col %d wpos_col %d \n",index,cpos_col,hpos_col,wpos_col); int offset_base_ptr=bpos*(deformable_group * 3 * kernel_h * kernel_w * kernel_l*single_col_len) +offset_group_index*channel_per_deformable_group*single_col_len +hpos_col*width_col*length_col+wpos_col*length_col+lpos_col; int offset_h_ptr=offset_base_ptr+3*f*single_col_len; int offset_w_ptr=offset_base_ptr+(3*f+1)*single_col_len; int offset_l_ptr=offset_base_ptr+(3*f+2)*single_col_len; scalar_t offset_h=data_offset[offset_h_ptr]; scalar_t offset_w=data_offset[offset_w_ptr]; scalar_t offset_l=data_offset[offset_l_ptr]; int hpos_in = hpos_col * stride_h -pad_h + (i) * dilation_h; int wpos_in = wpos_col * stride_w - pad_w + (j) * dilation_w; int lpos_in = lpos_col * stride_l - pad_l + (k) * dilation_l; auto real_offset_h=hpos_in+offset_h; auto real_offset_w=wpos_in+offset_w; auto real_offset_l=lpos_in+offset_l; int h_low = floor(real_offset_h); int w_low = floor(real_offset_w); int l_low = floor(real_offset_l); int h_high = h_low + 1; int 
w_high = w_low + 1; int l_high = l_low + 1; scalar_t dh = real_offset_h - h_low; scalar_t dw = real_offset_w - w_low; scalar_t dl = real_offset_l - l_low; scalar_t w1 = (1-dh) *(1- dw)*(1-dl), w2 =(1- dh) *(1- dw)*dl, w3 = (1-dh)*dw*(1-dl), w4 = (1-dh) * dw*dl; scalar_t w5 = dh *(1- dw)*(1-dl), w6 =dh*(1- dw)*dl, w7 = dh*dw*(1-dl), w8 = dh*dw*dl; auto dval=grad_col[index]; int data_input_base_ptr=(bpos*channels_input+cpos_in)*height_input*width_input*length_input; int grad_input_base_ptr=(bpos*channels_input+cpos_in)*height_input*width_input*length_input; bool h_low_flag=h_low >= 0 && h_low <= height_input -1; bool w_low_flag=w_low >= 0 && w_low <= width_input - 1; bool l_low_flag=l_low >= 0 && l_low <= length_input -1; bool h_high_flag=h_high >= 0 && h_high <= height_input -1 && abs(dh)>EPS; bool w_high_flag=w_high >= 0 && w_high <= width_input - 1 && abs(dw)>EPS; bool l_high_flag=l_high >= 0 && l_high <= length_input -1 && abs(dl)>EPS; scalar_t v1 = static_cast<scalar_t>(0); if (h_low_flag && w_low_flag && l_low_flag ){ v1 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_low]; atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_low,w1*dval); } scalar_t v2 = static_cast<scalar_t>(0); if (h_low_flag && w_low_flag && l_high_flag ){ v2 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_high]; atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_high,w2*dval); } scalar_t v3 = static_cast<scalar_t>(0); if (h_low_flag && w_high_flag && l_low_flag ){ v3 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_low]; atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_low,w3*dval); } scalar_t v4 = static_cast<scalar_t>(0); if (h_low_flag && w_high_flag && l_high_flag ){ v4 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_high]; atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_high,w4*dval); } scalar_t v5 = static_cast<scalar_t>(0); if (h_high_flag && w_low_flag && l_low_flag ){ v5 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_low]; atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_low,w5*dval); } scalar_t v6 = static_cast<scalar_t>(0); if (h_high_flag && w_low_flag && l_high_flag ){ v6 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_high]; atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_high,w6*dval); } scalar_t v7 = static_cast<scalar_t>(0); if (h_high_flag && w_high_flag && l_low_flag ){ v7 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_low]; atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_low,w7*dval); } scalar_t v8 = static_cast<scalar_t>(0); if (h_high_flag && w_high_flag && l_high_flag ){ v8 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_high]; atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_high,w8*dval); } atomicAdd(grad_offset + offset_h_ptr, 
(-1*(1-dw)*(1-dl)*v1-1*(1-dw)*dl*v2-1*dw*(1-dl)*v3-1*dw*dl*v4+(1-dw)*(1-dl)*v5+(1-dw)*dl*v6+dw*(1-dl)*v7+dw*dl*v8)*dval); atomicAdd(grad_offset + offset_w_ptr, (-1*(1-dh)*(1-dl)*v1-1*(1-dh)*dl*v2+(1-dh)*(1-dl)*v3+(1-dh)*dl*v4-1*dh*(1-dl)*v5-1*dh*dl*v6+dh*(1-dl)*v7+dh*dl*v8)*dval); atomicAdd(grad_offset + offset_l_ptr, (-1*(1-dh)*(1-dw)*v1+(1-dh)*(1-dw)*v2-1*(1-dh)*dw*v3+(1-dh)*dw*v4-1*dh*(1-dw)*v5+dh*(1-dw)*v6-1*dh*dw*v7+dh*dw*v8)*dval); scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4+w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8); columns[index]=val; } } // gradient offset mask input void deform_conv3d_gradient_cuda( at::Tensor grad_col,at::Tensor data_input, at::Tensor data_offset,at::Tensor columns, const int channels, const int height_input, const int width_input, const int length_input, const int height_col, const int width_col, const int length_col, const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int step,const int deformable_group, at::Tensor grad_input, at::Tensor grad_offset) { const int num_kernels =channels*height_col * width_col * length_col * kernel_h * kernel_w * kernel_l *step; const int channel_per_deformable_group =3 * kernel_h * kernel_w * kernel_l; AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_col.scalar_type(), "deform_conv3d_gradient_gpu_kernel", ([&] { const scalar_t *grad_col_ = grad_col.data<scalar_t>(); const scalar_t *data_input_ = data_input.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *columns_ = columns.data<scalar_t>(); scalar_t *grad_input_ = grad_input.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); hipLaunchKernelGGL(( deform_conv3d_gradient_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, grad_col_, data_input_, data_offset_,columns_, channels, height_input, width_input, length_input, kernel_h, kernel_w, kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, channel_per_deformable_group, step, channel_per_deformable_group * deformable_group, deformable_group, height_col, width_col, length_col, grad_input_,grad_offset_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deform_conv3d_gradient_cuda: %s\n", hipGetErrorString(err)); } } int deform_conv3d_backward_cuda( at::Tensor input, at::Tensor weight, at::Tensor bias,at::Tensor offset, at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, at::Tensor grad_offset, at::Tensor grad_output, const int kernel_h,const int kernel_w,const int kernel_l, const int stride_h,const int stride_w,const int stride_l, const int pad_h,const int pad_w,const int pad_l, const int dilation_h,const int dilation_w,const int dilation_l, const int group, int deformable_group,const int in_step,const bool with_bias) { TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); TORCH_CHECK(bias.is_contiguous(), "bias tensor has to be contiguous"); TORCH_CHECK(offset.is_contiguous(), "offset tensor has to be contiguous"); TORCH_CHECK(grad_input.is_contiguous(), "grad_input tensor has to be contiguous"); TORCH_CHECK(grad_weight.is_contiguous(), "grad_weight tensor has to be contiguous"); TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias tensor has to be contiguous"); 
TORCH_CHECK(grad_offset.is_contiguous(), "grad_offset tensor has to be contiguous"); TORCH_CHECK(grad_output.is_contiguous(), "grad_output tensor has to be contiguous"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int length = input.size(4); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); const int kernel_l_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_l_ != kernel_l) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h_, kernel_w_, kernel_l_, kernel_h, kernel_w, kernel_l); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; const int length_out = (length + 2 * pad_l - (dilation_l * (kernel_l - 1) + 1)) / stride_l + 1; const int step=GET_STEP(batch,in_step); at::Tensor ones = at::ones({step,height_out, width_out, length_out}, input.options()); at::Tensor columns = at::zeros({channels * kernel_h * kernel_w * kernel_l, step*height_out * width_out * length_out},input.options()); at::Tensor grad_columns=at::zeros({channels * kernel_h * kernel_w * kernel_l, step*height_out * width_out * length_out},input.options()); grad_output=grad_output.view({batch/step,step,channels_out,height_out,width_out,length_out}); grad_output.transpose_(1, 2); grad_output =grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, grad_output.size(2), grad_output.size(3),grad_output.size(4),grad_output.size(5)}); input=input.view({batch/step,step,channels,height,width,length}); grad_input = grad_input.view({batch/step,step, channels, height, width,length}); offset=offset.view({batch/step,step, deformable_group * 3 * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out}); grad_offset=grad_offset.view({batch/step,step, deformable_group * 3 * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out}); for (int b = 0; b < batch/step; b++) { // divide int group grad_columns = grad_columns.view({group, grad_columns.size(0) / group, grad_columns.size(1)}); weight = weight.view({group, weight.size(0) / group, weight.size(1),weight.size(2), weight.size(3), weight.size(4)}); for (int g = 0; g < group; g++) { grad_columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), grad_output[b][g].flatten(1), 0.0f, 1.0f); } grad_columns = grad_columns.view({grad_columns.size(0) * grad_columns.size(1), grad_columns.size(2)}); weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4), weight.size(5)}); //print_tensor_size("grad_columns size",grad_columns); //print_tensor_size("grad_mask[b] size",grad_mask[b]); columns.fill_(0); deform_conv3d_gradient_cuda( grad_columns, input[b], offset[b], columns, channels, height, width, length, height_out, width_out, length_out, kernel_h, kernel_w, kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, step,deformable_group, grad_input[b],grad_offset[b]); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); grad_weight = grad_weight.view({group, grad_weight.size(0) / group, grad_weight.size(1), 
grad_weight.size(2), grad_weight.size(3),grad_weight.size(4)}); if (with_bias) grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); for (int g = 0; g < group; g++) { grad_weight[g] =grad_weight[g].flatten(1) .addmm_(grad_output[b][g].flatten(1),columns[g].transpose(0, 1),1.0f,1.0f) .view_as(grad_weight[g]); if (with_bias) { at::Tensor temp=grad_bias[g].view({-1, 1}); temp.addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1}),1.0f,1.0f); grad_bias[g] =temp.view(-1); } } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4), grad_weight.size(5)}); if (with_bias) grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); } grad_output = grad_output.view({grad_output.size(0) ,grad_output.size(1)*grad_output.size(2), grad_output.size(3),grad_output.size(4), grad_output.size(5),grad_output.size(6)}); //grad_output=grad_output.view({batch/step,channels_kernel,step,height_out,width_out,length_out}); grad_output.transpose_(1, 2); grad_output =grad_output.view({batch,channels_out,height_out,width_out,length_out}); input=input.view({batch,channels,height,width,length}); grad_input = grad_input.view({batch, channels, height, width,length}); offset=offset.view({batch,deformable_group * 3 * kernel_h * kernel_w *kernel_l, height_out,width_out,length_out}); grad_offset=grad_offset.view({batch,deformable_group * 3 * kernel_h * kernel_w *kernel_l, height_out,width_out,length_out}); return 0; }
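/*
 * The fractional sampling in deform_conv3d_im2col_trilinear above is plain
 * trilinear interpolation: the value at (h, w, l) is a weighted sum of the 8
 * surrounding voxels, with weights built from the fractional parts
 * dh = h - floor(h), dw = w - floor(w), dl = l - floor(l). The helper below is
 * a minimal sketch of just the weight computation (no bounds handling, unlike
 * the kernel); it is illustrative and not part of the original file.
 */
#include <hip/hip_runtime.h>

__host__ __device__ inline float trilinear_weight_sketch(int high_h, int high_w, int high_l,
                                                         float dh, float dw, float dl)
{
  // high_* selects the low (0) or high (1) corner along each axis.
  float wh = high_h ? dh : 1.0f - dh;
  float ww = high_w ? dw : 1.0f - dw;
  float wl = high_l ? dl : 1.0f - dl;
  return wh * ww * wl;  // e.g. w1 = (1-dh)*(1-dw)*(1-dl), w8 = dh*dw*dl; all 8 weights sum to 1
}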
0dbf8d5efdf476a1e79c766f17a3c01b3abfc3f0.cu
#include "config.h" template <typename scalar_t> __device__ scalar_t deform_conv3d_im2col_trilinear( const scalar_t *bottom_data, const int data_width,const int data_length, const int height, const int width, const int length,scalar_t h, scalar_t w,scalar_t l) { int h_low = floor(h); int w_low = floor(w); int l_low = floor(l); int h_high = h_low + 1; int w_high = w_low + 1; int l_high = l_low + 1; scalar_t lh = h - h_low;//dh scalar_t lw = w - w_low;//dw scalar_t ll = l - l_low;//dl scalar_t hh = 1 - lh, hw = 1 - lw, hl = 1 - ll; //1-dh 1-dw 1-dl scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0 && l_low >= 0) v1 = bottom_data[h_low * data_width*data_length + w_low*data_length+ l_low]; scalar_t v2 = 0; if (h_low >= 0 && w_low >=0 && l_high<= length -1) v2 = bottom_data[h_low * data_width*data_length + w_low*data_length+ l_high]; scalar_t v3 = 0; if (h_low >= 0 && w_high <= width - 1 && l_low >= 0) v3 = bottom_data[h_low * data_width*data_length + w_high*data_length+ l_low]; scalar_t v4 = 0; if (h_low >= 0 && w_high <= width - 1 && l_high<= length -1) v4 = bottom_data[h_low * data_width*data_length + w_high*data_length+ l_high]; scalar_t v5 = 0; if (h_high <= height -1 && w_low >= 0 && l_low >= 0) v5 = bottom_data[h_high * data_width*data_length + w_low*data_length+ l_low]; scalar_t v6 = 0; if (h_high <= height -1 && w_low >= 0 && l_high<= length -1) v6 = bottom_data[h_high * data_width*data_length + w_low*data_length+ l_high]; scalar_t v7 = 0; if (h_high <= height -1 && w_high <= width - 1 && l_low >= 0) v7 = bottom_data[h_high * data_width*data_length + w_high*data_length+ l_low]; scalar_t v8 = 0; if (h_high <= height -1 && w_high <= width - 1 && l_high<= length -1) v8 = bottom_data[h_high * data_width*data_length + w_high*data_length+ l_high]; scalar_t w1 = hh * hw *hl, w2 = hh *hw *ll, w3 = hh * lw*hl, w4 = hh * lw* ll; scalar_t w5 = lh * hw *hl, w6 = lh *hw *ll, w7 = lh * lw*hl, w8 = lh * lw* ll; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4+w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8); return val; } template <typename scalar_t> __global__ void deform_conv3d_im2col_gpu_kernel( const int n, const scalar_t *data_im, const scalar_t *data_offset, const int height, const int width, const int length, const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, const int length_col, scalar_t *data_col) { // input = {step, channels,height, width,length} // offset={step,deformable_group*3* kernel_h * kernel_w * kernel_l,height_out, width_out,length_out} CUDA_KERNEL_LOOP(index, n)//num_kernels = channels * batch_size * height_col * width_col *length_col; { // columns = {channels * kernel_h * kernel_w * kernel_l,step * height_out * width_out * length_out} const int l_col= index % length_col; const int w_col = (index / length_col) % width_col; const int h_col = (index / length_col / width_col ) % height_col; const int b_col = (index / length_col / width_col / height_col) % batch_size; const int c_im = (index / length_col / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w * kernel_l; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int 
w_in = w_col * stride_w - pad_w; const int l_in = l_col * stride_l - pad_l; scalar_t *data_col_ptr = data_col+ (c_col* batch_size + b_col)* height_col* width_col * length_col + h_col * width_col * length_col + w_col * length_col + l_col; //27 10000 const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width * length; const scalar_t *data_offset_ptr = data_offset +(b_col * deformable_group + deformable_group_index) * 3 * kernel_h * kernel_w * kernel_l * height_col * width_col * length_col;// 1 18 100 100 for (int i = 0; i < kernel_h; ++i) for (int j = 0; j < kernel_w; ++j) for (int k = 0; k < kernel_l; ++k) { int f=i*kernel_w*kernel_l + j*kernel_l+k; const int data_offset_h_ptr = (3*f) * height_col * width_col * length_col+ h_col* width_col * length_col+ w_col* length_col + l_col; const int data_offset_w_ptr = (3*f+1) * height_col * width_col* length_col + h_col* width_col* length_col + w_col* length_col + l_col; const int data_offset_l_ptr = (3*f+2) * height_col * width_col* length_col + h_col* width_col* length_col + w_col* length_col + l_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t offset_l = data_offset_ptr[data_offset_l_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; const scalar_t l_im = l_in + k * dilation_l + offset_l; if (h_im > -1 && w_im > -1 && l_im > -1 && h_im < height && w_im < width && l_im < length) { val = deform_conv3d_im2col_trilinear(data_im_ptr, width, length, height, width, length, h_im, w_im,l_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col* length_col; } } } void deform_conv3d_im2col_cuda( at::Tensor data_im, at::Tensor data_offset, const int batch_size, const int channels, const int height_im, const int width_im, const int length_im, const int height_col, const int width_col,const int length_col, const int kernel_h,const int kenerl_w,const int kenerl_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col *length_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "deform_conv3d_im2col_gpu_kernel", ([&] { const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); deform_conv3d_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_im_, data_offset_, height_im, width_im, length_im, kernel_h, kenerl_w,kenerl_l, pad_h, pad_w, pad_l, stride_h, stride_w,stride_l, dilation_h, dilation_w, dilation_l, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, length_col,data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deform_conv3d_im2col: %s\n", cudaGetErrorString(err)); } } int deform_conv3d_forward_cuda( at::Tensor input, at::Tensor weight,at::Tensor bias, at::Tensor offset, at::Tensor output, const int kernel_h,const int kernel_w,const int kernel_l, const int stride_h,const int stride_w,const int stride_l, const 
int pad_h,const int pad_w,const int pad_l, const int dilation_h,const int dilation_w,const int dilation_l, const int group,const int deformable_group, const int in_step,const bool with_bias){ //#define DEBUG TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); TORCH_CHECK(bias.is_contiguous(), "bias tensor has to be contiguous"); TORCH_CHECK(offset.is_contiguous(), "offset tensor has to be contiguous"); TORCH_CHECK(output.is_contiguous(), "output tensor has to be contiguous"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int length = input.size(4); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); const int kernel_l_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_l_ != kernel_l) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h_, kernel_w,kernel_l, kernel_h_, kernel_w_,kernel_l_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; const int length_out = (length + 2 * pad_l - (dilation_l * (kernel_l - 1) + 1)) / stride_l + 1; const int step=GET_STEP(batch,in_step); #ifdef DEBUG print_tensor_size("deform_conv3d_forward_cuda---output size",output); print_tensor_size("deform_conv3d_forward_cuda---input size",input); print_tensor_size("deform_conv3d_forward_cuda---offset size",offset); #endif output = output.view({batch, channels_out,height_out, width_out,length_out}); output.zero_(); at::Tensor columns = at::zeros({channels * kernel_h * kernel_w * kernel_l, step * height_out * width_out * length_out},input.options()); input = input.view({batch / step, step, channels,height, width,length}); offset =offset.view({batch / step, step,deformable_group*3* kernel_h * kernel_w * kernel_l, height_out, width_out,length_out}); //divide into group output = output.view({batch/step, group, output.size(1) / group,step, output.size(2), output.size(3),output.size(4)}); weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3),weight.size(4)}); for (int b = 0; b < batch / step; b++) { columns.fill_(0); deform_conv3d_im2col_cuda( input[b], offset[b], step,channels, height,width,length, height_out, width_out, length_out, kernel_h, kernel_w,kernel_l, pad_h, pad_w,pad_l, stride_h, stride_w,stride_l, dilation_h, dilation_w, dilation_l, deformable_group, columns); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); for (int g = 0; g < group; g++) { output[b][g] = output[b][g].flatten(1) .addmm_(weight[g].flatten(1), columns[g]).view_as(output[b][g]); } columns = columns.view({columns.size(0)*columns.size(1), columns.size(2)}); } weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4),weight.size(5)}); output = output.view({output.size(0), output.size(1) * output.size(2), output.size(3), output.size(4),output.size(5),output.size(6)}); output = output.view({batch / step, channels_out, step, height_out, width_out,length_out}); output.transpose_(1, 2); output = 
output.contiguous().view({batch , channels_out, height_out, width_out,length_out}); if (with_bias) output += bias.view({1, bias.size(0), 1, 1, 1}); input=input.view({batch,channels,height,width,length}); offset=offset.view({batch,deformable_group * 3 *kernel_h*kernel_w*kernel_l,height_out,width_out,length_out}); return 0; } template <typename scalar_t> __global__ void deform_conv3d_gradient_gpu_kernel( const int n,const scalar_t *grad_col, const scalar_t *data_input, const scalar_t *data_offset,scalar_t * columns, const int channels_input, const int height_input, const int width_input, const int length_input, const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int channel_per_deformable_group,const int step, const int offset_channels, const int deformable_group, const int height_col, const int width_col, const int length_col, scalar_t * grad_input,scalar_t *grad_offset){ // columns = {channels * kernel_h * kernel_w * kernel_l,step*height_out * width_out * length_out} // grad_columns = {channels * kernel_h * kernel_w * kernel_l,step*height_out * width_out * length_out} // grad_output = {step,channels_out,height_out,width_out , length_out} // input = {step, channels, height, width, length} // offset = {step,deformable_group * 3 * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out} // grad_offset = {step,deformable_group * 3 * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out} CUDA_KERNEL_LOOP(index, n)//channels*kernel_h * kernel_w * kernel_l* step * height_col * width_col * length_col; { int single_col_len=length_col * width_col * height_col; int f = (index /step/ single_col_len )%(kernel_h * kernel_w * kernel_l); int i=(f / kernel_l / kernel_w) % kernel_h; int j=(f / kernel_l) %kernel_w; int k=f % kernel_l; int bpos=(index%(step*single_col_len))/(single_col_len); int lpos_col = (index % (single_col_len)) % length_col; int wpos_col = ((index % (single_col_len)) / length_col) % width_col; int hpos_col = ((index % (single_col_len)) / length_col / width_col) % height_col; int cpos_col = (index / step / single_col_len); int cpos_in=cpos_col/kernel_h/kernel_w/kernel_l; int offset_group_index=cpos_in/(channels_input/deformable_group); // printf("index %d cpos_col %d hpos_col %d wpos_col %d \n",index,cpos_col,hpos_col,wpos_col); int offset_base_ptr=bpos*(deformable_group * 3 * kernel_h * kernel_w * kernel_l*single_col_len) +offset_group_index*channel_per_deformable_group*single_col_len +hpos_col*width_col*length_col+wpos_col*length_col+lpos_col; int offset_h_ptr=offset_base_ptr+3*f*single_col_len; int offset_w_ptr=offset_base_ptr+(3*f+1)*single_col_len; int offset_l_ptr=offset_base_ptr+(3*f+2)*single_col_len; scalar_t offset_h=data_offset[offset_h_ptr]; scalar_t offset_w=data_offset[offset_w_ptr]; scalar_t offset_l=data_offset[offset_l_ptr]; int hpos_in = hpos_col * stride_h -pad_h + (i) * dilation_h; int wpos_in = wpos_col * stride_w - pad_w + (j) * dilation_w; int lpos_in = lpos_col * stride_l - pad_l + (k) * dilation_l; auto real_offset_h=hpos_in+offset_h; auto real_offset_w=wpos_in+offset_w; auto real_offset_l=lpos_in+offset_l; int h_low = floor(real_offset_h); int w_low = floor(real_offset_w); int l_low = floor(real_offset_l); int h_high = h_low + 1; int w_high = w_low + 1; int l_high = l_low + 1; scalar_t dh = real_offset_h - h_low; scalar_t dw = real_offset_w - w_low; scalar_t 
dl = real_offset_l - l_low; scalar_t w1 = (1-dh) *(1- dw)*(1-dl), w2 =(1- dh) *(1- dw)*dl, w3 = (1-dh)*dw*(1-dl), w4 = (1-dh) * dw*dl; scalar_t w5 = dh *(1- dw)*(1-dl), w6 =dh*(1- dw)*dl, w7 = dh*dw*(1-dl), w8 = dh*dw*dl; auto dval=grad_col[index]; int data_input_base_ptr=(bpos*channels_input+cpos_in)*height_input*width_input*length_input; int grad_input_base_ptr=(bpos*channels_input+cpos_in)*height_input*width_input*length_input; bool h_low_flag=h_low >= 0 && h_low <= height_input -1; bool w_low_flag=w_low >= 0 && w_low <= width_input - 1; bool l_low_flag=l_low >= 0 && l_low <= length_input -1; bool h_high_flag=h_high >= 0 && h_high <= height_input -1 && abs(dh)>EPS; bool w_high_flag=w_high >= 0 && w_high <= width_input - 1 && abs(dw)>EPS; bool l_high_flag=l_high >= 0 && l_high <= length_input -1 && abs(dl)>EPS; scalar_t v1 = static_cast<scalar_t>(0); if (h_low_flag && w_low_flag && l_low_flag ){ v1 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_low]; atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_low,w1*dval); } scalar_t v2 = static_cast<scalar_t>(0); if (h_low_flag && w_low_flag && l_high_flag ){ v2 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_high]; atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_high,w2*dval); } scalar_t v3 = static_cast<scalar_t>(0); if (h_low_flag && w_high_flag && l_low_flag ){ v3 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_low]; atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_low,w3*dval); } scalar_t v4 = static_cast<scalar_t>(0); if (h_low_flag && w_high_flag && l_high_flag ){ v4 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_high]; atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_high,w4*dval); } scalar_t v5 = static_cast<scalar_t>(0); if (h_high_flag && w_low_flag && l_low_flag ){ v5 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_low]; atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_low,w5*dval); } scalar_t v6 = static_cast<scalar_t>(0); if (h_high_flag && w_low_flag && l_high_flag ){ v6 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_high]; atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_high,w6*dval); } scalar_t v7 = static_cast<scalar_t>(0); if (h_high_flag && w_high_flag && l_low_flag ){ v7 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_low]; atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_low,w7*dval); } scalar_t v8 = static_cast<scalar_t>(0); if (h_high_flag && w_high_flag && l_high_flag ){ v8 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_high]; atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_high,w8*dval); } atomicAdd(grad_offset + offset_h_ptr, (-1*(1-dw)*(1-dl)*v1-1*(1-dw)*dl*v2-1*dw*(1-dl)*v3-1*dw*dl*v4+(1-dw)*(1-dl)*v5+(1-dw)*dl*v6+dw*(1-dl)*v7+dw*dl*v8)*dval); atomicAdd(grad_offset + offset_w_ptr, 
(-1*(1-dh)*(1-dl)*v1-1*(1-dh)*dl*v2+(1-dh)*(1-dl)*v3+(1-dh)*dl*v4-1*dh*(1-dl)*v5-1*dh*dl*v6+dh*(1-dl)*v7+dh*dl*v8)*dval); atomicAdd(grad_offset + offset_l_ptr, (-1*(1-dh)*(1-dw)*v1+(1-dh)*(1-dw)*v2-1*(1-dh)*dw*v3+(1-dh)*dw*v4-1*dh*(1-dw)*v5+dh*(1-dw)*v6-1*dh*dw*v7+dh*dw*v8)*dval); scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4+w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8); columns[index]=val; } } // gradient offset mask input void deform_conv3d_gradient_cuda( at::Tensor grad_col,at::Tensor data_input, at::Tensor data_offset,at::Tensor columns, const int channels, const int height_input, const int width_input, const int length_input, const int height_col, const int width_col, const int length_col, const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int step,const int deformable_group, at::Tensor grad_input, at::Tensor grad_offset) { const int num_kernels =channels*height_col * width_col * length_col * kernel_h * kernel_w * kernel_l *step; const int channel_per_deformable_group =3 * kernel_h * kernel_w * kernel_l; AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_col.scalar_type(), "deform_conv3d_gradient_gpu_kernel", ([&] { const scalar_t *grad_col_ = grad_col.data<scalar_t>(); const scalar_t *data_input_ = data_input.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *columns_ = columns.data<scalar_t>(); scalar_t *grad_input_ = grad_input.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); deform_conv3d_gradient_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, grad_col_, data_input_, data_offset_,columns_, channels, height_input, width_input, length_input, kernel_h, kernel_w, kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, channel_per_deformable_group, step, channel_per_deformable_group * deformable_group, deformable_group, height_col, width_col, length_col, grad_input_,grad_offset_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deform_conv3d_gradient_cuda: %s\n", cudaGetErrorString(err)); } } int deform_conv3d_backward_cuda( at::Tensor input, at::Tensor weight, at::Tensor bias,at::Tensor offset, at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, at::Tensor grad_offset, at::Tensor grad_output, const int kernel_h,const int kernel_w,const int kernel_l, const int stride_h,const int stride_w,const int stride_l, const int pad_h,const int pad_w,const int pad_l, const int dilation_h,const int dilation_w,const int dilation_l, const int group, int deformable_group,const int in_step,const bool with_bias) { TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); TORCH_CHECK(bias.is_contiguous(), "bias tensor has to be contiguous"); TORCH_CHECK(offset.is_contiguous(), "offset tensor has to be contiguous"); TORCH_CHECK(grad_input.is_contiguous(), "grad_input tensor has to be contiguous"); TORCH_CHECK(grad_weight.is_contiguous(), "grad_weight tensor has to be contiguous"); TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias tensor has to be contiguous"); TORCH_CHECK(grad_offset.is_contiguous(), "grad_offset tensor has to be contiguous"); TORCH_CHECK(grad_output.is_contiguous(), "grad_output tensor has to be contiguous"); const int batch = 
input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int length = input.size(4); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); const int kernel_l_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_l_ != kernel_l) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h_, kernel_w_, kernel_l_, kernel_h, kernel_w, kernel_l); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; const int length_out = (length + 2 * pad_l - (dilation_l * (kernel_l - 1) + 1)) / stride_l + 1; const int step=GET_STEP(batch,in_step); at::Tensor ones = at::ones({step,height_out, width_out, length_out}, input.options()); at::Tensor columns = at::zeros({channels * kernel_h * kernel_w * kernel_l, step*height_out * width_out * length_out},input.options()); at::Tensor grad_columns=at::zeros({channels * kernel_h * kernel_w * kernel_l, step*height_out * width_out * length_out},input.options()); grad_output=grad_output.view({batch/step,step,channels_out,height_out,width_out,length_out}); grad_output.transpose_(1, 2); grad_output =grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, grad_output.size(2), grad_output.size(3),grad_output.size(4),grad_output.size(5)}); input=input.view({batch/step,step,channels,height,width,length}); grad_input = grad_input.view({batch/step,step, channels, height, width,length}); offset=offset.view({batch/step,step, deformable_group * 3 * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out}); grad_offset=grad_offset.view({batch/step,step, deformable_group * 3 * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out}); for (int b = 0; b < batch/step; b++) { // divide int group grad_columns = grad_columns.view({group, grad_columns.size(0) / group, grad_columns.size(1)}); weight = weight.view({group, weight.size(0) / group, weight.size(1),weight.size(2), weight.size(3), weight.size(4)}); for (int g = 0; g < group; g++) { grad_columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), grad_output[b][g].flatten(1), 0.0f, 1.0f); } grad_columns = grad_columns.view({grad_columns.size(0) * grad_columns.size(1), grad_columns.size(2)}); weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4), weight.size(5)}); //print_tensor_size("grad_columns size",grad_columns); //print_tensor_size("grad_mask[b] size",grad_mask[b]); columns.fill_(0); deform_conv3d_gradient_cuda( grad_columns, input[b], offset[b], columns, channels, height, width, length, height_out, width_out, length_out, kernel_h, kernel_w, kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, step,deformable_group, grad_input[b],grad_offset[b]); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); grad_weight = grad_weight.view({group, grad_weight.size(0) / group, grad_weight.size(1), grad_weight.size(2), grad_weight.size(3),grad_weight.size(4)}); if (with_bias) grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); for (int g = 0; g < group; g++) { grad_weight[g] 
=grad_weight[g].flatten(1) .addmm_(grad_output[b][g].flatten(1),columns[g].transpose(0, 1),1.0f,1.0f) .view_as(grad_weight[g]); if (with_bias) { at::Tensor temp=grad_bias[g].view({-1, 1}); temp.addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1}),1.0f,1.0f); grad_bias[g] =temp.view(-1); } } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4), grad_weight.size(5)}); if (with_bias) grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); } grad_output = grad_output.view({grad_output.size(0) ,grad_output.size(1)*grad_output.size(2), grad_output.size(3),grad_output.size(4), grad_output.size(5),grad_output.size(6)}); //grad_output=grad_output.view({batch/step,channels_kernel,step,height_out,width_out,length_out}); grad_output.transpose_(1, 2); grad_output =grad_output.view({batch,channels_out,height_out,width_out,length_out}); input=input.view({batch,channels,height,width,length}); grad_input = grad_input.view({batch, channels, height, width,length}); offset=offset.view({batch,deformable_group * 3 * kernel_h * kernel_w *kernel_l, height_out,width_out,length_out}); grad_offset=grad_offset.view({batch,deformable_group * 3 * kernel_h * kernel_w *kernel_l, height_out,width_out,length_out}); return 0; }
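The offset-gradient kernel above (deform_conv3d_gradient_gpu_kernel) differentiates a trilinear interpolation of the eight corner samples v1..v8 with respect to the fractional coordinates dh, dw and dl and scatters the results with atomicAdd. As a reading aid, a minimal forward version of that interpolation is sketched below; the weight layout w1..w8 is inferred from the gradient expressions above, and the function itself is illustrative only, not part of the original source file.

// Illustrative sketch (not from the original file): the forward trilinear
// weighting whose partial derivatives with respect to dh, dw and dl appear in
// the offset-gradient kernel above.
__device__ __forceinline__ float trilinear_interp(
    float v1, float v2, float v3, float v4,
    float v5, float v6, float v7, float v8,
    float dh, float dw, float dl)
{
  float w1 = (1 - dh) * (1 - dw) * (1 - dl);
  float w2 = (1 - dh) * (1 - dw) * dl;
  float w3 = (1 - dh) * dw * (1 - dl);
  float w4 = (1 - dh) * dw * dl;
  float w5 = dh * (1 - dw) * (1 - dl);
  float w6 = dh * (1 - dw) * dl;
  float w7 = dh * dw * (1 - dl);
  float w8 = dh * dw * dl;
  return w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4 +
         w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8;
}

For example, differentiating this expression with respect to dl reproduces the coefficient pattern accumulated into grad_offset at offset_l_ptr above: -(1-dh)(1-dw) for v1, +(1-dh)(1-dw) for v2, -(1-dh)dw for v3, and so on.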
1ce7db454bba7614eb815f779790cafd51df6c31.hip
// !!! This is a file automatically generated by hipify!!! /* 159735 Parallel Programming Assignment 5 To compile: nvcc -o hyperSpace hyperSpace.cu To run: ./hyperSpace [nTrails] nTrails is 20 by default For example: "./hyperSpace 50" will generate a hyper sphere for 50 times, and count the number of integer coordinate points inside every sphere, by both cuda algorithm and sequential algorithm. */ #include <iostream> #include <cstdlib> #include <cmath> #include <string> #include <hip/hip_runtime.h> #include <vector> #include <ctime> #include <stdio.h> using namespace std; // Output latest cuda error #define cudaCheckError() { \ hipError_t e=hipGetLastError(); \ if(e!=hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \ } \ } /* * In my test, if the dimensions number is larger than 7, the time consuming * of sequential algorithm will be very large, so set maximum of dimension 8. */ const long MAXDIM = 8; const double RMIN = 2.0; const double RMAX = 8.0; // Used to calculate the total used time for different functions double diffclock(clock_t clock1,clock_t clock2) { double diffticks = clock1 - clock2; double diffms = (diffticks * 1000) / CLOCKS_PER_SEC; return diffms; // Time difference in milliseconds } /* Evaluate n**k where both are long integers */ long powlong(long n, long k) { long p = 1; for (long i = 0; i < k; ++i) p *= n; return p; } /* Query device about threads limitation */ bool getCudaDeviceInfo(int &maxThreadsPerBlock, int maxDimensionPerGrid[]) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); // In our environment, there is only one GPU. maxThreadsPerBlock = deviceProp.maxThreadsPerBlock; // We'd like to use two-dimensions because it is enough to resolve this assignment maxDimensionPerGrid[0] = deviceProp.maxGridSize[0]; maxDimensionPerGrid[1] = deviceProp.maxGridSize[1]; return true; } /*----------------------------CUDA Version--------------------------------*/ /* * Cuda function runs on GPU, convert a number into a new based number, * check if the related point is in the hyper sphere or not */ __global__ void compute(long ntotal, long base, long ndim, double rsquare, long halfb, unsigned long long int *count) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; int n = col + row * gridDim.y*blockDim.y; //int n = blockIdx.x*blockDim.x + threadIdx.x*threadDim.x + ; if(n >= ntotal) { return; } // Convert decimal number to a number in a new base long index[MAXDIM]; for (long i = 0; i < MAXDIM; ++i) index[i] = 0; long idx = 0; while (n != 0) { long rem = n % base; n = n / base; index[idx] = rem; ++idx; } // Check if the point is in the hypersphere double rtestsq = 0; for (long k = 0; k < ndim; ++k) { double xk = index[k] - halfb; rtestsq += xk * xk; } if (rtestsq < rsquare ) atomicAdd((unsigned long long int *)count, 1); } /* CUDA version of the algorithm. Given: ndim -> number of dimensions of the hypersphere radius -> radius of the hypersphere count the number of integer points that lie wholly within the hypersphere, assuming it is centred on the origin. */ long count_in_cuda(long ndim, double radius) { const long halfb = static_cast<long>(floor(radius)); const long base = 2 * halfb + 1; const double rsquare = radius * radius; // This is the total number of points we will need to test. 
const long ntotal = powlong(base, ndim); long *h_count = new long[1]; unsigned long long int *d_count; hipMalloc(&d_count, sizeof(unsigned long long int)); hipMemset(d_count, 0, sizeof(unsigned long long int)); /* * In the assignment, the threads we need to create may be very large, * use two dimensions for blocks in case the blocks are not enough. */ int threadsPerBlock; int maxDimensionPerGrid[2]; getCudaDeviceInfo(threadsPerBlock, maxDimensionPerGrid); int blocksPerGrid = (ntotal + threadsPerBlock - 1) / threadsPerBlock; dim3 numBlocks(blocksPerGrid, 1); // If one dimensions is not enough, use two dimensions. In this assignment, two dimensions is enough. if(blocksPerGrid > maxDimensionPerGrid[0]) { int dimensionPerGridX = (blocksPerGrid + maxDimensionPerGrid[1] - 1) / maxDimensionPerGrid[1]; int dimensionPerGridY = maxDimensionPerGrid[1]; numBlocks = dim3(dimensionPerGridX, dimensionPerGridY); } hipLaunchKernelGGL(( compute), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, ntotal, base, ndim, rsquare, halfb, d_count); // Check if there is any error reported by CUDA, for debug. cudaCheckError(); hipMemcpy(h_count, d_count, sizeof(long), hipMemcpyDeviceToHost); unsigned long long int count = *h_count; // Release memory delete[] h_count; hipFree(d_count); return count; } /*-----------------------------Sequential Version-------------------------------*/ /* Convert a decimal number into another base system - the individual digits in the new base are stored in the index array. */ void convert(long num, long base, std::vector<long>& index) { const long ndim = index.size(); for (long i = 0; i < ndim; ++i) index[i] = 0; long idx = 0; while (num != 0) { long rem = num % base; num = num / base; index[idx] = rem; idx ++; } } /* Sequential version of the algorithm. Given: ndim -> number of dimensions of the hypersphere radius -> radius of the hypersphere count the number of integer points that lie wholly within the hypersphere, assuming it is centred on the origin. */ long count_in_sequential(long ndim, double radius) { const long halfb = static_cast<long>(floor(radius)); const long base = 2 * halfb + 1; const double rsquare = radius * radius; // This is the total number of points we will need to check. const long ntotal = powlong(base, ndim); long count = 0; std::vector<long> index(ndim, 0); // Loop over the total number of points. For each visit of the loop, // we covert n to its equivalent in a number system of given "base". for (long n = 0; n < ntotal; ++n) { convert(n, base, index); double rtestsq = 0; for (long k = 0; k < ndim; ++k) { double xk = index[k] - halfb; rtestsq += xk * xk; } if (rtestsq < rsquare) ++count; } return count ; } /*-----------------------------Main Fuction-------------------------------*/ int main(int argc, char* argv[]) { int ntrials = 20; if(argc >=2) ntrials = atoi(argv[1]); // Check whether user input is legal if(ntrials <= 0) ntrials = 20; double mscuda = 0.0; double msSequential = 0.0; for (long n = 0; n < ntrials; ++n) { // Get a random value for the hypersphere radius between the two limits const double r = drand48() * (RMAX - RMIN) + RMIN; // Get a random value for the number of dimensions between 1 and // MAXDIM inclusive long nd = lrand48() % (MAXDIM - 1) + 1; cout << "### trial: " << n << ", radius: " << r << ", dimensions: " << nd << " ... 
" << endl; clock_t tStartCuda = clock(); const long numCuda = count_in_cuda(nd, r); clock_t tEndCuda = clock(); mscuda += diffclock(tEndCuda, tStartCuda); clock_t tStartSequential = clock(); const long numSequential = count_in_sequential(nd, r); clock_t tEndSequential = clock(); msSequential += diffclock(tEndSequential, tStartSequential); cout << "CUDA result: " << numCuda << " ==> sequential result:" << numSequential << endl; } cout << "Totally "<< ntrials << " trials," << "Cuda version used: " << mscuda << " ms, cpu version costs " << msSequential << " ms" << endl; }
1ce7db454bba7614eb815f779790cafd51df6c31.cu
/* 159735 Parallel Programming Assignment 5 To compile: nvcc -o hyperSpace hyperSpace.cu To run: ./hyperSpace [nTrails] nTrails is 20 by default For example: "./hyperSpace 50" will generate a hyper sphere for 50 times, and count the number of integer coordinate points inside every sphere, by both cuda algorithm and sequential algorithm. */ #include <iostream> #include <cstdlib> #include <cmath> #include <string> #include <cuda.h> #include <vector> #include <ctime> #include <stdio.h> using namespace std; // Output latest cuda error #define cudaCheckError() { \ cudaError_t e=cudaGetLastError(); \ if(e!=cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \ } \ } /* * In my test, if the dimensions number is larger than 7, the time consuming * of sequential algorithm will be very large, so set maximum of dimension 8. */ const long MAXDIM = 8; const double RMIN = 2.0; const double RMAX = 8.0; // Used to calculate the total used time for different functions double diffclock(clock_t clock1,clock_t clock2) { double diffticks = clock1 - clock2; double diffms = (diffticks * 1000) / CLOCKS_PER_SEC; return diffms; // Time difference in milliseconds } /* Evaluate n**k where both are long integers */ long powlong(long n, long k) { long p = 1; for (long i = 0; i < k; ++i) p *= n; return p; } /* Query device about threads limitation */ bool getCudaDeviceInfo(int &maxThreadsPerBlock, int maxDimensionPerGrid[]) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); // In our environment, there is only one GPU. maxThreadsPerBlock = deviceProp.maxThreadsPerBlock; // We'd like to use two-dimensions because it is enough to resolve this assignment maxDimensionPerGrid[0] = deviceProp.maxGridSize[0]; maxDimensionPerGrid[1] = deviceProp.maxGridSize[1]; return true; } /*----------------------------CUDA Version--------------------------------*/ /* * Cuda function runs on GPU, convert a number into a new based number, * check if the related point is in the hyper sphere or not */ __global__ void compute(long ntotal, long base, long ndim, double rsquare, long halfb, unsigned long long int *count) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; int n = col + row * gridDim.y*blockDim.y; //int n = blockIdx.x*blockDim.x + threadIdx.x*threadDim.x + ; if(n >= ntotal) { return; } // Convert decimal number to a number in a new base long index[MAXDIM]; for (long i = 0; i < MAXDIM; ++i) index[i] = 0; long idx = 0; while (n != 0) { long rem = n % base; n = n / base; index[idx] = rem; ++idx; } // Check if the point is in the hypersphere double rtestsq = 0; for (long k = 0; k < ndim; ++k) { double xk = index[k] - halfb; rtestsq += xk * xk; } if (rtestsq < rsquare ) atomicAdd((unsigned long long int *)count, 1); } /* CUDA version of the algorithm. Given: ndim -> number of dimensions of the hypersphere radius -> radius of the hypersphere count the number of integer points that lie wholly within the hypersphere, assuming it is centred on the origin. */ long count_in_cuda(long ndim, double radius) { const long halfb = static_cast<long>(floor(radius)); const long base = 2 * halfb + 1; const double rsquare = radius * radius; // This is the total number of points we will need to test. 
const long ntotal = powlong(base, ndim); long *h_count = new long[1]; unsigned long long int *d_count; cudaMalloc(&d_count, sizeof(unsigned long long int)); cudaMemset(d_count, 0, sizeof(unsigned long long int)); /* * In the assignment, the threads we need to create may be very large, * use two dimensions for blocks in case the blocks are not enough. */ int threadsPerBlock; int maxDimensionPerGrid[2]; getCudaDeviceInfo(threadsPerBlock, maxDimensionPerGrid); int blocksPerGrid = (ntotal + threadsPerBlock - 1) / threadsPerBlock; dim3 numBlocks(blocksPerGrid, 1); // If one dimensions is not enough, use two dimensions. In this assignment, two dimensions is enough. if(blocksPerGrid > maxDimensionPerGrid[0]) { int dimensionPerGridX = (blocksPerGrid + maxDimensionPerGrid[1] - 1) / maxDimensionPerGrid[1]; int dimensionPerGridY = maxDimensionPerGrid[1]; numBlocks = dim3(dimensionPerGridX, dimensionPerGridY); } compute<<<numBlocks, threadsPerBlock>>>(ntotal, base, ndim, rsquare, halfb, d_count); // Check if there is any error reported by CUDA, for debug. cudaCheckError(); cudaMemcpy(h_count, d_count, sizeof(long), cudaMemcpyDeviceToHost); unsigned long long int count = *h_count; // Release memory delete[] h_count; cudaFree(d_count); return count; } /*-----------------------------Sequential Version-------------------------------*/ /* Convert a decimal number into another base system - the individual digits in the new base are stored in the index array. */ void convert(long num, long base, std::vector<long>& index) { const long ndim = index.size(); for (long i = 0; i < ndim; ++i) index[i] = 0; long idx = 0; while (num != 0) { long rem = num % base; num = num / base; index[idx] = rem; idx ++; } } /* Sequential version of the algorithm. Given: ndim -> number of dimensions of the hypersphere radius -> radius of the hypersphere count the number of integer points that lie wholly within the hypersphere, assuming it is centred on the origin. */ long count_in_sequential(long ndim, double radius) { const long halfb = static_cast<long>(floor(radius)); const long base = 2 * halfb + 1; const double rsquare = radius * radius; // This is the total number of points we will need to check. const long ntotal = powlong(base, ndim); long count = 0; std::vector<long> index(ndim, 0); // Loop over the total number of points. For each visit of the loop, // we covert n to its equivalent in a number system of given "base". for (long n = 0; n < ntotal; ++n) { convert(n, base, index); double rtestsq = 0; for (long k = 0; k < ndim; ++k) { double xk = index[k] - halfb; rtestsq += xk * xk; } if (rtestsq < rsquare) ++count; } return count ; } /*-----------------------------Main Fuction-------------------------------*/ int main(int argc, char* argv[]) { int ntrials = 20; if(argc >=2) ntrials = atoi(argv[1]); // Check whether user input is legal if(ntrials <= 0) ntrials = 20; double mscuda = 0.0; double msSequential = 0.0; for (long n = 0; n < ntrials; ++n) { // Get a random value for the hypersphere radius between the two limits const double r = drand48() * (RMAX - RMIN) + RMIN; // Get a random value for the number of dimensions between 1 and // MAXDIM inclusive long nd = lrand48() % (MAXDIM - 1) + 1; cout << "### trial: " << n << ", radius: " << r << ", dimensions: " << nd << " ... 
" << endl; clock_t tStartCuda = clock(); const long numCuda = count_in_cuda(nd, r); clock_t tEndCuda = clock(); mscuda += diffclock(tEndCuda, tStartCuda); clock_t tStartSequential = clock(); const long numSequential = count_in_sequential(nd, r); clock_t tEndSequential = clock(); msSequential += diffclock(tEndSequential, tStartSequential); cout << "CUDA result: " << numCuda << " ==> sequential result:" << numSequential << endl; } cout << "Totally "<< ntrials << " trials," << "Cuda version used: " << mscuda << " ms, cpu version costs " << msSequential << " ms" << endl; }
74713f6ed1a3f2e1665acd9744af5baf899150f1.hip
// !!! This is a file automatically generated by hipify!!! #include "SteerForEvasion.h" #include <hip/hip_runtime.h> #include "OpenSteer/VehicleData.h" #include "OpenSteer/PursuitDataProvider.h" #include "CUDAKernelOptions.cu" #include <iostream> using namespace OpenSteer; using namespace std; __global__ void steerForEvasionKernel(VehicleData *vehicleData, float3 *menacePosition, float3 *menaceVelocity, float3 *steeringVectors, float maxPredictionTime, float weight, kernel_options options); OpenSteer::SteerForEvasion::SteerForEvasion(PursuitDataProvider* pursuitDataProvider, float maxPredictionTime, float weight, kernel_options options) { threadsPerBlock = 128; this->weight = weight; this->options = options; this->pursuitDataProvider = pursuitDataProvider; this->maxPredictionTime = maxPredictionTime; } OpenSteer::SteerForEvasion::~SteerForEvasion() {} void OpenSteer::SteerForEvasion::init() { // nothing to do } void OpenSteer::SteerForEvasion::run() { hipLaunchKernelGGL(( steerForEvasionKernel), dim3(gridDim()), dim3(blockDim()), 0, 0, getVehicleData(), pursuitDataProvider->getPursuitPosition(), pursuitDataProvider->getPursuitVelocity(), getSteeringVectors(), maxPredictionTime, weight, options); } void OpenSteer::SteerForEvasion::close() { // nothing to do }
74713f6ed1a3f2e1665acd9744af5baf899150f1.cu
#include "SteerForEvasion.h" #include <cuda_runtime.h> #include "OpenSteer/VehicleData.h" #include "OpenSteer/PursuitDataProvider.h" #include "CUDAKernelOptions.cu" #include <iostream> using namespace OpenSteer; using namespace std; __global__ void steerForEvasionKernel(VehicleData *vehicleData, float3 *menacePosition, float3 *menaceVelocity, float3 *steeringVectors, float maxPredictionTime, float weight, kernel_options options); OpenSteer::SteerForEvasion::SteerForEvasion(PursuitDataProvider* pursuitDataProvider, float maxPredictionTime, float weight, kernel_options options) { threadsPerBlock = 128; this->weight = weight; this->options = options; this->pursuitDataProvider = pursuitDataProvider; this->maxPredictionTime = maxPredictionTime; } OpenSteer::SteerForEvasion::~SteerForEvasion() {} void OpenSteer::SteerForEvasion::init() { // nothing to do } void OpenSteer::SteerForEvasion::run() { steerForEvasionKernel<<<gridDim(), blockDim()>>>(getVehicleData(), pursuitDataProvider->getPursuitPosition(), pursuitDataProvider->getPursuitVelocity(), getSteeringVectors(), maxPredictionTime, weight, options); } void OpenSteer::SteerForEvasion::close() { // nothing to do }
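The .hip and .cu versions of SteerForEvasion::run() above differ only in launch syntax: hipify rewrites the CUDA triple-chevron launch into hipLaunchKernelGGL, making the shared-memory size and stream arguments (both 0 here) explicit. A minimal sketch of that correspondence with a placeholder kernel is given below; the kernel name and arguments are hypothetical and only illustrate the argument order.

// Illustrative sketch with a placeholder kernel (not from the original files).
__global__ void exampleKernel(float* data, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= 2.0f;
}

void launchExample(float* data, int n, dim3 grid, dim3 block)
{
  // CUDA form: shared-memory bytes and stream are implicitly 0 / the default stream.
  exampleKernel<<<grid, block>>>(data, n);
  // Equivalent HIP form produced by hipify:
  //   hipLaunchKernelGGL(exampleKernel, grid, block, 0 /*sharedMemBytes*/, 0 /*stream*/, data, n);
}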
65b36a997d0bb59bf85a0f3505f97c44005119e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> /* __global__ void add(int a, int b, int *c) { *c=a+b; } */ int main(void) { /* int c; int *dev_c; hipMalloc((void**)&dev_c, sizeof(int)); add<<<1,1>>>(20,7,dev_c); hipMemcpy(&c,dev_c,sizeof(int),hipMemcpyDeviceToHost); printf("20 + 7 = %d\n",c); hipFree(dev_c); */ hipDeviceProp_t prop; hipGetDeviceProperties(&prop,0); printf("Number of multiprocessors = %d \n",prop.multiProcessorCount); return 0; }
65b36a997d0bb59bf85a0f3505f97c44005119e3.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> /* __global__ void add(int a, int b, int *c) { *c=a+b; } */ int main(void) { /* int c; int *dev_c; cudaMalloc((void**)&dev_c, sizeof(int)); add<<<1,1>>>(20,7,dev_c); cudaMemcpy(&c,dev_c,sizeof(int),cudaMemcpyDeviceToHost); printf("20 + 7 = %d\n",c); cudaFree(dev_c); */ cudaDeviceProp prop; cudaGetDeviceProperties(&prop,0); printf("Number of multiprocessors = %d \n",prop.multiProcessorCount); return 0; }
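This pair only reads multiProcessorCount and ignores the status returned by cudaGetDeviceProperties / hipGetDeviceProperties. A minimal sketch with basic error handling and a couple of extra fields is shown below, in the CUDA spelling; it is an illustration, not a revision of the record above.

// Illustrative sketch (not from the original file): query device properties
// with the return status checked before the fields are used.
#include <cuda_runtime.h>
#include <stdio.h>

int main(void)
{
  cudaDeviceProp prop;
  cudaError_t err = cudaGetDeviceProperties(&prop, 0);
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(err));
    return 1;
  }
  printf("Device name: %s\n", prop.name);
  printf("Number of multiprocessors: %d\n", prop.multiProcessorCount);
  printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
  return 0;
}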
50e28a0d3b13073dfea3bcc6e6268479f3b1c77a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THH/THHAtomics.cuh> #include <THH/THHNumerics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { using namespace at::cuda::detail; namespace { template <typename scalar_t, typename accscalar_t> __device__ inline int get_interval(accscalar_t sample, int index, int inputSize, int outputSize, int poolSize) { accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) / static_cast<accscalar_t>(outputSize - 1); if (index == outputSize - 1) { return inputSize - poolSize; } else { return static_cast<int>((index + sample) * alpha) - static_cast<int>(sample * alpha); } } template <typename scalar_t> __global__ void fractional_max_pool2d_out_cuda_frame( PackedTensorAccessor<scalar_t, 4> output, PackedTensorAccessor<int64_t, 4> indices, PackedTensorAccessor<scalar_t, 4> input, PackedTensorAccessor<scalar_t, 3> samples, int poolSizeH, int poolSizeW) { using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < output.size(2) * output.size(3)) { int outputW = ourOutputPoint % output.size(3); int outputH = ourOutputPoint / output.size(3); int poolW = get_interval<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][0]), outputW, input.size(3), output.size(3), poolSizeW); int poolH = get_interval<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][1]), outputH, input.size(2), output.size(2), poolSizeH); scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound(); int maxIndex = poolH * input.size(3) + poolW; for (int h = poolH; h < poolH + poolSizeH; ++h) { if (poolSizeW < 2 || poolSizeW > 7) { for (int w = poolW; w < poolW + poolSizeW; ++w) { scalar_t val = input[batch][plane][h][w]; // for consistency with THNN, favor the first max if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) { maxIndex = h * input.size(3) + w; maxVal = val; } } } else { for (int i = 0; i < poolSizeW; ++i) { int w = i + poolW; scalar_t val = input[batch][plane][h][w]; // for consistency with THNN, favor the first max if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) { maxIndex = h * input.size(3) + w; maxVal = val; } } } } indices[batch][plane][outputH][outputW] = maxIndex; output[batch][plane][outputH][outputW] = maxVal; } } template <typename scalar_t> __global__ void fractional_max_pool2d_backward_out_cuda_frame( PackedTensorAccessor<scalar_t, 4> gradInput, PackedTensorAccessor<scalar_t, 4> gradOutput, PackedTensorAccessor<int64_t, 4> indices) { // Output (h, w) point that this thread is responsible for int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < gradOutput.size(2) * gradOutput.size(3)) { int outputW = ourOutputPoint % gradOutput.size(3); int outputH = ourOutputPoint / gradOutput.size(3); int index = indices[batch][plane][outputH][outputW]; assert(index >= 0); int inputW = index % gradInput.size(3); 
int inputH = index / gradInput.size(3); assert(inputH < gradInput.size(2)); gpuAtomicAdd( &gradInput[batch][plane][inputH][inputW], gradOutput[batch][plane][outputH][outputW] ); } } void fractional_max_pool2d_out_cuda_template( Tensor & output, Tensor& indices, const Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const Tensor& randomSamples) { int planeDim = 0; int dimh = 1; int dimw = 2; int numBatch = 1; int ndims = input.ndimension(); TORCH_CHECK(input.numel() > 0, "fractional_max_pool2d(): expected input to have non-empty ", "spatial dimensions."); TORCH_CHECK((ndims == 3 || ndims == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); if (ndims == 4) { numBatch = input.size(0); planeDim++; dimh++; dimw++; } /* sizes */ int numPlanes = input.size(planeDim); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputH = output_size[0]; int outputW = output_size[1]; int poolSizeH = pool_size[0]; int poolSizeW = pool_size[1]; TORCH_CHECK(outputH + poolSizeH - 1 <= inputH, "fractional_max_pool2d(): pool_size height ", poolSizeH, " too large relative to input height ", inputH); TORCH_CHECK(outputW + poolSizeW - 1 <= inputW, "pool_size width ", poolSizeW, " too large relative to input width ", inputW); if (ndims == 3) { /* resize output */ output.resize_({numPlanes, outputH, outputW}); /* indices will contain the locations for each output point */ indices.resize_({numPlanes, outputH, outputW}); } else { output.resize_({numBatch, numPlanes, outputH, outputW}); indices.resize_({numBatch, numPlanes, outputH, outputW}); } auto output_ = output; auto input_ = input; auto indices_ = indices; if(ndims == 3) { output_ = output_.reshape({1, numPlanes, outputH, outputW}); indices_ = indices_.reshape({1, numPlanes, outputH, outputW}); input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)}); } // block is limited to 4 warps // grid handles overflow per each plane int outputPlaneSize = output_.size(2) * output_.size(3); dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) input_.size(1), input_.size(0)); dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "fractional_max_pool2d_out_cuda_frame", [&] { auto devInput = input_.packed_accessor<scalar_t, 4>(); auto devOutput = output_.packed_accessor<scalar_t, 4>(); auto devIndices = indices_.packed_accessor<int64_t, 4>(); auto devSamples = randomSamples.packed_accessor<scalar_t, 3>(); hipLaunchKernelGGL(( fractional_max_pool2d_out_cuda_frame<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devOutput, devIndices, devInput, devSamples, poolSizeH, poolSizeW); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } ); } void fractional_max_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef pool_size /* unused */, IntArrayRef output_size, const Tensor& indices) { int dimh = 1; int dimw = 2; int ndims = input.ndimension(); if (ndims == 4) { dimh++; dimw++; } /* sizes */ int inputH = input.size(dimh); int inputW = input.size(dimw); int outputH = output_size[0]; int outputW = output_size[1]; TORCH_CHECK(outputH == gradOutput.size(dimh), "fractional_max_pool2d(): gradOutput height unexpected"); TORCH_CHECK(outputW == gradOutput.size(dimw), "fractional_max_pool2d(): gradOutput width unexpected"); /* resize */ gradInput.resize_as_(input); gradInput.zero_(); auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; auto indices_ = indices; if(ndims == 3) { gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW}); gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW}); indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW}); } /* backprop */ // block is limited to 4 warps // grid handles overflow per each plane int outputPlaneSize = gradOutput_.size(2) * gradOutput_.size(3); dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) gradInput_.size(1), gradInput_.size(0)); dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); auto devIndices = indices.packed_accessor<int64_t, 4>(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "fractional_max_pool2d_backward_out_cuda_frame", [&] { auto devGradInput = gradInput_.packed_accessor<scalar_t, 4>(); auto devGradOutput = gradOutput_.packed_accessor<scalar_t, 4>(); hipLaunchKernelGGL(( fractional_max_pool2d_backward_out_cuda_frame<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, devIndices); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } ); } }// namespace std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cuda( at::Tensor& output, at::Tensor& indices, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { fractional_max_pool2d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> fractional_max_pool2d_cuda( const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); fractional_max_pool2d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& fractional_max_pool2d_backward_out_cuda( at::Tensor& gradInput, const at::Tensor& gradOutput_, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("fractional_max_pool2d_backward_out_cuda"); fractional_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, pool_size, output_size, indices); return gradInput; } Tensor fractional_max_pool2d_backward_cuda( const at::Tensor& gradOutput_, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("fractional_max_pool2d_backward_cuda"); Tensor gradInput = at::empty({0}, input.options()); fractional_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, pool_size, output_size, indices); return gradInput; } }// at::native }// at
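The backward pass in this file scatters each gradOutput value to its argmax location with gpuAtomicAdd, which is why the wrapper functions call globalContext().alertNotDeterministic(): when several output windows share an argmax, the floating-point additions race and their order differs between runs. A generic sketch of that scatter-add pattern in plain CUDA is shown below; it illustrates the source of the nondeterminism and is not the ATen kernel itself.

// Illustrative sketch (not from the original file): scatter upstream gradients
// to per-output argmax positions; colliding indices force atomic adds whose
// ordering, and therefore rounding, is not deterministic.
__global__ void scatter_max_backward(float* gradInput,          // zero-initialised, input-sized buffer
                                     const float* gradOutput,   // one value per output element
                                     const long long* indices,  // argmax input index per output element
                                     int outputSize)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < outputSize) {
    atomicAdd(&gradInput[indices[i]], gradOutput[i]);
  }
}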
50e28a0d3b13073dfea3bcc6e6268479f3b1c77a.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THC/THCAtomics.cuh> #include <THC/THCNumerics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { using namespace at::cuda::detail; namespace { template <typename scalar_t, typename accscalar_t> __device__ inline int get_interval(accscalar_t sample, int index, int inputSize, int outputSize, int poolSize) { accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) / static_cast<accscalar_t>(outputSize - 1); if (index == outputSize - 1) { return inputSize - poolSize; } else { return static_cast<int>((index + sample) * alpha) - static_cast<int>(sample * alpha); } } template <typename scalar_t> __global__ void fractional_max_pool2d_out_cuda_frame( PackedTensorAccessor<scalar_t, 4> output, PackedTensorAccessor<int64_t, 4> indices, PackedTensorAccessor<scalar_t, 4> input, PackedTensorAccessor<scalar_t, 3> samples, int poolSizeH, int poolSizeW) { using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < output.size(2) * output.size(3)) { int outputW = ourOutputPoint % output.size(3); int outputH = ourOutputPoint / output.size(3); int poolW = get_interval<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][0]), outputW, input.size(3), output.size(3), poolSizeW); int poolH = get_interval<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][1]), outputH, input.size(2), output.size(2), poolSizeH); scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound(); int maxIndex = poolH * input.size(3) + poolW; for (int h = poolH; h < poolH + poolSizeH; ++h) { if (poolSizeW < 2 || poolSizeW > 7) { for (int w = poolW; w < poolW + poolSizeW; ++w) { scalar_t val = input[batch][plane][h][w]; // for consistency with THNN, favor the first max if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) { maxIndex = h * input.size(3) + w; maxVal = val; } } } else { for (int i = 0; i < poolSizeW; ++i) { int w = i + poolW; scalar_t val = input[batch][plane][h][w]; // for consistency with THNN, favor the first max if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) { maxIndex = h * input.size(3) + w; maxVal = val; } } } } indices[batch][plane][outputH][outputW] = maxIndex; output[batch][plane][outputH][outputW] = maxVal; } } template <typename scalar_t> __global__ void fractional_max_pool2d_backward_out_cuda_frame( PackedTensorAccessor<scalar_t, 4> gradInput, PackedTensorAccessor<scalar_t, 4> gradOutput, PackedTensorAccessor<int64_t, 4> indices) { // Output (h, w) point that this thread is responsible for int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < gradOutput.size(2) * gradOutput.size(3)) { int outputW = ourOutputPoint % gradOutput.size(3); int outputH = ourOutputPoint / gradOutput.size(3); int index = indices[batch][plane][outputH][outputW]; assert(index >= 0); int inputW = index % gradInput.size(3); int inputH = index / gradInput.size(3); assert(inputH < gradInput.size(2)); 
gpuAtomicAdd( &gradInput[batch][plane][inputH][inputW], gradOutput[batch][plane][outputH][outputW] ); } } void fractional_max_pool2d_out_cuda_template( Tensor & output, Tensor& indices, const Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const Tensor& randomSamples) { int planeDim = 0; int dimh = 1; int dimw = 2; int numBatch = 1; int ndims = input.ndimension(); TORCH_CHECK(input.numel() > 0, "fractional_max_pool2d(): expected input to have non-empty ", "spatial dimensions."); TORCH_CHECK((ndims == 3 || ndims == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); if (ndims == 4) { numBatch = input.size(0); planeDim++; dimh++; dimw++; } /* sizes */ int numPlanes = input.size(planeDim); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputH = output_size[0]; int outputW = output_size[1]; int poolSizeH = pool_size[0]; int poolSizeW = pool_size[1]; TORCH_CHECK(outputH + poolSizeH - 1 <= inputH, "fractional_max_pool2d(): pool_size height ", poolSizeH, " too large relative to input height ", inputH); TORCH_CHECK(outputW + poolSizeW - 1 <= inputW, "pool_size width ", poolSizeW, " too large relative to input width ", inputW); if (ndims == 3) { /* resize output */ output.resize_({numPlanes, outputH, outputW}); /* indices will contain the locations for each output point */ indices.resize_({numPlanes, outputH, outputW}); } else { output.resize_({numBatch, numPlanes, outputH, outputW}); indices.resize_({numBatch, numPlanes, outputH, outputW}); } auto output_ = output; auto input_ = input; auto indices_ = indices; if(ndims == 3) { output_ = output_.reshape({1, numPlanes, outputH, outputW}); indices_ = indices_.reshape({1, numPlanes, outputH, outputW}); input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)}); } // block is limited to 4 warps // grid handles overflow per each plane int outputPlaneSize = output_.size(2) * output_.size(3); dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) input_.size(1), input_.size(0)); dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "fractional_max_pool2d_out_cuda_frame", [&] { auto devInput = input_.packed_accessor<scalar_t, 4>(); auto devOutput = output_.packed_accessor<scalar_t, 4>(); auto devIndices = indices_.packed_accessor<int64_t, 4>(); auto devSamples = randomSamples.packed_accessor<scalar_t, 3>(); fractional_max_pool2d_out_cuda_frame<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( devOutput, devIndices, devInput, devSamples, poolSizeH, poolSizeW); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } ); } void fractional_max_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef pool_size /* unused */, IntArrayRef output_size, const Tensor& indices) { int dimh = 1; int dimw = 2; int ndims = input.ndimension(); if (ndims == 4) { dimh++; dimw++; } /* sizes */ int inputH = input.size(dimh); int inputW = input.size(dimw); int outputH = output_size[0]; int outputW = output_size[1]; TORCH_CHECK(outputH == gradOutput.size(dimh), "fractional_max_pool2d(): gradOutput height unexpected"); TORCH_CHECK(outputW == gradOutput.size(dimw), "fractional_max_pool2d(): gradOutput width unexpected"); /* resize */ gradInput.resize_as_(input); gradInput.zero_(); auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; auto indices_ = indices; if(ndims == 3) { gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW}); gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW}); indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW}); } /* backprop */ // block is limited to 4 warps // grid handles overflow per each plane int outputPlaneSize = gradOutput_.size(2) * gradOutput_.size(3); dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) gradInput_.size(1), gradInput_.size(0)); dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); auto devIndices = indices.packed_accessor<int64_t, 4>(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "fractional_max_pool2d_backward_out_cuda_frame", [&] { auto devGradInput = gradInput_.packed_accessor<scalar_t, 4>(); auto devGradOutput = gradOutput_.packed_accessor<scalar_t, 4>(); fractional_max_pool2d_backward_out_cuda_frame<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( devGradInput, devGradOutput, devIndices); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } ); } }// namespace std::tuple<Tensor&, Tensor&> fractional_max_pool2d_out_cuda( at::Tensor& output, at::Tensor& indices, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { fractional_max_pool2d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> fractional_max_pool2d_cuda( const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); fractional_max_pool2d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& fractional_max_pool2d_backward_out_cuda( at::Tensor& gradInput, const at::Tensor& gradOutput_, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("fractional_max_pool2d_backward_out_cuda"); fractional_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, pool_size, output_size, indices); return gradInput; } Tensor fractional_max_pool2d_backward_cuda( const at::Tensor& gradOutput_, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("fractional_max_pool2d_backward_cuda"); Tensor gradInput = at::empty({0}, input.options()); fractional_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, pool_size, output_size, indices); return gradInput; } }// at::native }// at
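Both versions place each pooling window with get_interval, which maps the per-plane random sample and the output index to a window start such that the final output window is always pinned to the end of the input. A small host-side sketch of the same arithmetic with concrete numbers is given below; it is independent of the ATen code and purely illustrative.

// Illustrative sketch (not from the original file): host-side version of the
// window-placement arithmetic used by get_interval in the kernels above.
#include <stdio.h>

static int interval(float sample, int index, int inputSize, int outputSize, int poolSize)
{
  float alpha = (float)(inputSize - poolSize) / (float)(outputSize - 1);
  if (index == outputSize - 1)
    return inputSize - poolSize;  // last window is pinned to the end of the input
  return (int)((index + sample) * alpha) - (int)(sample * alpha);
}

int main(void)
{
  // Example: input width 10, output width 4, pool width 2, random sample 0.3
  // gives window starts 0, 3, 6, 8 -- every window of width 2 stays in bounds.
  for (int i = 0; i < 4; ++i)
    printf("output %d starts at input column %d\n", i, interval(0.3f, i, 10, 4, 2));
  return 0;
}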
c19e84e81c435b8bbf188906c67e5e0889ffecac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cmath> #include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG __global__ void vector_add(double *C, const double *A, const double *B, int N) { // Add the kernel code int idx = blockIdx.x * blockDim.x + threadIdx.x; // Do not try to access past the allocated memory if (idx < N) { C[idx] = A[idx] + B[idx]; } } int main(void) { const int N = 20; const int ThreadsInBlock = 12800; double *dA, *dB, *dC; double hA[N], hB[N], hC[N]; for(int i = 0; i < N; ++i) { hA[i] = (double) i; hB[i] = (double) i * i; } /* Add memory allocations and copies. Wrap your runtime function calls with CUDA_CHECK( ) macro */ CUDA_CHECK(hipMalloc((void**)&dA, sizeof(double)*N)); CUDA_CHECK(hipMalloc((void**)&dB, sizeof(double)*N)); CUDA_CHECK(hipMalloc((void**)&dC, sizeof(double)*N)); // Note the maximum size of threads in a block dim3 grid, threads; CUDA_CHECK(hipMemcpy(dA, hA, sizeof(double)*N, hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(dB, hB, sizeof(double)*N, hipMemcpyHostToDevice)); //// Add the kernel call here hipLaunchKernelGGL(( vector_add), dim3(1),dim3(ThreadsInBlock), 0, 0, dC, dA, dB, N); // Here we add an explicit synchronization so that we catch errors // as early as possible. Don't do this in production code! //hipDeviceSynchronize(); //CHECK_ERROR_MSG("vector_add kernel"); //// Copy back the results and free the device memory CUDA_CHECK(hipMemcpy(hC, dC, sizeof(double)*N, hipMemcpyDeviceToHost)); for (int i = 0; i < N; i++) printf("%5.1f\n", hC[i]); CUDA_CHECK(hipFree(dA)); CUDA_CHECK(hipFree(dB)); CUDA_CHECK(hipFree(dC)); return 0; }
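The listing above defines a CUDA_CHECK macro for runtime calls but leaves the post-launch synchronisation and CHECK_ERROR_MSG commented out, so a failed kernel launch would only surface at the next blocking call (here the hipMemcpy/cudaMemcpy back to the host). A minimal sketch of checking a launch explicitly is shown below; the macro-free helper is an illustration, not part of the exercise code.

// Illustrative sketch (not from the original file): surface kernel-launch and
// kernel-execution errors immediately after the launch.
#include <cuda_runtime.h>
#include <cstdio>

static void check_launch(const char* what)
{
  cudaError_t err = cudaGetLastError();   // launch-configuration errors
  if (err == cudaSuccess)
    err = cudaDeviceSynchronize();        // errors raised while the kernel ran
  if (err != cudaSuccess)
    fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
}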
c19e84e81c435b8bbf188906c67e5e0889ffecac.cu
#include <cstdio> #include <cmath> #include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG __global__ void vector_add(double *C, const double *A, const double *B, int N) { // Add the kernel code int idx = blockIdx.x * blockDim.x + threadIdx.x; // Do not try to access past the allocated memory if (idx < N) { C[idx] = A[idx] + B[idx]; } } int main(void) { const int N = 20; const int ThreadsInBlock = 12800; double *dA, *dB, *dC; double hA[N], hB[N], hC[N]; for(int i = 0; i < N; ++i) { hA[i] = (double) i; hB[i] = (double) i * i; } /* Add memory allocations and copies. Wrap your runtime function calls with CUDA_CHECK( ) macro */ CUDA_CHECK(cudaMalloc((void**)&dA, sizeof(double)*N)); CUDA_CHECK(cudaMalloc((void**)&dB, sizeof(double)*N)); CUDA_CHECK(cudaMalloc((void**)&dC, sizeof(double)*N)); // Note the maximum size of threads in a block dim3 grid, threads; CUDA_CHECK(cudaMemcpy(dA, hA, sizeof(double)*N, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(dB, hB, sizeof(double)*N, cudaMemcpyHostToDevice)); //// Add the kernel call here vector_add<<<1,ThreadsInBlock>>>(dC, dA, dB, N); // Here we add an explicit synchronization so that we catch errors // as early as possible. Don't do this in production code! //cudaDeviceSynchronize(); //CHECK_ERROR_MSG("vector_add kernel"); //// Copy back the results and free the device memory CUDA_CHECK(cudaMemcpy(hC, dC, sizeof(double)*N, cudaMemcpyDeviceToHost)); for (int i = 0; i < N; i++) printf("%5.1f\n", hC[i]); CUDA_CHECK(cudaFree(dA)); CUDA_CHECK(cudaFree(dB)); CUDA_CHECK(cudaFree(dC)); return 0; }
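Note that both listings launch vector_add as <<<1, ThreadsInBlock>>> with ThreadsInBlock = 12800, which exceeds the per-block thread limit (typically 1024 on current NVIDIA GPUs), so the launch itself fails and the kernel never writes the result array; the commented-out error check is exactly what would have reported this. A minimal sketch of the usual split into a grid of legally sized blocks is shown below, as an illustration rather than a patch to the original assignment code.

// Illustrative sketch (not from the original file): launch the vector_add
// kernel from the listing above with a block size within the device limit.
// dC, dA, dB are assumed to be device pointers allocated as in that listing.
static void launch_vector_add(double* dC, const double* dA, const double* dB, int N)
{
  const int threadsPerBlock = 128;  // well under the 1024 threads-per-block limit
  const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
  vector_add<<<blocks, threadsPerBlock>>>(dC, dA, dB, N);
}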
9687d9bc343c8e659268ad999f901e7eadfa776b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (C) 2009 Scott Grauer-Gray, Chandra Kambhamettu, and Kannappan Palaniappan This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ //This file defines the methods to perform belief propagation for disparity map estimation from stereo images on CUDA //#include "kernalBpStereoHeader.cuh" #define PROCESSING_ON_GPU #include "../SharedFuncts/SharedBPProcessingFuncts.h" #undef PROCESSING_ON_GPU #include "bpStereoCudaParameters.h" #if ((USE_SHARED_MEMORY == 1) && (DISP_INDEX_START_REG_LOCAL_MEM > 0)) #include "SharedMemoryKernels/KernalBpStereoUseSharedMemory.cu" #elif ((USE_SHARED_MEMORY == 2) && (DISP_INDEX_START_REG_LOCAL_MEM > 0)) #include "SharedMemoryKernels/KernalBpStereoUseSharedMemoryActuallyDuplicateRegMem.cu" #elif ((USE_SHARED_MEMORY == 3) && (DISP_INDEX_START_REG_LOCAL_MEM > 0)) #include "SharedMemoryKernels/KernelBpStereoUseDynamicSharedMemory.cu" #elif ((USE_SHARED_MEMORY == 4) && (DISP_INDEX_START_REG_LOCAL_MEM > 0)) #include "SharedMemoryKernels/KernelBpStereoDataAndMessageInDynamicSharedMemory.cu" #else #if ((CURRENT_DATA_TYPE_PROCESSING == DATA_TYPE_PROCESSING_HALF) || (CURRENT_DATA_TYPE_PROCESSING == DATA_TYPE_PROCESSING_HALF_TWO)) //template specialization for processing messages with half-precision; has safeguard to check if valToNormalize goes to infinity and set output //for every disparity at point to be 0.0 if that's the case; this has only been observed when using more than 5 computation levels with half-precision template<> __device__ void msgStereo<half, half>(int xVal, int yVal, levelProperties& currentLevelProperties, half messageValsNeighbor1[NUM_POSSIBLE_DISPARITY_VALUES], half messageValsNeighbor2[NUM_POSSIBLE_DISPARITY_VALUES], half messageValsNeighbor3[NUM_POSSIBLE_DISPARITY_VALUES], half dataCosts[NUM_POSSIBLE_DISPARITY_VALUES], half* dstMessageArray, half disc_k_bp, bool dataAligned) { // aggregate and find min half minimum = INF_BP; half dst[NUM_POSSIBLE_DISPARITY_VALUES]; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = messageValsNeighbor1[currentDisparity] + messageValsNeighbor2[currentDisparity] + messageValsNeighbor3[currentDisparity] + dataCosts[currentDisparity]; if (dst[currentDisparity] < minimum) minimum = dst[currentDisparity]; } //retrieve the minimum value at each disparity in O(n) time using Felzenszwalb's method (see "Efficient Belief Propagation for Early Vision") dtStereo < half > (dst); // truncate minimum += disc_k_bp; // normalize half valToNormalize = 0; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { if (minimum < dst[currentDisparity]) { dst[currentDisparity] = minimum; } valToNormalize += dst[currentDisparity]; } //if valToNormalize is infinite or NaN (observed when using more than 5 computation 
levels with half-precision), //set destination vector to 0 for all disparities //note that may cause results to differ a little from ideal if (__hisnan(valToNormalize) || ((__hisinf(valToNormalize)) != 0)) { int destMessageArrayIndex = retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, 0, NUM_POSSIBLE_DISPARITY_VALUES); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dstMessageArray[destMessageArrayIndex] = (half) 0.0; #if OPTIMIZED_INDEXING_SETTING == 1 destMessageArrayIndex += currentLevelProperties.paddedWidthCheckerboardLevel; #else destMessageArrayIndex++; #endif //OPTIMIZED_INDEXING_SETTING == 1 } } else { valToNormalize /= NUM_POSSIBLE_DISPARITY_VALUES; int destMessageArrayIndex = retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, 0, NUM_POSSIBLE_DISPARITY_VALUES); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] -= valToNormalize; dstMessageArray[destMessageArrayIndex] = dst[currentDisparity]; #if OPTIMIZED_INDEXING_SETTING == 1 destMessageArrayIndex += currentLevelProperties.paddedWidthCheckerboardLevel; #else destMessageArrayIndex++; #endif //OPTIMIZED_INDEXING_SETTING == 1 } } } #endif //((CURRENT_DATA_TYPE_PROCESSING == DATA_TYPE_PROCESSING_HALF) || (CURRENT_DATA_TYPE_PROCESSING == DATA_TYPE_PROCESSING_HALF_TWO)) #endif //#if ((USE_SHARED_MEMORY == 1) && (DISP_INDEX_START_REG_LOCAL_MEM > 0)) //initialize the "data cost" for each possible disparity between the two full-sized input images ("bottom" of the image pyramid) //the image data is stored in the CUDA arrays image1PixelsTextureBPStereo and image2PixelsTextureBPStereo template<typename T> __global__ void initializeBottomLevelDataStereo(levelProperties currentLevelProperties, float* image1PixelsDevice, float* image2PixelsDevice, T* dataCostDeviceStereoCheckerboard1, T* dataCostDeviceStereoCheckerboard2, float lambda_bp, float data_k_bp) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; int xInCheckerboard = xVal / 2; if (withinImageBounds(xInCheckerboard, yVal, currentLevelProperties.widthLevel, currentLevelProperties.heightLevel)) { initializeBottomLevelDataStereoPixel<T, T>(xVal, yVal, currentLevelProperties, image1PixelsDevice, image2PixelsDevice,dataCostDeviceStereoCheckerboard1, dataCostDeviceStereoCheckerboard2, lambda_bp, data_k_bp); } } //initialize the data costs at the "next" level up in the pyramid given that the data at the lower has been set template<typename T> __global__ void initializeCurrentLevelDataStereo(int checkerboardPart, levelProperties currentLevelProperties, levelProperties prevLevelProperties, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* dataCostDeviceToWriteTo, int offsetNum) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal, yVal, currentLevelProperties.widthCheckerboardLevel, currentLevelProperties.heightLevel)) { initializeCurrentLevelDataStereoPixel<T, T>( xVal, yVal, checkerboardPart, currentLevelProperties, prevLevelProperties, dataCostStereoCheckerboard1, 
dataCostStereoCheckerboard2, dataCostDeviceToWriteTo, offsetNum); } } //initialize the message values at each pixel of the current level to the default value template<typename T> __global__ void initializeMessageValsToDefaultKernel(levelProperties currentLevelProperties, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xValInCheckerboard = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xValInCheckerboard, yVal, currentLevelProperties.widthCheckerboardLevel, currentLevelProperties.heightLevel)) { //initialize message values in both checkerboards initializeMessageValsToDefaultKernelPixel<T>(xValInCheckerboard, yVal, currentLevelProperties, messageUDeviceCurrentCheckerboard1, messageDDeviceCurrentCheckerboard1, messageLDeviceCurrentCheckerboard1, messageRDeviceCurrentCheckerboard1, messageUDeviceCurrentCheckerboard2, messageDDeviceCurrentCheckerboard2, messageLDeviceCurrentCheckerboard2, messageRDeviceCurrentCheckerboard2); } } //kernal function to run the current iteration of belief propagation in parallel using the checkerboard update method where half the pixels in the "checkerboard" //scheme retrieve messages from each 4-connected neighbor and then update their message based on the retrieved messages and the data cost template<typename T> __global__ void runBPIterationUsingCheckerboardUpdates(int checkerboardToUpdate, levelProperties currentLevelProperties, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2, float disc_k_bp, bool dataAligned) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal, yVal, currentLevelProperties.widthLevel/2, currentLevelProperties.heightLevel)) { runBPIterationUsingCheckerboardUpdatesDeviceNoTexBoundAndLocalMemPixel<T, T>( xVal, yVal, checkerboardToUpdate, currentLevelProperties, dataCostStereoCheckerboard1, dataCostStereoCheckerboard2, messageUDeviceCurrentCheckerboard1, messageDDeviceCurrentCheckerboard1, messageLDeviceCurrentCheckerboard1, messageRDeviceCurrentCheckerboard1, messageUDeviceCurrentCheckerboard2, messageDDeviceCurrentCheckerboard2, messageLDeviceCurrentCheckerboard2, messageRDeviceCurrentCheckerboard2, disc_k_bp, 0, dataAligned); } } //kernal to copy the computed BP message values at the current level to the corresponding locations at the "next" level down //the kernal works from the point of view of the pixel at the prev level that is being copied to four different places template<typename T> __global__ void copyPrevLevelToNextLevelBPCheckerboardStereo( int checkerboardPart, levelProperties currentLevelProperties, levelProperties nextLevelProperties, T* messageUPrevStereoCheckerboard1, T* messageDPrevStereoCheckerboard1, 
T* messageLPrevStereoCheckerboard1, T* messageRPrevStereoCheckerboard1, T* messageUPrevStereoCheckerboard2, T* messageDPrevStereoCheckerboard2, T* messageLPrevStereoCheckerboard2, T* messageRPrevStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal, yVal, currentLevelProperties.widthCheckerboardLevel, currentLevelProperties.heightLevel)) { copyPrevLevelToNextLevelBPCheckerboardStereoPixel<T>(xVal, yVal, checkerboardPart, currentLevelProperties, nextLevelProperties, messageUPrevStereoCheckerboard1, messageDPrevStereoCheckerboard1, messageLPrevStereoCheckerboard1, messageRPrevStereoCheckerboard1, messageUPrevStereoCheckerboard2, messageDPrevStereoCheckerboard2, messageLPrevStereoCheckerboard2, messageRPrevStereoCheckerboard2, messageUDeviceCurrentCheckerboard1, messageDDeviceCurrentCheckerboard1, messageLDeviceCurrentCheckerboard1, messageRDeviceCurrentCheckerboard1, messageUDeviceCurrentCheckerboard2, messageDDeviceCurrentCheckerboard2, messageLDeviceCurrentCheckerboard2, messageRDeviceCurrentCheckerboard2); } } //retrieve the best disparity estimate from image 1 to image 2 for each pixel in parallel template<typename T> __global__ void retrieveOutputDisparityCheckerboardStereoOptimized(levelProperties currentLevelProperties, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUPrevStereoCheckerboard1, T* messageDPrevStereoCheckerboard1, T* messageLPrevStereoCheckerboard1, T* messageRPrevStereoCheckerboard1, T* messageUPrevStereoCheckerboard2, T* messageDPrevStereoCheckerboard2, T* messageLPrevStereoCheckerboard2, T* messageRPrevStereoCheckerboard2, float* disparityBetweenImagesDevice) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal, yVal, currentLevelProperties.widthCheckerboardLevel, currentLevelProperties.heightLevel)) { retrieveOutputDisparityCheckerboardStereoOptimizedPixel<T, T>(xVal, yVal, currentLevelProperties, dataCostStereoCheckerboard1, dataCostStereoCheckerboard2, messageUPrevStereoCheckerboard1, messageDPrevStereoCheckerboard1, messageLPrevStereoCheckerboard1, messageRPrevStereoCheckerboard1, messageUPrevStereoCheckerboard2, messageDPrevStereoCheckerboard2, messageLPrevStereoCheckerboard2, messageRPrevStereoCheckerboard2, disparityBetweenImagesDevice); } } template<typename T> __global__ void printDataAndMessageValsAtPointKernel(int xVal, int yVal, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2, int widthLevelCheckerboardPart, int heightLevel) { if (((xVal + yVal) % 2) == 0) { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int 
currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } else { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } } template<typename T> __device__ void printDataAndMessageValsAtPointDevice(int xVal, int yVal, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2, int widthLevelCheckerboardPart, int heightLevel) { if (((xVal + yVal) % 2) == 0) { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, 
widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } else { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } } template<typename T> __global__ void printDataAndMessageValsToPointKernel(int xVal, int yVal, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2, int widthLevelCheckerboardPart, int heightLevel) { int checkerboardAdjustment; if (((xVal + yVal) % 2) == 0) { checkerboardAdjustment = ((yVal)%2); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { checkerboardAdjustment = ((yVal+1)%2); } if (((xVal + yVal) % 2) == 0) { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal + 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); 
printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal - 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2 + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( (xVal / 2 - 1) + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } else { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal + 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal - 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2 + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( (xVal / 2 - 1) + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } } template<typename T> __device__ void printDataAndMessageValsToPointDevice(int xVal, int yVal, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2, int widthLevelCheckerboardPart, int heightLevel) { int checkerboardAdjustment; if (((xVal + yVal) % 2) == 0) { checkerboardAdjustment = ((yVal)%2); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { checkerboardAdjustment = ((yVal+1)%2); } if (((xVal + yVal) % 2) == 0) { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal + 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, 
NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal - 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2 + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( (xVal / 2 - 1) + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } else { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal + 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal - 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2 + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( (xVal / 2 - 1) + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } } /*template<> __device__ half2 getZeroVal<half2>() { return __floats2half2_rn (0.0, 0.0); } __device__ half2 getMinBothPartsHalf2(half2 val1, half2 val2) { half2 val1Less = __hlt2(val1, val2); half2 val2LessOrEqual = __hle2(val2, val1); return __hadd2(__hmul2(val1Less, val1), __hmul2(val2LessOrEqual, val2)); } template<> __device__ void dtStereo<half2>(half2 f[NUM_POSSIBLE_DISPARITY_VALUES]) { half2 prev; for (int currentDisparity = 1; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { prev = __hadd2(f[currentDisparity-1], __float2half2_rn(1.0f)); f[currentDisparity] = getMinBothPartsHalf2(prev, f[currentDisparity]); } for (int currentDisparity = NUM_POSSIBLE_DISPARITY_VALUES-2; currentDisparity >= 0; currentDisparity--) { prev = __hadd2(f[currentDisparity+1], __float2half2_rn(1.0f)); f[currentDisparity] = getMinBothPartsHalf2(prev, f[currentDisparity]); } }*/ /*template<> __device__ void msgStereo<half2>(half2 messageValsNeighbor1[NUM_POSSIBLE_DISPARITY_VALUES], half2 messageValsNeighbor2[NUM_POSSIBLE_DISPARITY_VALUES], half2 messageValsNeighbor3[NUM_POSSIBLE_DISPARITY_VALUES], half2 
dataCosts[NUM_POSSIBLE_DISPARITY_VALUES], half2 dst[NUM_POSSIBLE_DISPARITY_VALUES], half2 disc_k_bp) { // aggregate and find min half2 minimum = __float2half2_rn(INF_BP); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __hadd2(messageValsNeighbor1[currentDisparity], messageValsNeighbor2[currentDisparity]); dst[currentDisparity] = __hadd2(dst[currentDisparity], messageValsNeighbor3[currentDisparity]); dst[currentDisparity] = __hadd2(dst[currentDisparity], dataCosts[currentDisparity]); minimum = getMinBothPartsHalf2(dst[currentDisparity], minimum); } //retrieve the minimum value at each disparity in O(n) time using Felzenszwalb's method (see "Efficient Belief Propagation for Early Vision") dtStereo<half2>(dst); // truncate minimum = __hadd2(minimum, disc_k_bp); // normalize half2 valToNormalize = __float2half2_rn(0.0f); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = getMinBothPartsHalf2(minimum, dst[currentDisparity]); valToNormalize = __hadd2(valToNormalize, dst[currentDisparity]); } //if either valToNormalize in half2 is infinite or NaN, set destination vector to 0 for all disparities //note that may cause results to differ a little from ideal if (((__hisnan(__low2half(valToNormalize))) || ((__hisinf(__low2half(valToNormalize)) != 0))) || ((__hisnan(__high2half(valToNormalize))) || ((__hisinf(__high2half(valToNormalize)) != 0)))) { for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __floats2half2_rn(0.0f, 0.0f); } } else { valToNormalize = __h2div(valToNormalize, __float2half2_rn((float) NUM_POSSIBLE_DISPARITY_VALUES)); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __hsub2(dst[currentDisparity], valToNormalize); } } //check if both values in half2 are inf or nan /*if (((__hisnan(__low2half(valToNormalize))) || ((__hisinf(__low2half(valToNormalize)) != 0))) && ((__hisnan(__high2half(valToNormalize))) || ((__hisinf(__high2half(valToNormalize)) != 0)))) { for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __floats2half2_rn(0.0f, 0.0f); } } else if (((__hisnan(__low2half(valToNormalize))) || ((__hisinf(__low2half(valToNormalize)) != 0)))) { //lower half of half2 is inf or nan valToNormalize = __h2div(valToNormalize, __float2half2_rn((float) NUM_POSSIBLE_DISPARITY_VALUES)); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __hsub2(dst[currentDisparity], valToNormalize); } for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __halves2half2((half)0.0f, __high2half(dst[currentDisparity])); } } else if ((__hisnan(__high2half(valToNormalize))) || ((__hisinf(__high2half(valToNormalize)) != 0))) { //higher half of half2 is inf or nan valToNormalize = __h2div(valToNormalize, __float2half2_rn((float) NUM_POSSIBLE_DISPARITY_VALUES)); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __hsub2(dst[currentDisparity], valToNormalize); } for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __halves2half2( 
__low2half(dst[currentDisparity]), (half)0.0f); } } }*/ //device portion of the kernal function to run the current iteration of belief propagation in parallel using the checkerboard update method where half the pixels in the //"checkerboard" scheme retrieve messages from each 4-connected neighbor and then update their message based on the retrieved messages and the data cost //this function uses local memory to store the message and data values at each disparity in the intermediate step of current message computation //this function uses linear memory bound to textures to access the current data and message values /*template<> __device__ void runBPIterationUsingCheckerboardUpdatesDeviceNoTexBoundAndLocalMem<half2>(int xVal, int yVal, int checkerboardToUpdate, levelProperties& currentLevelProperties, half2* dataCostStereoCheckerboard1, half2* dataCostStereoCheckerboard2, half2* messageUDeviceCurrentCheckerboard1, half2* messageDDeviceCurrentCheckerboard1, half2* messageLDeviceCurrentCheckerboard1, half2* messageRDeviceCurrentCheckerboard1, half2* messageUDeviceCurrentCheckerboard2, half2* messageDDeviceCurrentCheckerboard2, half2* messageLDeviceCurrentCheckerboard2, half2* messageRDeviceCurrentCheckerboard2, float disc_k_bp, int offsetData) { } int indexWriteTo; int checkerboardAdjustment; //checkerboardAdjustment used for indexing into current checkerboard to update if (checkerboardToUpdate == CHECKERBOARD_PART_1) { checkerboardAdjustment = ((yVal)%2); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { checkerboardAdjustment = ((yVal+1)%2); } //may want to look into (xVal < (widthLevelCheckerboardPart - 1) since it may affect the edges //make sure that the current point is not an edge/corner that doesn't have four neighbors that can pass values to it //if ((xVal >= (1 - checkerboardAdjustment)) && (xVal < (widthLevelCheckerboardPart - 1)) && (yVal > 0) && (yVal < (heightLevel - 1))) if ((xVal >= (1/*switch to 0 if trying to match half results exactly*//* - checkerboardAdjustment)) && (xVal < (widthLevelCheckerboardPart - checkerboardAdjustment)) && (yVal > 0) && (yVal < (heightLevel - 1))) { half2 prevUMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 prevDMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 prevLMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 prevRMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 dataMessage[NUM_POSSIBLE_DISPARITY_VALUES]; if (checkerboardToUpdate == CHECKERBOARD_PART_1) { half* messageLDeviceCurrentCheckerboard2Half = (half*)messageLDeviceCurrentCheckerboard2; half* messageRDeviceCurrentCheckerboard2Half = (half*)messageRDeviceCurrentCheckerboard2; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dataMessage[currentDisparity] = dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage( xVal, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]; prevUMessage[currentDisparity] = messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal, (yVal + 1), widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; prevDMessage[currentDisparity] = messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal, (yVal - 1), widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; prevLMessage[currentDisparity] = __halves2half2( messageLDeviceCurrentCheckerboard2Half[retrieveIndexInDataAndMessage( ((xVal * 2) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, 
currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageLDeviceCurrentCheckerboard2Half[retrieveIndexInDataAndMessage( ((xVal * 2 + 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); //if ((((xVal * 2) - 1) + checkerboardAdjustment) >= 0) { prevRMessage[currentDisparity] = __halves2half2( messageRDeviceCurrentCheckerboard2Half[retrieveIndexInDataAndMessage( (((xVal * 2) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageRDeviceCurrentCheckerboard2Half[retrieveIndexInDataAndMessage( (((xVal * 2 + 1) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } /*else { prevRMessage[currentDisparity] = __halves2half2((half)0.0f, messageRDeviceCurrentCheckerboard2Half[retrieveIndexInDataAndMessage( (((xVal * 2 + 1) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); }*//* } } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { half* messageLDeviceCurrentCheckerboard1Half = (half*)messageLDeviceCurrentCheckerboard1; half* messageRDeviceCurrentCheckerboard1Half = (half*)messageRDeviceCurrentCheckerboard1; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dataMessage[currentDisparity] = dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xVal, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]; prevUMessage[currentDisparity] = messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal, (yVal + 1), widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; prevDMessage[currentDisparity] = messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal, (yVal - 1), widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; prevLMessage[currentDisparity] = __halves2half2( messageLDeviceCurrentCheckerboard1Half[retrieveIndexInDataAndMessage( ((xVal * 2) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageLDeviceCurrentCheckerboard1Half[retrieveIndexInDataAndMessage( ((xVal * 2 + 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); //if ((((xVal * 2) - 1) + checkerboardAdjustment) >= 0) { prevRMessage[currentDisparity] = __halves2half2( messageRDeviceCurrentCheckerboard1Half[retrieveIndexInDataAndMessage( (((xVal * 2) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageRDeviceCurrentCheckerboard1Half[retrieveIndexInDataAndMessage( (((xVal * 2 + 1) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } /*else { prevRMessage[currentDisparity] = __halves2half2((half) 0.0, messageRDeviceCurrentCheckerboard1Half[retrieveIndexInDataAndMessage( (((xVal * 2 + 1) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); }*//* } } half2 currentUMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 currentDMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 
currentLMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 currentRMessage[NUM_POSSIBLE_DISPARITY_VALUES]; //uses the previous message values and data cost to calculate the current message values and store the results runBPIterationInOutDataInLocalMem<half2>(prevUMessage, prevDMessage, prevLMessage, prevRMessage, dataMessage, currentUMessage, currentDMessage, currentLMessage, currentRMessage, __float2half2_rn(disc_k_bp)); //write the calculated message values to global memory for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { indexWriteTo = retrieveIndexInDataAndMessage(xVal, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES); if (checkerboardToUpdate == CHECKERBOARD_PART_1) { messageUDeviceCurrentCheckerboard1[indexWriteTo] = currentUMessage[currentDisparity]; messageDDeviceCurrentCheckerboard1[indexWriteTo] = currentDMessage[currentDisparity]; messageLDeviceCurrentCheckerboard1[indexWriteTo] = currentLMessage[currentDisparity]; messageRDeviceCurrentCheckerboard1[indexWriteTo] = currentRMessage[currentDisparity]; } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { messageUDeviceCurrentCheckerboard2[indexWriteTo] = currentUMessage[currentDisparity]; messageDDeviceCurrentCheckerboard2[indexWriteTo] = currentDMessage[currentDisparity]; messageLDeviceCurrentCheckerboard2[indexWriteTo] = currentLMessage[currentDisparity]; messageRDeviceCurrentCheckerboard2[indexWriteTo] = currentRMessage[currentDisparity]; } } } } */ //retrieve the best disparity estimate from image 1 to image 2 for each pixel in parallel /*template<> __global__ void retrieveOutputDisparityCheckerboardStereoOptimized<half2>(levelProperties currentLevelProperties, half2* dataCostStereoCheckerboard1, half2* dataCostStereoCheckerboard2, half2* messageUPrevStereoCheckerboard1, half2* messageDPrevStereoCheckerboard1, half2* messageLPrevStereoCheckerboard1, half2* messageRPrevStereoCheckerboard1, half2* messageUPrevStereoCheckerboard2, half2* messageDPrevStereoCheckerboard2, half2* messageLPrevStereoCheckerboard2, half2* messageRPrevStereoCheckerboard2, float* disparityBetweenImagesDevice) { }*/ //retrieve the best disparity estimate from image 1 to image 2 for each pixel in parallel /*template<typename T> __global__ void retrieveOutputDisparityCheckerboardStereo(T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUPrevStereoCheckerboard1, T* messageDPrevStereoCheckerboard1, T* messageLPrevStereoCheckerboard1, T* messageRPrevStereoCheckerboard1, T* messageUPrevStereoCheckerboard2, T* messageDPrevStereoCheckerboard2, T* messageLPrevStereoCheckerboard2, T* messageRPrevStereoCheckerboard2, float* disparityBetweenImagesDevice, int widthLevel, int heightLevel) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal, yVal, widthLevel, heightLevel)) { int widthCheckerboard = getCheckerboardWidth<T>(widthLevel); int xValInCheckerboardPart = xVal/2; if (((yVal+xVal) % 2) == 0) //if true, then pixel is from part 1 of the checkerboard; otherwise, it's from part 2 { int checkerboardPartAdjustment = (yVal%2); if ((xVal >= 1) && (xVal < (widthLevel - 1)) && (yVal >= 1) && (yVal < (heightLevel - 1))) { // keep track of "best" disparity for current pixel int bestDisparity = 0; T best_val = INF_BP; for (int currentDisparity = 0; currentDisparity < 
NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { T val = messageUPrevStereoCheckerboard2[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal + 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageDPrevStereoCheckerboard2[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal - 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageLPrevStereoCheckerboard2[retrieveIndexInDataAndMessage((xValInCheckerboardPart + checkerboardPartAdjustment), yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageRPrevStereoCheckerboard2[retrieveIndexInDataAndMessage((xValInCheckerboardPart - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xValInCheckerboardPart, yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; if (val < (best_val)) { best_val = val; bestDisparity = currentDisparity; } } disparityBetweenImagesDevice[yVal*widthLevel + xVal] = bestDisparity; } else { disparityBetweenImagesDevice[yVal*widthLevel + xVal] = 0; } } else //pixel from part 2 of checkerboard { int checkerboardPartAdjustment = ((yVal + 1) % 2); if ((xVal >= 1) && (xVal < (widthLevel - 1)) && (yVal >= 1) && (yVal < (heightLevel - 1))) { // keep track of "best" disparity for current pixel int bestDisparity = 0; T best_val = INF_BP; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { T val = messageUPrevStereoCheckerboard1[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal + 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageDPrevStereoCheckerboard1[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal - 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageLPrevStereoCheckerboard1[retrieveIndexInDataAndMessage((xValInCheckerboardPart + checkerboardPartAdjustment), yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageRPrevStereoCheckerboard1[retrieveIndexInDataAndMessage((xValInCheckerboardPart - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage(xValInCheckerboardPart, yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; if (val < (best_val)) { best_val = val; bestDisparity = currentDisparity; } } disparityBetweenImagesDevice[yVal*widthLevel + xVal] = bestDisparity; } else { disparityBetweenImagesDevice[yVal*widthLevel + xVal] = 0; } } } } //retrieve the best disparity estimate from image 1 to image 2 for each pixel in parallel template<> __global__ void retrieveOutputDisparityCheckerboardStereo<half2>(half2* dataCostStereoCheckerboard1, half2* dataCostStereoCheckerboard2, half2* messageUPrevStereoCheckerboard1, half2* messageDPrevStereoCheckerboard1, half2* messageLPrevStereoCheckerboard1, half2* messageRPrevStereoCheckerboard1, half2* messageUPrevStereoCheckerboard2, half2* messageDPrevStereoCheckerboard2, half2* messageLPrevStereoCheckerboard2, half2* messageRPrevStereoCheckerboard2, float* disparityBetweenImagesDevice, int widthLevel, int heightLevel) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int 
xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal*2, yVal, widthLevel, heightLevel)) { int widthCheckerboard = getCheckerboardWidth<half2>(widthLevel); int xValInCheckerboardPart = xVal/2; if (((yVal+xVal) % 2) == 0) //if true, then pixel is from part 1 of the checkerboard; otherwise, it's from part 2 { int checkerboardPartAdjustment = (yVal%2); half* messageLPrevStereoCheckerboard2Half = (half*)messageLPrevStereoCheckerboard2; half* messageRPrevStereoCheckerboard2Half = (half*)messageRPrevStereoCheckerboard2; if ((xVal >= 1) && (xVal < (widthLevel - 1)) && (yVal >= 1) && (yVal < (heightLevel - 1))) { // keep track of "best" disparity for current pixel int bestDisparity1 = 0; int bestDisparity2 = 0; float best_val1 = INF_BP; float best_val2 = INF_BP; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { half2 val = __hadd2(messageUPrevStereoCheckerboard2[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal + 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageDPrevStereoCheckerboard2[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal - 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); val = __hadd2(val, __halves2half2( messageLPrevStereoCheckerboard2Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2) + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageLPrevStereoCheckerboard2Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2 + 1) + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)])); val = __hadd2(val, __halves2half2( messageRPrevStereoCheckerboard2Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2) - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageRPrevStereoCheckerboard2Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2 + 1) - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)])); val = __hadd2(val, dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xValInCheckerboardPart, yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); float valLow = __low2float ( val); float valHigh = __high2float ( val); if (valLow < best_val1) { best_val1 = valLow; bestDisparity1 = currentDisparity; } if (valHigh < best_val2) { best_val2 = valHigh; bestDisparity2 = currentDisparity; } } disparityBetweenImagesDevice[yVal*widthLevel + (xVal*2 - checkerboardPartAdjustment)] = bestDisparity1; if (((xVal*2) + 2) < widthLevel) { disparityBetweenImagesDevice[yVal*widthLevel + (xVal*2 - checkerboardPartAdjustment) + 2] = bestDisparity2; } } else { disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment)] = 0; if (((xVal * 2) + 2) < widthLevel) { disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment) + 2] = 0; } } } else //pixel from part 2 of checkerboard { int checkerboardPartAdjustment = ((yVal + 1) % 2); half* messageLPrevStereoCheckerboard1Half = (half*)messageLPrevStereoCheckerboard1; half* messageRPrevStereoCheckerboard1Half = (half*)messageRPrevStereoCheckerboard1; if ((xVal >= 1) && (xVal < (widthLevel - 1)) && (yVal >= 1) && (yVal < (heightLevel - 
1))) { // keep track of "best" disparity for current pixel int bestDisparity1 = 0; int bestDisparity2 = 0; float best_val1 = INF_BP; float best_val2 = INF_BP; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { half2 val = __hadd2( messageUPrevStereoCheckerboard1[retrieveIndexInDataAndMessage( xValInCheckerboardPart, (yVal + 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageDPrevStereoCheckerboard1[retrieveIndexInDataAndMessage( xValInCheckerboardPart, (yVal - 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); val = __hadd2(val, __halves2half2( messageLPrevStereoCheckerboard1Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2) + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageLPrevStereoCheckerboard1Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2 + 1) + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)])); val = __hadd2(val, __halves2half2( messageRPrevStereoCheckerboard1Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2) - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageRPrevStereoCheckerboard1Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2 + 1) - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)])); val = __hadd2(val, dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xValInCheckerboardPart, yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); float val1 = __low2float(val); float val2 = __high2float(val); if (val1 < best_val1) { best_val1 = val1; bestDisparity1 = currentDisparity; } if (val2 < best_val2) { best_val2 = val2; bestDisparity2 = currentDisparity; } } disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment)] = bestDisparity1; if (((xVal * 2) + 2) < widthLevel) { disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment) + 2] = bestDisparity2; } } else { disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment)] = 0; if (((xVal * 2) + 2) < widthLevel) { disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment) + 2] = 0; } } } } } */ /*template<> __global__ void initializeBottomLevelDataStereo<half2>(levelProperties currentLevelProperties, float* image1PixelsDevice, float* image2PixelsDevice, half2* dataCostDeviceStereoCheckerboard1, half2* dataCostDeviceStereoCheckerboard2, float lambda_bp, float data_k_bp) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; int indexVal; int imageCheckerboardWidth = getCheckerboardWidth<half2>(widthImages); int xInCheckerboard = xVal / 2; if (withinImageBounds(xInCheckerboard, yVal, imageCheckerboardWidth, heightImages)) { int imageXPixelIndexStart = 0; int checkerboardNum = 1; //check which checkerboard data values for and make necessary adjustment to start if (((yVal) % 2) == 0) { if (((xVal) % 2) == 0) { checkerboardNum = 1; } else { checkerboardNum = 2; } } else { if (((xVal) % 2) == 0) { checkerboardNum = 2; } else { 
checkerboardNum = 1; } } imageXPixelIndexStart = xVal*2; if ((((yVal) % 2) == 0) && (checkerboardNum == 2)) { imageXPixelIndexStart -= 1; } if ((((yVal) % 2) == 1) && (checkerboardNum == 1)) { imageXPixelIndexStart -= 1; } //make sure that it is possible to check every disparity value if ((((imageXPixelIndexStart + 2) - (NUM_POSSIBLE_DISPARITY_VALUES-1)) >= 0)) { for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { float currentPixelImage1_low = 0.0; float currentPixelImage2_low = 0.0; if ((((imageXPixelIndexStart) - (NUM_POSSIBLE_DISPARITY_VALUES-1)) >= 0)) { if (withinImageBounds(imageXPixelIndexStart, yVal, widthImages, heightImages)) { currentPixelImage1_low = image1PixelsDevice[yVal * widthImages + imageXPixelIndexStart]; currentPixelImage2_low = image2PixelsDevice[yVal * widthImages + (imageXPixelIndexStart - currentDisparity)]; } } float currentPixelImage1_high = 0.0; float currentPixelImage2_high = 0.0; if (withinImageBounds(imageXPixelIndexStart + 2, yVal, widthImages, heightImages)) { currentPixelImage1_high = image1PixelsDevice[yVal * widthImages + (imageXPixelIndexStart + 2)]; currentPixelImage2_high = image2PixelsDevice[yVal * widthImages + ((imageXPixelIndexStart + 2) - currentDisparity)]; } indexVal = retrieveIndexInDataAndMessage(xInCheckerboard, yVal, imageCheckerboardWidth, heightImages, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES); half lowVal = (half)(lambda_bp * min(abs(currentPixelImage1_low - currentPixelImage2_low), data_k_bp)); half highVal = (half)(lambda_bp * min(abs(currentPixelImage1_high - currentPixelImage2_high), data_k_bp)); //data cost is equal to dataWeight value for weighting times the absolute difference in corresponding pixel intensity values capped at dataCostCap if (checkerboardNum == 1) { dataCostDeviceStereoCheckerboard1[indexVal] = __halves2half2(lowVal, highVal); } else { dataCostDeviceStereoCheckerboard2[indexVal] = __halves2half2(lowVal, highVal); } } } else { for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { indexVal = retrieveIndexInDataAndMessage(xInCheckerboard, yVal, imageCheckerboardWidth, heightImages, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES); //data cost is equal to dataWeight value for weighting times the absolute difference in corresponding pixel intensity values capped at dataCostCap if (((xVal + yVal) % 2) == 0) { dataCostDeviceStereoCheckerboard1[indexVal] = getZeroVal<half2>(); } else { dataCostDeviceStereoCheckerboard2[indexVal] = getZeroVal<half2>(); } } } } }*/
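//---------------------------------------------------------------------------------------------------------------------------------------------------
//illustrative sketch (not referenced by any kernel in this file): a minimal single-precision version of the forward/backward min-convolution ("dtStereo")
//used in the message computation above, i.e. Felzenszwalb's O(n) method from "Efficient Belief Propagation for Early Vision"; it mirrors the half/half2
//specializations shown in this file and assumes NUM_POSSIBLE_DISPARITY_VALUES is the compile-time disparity count used throughout; the function name
//dtStereoFloatSketch is illustrative only
__device__ void dtStereoFloatSketch(float f[NUM_POSSIBLE_DISPARITY_VALUES])
{
	//forward pass: lower each disparity's cost to at most its smaller-disparity neighbor plus the unit linear discontinuity cost
	for (int currentDisparity = 1; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++)
	{
		float prev = f[currentDisparity - 1] + 1.0f;
		if (prev < f[currentDisparity])
		{
			f[currentDisparity] = prev;
		}
	}
	//backward pass: propagate the same lower envelope back down from larger disparities
	for (int currentDisparity = NUM_POSSIBLE_DISPARITY_VALUES - 2; currentDisparity >= 0; currentDisparity--)
	{
		float prev = f[currentDisparity + 1] + 1.0f;
		if (prev < f[currentDisparity])
		{
			f[currentDisparity] = prev;
		}
	}
}
//in the msgStereo specializations above, this transform is applied to the aggregated neighbor-message-plus-data costs before they are truncated against
//(minimum + disc_k_bp) and normalized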
9687d9bc343c8e659268ad999f901e7eadfa776b.cu
/* Copyright (C) 2009 Scott Grauer-Gray, Chandra Kambhamettu, and Kannappan Palaniappan This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ //This file defines the methods to perform belief propagation for disparity map estimation from stereo images on CUDA //#include "kernalBpStereoHeader.cuh" #define PROCESSING_ON_GPU #include "../SharedFuncts/SharedBPProcessingFuncts.h" #undef PROCESSING_ON_GPU #include "bpStereoCudaParameters.h" #if ((USE_SHARED_MEMORY == 1) && (DISP_INDEX_START_REG_LOCAL_MEM > 0)) #include "SharedMemoryKernels/KernalBpStereoUseSharedMemory.cu" #elif ((USE_SHARED_MEMORY == 2) && (DISP_INDEX_START_REG_LOCAL_MEM > 0)) #include "SharedMemoryKernels/KernalBpStereoUseSharedMemoryActuallyDuplicateRegMem.cu" #elif ((USE_SHARED_MEMORY == 3) && (DISP_INDEX_START_REG_LOCAL_MEM > 0)) #include "SharedMemoryKernels/KernelBpStereoUseDynamicSharedMemory.cu" #elif ((USE_SHARED_MEMORY == 4) && (DISP_INDEX_START_REG_LOCAL_MEM > 0)) #include "SharedMemoryKernels/KernelBpStereoDataAndMessageInDynamicSharedMemory.cu" #else #if ((CURRENT_DATA_TYPE_PROCESSING == DATA_TYPE_PROCESSING_HALF) || (CURRENT_DATA_TYPE_PROCESSING == DATA_TYPE_PROCESSING_HALF_TWO)) //template specialization for processing messages with half-precision; has safeguard to check if valToNormalize goes to infinity and set output //for every disparity at point to be 0.0 if that's the case; this has only been observed when using more than 5 computation levels with half-precision template<> __device__ void msgStereo<half, half>(int xVal, int yVal, levelProperties& currentLevelProperties, half messageValsNeighbor1[NUM_POSSIBLE_DISPARITY_VALUES], half messageValsNeighbor2[NUM_POSSIBLE_DISPARITY_VALUES], half messageValsNeighbor3[NUM_POSSIBLE_DISPARITY_VALUES], half dataCosts[NUM_POSSIBLE_DISPARITY_VALUES], half* dstMessageArray, half disc_k_bp, bool dataAligned) { // aggregate and find min half minimum = INF_BP; half dst[NUM_POSSIBLE_DISPARITY_VALUES]; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = messageValsNeighbor1[currentDisparity] + messageValsNeighbor2[currentDisparity] + messageValsNeighbor3[currentDisparity] + dataCosts[currentDisparity]; if (dst[currentDisparity] < minimum) minimum = dst[currentDisparity]; } //retrieve the minimum value at each disparity in O(n) time using Felzenszwalb's method (see "Efficient Belief Propagation for Early Vision") dtStereo < half > (dst); // truncate minimum += disc_k_bp; // normalize half valToNormalize = 0; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { if (minimum < dst[currentDisparity]) { dst[currentDisparity] = minimum; } valToNormalize += dst[currentDisparity]; } //if valToNormalize is infinite or NaN (observed when using more than 5 computation levels with half-precision), //set destination vector to 0 for all disparities //note that 
may cause results to differ a little from ideal if (__hisnan(valToNormalize) || ((__hisinf(valToNormalize)) != 0)) { int destMessageArrayIndex = retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, 0, NUM_POSSIBLE_DISPARITY_VALUES); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dstMessageArray[destMessageArrayIndex] = (half) 0.0; #if OPTIMIZED_INDEXING_SETTING == 1 destMessageArrayIndex += currentLevelProperties.paddedWidthCheckerboardLevel; #else destMessageArrayIndex++; #endif //OPTIMIZED_INDEXING_SETTING == 1 } } else { valToNormalize /= NUM_POSSIBLE_DISPARITY_VALUES; int destMessageArrayIndex = retrieveIndexInDataAndMessage(xVal, yVal, currentLevelProperties.paddedWidthCheckerboardLevel, currentLevelProperties.heightLevel, 0, NUM_POSSIBLE_DISPARITY_VALUES); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] -= valToNormalize; dstMessageArray[destMessageArrayIndex] = dst[currentDisparity]; #if OPTIMIZED_INDEXING_SETTING == 1 destMessageArrayIndex += currentLevelProperties.paddedWidthCheckerboardLevel; #else destMessageArrayIndex++; #endif //OPTIMIZED_INDEXING_SETTING == 1 } } } #endif //((CURRENT_DATA_TYPE_PROCESSING == DATA_TYPE_PROCESSING_HALF) || (CURRENT_DATA_TYPE_PROCESSING == DATA_TYPE_PROCESSING_HALF_TWO)) #endif //#if ((USE_SHARED_MEMORY == 1) && (DISP_INDEX_START_REG_LOCAL_MEM > 0)) //initialize the "data cost" for each possible disparity between the two full-sized input images ("bottom" of the image pyramid) //the image data is stored in the CUDA arrays image1PixelsTextureBPStereo and image2PixelsTextureBPStereo template<typename T> __global__ void initializeBottomLevelDataStereo(levelProperties currentLevelProperties, float* image1PixelsDevice, float* image2PixelsDevice, T* dataCostDeviceStereoCheckerboard1, T* dataCostDeviceStereoCheckerboard2, float lambda_bp, float data_k_bp) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; int xInCheckerboard = xVal / 2; if (withinImageBounds(xInCheckerboard, yVal, currentLevelProperties.widthLevel, currentLevelProperties.heightLevel)) { initializeBottomLevelDataStereoPixel<T, T>(xVal, yVal, currentLevelProperties, image1PixelsDevice, image2PixelsDevice,dataCostDeviceStereoCheckerboard1, dataCostDeviceStereoCheckerboard2, lambda_bp, data_k_bp); } } //initialize the data costs at the "next" level up in the pyramid given that the data at the lower has been set template<typename T> __global__ void initializeCurrentLevelDataStereo(int checkerboardPart, levelProperties currentLevelProperties, levelProperties prevLevelProperties, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* dataCostDeviceToWriteTo, int offsetNum) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal, yVal, currentLevelProperties.widthCheckerboardLevel, currentLevelProperties.heightLevel)) { initializeCurrentLevelDataStereoPixel<T, T>( xVal, yVal, checkerboardPart, currentLevelProperties, prevLevelProperties, dataCostStereoCheckerboard1, dataCostStereoCheckerboard2, dataCostDeviceToWriteTo, offsetNum); } } //initialize the 
message values at each pixel of the current level to the default value template<typename T> __global__ void initializeMessageValsToDefaultKernel(levelProperties currentLevelProperties, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xValInCheckerboard = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xValInCheckerboard, yVal, currentLevelProperties.widthCheckerboardLevel, currentLevelProperties.heightLevel)) { //initialize message values in both checkerboards initializeMessageValsToDefaultKernelPixel<T>(xValInCheckerboard, yVal, currentLevelProperties, messageUDeviceCurrentCheckerboard1, messageDDeviceCurrentCheckerboard1, messageLDeviceCurrentCheckerboard1, messageRDeviceCurrentCheckerboard1, messageUDeviceCurrentCheckerboard2, messageDDeviceCurrentCheckerboard2, messageLDeviceCurrentCheckerboard2, messageRDeviceCurrentCheckerboard2); } } //kernel function to run the current iteration of belief propagation in parallel using the checkerboard update method where half the pixels in the "checkerboard" //scheme retrieve messages from each 4-connected neighbor and then update their message based on the retrieved messages and the data cost template<typename T> __global__ void runBPIterationUsingCheckerboardUpdates(int checkerboardToUpdate, levelProperties currentLevelProperties, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2, float disc_k_bp, bool dataAligned) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal, yVal, currentLevelProperties.widthLevel/2, currentLevelProperties.heightLevel)) { runBPIterationUsingCheckerboardUpdatesDeviceNoTexBoundAndLocalMemPixel<T, T>( xVal, yVal, checkerboardToUpdate, currentLevelProperties, dataCostStereoCheckerboard1, dataCostStereoCheckerboard2, messageUDeviceCurrentCheckerboard1, messageDDeviceCurrentCheckerboard1, messageLDeviceCurrentCheckerboard1, messageRDeviceCurrentCheckerboard1, messageUDeviceCurrentCheckerboard2, messageDDeviceCurrentCheckerboard2, messageLDeviceCurrentCheckerboard2, messageRDeviceCurrentCheckerboard2, disc_k_bp, 0, dataAligned); } } //kernel to copy the computed BP message values at the current level to the corresponding locations at the "next" level down //the kernel works from the point of view of the pixel at the prev level that is being copied to four different places template<typename T> __global__ void copyPrevLevelToNextLevelBPCheckerboardStereo( int checkerboardPart, levelProperties currentLevelProperties, levelProperties nextLevelProperties, T* messageUPrevStereoCheckerboard1, T* messageDPrevStereoCheckerboard1, T* messageLPrevStereoCheckerboard1, T* messageRPrevStereoCheckerboard1, T* 
messageUPrevStereoCheckerboard2, T* messageDPrevStereoCheckerboard2, T* messageLPrevStereoCheckerboard2, T* messageRPrevStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal, yVal, currentLevelProperties.widthCheckerboardLevel, currentLevelProperties.heightLevel)) { copyPrevLevelToNextLevelBPCheckerboardStereoPixel<T>(xVal, yVal, checkerboardPart, currentLevelProperties, nextLevelProperties, messageUPrevStereoCheckerboard1, messageDPrevStereoCheckerboard1, messageLPrevStereoCheckerboard1, messageRPrevStereoCheckerboard1, messageUPrevStereoCheckerboard2, messageDPrevStereoCheckerboard2, messageLPrevStereoCheckerboard2, messageRPrevStereoCheckerboard2, messageUDeviceCurrentCheckerboard1, messageDDeviceCurrentCheckerboard1, messageLDeviceCurrentCheckerboard1, messageRDeviceCurrentCheckerboard1, messageUDeviceCurrentCheckerboard2, messageDDeviceCurrentCheckerboard2, messageLDeviceCurrentCheckerboard2, messageRDeviceCurrentCheckerboard2); } } //retrieve the best disparity estimate from image 1 to image 2 for each pixel in parallel template<typename T> __global__ void retrieveOutputDisparityCheckerboardStereoOptimized(levelProperties currentLevelProperties, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUPrevStereoCheckerboard1, T* messageDPrevStereoCheckerboard1, T* messageLPrevStereoCheckerboard1, T* messageRPrevStereoCheckerboard1, T* messageUPrevStereoCheckerboard2, T* messageDPrevStereoCheckerboard2, T* messageLPrevStereoCheckerboard2, T* messageRPrevStereoCheckerboard2, float* disparityBetweenImagesDevice) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal, yVal, currentLevelProperties.widthCheckerboardLevel, currentLevelProperties.heightLevel)) { retrieveOutputDisparityCheckerboardStereoOptimizedPixel<T, T>(xVal, yVal, currentLevelProperties, dataCostStereoCheckerboard1, dataCostStereoCheckerboard2, messageUPrevStereoCheckerboard1, messageDPrevStereoCheckerboard1, messageLPrevStereoCheckerboard1, messageRPrevStereoCheckerboard1, messageUPrevStereoCheckerboard2, messageDPrevStereoCheckerboard2, messageLPrevStereoCheckerboard2, messageRPrevStereoCheckerboard2, disparityBetweenImagesDevice); } } template<typename T> __global__ void printDataAndMessageValsAtPointKernel(int xVal, int yVal, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2, int widthLevelCheckerboardPart, int heightLevel) { if (((xVal + yVal) % 2) == 0) { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; 
currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } else { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } } template<typename T> __device__ void printDataAndMessageValsAtPointDevice(int xVal, int yVal, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2, int widthLevelCheckerboardPart, int heightLevel) { if (((xVal + yVal) % 2) == 0) { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); 
printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } else { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } } template<typename T> __global__ void printDataAndMessageValsToPointKernel(int xVal, int yVal, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2, int widthLevelCheckerboardPart, int heightLevel) { int checkerboardAdjustment; if (((xVal + yVal) % 2) == 0) { checkerboardAdjustment = ((yVal)%2); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { checkerboardAdjustment = ((yVal+1)%2); } if (((xVal + yVal) % 2) == 0) { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal + 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) 
messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal - 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2 + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( (xVal / 2 - 1) + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } else { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal + 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal - 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2 + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( (xVal / 2 - 1) + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } } template<typename T> __device__ void printDataAndMessageValsToPointDevice(int xVal, int yVal, T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUDeviceCurrentCheckerboard1, T* messageDDeviceCurrentCheckerboard1, T* messageLDeviceCurrentCheckerboard1, T* messageRDeviceCurrentCheckerboard1, T* messageUDeviceCurrentCheckerboard2, T* messageDDeviceCurrentCheckerboard2, T* messageLDeviceCurrentCheckerboard2, T* messageRDeviceCurrentCheckerboard2, int widthLevelCheckerboardPart, int heightLevel) { int checkerboardAdjustment; if (((xVal + yVal) % 2) == 0) { checkerboardAdjustment = ((yVal)%2); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { checkerboardAdjustment = ((yVal+1)%2); } if (((xVal + yVal) % 2) == 0) { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal + 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); 
printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal - 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2 + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( (xVal / 2 - 1) + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } else { printf("xVal: %d\n", xVal); printf("yVal: %d\n", yVal); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { printf("DISP: %d\n", currentDisparity); printf("messageUPrevStereoCheckerboard: %f \n", (float) messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal + 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageDPrevStereoCheckerboard: %f \n", (float) messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2, yVal - 1, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageLPrevStereoCheckerboard: %f \n", (float) messageLDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal / 2 + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("messageRPrevStereoCheckerboard: %f \n", (float) messageRDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( (xVal / 2 - 1) + checkerboardAdjustment, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); printf("dataCostStereoCheckerboard: %f \n", (float) dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xVal / 2, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } } } /*template<> __device__ half2 getZeroVal<half2>() { return __floats2half2_rn (0.0, 0.0); } __device__ half2 getMinBothPartsHalf2(half2 val1, half2 val2) { half2 val1Less = __hlt2(val1, val2); half2 val2LessOrEqual = __hle2(val2, val1); return __hadd2(__hmul2(val1Less, val1), __hmul2(val2LessOrEqual, val2)); } template<> __device__ void dtStereo<half2>(half2 f[NUM_POSSIBLE_DISPARITY_VALUES]) { half2 prev; for (int currentDisparity = 1; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { prev = __hadd2(f[currentDisparity-1], __float2half2_rn(1.0f)); f[currentDisparity] = getMinBothPartsHalf2(prev, f[currentDisparity]); } for (int currentDisparity = NUM_POSSIBLE_DISPARITY_VALUES-2; currentDisparity >= 0; currentDisparity--) { prev = __hadd2(f[currentDisparity+1], __float2half2_rn(1.0f)); f[currentDisparity] = getMinBothPartsHalf2(prev, f[currentDisparity]); } }*/ /*template<> __device__ void msgStereo<half2>(half2 messageValsNeighbor1[NUM_POSSIBLE_DISPARITY_VALUES], half2 messageValsNeighbor2[NUM_POSSIBLE_DISPARITY_VALUES], half2 messageValsNeighbor3[NUM_POSSIBLE_DISPARITY_VALUES], half2 
dataCosts[NUM_POSSIBLE_DISPARITY_VALUES], half2 dst[NUM_POSSIBLE_DISPARITY_VALUES], half2 disc_k_bp) { // aggregate and find min half2 minimum = __float2half2_rn(INF_BP); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __hadd2(messageValsNeighbor1[currentDisparity], messageValsNeighbor2[currentDisparity]); dst[currentDisparity] = __hadd2(dst[currentDisparity], messageValsNeighbor3[currentDisparity]); dst[currentDisparity] = __hadd2(dst[currentDisparity], dataCosts[currentDisparity]); minimum = getMinBothPartsHalf2(dst[currentDisparity], minimum); } //retrieve the minimum value at each disparity in O(n) time using Felzenszwalb's method (see "Efficient Belief Propagation for Early Vision") dtStereo<half2>(dst); // truncate minimum = __hadd2(minimum, disc_k_bp); // normalize half2 valToNormalize = __float2half2_rn(0.0f); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = getMinBothPartsHalf2(minimum, dst[currentDisparity]); valToNormalize = __hadd2(valToNormalize, dst[currentDisparity]); } //if either valToNormalize in half2 is infinite or NaN, set destination vector to 0 for all disparities //note that may cause results to differ a little from ideal if (((__hisnan(__low2half(valToNormalize))) || ((__hisinf(__low2half(valToNormalize)) != 0))) || ((__hisnan(__high2half(valToNormalize))) || ((__hisinf(__high2half(valToNormalize)) != 0)))) { for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __floats2half2_rn(0.0f, 0.0f); } } else { valToNormalize = __h2div(valToNormalize, __float2half2_rn((float) NUM_POSSIBLE_DISPARITY_VALUES)); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __hsub2(dst[currentDisparity], valToNormalize); } } //check if both values in half2 are inf or nan /*if (((__hisnan(__low2half(valToNormalize))) || ((__hisinf(__low2half(valToNormalize)) != 0))) && ((__hisnan(__high2half(valToNormalize))) || ((__hisinf(__high2half(valToNormalize)) != 0)))) { for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __floats2half2_rn(0.0f, 0.0f); } } else if (((__hisnan(__low2half(valToNormalize))) || ((__hisinf(__low2half(valToNormalize)) != 0)))) { //lower half of half2 is inf or nan valToNormalize = __h2div(valToNormalize, __float2half2_rn((float) NUM_POSSIBLE_DISPARITY_VALUES)); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __hsub2(dst[currentDisparity], valToNormalize); } for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __halves2half2((half)0.0f, __high2half(dst[currentDisparity])); } } else if ((__hisnan(__high2half(valToNormalize))) || ((__hisinf(__high2half(valToNormalize)) != 0))) { //higher half of half2 is inf or nan valToNormalize = __h2div(valToNormalize, __float2half2_rn((float) NUM_POSSIBLE_DISPARITY_VALUES)); for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __hsub2(dst[currentDisparity], valToNormalize); } for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dst[currentDisparity] = __halves2half2( 
__low2half(dst[currentDisparity]), (half)0.0f); } } }*/ //device portion of the kernal function to run the current iteration of belief propagation in parallel using the checkerboard update method where half the pixels in the //"checkerboard" scheme retrieve messages from each 4-connected neighbor and then update their message based on the retrieved messages and the data cost //this function uses local memory to store the message and data values at each disparity in the intermediate step of current message computation //this function uses linear memory bound to textures to access the current data and message values /*template<> __device__ void runBPIterationUsingCheckerboardUpdatesDeviceNoTexBoundAndLocalMem<half2>(int xVal, int yVal, int checkerboardToUpdate, levelProperties& currentLevelProperties, half2* dataCostStereoCheckerboard1, half2* dataCostStereoCheckerboard2, half2* messageUDeviceCurrentCheckerboard1, half2* messageDDeviceCurrentCheckerboard1, half2* messageLDeviceCurrentCheckerboard1, half2* messageRDeviceCurrentCheckerboard1, half2* messageUDeviceCurrentCheckerboard2, half2* messageDDeviceCurrentCheckerboard2, half2* messageLDeviceCurrentCheckerboard2, half2* messageRDeviceCurrentCheckerboard2, float disc_k_bp, int offsetData) { } int indexWriteTo; int checkerboardAdjustment; //checkerboardAdjustment used for indexing into current checkerboard to update if (checkerboardToUpdate == CHECKERBOARD_PART_1) { checkerboardAdjustment = ((yVal)%2); } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { checkerboardAdjustment = ((yVal+1)%2); } //may want to look into (xVal < (widthLevelCheckerboardPart - 1) since it may affect the edges //make sure that the current point is not an edge/corner that doesn't have four neighbors that can pass values to it //if ((xVal >= (1 - checkerboardAdjustment)) && (xVal < (widthLevelCheckerboardPart - 1)) && (yVal > 0) && (yVal < (heightLevel - 1))) if ((xVal >= (1/*switch to 0 if trying to match half results exactly*//* - checkerboardAdjustment)) && (xVal < (widthLevelCheckerboardPart - checkerboardAdjustment)) && (yVal > 0) && (yVal < (heightLevel - 1))) { half2 prevUMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 prevDMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 prevLMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 prevRMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 dataMessage[NUM_POSSIBLE_DISPARITY_VALUES]; if (checkerboardToUpdate == CHECKERBOARD_PART_1) { half* messageLDeviceCurrentCheckerboard2Half = (half*)messageLDeviceCurrentCheckerboard2; half* messageRDeviceCurrentCheckerboard2Half = (half*)messageRDeviceCurrentCheckerboard2; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dataMessage[currentDisparity] = dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage( xVal, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]; prevUMessage[currentDisparity] = messageUDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal, (yVal + 1), widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; prevDMessage[currentDisparity] = messageDDeviceCurrentCheckerboard2[retrieveIndexInDataAndMessage( xVal, (yVal - 1), widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; prevLMessage[currentDisparity] = __halves2half2( messageLDeviceCurrentCheckerboard2Half[retrieveIndexInDataAndMessage( ((xVal * 2) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, 
currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageLDeviceCurrentCheckerboard2Half[retrieveIndexInDataAndMessage( ((xVal * 2 + 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); //if ((((xVal * 2) - 1) + checkerboardAdjustment) >= 0) { prevRMessage[currentDisparity] = __halves2half2( messageRDeviceCurrentCheckerboard2Half[retrieveIndexInDataAndMessage( (((xVal * 2) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageRDeviceCurrentCheckerboard2Half[retrieveIndexInDataAndMessage( (((xVal * 2 + 1) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } /*else { prevRMessage[currentDisparity] = __halves2half2((half)0.0f, messageRDeviceCurrentCheckerboard2Half[retrieveIndexInDataAndMessage( (((xVal * 2 + 1) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); }*//* } } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { half* messageLDeviceCurrentCheckerboard1Half = (half*)messageLDeviceCurrentCheckerboard1; half* messageRDeviceCurrentCheckerboard1Half = (half*)messageRDeviceCurrentCheckerboard1; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { dataMessage[currentDisparity] = dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xVal, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES, offsetData)]; prevUMessage[currentDisparity] = messageUDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal, (yVal + 1), widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; prevDMessage[currentDisparity] = messageDDeviceCurrentCheckerboard1[retrieveIndexInDataAndMessage( xVal, (yVal - 1), widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; prevLMessage[currentDisparity] = __halves2half2( messageLDeviceCurrentCheckerboard1Half[retrieveIndexInDataAndMessage( ((xVal * 2) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageLDeviceCurrentCheckerboard1Half[retrieveIndexInDataAndMessage( ((xVal * 2 + 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); //if ((((xVal * 2) - 1) + checkerboardAdjustment) >= 0) { prevRMessage[currentDisparity] = __halves2half2( messageRDeviceCurrentCheckerboard1Half[retrieveIndexInDataAndMessage( (((xVal * 2) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageRDeviceCurrentCheckerboard1Half[retrieveIndexInDataAndMessage( (((xVal * 2 + 1) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); } /*else { prevRMessage[currentDisparity] = __halves2half2((half) 0.0, messageRDeviceCurrentCheckerboard1Half[retrieveIndexInDataAndMessage( (((xVal * 2 + 1) - 1) + checkerboardAdjustment), yVal, widthLevelCheckerboardPart * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); }*//* } } half2 currentUMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 currentDMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 
currentLMessage[NUM_POSSIBLE_DISPARITY_VALUES]; half2 currentRMessage[NUM_POSSIBLE_DISPARITY_VALUES]; //uses the previous message values and data cost to calculate the current message values and store the results runBPIterationInOutDataInLocalMem<half2>(prevUMessage, prevDMessage, prevLMessage, prevRMessage, dataMessage, currentUMessage, currentDMessage, currentLMessage, currentRMessage, __float2half2_rn(disc_k_bp)); //write the calculated message values to global memory for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { indexWriteTo = retrieveIndexInDataAndMessage(xVal, yVal, widthLevelCheckerboardPart, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES); if (checkerboardToUpdate == CHECKERBOARD_PART_1) { messageUDeviceCurrentCheckerboard1[indexWriteTo] = currentUMessage[currentDisparity]; messageDDeviceCurrentCheckerboard1[indexWriteTo] = currentDMessage[currentDisparity]; messageLDeviceCurrentCheckerboard1[indexWriteTo] = currentLMessage[currentDisparity]; messageRDeviceCurrentCheckerboard1[indexWriteTo] = currentRMessage[currentDisparity]; } else //checkerboardToUpdate == CHECKERBOARD_PART_2 { messageUDeviceCurrentCheckerboard2[indexWriteTo] = currentUMessage[currentDisparity]; messageDDeviceCurrentCheckerboard2[indexWriteTo] = currentDMessage[currentDisparity]; messageLDeviceCurrentCheckerboard2[indexWriteTo] = currentLMessage[currentDisparity]; messageRDeviceCurrentCheckerboard2[indexWriteTo] = currentRMessage[currentDisparity]; } } } } */ //retrieve the best disparity estimate from image 1 to image 2 for each pixel in parallel /*template<> __global__ void retrieveOutputDisparityCheckerboardStereoOptimized<half2>(levelProperties currentLevelProperties, half2* dataCostStereoCheckerboard1, half2* dataCostStereoCheckerboard2, half2* messageUPrevStereoCheckerboard1, half2* messageDPrevStereoCheckerboard1, half2* messageLPrevStereoCheckerboard1, half2* messageRPrevStereoCheckerboard1, half2* messageUPrevStereoCheckerboard2, half2* messageDPrevStereoCheckerboard2, half2* messageLPrevStereoCheckerboard2, half2* messageRPrevStereoCheckerboard2, float* disparityBetweenImagesDevice) { }*/ //retrieve the best disparity estimate from image 1 to image 2 for each pixel in parallel /*template<typename T> __global__ void retrieveOutputDisparityCheckerboardStereo(T* dataCostStereoCheckerboard1, T* dataCostStereoCheckerboard2, T* messageUPrevStereoCheckerboard1, T* messageDPrevStereoCheckerboard1, T* messageLPrevStereoCheckerboard1, T* messageRPrevStereoCheckerboard1, T* messageUPrevStereoCheckerboard2, T* messageDPrevStereoCheckerboard2, T* messageLPrevStereoCheckerboard2, T* messageRPrevStereoCheckerboard2, float* disparityBetweenImagesDevice, int widthLevel, int heightLevel) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal, yVal, widthLevel, heightLevel)) { int widthCheckerboard = getCheckerboardWidth<T>(widthLevel); int xValInCheckerboardPart = xVal/2; if (((yVal+xVal) % 2) == 0) //if true, then pixel is from part 1 of the checkerboard; otherwise, it's from part 2 { int checkerboardPartAdjustment = (yVal%2); if ((xVal >= 1) && (xVal < (widthLevel - 1)) && (yVal >= 1) && (yVal < (heightLevel - 1))) { // keep track of "best" disparity for current pixel int bestDisparity = 0; T best_val = INF_BP; for (int currentDisparity = 0; currentDisparity < 
NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { T val = messageUPrevStereoCheckerboard2[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal + 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageDPrevStereoCheckerboard2[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal - 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageLPrevStereoCheckerboard2[retrieveIndexInDataAndMessage((xValInCheckerboardPart + checkerboardPartAdjustment), yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageRPrevStereoCheckerboard2[retrieveIndexInDataAndMessage((xValInCheckerboardPart - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xValInCheckerboardPart, yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; if (val < (best_val)) { best_val = val; bestDisparity = currentDisparity; } } disparityBetweenImagesDevice[yVal*widthLevel + xVal] = bestDisparity; } else { disparityBetweenImagesDevice[yVal*widthLevel + xVal] = 0; } } else //pixel from part 2 of checkerboard { int checkerboardPartAdjustment = ((yVal + 1) % 2); if ((xVal >= 1) && (xVal < (widthLevel - 1)) && (yVal >= 1) && (yVal < (heightLevel - 1))) { // keep track of "best" disparity for current pixel int bestDisparity = 0; T best_val = INF_BP; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { T val = messageUPrevStereoCheckerboard1[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal + 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageDPrevStereoCheckerboard1[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal - 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageLPrevStereoCheckerboard1[retrieveIndexInDataAndMessage((xValInCheckerboardPart + checkerboardPartAdjustment), yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + messageRPrevStereoCheckerboard1[retrieveIndexInDataAndMessage((xValInCheckerboardPart - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)] + dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage(xValInCheckerboardPart, yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]; if (val < (best_val)) { best_val = val; bestDisparity = currentDisparity; } } disparityBetweenImagesDevice[yVal*widthLevel + xVal] = bestDisparity; } else { disparityBetweenImagesDevice[yVal*widthLevel + xVal] = 0; } } } } //retrieve the best disparity estimate from image 1 to image 2 for each pixel in parallel template<> __global__ void retrieveOutputDisparityCheckerboardStereo<half2>(half2* dataCostStereoCheckerboard1, half2* dataCostStereoCheckerboard2, half2* messageUPrevStereoCheckerboard1, half2* messageDPrevStereoCheckerboard1, half2* messageLPrevStereoCheckerboard1, half2* messageRPrevStereoCheckerboard1, half2* messageUPrevStereoCheckerboard2, half2* messageDPrevStereoCheckerboard2, half2* messageLPrevStereoCheckerboard2, half2* messageRPrevStereoCheckerboard2, float* disparityBetweenImagesDevice, int widthLevel, int heightLevel) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int 
xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; if (withinImageBounds(xVal*2, yVal, widthLevel, heightLevel)) { int widthCheckerboard = getCheckerboardWidth<half2>(widthLevel); int xValInCheckerboardPart = xVal/2; if (((yVal+xVal) % 2) == 0) //if true, then pixel is from part 1 of the checkerboard; otherwise, it's from part 2 { int checkerboardPartAdjustment = (yVal%2); half* messageLPrevStereoCheckerboard2Half = (half*)messageLPrevStereoCheckerboard2; half* messageRPrevStereoCheckerboard2Half = (half*)messageRPrevStereoCheckerboard2; if ((xVal >= 1) && (xVal < (widthLevel - 1)) && (yVal >= 1) && (yVal < (heightLevel - 1))) { // keep track of "best" disparity for current pixel int bestDisparity1 = 0; int bestDisparity2 = 0; float best_val1 = INF_BP; float best_val2 = INF_BP; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { half2 val = __hadd2(messageUPrevStereoCheckerboard2[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal + 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageDPrevStereoCheckerboard2[retrieveIndexInDataAndMessage(xValInCheckerboardPart, (yVal - 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); val = __hadd2(val, __halves2half2( messageLPrevStereoCheckerboard2Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2) + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageLPrevStereoCheckerboard2Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2 + 1) + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)])); val = __hadd2(val, __halves2half2( messageRPrevStereoCheckerboard2Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2) - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageRPrevStereoCheckerboard2Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2 + 1) - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)])); val = __hadd2(val, dataCostStereoCheckerboard1[retrieveIndexInDataAndMessage(xValInCheckerboardPart, yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); float valLow = __low2float ( val); float valHigh = __high2float ( val); if (valLow < best_val1) { best_val1 = valLow; bestDisparity1 = currentDisparity; } if (valHigh < best_val2) { best_val2 = valHigh; bestDisparity2 = currentDisparity; } } disparityBetweenImagesDevice[yVal*widthLevel + (xVal*2 - checkerboardPartAdjustment)] = bestDisparity1; if (((xVal*2) + 2) < widthLevel) { disparityBetweenImagesDevice[yVal*widthLevel + (xVal*2 - checkerboardPartAdjustment) + 2] = bestDisparity2; } } else { disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment)] = 0; if (((xVal * 2) + 2) < widthLevel) { disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment) + 2] = 0; } } } else //pixel from part 2 of checkerboard { int checkerboardPartAdjustment = ((yVal + 1) % 2); half* messageLPrevStereoCheckerboard1Half = (half*)messageLPrevStereoCheckerboard1; half* messageRPrevStereoCheckerboard1Half = (half*)messageRPrevStereoCheckerboard1; if ((xVal >= 1) && (xVal < (widthLevel - 1)) && (yVal >= 1) && (yVal < (heightLevel - 
1))) { // keep track of "best" disparity for current pixel int bestDisparity1 = 0; int bestDisparity2 = 0; float best_val1 = INF_BP; float best_val2 = INF_BP; for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { half2 val = __hadd2( messageUPrevStereoCheckerboard1[retrieveIndexInDataAndMessage( xValInCheckerboardPart, (yVal + 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageDPrevStereoCheckerboard1[retrieveIndexInDataAndMessage( xValInCheckerboardPart, (yVal - 1), widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); val = __hadd2(val, __halves2half2( messageLPrevStereoCheckerboard1Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2) + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageLPrevStereoCheckerboard1Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2 + 1) + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)])); val = __hadd2(val, __halves2half2( messageRPrevStereoCheckerboard1Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2) - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)], messageRPrevStereoCheckerboard1Half[retrieveIndexInDataAndMessage( ((xValInCheckerboardPart * 2 + 1) - 1 + checkerboardPartAdjustment), yVal, widthCheckerboard * 2, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)])); val = __hadd2(val, dataCostStereoCheckerboard2[retrieveIndexInDataAndMessage( xValInCheckerboardPart, yVal, widthCheckerboard, heightLevel, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES)]); float val1 = __low2float(val); float val2 = __high2float(val); if (val1 < best_val1) { best_val1 = val1; bestDisparity1 = currentDisparity; } if (val2 < best_val2) { best_val2 = val2; bestDisparity2 = currentDisparity; } } disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment)] = bestDisparity1; if (((xVal * 2) + 2) < widthLevel) { disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment) + 2] = bestDisparity2; } } else { disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment)] = 0; if (((xVal * 2) + 2) < widthLevel) { disparityBetweenImagesDevice[yVal * widthLevel + (xVal * 2 - checkerboardPartAdjustment) + 2] = 0; } } } } } */ /*template<> __global__ void initializeBottomLevelDataStereo<half2>(levelProperties currentLevelProperties, float* image1PixelsDevice, float* image2PixelsDevice, half2* dataCostDeviceStereoCheckerboard1, half2* dataCostDeviceStereoCheckerboard2, float lambda_bp, float data_k_bp) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int xVal = bx * BLOCK_SIZE_WIDTH_BP + tx; int yVal = by * BLOCK_SIZE_HEIGHT_BP + ty; int indexVal; int imageCheckerboardWidth = getCheckerboardWidth<half2>(widthImages); int xInCheckerboard = xVal / 2; if (withinImageBounds(xInCheckerboard, yVal, imageCheckerboardWidth, heightImages)) { int imageXPixelIndexStart = 0; int checkerboardNum = 1; //check which checkerboard data values for and make necessary adjustment to start if (((yVal) % 2) == 0) { if (((xVal) % 2) == 0) { checkerboardNum = 1; } else { checkerboardNum = 2; } } else { if (((xVal) % 2) == 0) { checkerboardNum = 2; } else { 
checkerboardNum = 1; } } imageXPixelIndexStart = xVal*2; if ((((yVal) % 2) == 0) && (checkerboardNum == 2)) { imageXPixelIndexStart -= 1; } if ((((yVal) % 2) == 1) && (checkerboardNum == 1)) { imageXPixelIndexStart -= 1; } //make sure that it is possible to check every disparity value if ((((imageXPixelIndexStart + 2) - (NUM_POSSIBLE_DISPARITY_VALUES-1)) >= 0)) { for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { float currentPixelImage1_low = 0.0; float currentPixelImage2_low = 0.0; if ((((imageXPixelIndexStart) - (NUM_POSSIBLE_DISPARITY_VALUES-1)) >= 0)) { if (withinImageBounds(imageXPixelIndexStart, yVal, widthImages, heightImages)) { currentPixelImage1_low = image1PixelsDevice[yVal * widthImages + imageXPixelIndexStart]; currentPixelImage2_low = image2PixelsDevice[yVal * widthImages + (imageXPixelIndexStart - currentDisparity)]; } } float currentPixelImage1_high = 0.0; float currentPixelImage2_high = 0.0; if (withinImageBounds(imageXPixelIndexStart + 2, yVal, widthImages, heightImages)) { currentPixelImage1_high = image1PixelsDevice[yVal * widthImages + (imageXPixelIndexStart + 2)]; currentPixelImage2_high = image2PixelsDevice[yVal * widthImages + ((imageXPixelIndexStart + 2) - currentDisparity)]; } indexVal = retrieveIndexInDataAndMessage(xInCheckerboard, yVal, imageCheckerboardWidth, heightImages, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES); half lowVal = (half)(lambda_bp * min(abs(currentPixelImage1_low - currentPixelImage2_low), data_k_bp)); half highVal = (half)(lambda_bp * min(abs(currentPixelImage1_high - currentPixelImage2_high), data_k_bp)); //data cost is equal to dataWeight value for weighting times the absolute difference in corresponding pixel intensity values capped at dataCostCap if (checkerboardNum == 1) { dataCostDeviceStereoCheckerboard1[indexVal] = __halves2half2(lowVal, highVal); } else { dataCostDeviceStereoCheckerboard2[indexVal] = __halves2half2(lowVal, highVal); } } } else { for (int currentDisparity = 0; currentDisparity < NUM_POSSIBLE_DISPARITY_VALUES; currentDisparity++) { indexVal = retrieveIndexInDataAndMessage(xInCheckerboard, yVal, imageCheckerboardWidth, heightImages, currentDisparity, NUM_POSSIBLE_DISPARITY_VALUES); //data cost is equal to dataWeight value for weighting times the absolute difference in corresponding pixel intensity values capped at dataCostCap if (((xVal + yVal) % 2) == 0) { dataCostDeviceStereoCheckerboard1[indexVal] = getZeroVal<half2>(); } else { dataCostDeviceStereoCheckerboard2[indexVal] = getZeroVal<half2>(); } } } } }*/
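// Hedged illustrative sketch: the commented-out half2 code above applies dtStereo, the O(n) min-convolution
// ("lower envelope") pass it attributes to Felzenszwalb's efficient belief propagation, as a forward pass then a
// backward pass over the disparity axis. The scalar version below shows that structure only; the name
// dtStereoSketch is hypothetical, it assumes the same NUM_POSSIBLE_DISPARITY_VALUES constant used throughout this
// file, and the templated dtStereo actually defined in this file may differ in detail.
template<typename T>
__device__ void dtStereoSketch(T f[NUM_POSSIBLE_DISPARITY_VALUES])
{
	// forward pass: reaching disparity d from d-1 costs an extra 1.0 (truncated-linear smoothness term)
	for (int d = 1; d < NUM_POSSIBLE_DISPARITY_VALUES; d++)
	{
		T prev = f[d - 1] + (T)1.0;
		if (prev < f[d])
			f[d] = prev;
	}
	// backward pass: symmetric update from disparity d+1
	for (int d = NUM_POSSIBLE_DISPARITY_VALUES - 2; d >= 0; d--)
	{
		T prev = f[d + 1] + (T)1.0;
		if (prev < f[d])
			f[d] = prev;
	}
}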
da91875890f57703db48afc4a87e467898d6da18.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2021-2022 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/unary/convert.h" #include "cunumeric/unary/convert_template.inl" #include "cunumeric/cuda_help.h" namespace cunumeric { template <typename Function, typename ARG, typename RES> static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) dense_kernel(size_t volume, Function func, RES* out, const ARG* in) { const size_t idx = global_tid_1d(); if (idx >= volume) return; out[idx] = func(in[idx]); } template <typename Function, typename ReadAcc, typename WriteAcc, typename Pitches, typename Rect> static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) generic_kernel(size_t volume, Function func, WriteAcc out, ReadAcc in, Pitches pitches, Rect rect) { const size_t idx = global_tid_1d(); if (idx >= volume) return; auto point = pitches.unflatten(idx, rect.lo); out[point] = func(in[point]); } template <ConvertCode NAN_OP, Type::Code DST_TYPE, Type::Code SRC_TYPE, int DIM> struct ConvertImplBody<VariantKind::GPU, NAN_OP, DST_TYPE, SRC_TYPE, DIM> { using OP = ConvertOp<NAN_OP, DST_TYPE, SRC_TYPE>; using SRC = legate_type_of<SRC_TYPE>; using DST = legate_type_of<DST_TYPE>; void operator()(OP func, AccessorWO<DST, DIM> out, AccessorRO<SRC, DIM> in, const Pitches<DIM - 1>& pitches, const Rect<DIM>& rect, bool dense) const { const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; auto stream = get_cached_stream(); if (dense) { auto outptr = out.ptr(rect); auto inptr = in.ptr(rect); hipLaunchKernelGGL(( dense_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, volume, func, outptr, inptr); } else { hipLaunchKernelGGL(( generic_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, volume, func, out, in, pitches, rect); } CHECK_CUDA_STREAM(stream); } }; /*static*/ void ConvertTask::gpu_variant(TaskContext& context) { convert_template<VariantKind::GPU>(context); } } // namespace cunumeric
da91875890f57703db48afc4a87e467898d6da18.cu
/* Copyright 2021-2022 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/unary/convert.h" #include "cunumeric/unary/convert_template.inl" #include "cunumeric/cuda_help.h" namespace cunumeric { template <typename Function, typename ARG, typename RES> static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) dense_kernel(size_t volume, Function func, RES* out, const ARG* in) { const size_t idx = global_tid_1d(); if (idx >= volume) return; out[idx] = func(in[idx]); } template <typename Function, typename ReadAcc, typename WriteAcc, typename Pitches, typename Rect> static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) generic_kernel(size_t volume, Function func, WriteAcc out, ReadAcc in, Pitches pitches, Rect rect) { const size_t idx = global_tid_1d(); if (idx >= volume) return; auto point = pitches.unflatten(idx, rect.lo); out[point] = func(in[point]); } template <ConvertCode NAN_OP, Type::Code DST_TYPE, Type::Code SRC_TYPE, int DIM> struct ConvertImplBody<VariantKind::GPU, NAN_OP, DST_TYPE, SRC_TYPE, DIM> { using OP = ConvertOp<NAN_OP, DST_TYPE, SRC_TYPE>; using SRC = legate_type_of<SRC_TYPE>; using DST = legate_type_of<DST_TYPE>; void operator()(OP func, AccessorWO<DST, DIM> out, AccessorRO<SRC, DIM> in, const Pitches<DIM - 1>& pitches, const Rect<DIM>& rect, bool dense) const { const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; auto stream = get_cached_stream(); if (dense) { auto outptr = out.ptr(rect); auto inptr = in.ptr(rect); dense_kernel<<<blocks, THREADS_PER_BLOCK, 0, stream>>>(volume, func, outptr, inptr); } else { generic_kernel<<<blocks, THREADS_PER_BLOCK, 0, stream>>>( volume, func, out, in, pitches, rect); } CHECK_CUDA_STREAM(stream); } }; /*static*/ void ConvertTask::gpu_variant(TaskContext& context) { convert_template<VariantKind::GPU>(context); } } // namespace cunumeric
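// Hedged illustrative sketch: the .hip/.cu pair above differs essentially only in kernel-launch syntax, with the
// grid sized by an integer ceil-divide over the element count. The toy element-wise kernel below shows that same
// pattern with the CUDA triple-chevron launch and, in a comment, the hipLaunchKernelGGL form that hipify emits.
// The names scaleKernel, launchScale, and THREADS are hypothetical and are not part of the files above.
#include <cuda_runtime.h>

constexpr int THREADS = 128;

__global__ void scaleKernel(size_t n, float alpha, float* out, const float* in)
{
  // one thread per element; threads past the end simply exit
  const size_t idx = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
  if (idx >= n) return;
  out[idx] = alpha * in[idx];
}

static void launchScale(size_t n, float alpha, float* out, const float* in, cudaStream_t stream)
{
  // round up so every element is covered by exactly one thread
  const size_t blocks = (n + THREADS - 1) / THREADS;
  scaleKernel<<<blocks, THREADS, 0, stream>>>(n, alpha, out, in);
  // hipify-perl equivalent of the launch above:
  //   hipLaunchKernelGGL(scaleKernel, dim3(blocks), dim3(THREADS), 0, stream, n, alpha, out, in);
}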
326a5b3eee7cdce5f1625fc0908f61127f49d7f5.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <cusparse_v2.h> #include "rocblas.h" #include <hiprand/hiprand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" //poison log likelihood for one observation __device__ KC_FP_TYPE lh(KC_FP_TYPE y, KC_FP_TYPE x, KC_FP_TYPE g, KC_FP_TYPE dt) { KC_FP_TYPE logex = KC_MAX((g*x>80)?g*x:KC_LOG(1.0+KC_EXP(g*x)),1e-30);//1e-30 return y*(KC_LOG(logex)+KC_LOG(dt)) - logex*dt - KC_GAMMALN(y+1.0); } //sums up log likelihood of each trial given model parameters __global__ void kcSumGBfinal(const KC_FP_TYPE * log_p_tr, KC_FP_TYPE * log_p, const int NT) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < 1) { log_p[0] = 0; for(int ii = 0; ii < NT; ii++) { log_p[0] += log_p_tr[ii]; } } } //averages log likelihood of each simulated path // (one thread for each trial) __global__ void kcSumGBlogpTr(const KC_FP_TYPE * log_p, KC_FP_TYPE * log_p_tr, const int NT, const int nSims) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { log_p_tr[idx] = 0; KC_FP_TYPE trSum = 0; KC_FP_TYPE log_x = 0; log_p_tr[idx] = KC_SQRT(-1.0); //computes log( 1/nSims * \sum exp( log p(y | sim paths)) ) for a single trial // does the sum in a slightly more numerical stable way than just blindly exponentiating all the log likleihoods for(int ii = 0; ii < nSims && isnan(log_p_tr[idx]);ii++) { trSum = 1 ; log_x = log_p[ii*NT+idx]; for(int kk = 0; kk < ii; kk++) { trSum += KC_EXP(log_p[kk*NT+idx] - log_x); } for(int kk = ii+1; kk < nSims; kk++) { trSum += KC_EXP(log_p[kk*NT+idx] - log_x); } if(trSum > 1e-25 && !isnan(trSum) && !isinf(trSum)) { log_p_tr[idx] = log_x-KC_LOG((double)nSims)+KC_LOG(trSum); break; } } } } //simulates a ramping (diffusion-to-bound) path for each trial and computes likelihood __global__ void kcSimGBPaths(const KC_FP_TYPE * y, const int * trIdx, const int * betaIdx, KC_FP_TYPE * xx, const KC_FP_TYPE * b,const KC_FP_TYPE w2,const KC_FP_TYPE l_0, const KC_FP_TYPE g, const KC_FP_TYPE dt, KC_FP_TYPE * log_p, const int NT, const int TT, const int sim, const int trsPerKernel, const int trialsToSim) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int trNum = idx/trsPerKernel; int ss = (idx % trsPerKernel); int simNum = ss + sim*trsPerKernel; if(trNum < NT && simNum < trialsToSim && ss < trsPerKernel) { int T1 = trIdx[trNum]; //xx contains zero mean Gaussian noise of variance \omega^2 int currIdx = simNum*(NT)+trNum; int x_offset = ss*TT; xx[T1+x_offset] += l_0; //xx[T1] now contains initial point for simulated diffusion trajectory for this trial log_p[currIdx] = lh(y[T1],xx[T1+x_offset],g,dt); for(int ii = T1+1; ii < trIdx[trNum+1];ii++) { //progates particle forward in time xx[ii+x_offset] = (xx[ii-1+x_offset] >= 1.0)?1.0:KC_MIN(xx[ii+x_offset] + xx[ii-1+x_offset]+b[betaIdx[ii]],1.0); //log likelihood of single observation (bin) y[ii] given diffusion path is at x[ii] log_p[currIdx] += lh(y[ii],xx[ii+x_offset],g,dt); } } } //Estimates the log probability of a set of spike trains under the ramping model given a set of fixed parameters // This estimation is made by Monte Carlo simulations from the model to integrate out latent variable //args // 0 = y (observations) // 1 = NT (number of trials) // 2 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. 
Includes final value that should be length of y) // 3 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. values begin at 0 instead of 1 to be consistent with C, unlike MATLAB) // 4 = w (variance of diffusion process) // 5 = l_0 (starting lambda value) // 6 = g (absorbing boundary effective height) // 7 = dt (bin size in seconds) // 8 = number of samples to use to estimate log probability of observations (I recommend using at least 1000) //outputs (left-hand side) // 0 = log p(y|\theta) // 1 = log p(y|\theta) for each individual trial void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { hipError_t ce; int trsPerKernel = 32; //load up trial data unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * y = kcGetArrayData(prhs[0]); int * trIdx = kcGetArrayDataInt(prhs[1]); unsigned int NT = kcGetArrayNumEl(prhs[1])-1; int * betaIdx = kcGetArrayDataInt(prhs[2],TT); //how many simulations to use to estimate log p(y|\theta) int trialsToSim = (int)mxGetScalar(prhs[8]); //load up parameters to simulate model if(mxGetClassID(prhs[3]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Beta input wrong floating point type (kcSimGaussianBound)!"); } KC_FP_TYPE * b = (KC_FP_TYPE *)mxGetPr(prhs[3]); int numBetas = mxGetNumberOfElements(prhs[3]); KC_FP_TYPE * b_gpu; ce = hipMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas); if(ce != hipSuccess) { mexPrintf("Error allocating space for betas on device - first allocation in function (kcSimGaussianBound) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } checkCudaErrors(hipMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,hipMemcpyHostToDevice)); KC_FP_TYPE w = mxGetScalar(prhs[4]); KC_FP_TYPE l_0 = mxGetScalar(prhs[5]); KC_FP_TYPE g = mxGetScalar(prhs[6]); KC_FP_TYPE dt = mxGetScalar(prhs[7]); //setup CUDA variables + random number generator int randSize = TT*trsPerKernel + (((TT*trsPerKernel)%2==0)?0:1); KC_FP_TYPE * xx; checkCudaErrors(hipMalloc((void**)&xx,randSize*sizeof(KC_FP_TYPE))); hiprandGenerator_t curandGen = 0; hiprandStatus_t hiprandStatus_t; hiprandStatus_t = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND-1 error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors"); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); hiprandStatus_t = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND-2 error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors"); } int blockSize = 128; int nBlocks = (NT*trsPerKernel)/blockSize + (((NT*trsPerKernel)%blockSize==0)?0:1); int blockSizeT = 128; int nBlocksT = NT/blockSizeT + ((NT%blockSizeT==0)?0:1); //allocates sspace on GPU for simulating the likelihood KC_FP_TYPE * log_p; KC_FP_TYPE * log_p_tr; KC_FP_TYPE * sum_log_p; checkCudaErrors(hipMalloc((void**)&log_p,sizeof(KC_FP_TYPE)*NT*trialsToSim)); checkCudaErrors(hipMalloc((void**)&log_p_tr,sizeof(KC_FP_TYPE)*NT)); checkCudaErrors(hipMalloc((void**)&sum_log_p,sizeof(KC_FP_TYPE)*1)); // generate AR1 noise //clock_t begin, end; //double time_spent; //begin = clock(); for(int kk = 0; kk < trialsToSim/trsPerKernel + ((trialsToSim%trsPerKernel==0)?0:1); kk++) { //generates zero mean Gaussian noise with correct variance hiprandStatus_t = KC_RANDOM_NORMAL_FUNCTION(curandGen,xx,randSize,0,KC_SQRT(w)); 
if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND gen error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors"); } //checkCudaErrors(hipDeviceSynchronize()); //calculate path + logP hipLaunchKernelGGL(( kcSimGBPaths), dim3(nBlocks),dim3(blockSize), 0, 0, y,trIdx,betaIdx,xx,b_gpu,w,l_0,g,dt,log_p,NT,TT,kk,trsPerKernel,trialsToSim); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error in simulating of kcSimGaussianBound.cu "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA errors"); } } //end = clock(); //time_spent = ((double)(end - begin))/((double)CLOCKS_PER_SEC); //mexPrintf("ML = %2.4f\n",time_spent); //average likelihood of each sampled path to get log p(y|\theta) for each trial hipLaunchKernelGGL(( kcSumGBlogpTr), dim3(nBlocksT),dim3(blockSizeT), 0, 0, log_p,log_p_tr,NT,trialsToSim); checkCudaErrors(hipDeviceSynchronize()); //sums up log likelihood of each trial hipLaunchKernelGGL(( kcSumGBfinal), dim3(1),dim3(1), 0, 0, log_p_tr,sum_log_p,NT); checkCudaErrors(hipDeviceSynchronize()); //copy back to host if(nlhs > 0) { plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE *)mxGetPr(plhs[0]),sum_log_p,1*sizeof(KC_FP_TYPE),hipMemcpyDeviceToHost)); } if(nlhs > 1) { plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE *)mxGetPr(plhs[1]),log_p,NT*sizeof(KC_FP_TYPE),hipMemcpyDeviceToHost)); } //free up CUDA variables checkCudaErrors(hiprandDestroyGenerator(curandGen)); checkCudaErrors(hipFree(xx)); checkCudaErrors(hipFree(b_gpu)); checkCudaErrors(hipFree(log_p)); checkCudaErrors(hipFree(log_p_tr)); checkCudaErrors(hipFree(sum_log_p)); }
326a5b3eee7cdce5f1625fc0908f61127f49d7f5.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cuda_runtime.h> #include <cusparse_v2.h> #include "cublas_v2.h" #include <curand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" //poison log likelihood for one observation __device__ KC_FP_TYPE lh(KC_FP_TYPE y, KC_FP_TYPE x, KC_FP_TYPE g, KC_FP_TYPE dt) { KC_FP_TYPE logex = KC_MAX((g*x>80)?g*x:KC_LOG(1.0+KC_EXP(g*x)),1e-30);//1e-30 return y*(KC_LOG(logex)+KC_LOG(dt)) - logex*dt - KC_GAMMALN(y+1.0); } //sums up log likelihood of each trial given model parameters __global__ void kcSumGBfinal(const KC_FP_TYPE * log_p_tr, KC_FP_TYPE * log_p, const int NT) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < 1) { log_p[0] = 0; for(int ii = 0; ii < NT; ii++) { log_p[0] += log_p_tr[ii]; } } } //averages log likelihood of each simulated path // (one thread for each trial) __global__ void kcSumGBlogpTr(const KC_FP_TYPE * log_p, KC_FP_TYPE * log_p_tr, const int NT, const int nSims) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { log_p_tr[idx] = 0; KC_FP_TYPE trSum = 0; KC_FP_TYPE log_x = 0; log_p_tr[idx] = KC_SQRT(-1.0); //computes log( 1/nSims * \sum exp( log p(y | sim paths)) ) for a single trial // does the sum in a slightly more numerical stable way than just blindly exponentiating all the log likleihoods for(int ii = 0; ii < nSims && isnan(log_p_tr[idx]);ii++) { trSum = 1 ; log_x = log_p[ii*NT+idx]; for(int kk = 0; kk < ii; kk++) { trSum += KC_EXP(log_p[kk*NT+idx] - log_x); } for(int kk = ii+1; kk < nSims; kk++) { trSum += KC_EXP(log_p[kk*NT+idx] - log_x); } if(trSum > 1e-25 && !isnan(trSum) && !isinf(trSum)) { log_p_tr[idx] = log_x-KC_LOG((double)nSims)+KC_LOG(trSum); break; } } } } //simulates a ramping (diffusion-to-bound) path for each trial and computes likelihood __global__ void kcSimGBPaths(const KC_FP_TYPE * y, const int * trIdx, const int * betaIdx, KC_FP_TYPE * xx, const KC_FP_TYPE * b,const KC_FP_TYPE w2,const KC_FP_TYPE l_0, const KC_FP_TYPE g, const KC_FP_TYPE dt, KC_FP_TYPE * log_p, const int NT, const int TT, const int sim, const int trsPerKernel, const int trialsToSim) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int trNum = idx/trsPerKernel; int ss = (idx % trsPerKernel); int simNum = ss + sim*trsPerKernel; if(trNum < NT && simNum < trialsToSim && ss < trsPerKernel) { int T1 = trIdx[trNum]; //xx contains zero mean Gaussian noise of variance \omega^2 int currIdx = simNum*(NT)+trNum; int x_offset = ss*TT; xx[T1+x_offset] += l_0; //xx[T1] now contains initial point for simulated diffusion trajectory for this trial log_p[currIdx] = lh(y[T1],xx[T1+x_offset],g,dt); for(int ii = T1+1; ii < trIdx[trNum+1];ii++) { //progates particle forward in time xx[ii+x_offset] = (xx[ii-1+x_offset] >= 1.0)?1.0:KC_MIN(xx[ii+x_offset] + xx[ii-1+x_offset]+b[betaIdx[ii]],1.0); //log likelihood of single observation (bin) y[ii] given diffusion path is at x[ii] log_p[currIdx] += lh(y[ii],xx[ii+x_offset],g,dt); } } } //Estimates the log probability of a set of spike trains under the ramping model given a set of fixed parameters // This estimation is made by Monte Carlo simulations from the model to integrate out latent variable //args // 0 = y (observations) // 1 = NT (number of trials) // 2 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. 
Includes final value that should be length of y) // 3 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. values begin at 0 instead of 1 to be consistent with C, unlike MATLAB) // 4 = w (variance of diffusion process) // 5 = l_0 (starting lambda value) // 6 = g (absorbing boundary effective height) // 7 = dt (bin size in seconds) // 8 = number of samples to use to estimate log probability of observations (I recommend using at least 1000) //outputs (left-hand side) // 0 = log p(y|\theta) // 1 = log p(y|\theta) for each individual trial void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { cudaError_t ce; int trsPerKernel = 32; //load up trial data unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * y = kcGetArrayData(prhs[0]); int * trIdx = kcGetArrayDataInt(prhs[1]); unsigned int NT = kcGetArrayNumEl(prhs[1])-1; int * betaIdx = kcGetArrayDataInt(prhs[2],TT); //how many simulations to use to estimate log p(y|\theta) int trialsToSim = (int)mxGetScalar(prhs[8]); //load up parameters to simulate model if(mxGetClassID(prhs[3]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Beta input wrong floating point type (kcSimGaussianBound)!"); } KC_FP_TYPE * b = (KC_FP_TYPE *)mxGetPr(prhs[3]); int numBetas = mxGetNumberOfElements(prhs[3]); KC_FP_TYPE * b_gpu; ce = cudaMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas); if(ce != cudaSuccess) { mexPrintf("Error allocating space for betas on device - first allocation in function (kcSimGaussianBound) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } checkCudaErrors(cudaMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,cudaMemcpyHostToDevice)); KC_FP_TYPE w = mxGetScalar(prhs[4]); KC_FP_TYPE l_0 = mxGetScalar(prhs[5]); KC_FP_TYPE g = mxGetScalar(prhs[6]); KC_FP_TYPE dt = mxGetScalar(prhs[7]); //setup CUDA variables + random number generator int randSize = TT*trsPerKernel + (((TT*trsPerKernel)%2==0)?0:1); KC_FP_TYPE * xx; checkCudaErrors(cudaMalloc((void**)&xx,randSize*sizeof(KC_FP_TYPE))); curandGenerator_t curandGen = 0; curandStatus_t curandStatus; curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT); if(curandStatus != CURAND_STATUS_SUCCESS ) { mexPrintf("CURAND-1 error %d\n",(int)curandStatus); mexErrMsgTxt("CUDA errors"); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed); if(curandStatus != CURAND_STATUS_SUCCESS ) { mexPrintf("CURAND-2 error %d\n",(int)curandStatus); mexErrMsgTxt("CUDA errors"); } int blockSize = 128; int nBlocks = (NT*trsPerKernel)/blockSize + (((NT*trsPerKernel)%blockSize==0)?0:1); int blockSizeT = 128; int nBlocksT = NT/blockSizeT + ((NT%blockSizeT==0)?0:1); //allocates sspace on GPU for simulating the likelihood KC_FP_TYPE * log_p; KC_FP_TYPE * log_p_tr; KC_FP_TYPE * sum_log_p; checkCudaErrors(cudaMalloc((void**)&log_p,sizeof(KC_FP_TYPE)*NT*trialsToSim)); checkCudaErrors(cudaMalloc((void**)&log_p_tr,sizeof(KC_FP_TYPE)*NT)); checkCudaErrors(cudaMalloc((void**)&sum_log_p,sizeof(KC_FP_TYPE)*1)); // generate AR1 noise //clock_t begin, end; //double time_spent; //begin = clock(); for(int kk = 0; kk < trialsToSim/trsPerKernel + ((trialsToSim%trsPerKernel==0)?0:1); kk++) { //generates zero mean Gaussian noise with correct variance curandStatus = KC_RANDOM_NORMAL_FUNCTION(curandGen,xx,randSize,0,KC_SQRT(w)); if(curandStatus != 
CURAND_STATUS_SUCCESS ) { mexPrintf("CURAND gen error %d\n",(int)curandStatus); mexErrMsgTxt("CUDA errors"); } //checkCudaErrors(cudaDeviceSynchronize()); //calculate path + logP kcSimGBPaths<<<nBlocks,blockSize>>>(y,trIdx,betaIdx,xx,b_gpu,w,l_0,g,dt,log_p,NT,TT,kk,trsPerKernel,trialsToSim); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error in simulation kernel of kcSimGaussianBound.cu "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA errors"); } } //end = clock(); //time_spent = ((double)(end - begin))/((double)CLOCKS_PER_SEC); //mexPrintf("ML = %2.4f\n",time_spent); //average likelihood of each sampled path to get log p(y|\theta) for each trial kcSumGBlogpTr<<<nBlocksT,blockSizeT>>>(log_p,log_p_tr,NT,trialsToSim); checkCudaErrors(cudaDeviceSynchronize()); //sums up log likelihood of each trial kcSumGBfinal<<<1,1>>>(log_p_tr,sum_log_p,NT); checkCudaErrors(cudaDeviceSynchronize()); //copy back to host if(nlhs > 0) { plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[0]),sum_log_p,1*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost)); } if(nlhs > 1) { plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[1]),log_p_tr,NT*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost)); } //free up CUDA variables checkCudaErrors(curandDestroyGenerator(curandGen)); checkCudaErrors(cudaFree(xx)); checkCudaErrors(cudaFree(b_gpu)); checkCudaErrors(cudaFree(log_p)); checkCudaErrors(cudaFree(log_p_tr)); checkCudaErrors(cudaFree(sum_log_p)); }
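The per-trial averaging in kcSumGBlogpTr is a log-sum-exp style reduction: one sample's log likelihood is factored out so the remaining exponentials stay in range. A minimal host-side sketch of the same quantity, log((1/N) * sum_i exp(logp_i)), using the standard max-subtraction variant (logMeanExp is a hypothetical helper, not part of the MEX file):

#include <cmath>
#include <vector>
#include <algorithm>

// Stable log( (1/N) * sum_i exp(logp[i]) ): subtract the largest log value before
// exponentiating so no term overflows, then add it back at the end.
// Assumes logp is non-empty.
double logMeanExp(const std::vector<double>& logp) {
    const double m = *std::max_element(logp.begin(), logp.end());
    double s = 0.0;
    for (double v : logp) s += std::exp(v - m);   // every term is <= 1
    return m + std::log(s) - std::log((double)logp.size());
}

kcSumGBlogpTr computes the same thing but picks its reference sample by trial and error, retrying until the accumulated sum is finite, which this sketch omits.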
95b4efa5bccea1d2bd1877b2272418edb464c7d1.hip
// !!! This is a file automatically generated by hipify!!! ///* // * File: BucketSort.c // * Author: vijay manoharan // *. C program for running bucket sort on CUDA. // * Created on November 8, 2014, 8:19 PM // */ #include<stdio.h> #include<stdlib.h> #include<sys/time.h> #include <stdint.h> //header file for random value generation. #include "rnd.h" #include "rnd.c" //headed file for cuda #include <hip/hip_runtime.h> int size; /* * Using quickselect to partially sort the array * algorithm refered from wikipedia "http://en.wikipedia.org/wiki/Quickselect" * Implementation is done by me. */ int partition(float a[size], int left, int right, int pivot) { float pivotValue = a[pivot], temp; int index = left; int i; //move pivot to rightmost of the array a[pivot] = a[right]; a[right] = pivotValue; /* We start from left of the array and keep comparing the values. * If we find any value greater than pivot we swap the pivot with that value. * this is done so that the value on the left are always lesser than pivot */ for (i = left; i < right; i++) { if (a[i] < pivotValue) { temp = a[i]; a[i] = a[index]; a[index] = temp; index++; } } /*move pivot back to its initial position */ temp = a[index]; a[index] = a[right]; a[right] = temp; return index; } /*the main aspect of the function is * find the pivot value which is pivot given and return the value * next thing is all the value in the left index of pivot index is smaller * all the values in the right index is greater. */ float quickSelect(float a[size], int left, int right, int pivot) { //base condition if list contains one element return that if (left == right) return a[left]; /* select the pivot index between left and right * i am selecting the mid value */ int pivotindex = (right + left) / 2; pivotindex = partition(a, left, right, pivotindex); //int size_leftarray = pivotposition - low + 1; //pivot in its correct position if (pivot == pivotindex) return a[pivotindex]; else if (pivot < pivotindex) return quickSelect(a, left, pivotindex - 1, pivot); else return quickSelect(a, pivotindex + 1, right, pivot); } //merging the sorted left and right of the merge sort void merge(float *a, int low, int mid, int high) { //printf("inside merge\n"); int i = 0; int left = low, right = mid + 1; int j; float temp[high - low + 1 ]; /* Create a temp sorted list * get the min of left part and right part * if min is in the left part iterate left or else iterate right */ while ((left <= mid)&&(right <= high)) { if (a[left] < a[right]) temp[i++] = a[left++]; else temp[i++] = a[right++]; } //left part contains larger values while (left <= mid) temp[i++] = a[left++]; //right part contains larger values while (right <= high) temp[i++] = a[right++]; //copy the sorted values for (j = 0; j < i; j++) { a[low + j] = temp[j]; } } //merge sort algorithm void Mergesort(float *a, int low, int high) { int mid; if (low < high) { mid = (low + high) / 2; Mergesort(a, low, mid); Mergesort(a, mid + 1, high); merge(a, low, mid, high); } } void sort(float *a, int n, int m) { float temp; int i, j; for (i = n; i < m - 1; i++) { for (j = i; j < m; j++) if (a[i] < a[j]) { temp = a[i]; a[i] = a[j]; a[j] = temp; } } } int isnumber(int size) { int flag = 0; return flag; } __global__ void cudaBucketSort(float *array, float *pivots) { /* 1.call merge sort for each threads * 2.Range is given based on the pivot values. * 3.Each thread will sort its own data set. 
*/ int start = pivots[(blockIdx.x * blockDim.x) + threadIdx.x]; int end = pivots[(blockIdx.x * blockDim.x) + threadIdx.x+1]-1; printf("pivot index %d\t start %d \t end %d\n", (blockIdx.x * blockDim.x) + threadIdx.x, start, end); Mergesort(array, start, end); } int main(int argc, char** argv) { if (strcmp("-t", argv[1]) != 0) { printf("invalid command line agrument\n"); return 0; } size = atoi(argv[2]); //variables for size and number of threads, blocks etc. int BlockNum; if (size <= 10000) { BlockNum = 1; } else { BlockNum = size / 10000; } int Range=10; int TotalThread = size/Range; int ThreadSize = TotalThread/BlockNum; //variables for pivots int pivots[TotalThread + 1]; float pivots_value[TotalThread + 1]; struct timeval tv1, tv2; float array[size]; printf("the size is %d", size); int i, j, k; //cuda device variables float *dev_array, *dev_pivots; hipMalloc((void **) &dev_array, size * sizeof (float)); hipMalloc((void **) &dev_pivots, (TotalThread + 1) * sizeof (float)); random_number_generator_normal(array, size, size); // for (i = 0; i < size; i++) { // printf("%d-->> %f\t", i, array[i]); // } //find 25th element gettimeofday(&tv1, NULL); for (i = 0; i < TotalThread; i++) { pivots[i] = TotalThread * (i); } pivots[i] = size; pivots_value[0] = 0; pivots_value[i] = array[size - 1]; for (i = 1; i < TotalThread; i++) { pivots_value[i] = quickSelect(array, pivots[i - 1], size - 1, (pivots[i])); printf("is the %d largest value %f\n", pivots[i], pivots_value[i]); //find 5th largest element } /* MergeSort here. * */ hipMemcpy(dev_array, array, size * sizeof (float), hipMemcpyHostToDevice); hipMemcpy(dev_pivots, pivots, (TotalThread + 1) * sizeof (float), hipMemcpyHostToDevice); cudaBucketSort << <BlockNum, ThreadSize>>>(dev_array, dev_pivots); hipMemcpy(array, dev_array, size * sizeof (float), hipMemcpyDeviceToHost); //cuda function here. printf("\nmergesort starts \n"); gettimeofday(&tv2, NULL); //sort(array); printf("sorted list is \n"); for (i = 0; i < size; i++) { printf("%d-->> %f\n", i, array[i]); } printf("\nTotal time = %f seconds\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec)); return 0; }
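The merge() routine above is the usual two-pointer merge of adjacent sorted runs into a temporary buffer; for comparison only, the standard-library equivalent of that step (not used by the file) is std::inplace_merge:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    // Two already-sorted runs sharing one array, as Mergesort produces them.
    std::vector<float> a = {1.0f, 4.0f, 7.0f, 2.0f, 3.0f, 9.0f};
    std::inplace_merge(a.begin(), a.begin() + 3, a.end());
    for (float v : a) std::printf("%.1f ", v);   // prints 1.0 2.0 3.0 4.0 7.0 9.0
    std::printf("\n");
    return 0;
}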
95b4efa5bccea1d2bd1877b2272418edb464c7d1.cu
///* // * File: BucketSort.c // * Author: vijay manoharan // *. C program for running bucket sort on CUDA. // * Created on November 8, 2014, 8:19 PM // */ #include<stdio.h> #include<stdlib.h> #include<sys/time.h> #include <stdint.h> //header file for random value generation. #include "rnd.h" #include "rnd.c" //headed file for cuda #include <cuda.h> int size; /* * Using quickselect to partially sort the array * algorithm refered from wikipedia "http://en.wikipedia.org/wiki/Quickselect" * Implementation is done by me. */ int partition(float a[size], int left, int right, int pivot) { float pivotValue = a[pivot], temp; int index = left; int i; //move pivot to rightmost of the array a[pivot] = a[right]; a[right] = pivotValue; /* We start from left of the array and keep comparing the values. * If we find any value greater than pivot we swap the pivot with that value. * this is done so that the value on the left are always lesser than pivot */ for (i = left; i < right; i++) { if (a[i] < pivotValue) { temp = a[i]; a[i] = a[index]; a[index] = temp; index++; } } /*move pivot back to its initial position */ temp = a[index]; a[index] = a[right]; a[right] = temp; return index; } /*the main aspect of the function is * find the pivot value which is pivot given and return the value * next thing is all the value in the left index of pivot index is smaller * all the values in the right index is greater. */ float quickSelect(float a[size], int left, int right, int pivot) { //base condition if list contains one element return that if (left == right) return a[left]; /* select the pivot index between left and right * i am selecting the mid value */ int pivotindex = (right + left) / 2; pivotindex = partition(a, left, right, pivotindex); //int size_leftarray = pivotposition - low + 1; //pivot in its correct position if (pivot == pivotindex) return a[pivotindex]; else if (pivot < pivotindex) return quickSelect(a, left, pivotindex - 1, pivot); else return quickSelect(a, pivotindex + 1, right, pivot); } //merging the sorted left and right of the merge sort void merge(float *a, int low, int mid, int high) { //printf("inside merge\n"); int i = 0; int left = low, right = mid + 1; int j; float temp[high - low + 1 ]; /* Create a temp sorted list * get the min of left part and right part * if min is in the left part iterate left or else iterate right */ while ((left <= mid)&&(right <= high)) { if (a[left] < a[right]) temp[i++] = a[left++]; else temp[i++] = a[right++]; } //left part contains larger values while (left <= mid) temp[i++] = a[left++]; //right part contains larger values while (right <= high) temp[i++] = a[right++]; //copy the sorted values for (j = 0; j < i; j++) { a[low + j] = temp[j]; } } //merge sort algorithm void Mergesort(float *a, int low, int high) { int mid; if (low < high) { mid = (low + high) / 2; Mergesort(a, low, mid); Mergesort(a, mid + 1, high); merge(a, low, mid, high); } } void sort(float *a, int n, int m) { float temp; int i, j; for (i = n; i < m - 1; i++) { for (j = i; j < m; j++) if (a[i] < a[j]) { temp = a[i]; a[i] = a[j]; a[j] = temp; } } } int isnumber(int size) { int flag = 0; return flag; } __global__ void cudaBucketSort(float *array, float *pivots) { /* 1.call merge sort for each threads * 2.Range is given based on the pivot values. * 3.Each thread will sort its own data set. 
*/ int start = pivots[(blockIdx.x * blockDim.x) + threadIdx.x]; int end = pivots[(blockIdx.x * blockDim.x) + threadIdx.x+1]-1; printf("pivot index %d\t start %d \t end %d\n", (blockIdx.x * blockDim.x) + threadIdx.x, start, end); Mergesort(array, start, end); } int main(int argc, char** argv) { if (strcmp("-t", argv[1]) != 0) { printf("invalid command line agrument\n"); return 0; } size = atoi(argv[2]); //variables for size and number of threads, blocks etc. int BlockNum; if (size <= 10000) { BlockNum = 1; } else { BlockNum = size / 10000; } int Range=10; int TotalThread = size/Range; int ThreadSize = TotalThread/BlockNum; //variables for pivots int pivots[TotalThread + 1]; float pivots_value[TotalThread + 1]; struct timeval tv1, tv2; float array[size]; printf("the size is %d", size); int i, j, k; //cuda device variables float *dev_array, *dev_pivots; cudaMalloc((void **) &dev_array, size * sizeof (float)); cudaMalloc((void **) &dev_pivots, (TotalThread + 1) * sizeof (float)); random_number_generator_normal(array, size, size); // for (i = 0; i < size; i++) { // printf("%d-->> %f\t", i, array[i]); // } //find 25th element gettimeofday(&tv1, NULL); for (i = 0; i < TotalThread; i++) { pivots[i] = TotalThread * (i); } pivots[i] = size; pivots_value[0] = 0; pivots_value[i] = array[size - 1]; for (i = 1; i < TotalThread; i++) { pivots_value[i] = quickSelect(array, pivots[i - 1], size - 1, (pivots[i])); printf("is the %d largest value %f\n", pivots[i], pivots_value[i]); //find 5th largest element } /* MergeSort here. * */ cudaMemcpy(dev_array, array, size * sizeof (float), cudaMemcpyHostToDevice); cudaMemcpy(dev_pivots, pivots, (TotalThread + 1) * sizeof (float), cudaMemcpyHostToDevice); cudaBucketSort << <BlockNum, ThreadSize>>>(dev_array, dev_pivots); cudaMemcpy(array, dev_array, size * sizeof (float), cudaMemcpyDeviceToHost); //cuda function here. printf("\nmergesort starts \n"); gettimeofday(&tv2, NULL); //sort(array); printf("sorted list is \n"); for (i = 0; i < size; i++) { printf("%d-->> %f\n", i, array[i]); } printf("\nTotal time = %f seconds\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec)); return 0; }
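quickSelect/partition above implement selection by repeated partitioning so that the pivot values can bound each thread's bucket; a small host-only comparison using std::nth_element, which yields the same post-condition the bucketing relies on (the values and names here are illustrative, not from the file):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    std::vector<float> a = {0.9f, 0.1f, 0.5f, 0.7f, 0.3f, 0.2f, 0.8f};
    const size_t k = a.size() / 2;                    // index of the desired pivot
    std::nth_element(a.begin(), a.begin() + k, a.end());
    // a[k] now holds the k-th smallest value; everything to its left is <= a[k]
    // and everything to its right is >= a[k], which is the post-condition the
    // per-thread bucket boundaries depend on.
    std::printf("pivot value = %f\n", a[k]);
    return 0;
}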
e3a366781a06cdb0c7195fb3a2f950fe4ac6d16b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_tea_leaf_axpby_kernel [4][1]; static int dims_tea_leaf_axpby_kernel_h [4][1] = {0}; //user function __device__ void tea_leaf_axpby_kernel_gpu(ACC<double> & u, const ACC<double> & p, const double * alpha, const double * beta) { u(0,0) = (*alpha) * u(0,0) + (*beta)*p(0,0); } __global__ void ops_tea_leaf_axpby_kernel( double* __restrict arg0, double* __restrict arg1, const double arg2, const double arg3, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_axpby_kernel[0][0]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_axpby_kernel[1][0]; if (idx_x < size0 && idx_y < size1) { ACC<double> argp0(dims_tea_leaf_axpby_kernel[0][0], arg0); const ACC<double> argp1(dims_tea_leaf_axpby_kernel[1][0], arg1); tea_leaf_axpby_kernel_gpu(argp0, argp1, &arg2, &arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_tea_leaf_axpby_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_tea_leaf_axpby_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,27)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(27,"tea_leaf_axpby_kernel"); OPS_kernels[27].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[2]; #endif #ifdef OPS_MPI if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; if (xdim0 != dims_tea_leaf_axpby_kernel_h[0][0] || xdim1 != dims_tea_leaf_axpby_kernel_h[1][0]) { dims_tea_leaf_axpby_kernel_h[0][0] = xdim0; dims_tea_leaf_axpby_kernel_h[1][0] = xdim1; cutilSafeCall(hipMemcpyToSymbol( dims_tea_leaf_axpby_kernel, dims_tea_leaf_axpby_kernel_h, sizeof(dims_tea_leaf_axpby_kernel))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? 
args[1].dat->type_size : args[1].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[27].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) hipLaunchKernelGGL(( ops_tea_leaf_axpby_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], *(double *)arg2.data, *(double *)arg3.data,x_size, y_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[27].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[27].mpi_time += t2-t1; OPS_kernels[27].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[27].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_tea_leaf_axpby_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 27; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 27; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(1*sizeof(double)); memcpy(tmp, arg2.data,1*sizeof(double)); desc->args[2].data = tmp; desc->args[3] = arg3; tmp = (char*)malloc(1*sizeof(double)); memcpy(tmp, arg3.data,1*sizeof(double)); desc->args[3].data = tmp; desc->function = ops_par_loop_tea_leaf_axpby_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(27,"tea_leaf_axpby_kernel"); } ops_enqueue_kernel(desc); } #endif
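The xdim0/xdim1 caching before hipMemcpyToSymbol exists so the __constant__ dimension table is re-uploaded only when the dat sizes actually change between launches. A stripped-down sketch of that idiom with hypothetical names (CUDA runtime spelling; the file uses the HIP equivalent):

#include <cuda_runtime.h>

__constant__ int c_dims[2];      // device-side table read by the kernels
static int h_dims[2] = {0, 0};   // host-side cache of what was last uploaded

// Re-upload the dimension table only when the sizes differ from the cached
// values, so repeated launches over same-sized data skip the symbol copy.
void setDims(int xdim0, int xdim1) {
    if (xdim0 != h_dims[0] || xdim1 != h_dims[1]) {
        h_dims[0] = xdim0;
        h_dims[1] = xdim1;
        cudaMemcpyToSymbol(c_dims, h_dims, sizeof(h_dims));
    }
}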
e3a366781a06cdb0c7195fb3a2f950fe4ac6d16b.cu
// // auto-generated by ops.py // __constant__ int dims_tea_leaf_axpby_kernel [4][1]; static int dims_tea_leaf_axpby_kernel_h [4][1] = {0}; //user function __device__ void tea_leaf_axpby_kernel_gpu(ACC<double> & u, const ACC<double> & p, const double * alpha, const double * beta) { u(0,0) = (*alpha) * u(0,0) + (*beta)*p(0,0); } __global__ void ops_tea_leaf_axpby_kernel( double* __restrict arg0, double* __restrict arg1, const double arg2, const double arg3, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_axpby_kernel[0][0]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_axpby_kernel[1][0]; if (idx_x < size0 && idx_y < size1) { ACC<double> argp0(dims_tea_leaf_axpby_kernel[0][0], arg0); const ACC<double> argp1(dims_tea_leaf_axpby_kernel[1][0], arg1); tea_leaf_axpby_kernel_gpu(argp0, argp1, &arg2, &arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_tea_leaf_axpby_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_tea_leaf_axpby_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,27)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(27,"tea_leaf_axpby_kernel"); OPS_kernels[27].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[2]; #endif #ifdef OPS_MPI if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; if (xdim0 != dims_tea_leaf_axpby_kernel_h[0][0] || xdim1 != dims_tea_leaf_axpby_kernel_h[1][0]) { dims_tea_leaf_axpby_kernel_h[0][0] = xdim0; dims_tea_leaf_axpby_kernel_h[1][0] = xdim1; cutilSafeCall(cudaMemcpyToSymbol( dims_tea_leaf_axpby_kernel, dims_tea_leaf_axpby_kernel_h, sizeof(dims_tea_leaf_axpby_kernel))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? 
args[1].dat->type_size : args[1].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[27].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) ops_tea_leaf_axpby_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], *(double *)arg2.data, *(double *)arg3.data,x_size, y_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[27].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[27].mpi_time += t2-t1; OPS_kernels[27].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[27].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_tea_leaf_axpby_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 27; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 27; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(1*sizeof(double)); memcpy(tmp, arg2.data,1*sizeof(double)); desc->args[2].data = tmp; desc->args[3] = arg3; tmp = (char*)malloc(1*sizeof(double)); memcpy(tmp, arg3.data,1*sizeof(double)); desc->args[3].data = tmp; desc->function = ops_par_loop_tea_leaf_axpby_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(27,"tea_leaf_axpby_kernel"); } ops_enqueue_kernel(desc); } #endif
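Underneath the OPS bookkeeping (base offsets, halo exchanges, lazy-execution descriptors), the kernel is an element-wise axpby update. A minimal standalone sketch of just that update, with hypothetical names and no OPS dependency:

#include <cuda_runtime.h>

// u[i] = alpha * u[i] + beta * p[i] -- the update tea_leaf_axpby_kernel_gpu
// applies at every stencil point.
__global__ void axpby(double* u, const double* p, double alpha, double beta, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) u[i] = alpha * u[i] + beta * p[i];
}

// Launch sketch: axpby<<<(n + 255) / 256, 256>>>(d_u, d_p, alpha, beta, n);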
77297e3928c5b6974463dcb9b2f890a338e8b380.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Modifications Copyright (c) Microsoft. */ // The code below is mostly copied from Pytorch PersistentSoftmax.cuh #include "hip/hip_runtime.h" #include "core/providers/rocm/cu_inc/common.cuh" #include "core/providers/rocm/math/softmax_warpwise_impl.cuh" #include "core/providers/rocm/math/softmax_blockwise_impl.cuh" #include "core/providers/rocm/math/softmax.h" #include <limits> namespace onnxruntime { namespace rocm { template <typename input_t, typename output_t, typename acc_t, bool is_log_softmax> void dispatch_warpwise_softmax_forward(hipStream_t stream, output_t* dst, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count) { if (softmax_elements == 0) { return; } else { int log2_elements = log2_ceil(softmax_elements); const int next_power_of_two = 1 << log2_elements; // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. int warp_size = (next_power_of_two < GPU_WARP_SIZE_HOST) ? next_power_of_two : GPU_WARP_SIZE_HOST; // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward. int batches_per_warp = 1; // use 256 threads per block to maximimize gpu utilization constexpr int threads_per_block = 256; int warps_per_block = (threads_per_block / warp_size); int batches_per_block = warps_per_block * batches_per_warp; int blocks = (batch_count + batches_per_block - 1) / batches_per_block; dim3 threads(warp_size, warps_per_block, 1); // Launch code would be more elegant if C++ supported FOR CONSTEXPR switch (log2_elements) { case 0: // 1 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 0, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 1: // 2 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 1, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 2: // 4 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 2, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 3: // 8 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 3, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 4: // 16 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 4, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 5: // 32 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 5, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, 
softmax_elements_stride, softmax_elements); break; case 6: // 64 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 6, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 7: // 128 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 7, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 8: // 256 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 8, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 9: // 512 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 9, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 10: // 1024 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 10, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; default: break; } } } #define SPECIALIZED_SOFTMAX_IMPL(input_t, output_t, acc_t) \ template void dispatch_warpwise_softmax_forward<input_t, output_t, acc_t, false>(hipStream_t stream, output_t * dst, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count); \ template void dispatch_warpwise_softmax_forward<input_t, output_t, acc_t, true>(hipStream_t stream, output_t * dst, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count); SPECIALIZED_SOFTMAX_IMPL(float, float, float) SPECIALIZED_SOFTMAX_IMPL(half, half, float) SPECIALIZED_SOFTMAX_IMPL(double, double, double) SPECIALIZED_SOFTMAX_IMPL(BFloat16, BFloat16, float) template <typename input_t, typename output_t, typename acc_t, bool is_log_softmax> void dispatch_blockwise_softmax_forward(hipStream_t stream, output_t* output, const input_t* input, int softmax_elements, int softmax_elements_stride, int batch_count) { dim3 grid(batch_count); constexpr int ILP = sizeof(float4) / sizeof(input_t); dim3 block = SoftMax_getBlockSize(ILP, softmax_elements); if (is_log_softmax) { hipLaunchKernelGGL(( softmax_block_forward<ILP, input_t, acc_t, output_t, LogSoftMaxForwardEpilogue>) , dim3(grid), dim3(block), block.x * sizeof(acc_t), stream, output, const_cast<input_t*>(input), softmax_elements); } else { hipLaunchKernelGGL(( softmax_block_forward<ILP, input_t, acc_t, output_t, SoftMaxForwardEpilogue>) , dim3(grid), dim3(block), block.x * sizeof(acc_t), stream, output, const_cast<input_t*>(input), softmax_elements); } } #define SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(input_t, output_t, acc_t) \ template void dispatch_blockwise_softmax_forward<input_t, output_t, acc_t, false>(hipStream_t stream, output_t* output, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count); \ template void dispatch_blockwise_softmax_forward<input_t, output_t, acc_t, true>(hipStream_t stream, output_t* output, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count); SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(float, float, float) SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(half, half, float) SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(double, double, double) SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(BFloat16, BFloat16, float) } }
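The only systematic difference between this file and its CUDA twin below is the kernel-launch spelling that hipify rewrites. A toy sketch of that correspondence (illustrative kernel; the CUDA form is active, the HIP form is shown in the comment):

#include <cuda_runtime.h>

template <int LOG2>
__global__ void toyKernel(float* dst, const float* src, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dst[i] = src[i];
}

void launchToy(float* dst, const float* src, int n, cudaStream_t stream) {
    const dim3 blocks((n + 255) / 256), threads(256);
    // CUDA spelling:
    toyKernel<5><<<blocks, threads, 0, stream>>>(dst, src, n);
    // HIP spelling that hipify emits for the same call:
    //   hipLaunchKernelGGL(HIP_KERNEL_NAME(toyKernel<5>),
    //                      dim3(blocks), dim3(threads), 0, stream, dst, src, n);
}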
77297e3928c5b6974463dcb9b2f890a338e8b380.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Modifications Copyright (c) Microsoft. */ // The code below is mostly copied from Pytorch PersistentSoftmax.cuh #include "hip/hip_runtime.h" #include "core/providers/rocm/cu_inc/common.cuh" #include "core/providers/rocm/math/softmax_warpwise_impl.cuh" #include "core/providers/rocm/math/softmax_blockwise_impl.cuh" #include "core/providers/rocm/math/softmax.h" #include <limits> namespace onnxruntime { namespace rocm { template <typename input_t, typename output_t, typename acc_t, bool is_log_softmax> void dispatch_warpwise_softmax_forward(hipStream_t stream, output_t* dst, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count) { if (softmax_elements == 0) { return; } else { int log2_elements = log2_ceil(softmax_elements); const int next_power_of_two = 1 << log2_elements; // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. int warp_size = (next_power_of_two < GPU_WARP_SIZE_HOST) ? next_power_of_two : GPU_WARP_SIZE_HOST; // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward. int batches_per_warp = 1; // use 256 threads per block to maximimize gpu utilization constexpr int threads_per_block = 256; int warps_per_block = (threads_per_block / warp_size); int batches_per_block = warps_per_block * batches_per_warp; int blocks = (batch_count + batches_per_block - 1) / batches_per_block; dim3 threads(warp_size, warps_per_block, 1); // Launch code would be more elegant if C++ supported FOR CONSTEXPR switch (log2_elements) { case 0: // 1 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 0, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 1: // 2 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 1, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 2: // 4 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 2, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 3: // 8 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 3, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 4: // 16 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 4, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 5: // 32 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 5, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 6: // 64 
hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 6, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 7: // 128 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 7, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 8: // 256 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 8, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 9: // 512 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 9, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; case 10: // 1024 hipLaunchKernelGGL(HIP_KERNEL_NAME(softmax_warp_forward<input_t, output_t, acc_t, 10, is_log_softmax>), dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements); break; default: break; } } } #define SPECIALIZED_SOFTMAX_IMPL(input_t, output_t, acc_t) \ template void dispatch_warpwise_softmax_forward<input_t, output_t, acc_t, false>(hipStream_t stream, output_t * dst, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count); \ template void dispatch_warpwise_softmax_forward<input_t, output_t, acc_t, true>(hipStream_t stream, output_t * dst, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count); SPECIALIZED_SOFTMAX_IMPL(float, float, float) SPECIALIZED_SOFTMAX_IMPL(half, half, float) SPECIALIZED_SOFTMAX_IMPL(double, double, double) SPECIALIZED_SOFTMAX_IMPL(BFloat16, BFloat16, float) template <typename input_t, typename output_t, typename acc_t, bool is_log_softmax> void dispatch_blockwise_softmax_forward(hipStream_t stream, output_t* output, const input_t* input, int softmax_elements, int softmax_elements_stride, int batch_count) { dim3 grid(batch_count); constexpr int ILP = sizeof(float4) / sizeof(input_t); dim3 block = SoftMax_getBlockSize(ILP, softmax_elements); if (is_log_softmax) { softmax_block_forward<ILP, input_t, acc_t, output_t, LogSoftMaxForwardEpilogue> <<<grid, block, block.x * sizeof(acc_t), stream>>>(output, const_cast<input_t*>(input), softmax_elements); } else { softmax_block_forward<ILP, input_t, acc_t, output_t, SoftMaxForwardEpilogue> <<<grid, block, block.x * sizeof(acc_t), stream>>>(output, const_cast<input_t*>(input), softmax_elements); } } #define SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(input_t, output_t, acc_t) \ template void dispatch_blockwise_softmax_forward<input_t, output_t, acc_t, false>(hipStream_t stream, output_t* output, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count); \ template void dispatch_blockwise_softmax_forward<input_t, output_t, acc_t, true>(hipStream_t stream, output_t* output, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count); SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(float, float, float) SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(half, half, float) SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(double, double, double) SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(BFloat16, BFloat16, float) } }
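The switch only selects a template instantiation; the launch geometry itself is plain integer arithmetic. A host-side sketch of that arithmetic, assuming log2_ceil rounds up to the next power-of-two exponent and a warp size of 32 (GPU_WARP_SIZE_HOST may differ on other hardware):

#include <cstdio>

// Round log2 up so that (1 << log2_ceil_host(n)) >= n.
static int log2_ceil_host(int n) {
    int e = 0;
    while ((1 << e) < n) ++e;
    return e;
}

int main() {
    const int softmax_elements = 300, batch_count = 1000;
    const int log2_elements = log2_ceil_host(softmax_elements);   // 9, i.e. 512
    const int next_power_of_two = 1 << log2_elements;
    const int warp_size = next_power_of_two < 32 ? next_power_of_two : 32;
    const int threads_per_block = 256;
    const int warps_per_block = threads_per_block / warp_size;    // 8
    const int batches_per_block = warps_per_block * 1;            // one batch per warp
    const int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
    std::printf("case %d: dim3(%d,%d,1) threads, %d blocks\n",
                log2_elements, warp_size, warps_per_block, blocks);
    return 0;
}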
80a1c8024af4a0dbfbc1a1fe3ec1fd798df90eb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Original source reproduced unmodified here from: // https://github.com/olcf/vector_addition_tutorials/blob/master/CUDA/vecAdd.cu #include <algorithm> #include <iostream> #include <vector> #include <CL/sycl.hpp> #include <CL/sycl/backend/cuda.hpp> /** * The CUDA device selector is reused from the previous exercise */ class CUDADeviceSelector : public sycl::device_selector { public: int operator()(const sycl::device &device) const override { if (device.get_platform().get_backend() == sycl::backend::cuda) return 1; else return -1; } }; /** * A simple CUDA vector addition kernel is provided, which we will later call * from within a SYCL command group. Each thread takes care of one element of c */ __global__ void vecAdd(double *a, double *b, double *c, int n) { // Get our global thread ID int id = blockIdx.x * blockDim.x + threadIdx.x; // Make sure we do not go out of bounds if (id < n) { c[id] = a[id] + b[id]; } } int main(int argc, char *argv[]) { /** * Exercise: * * Implement an interop_task that calls the "vecAdd" CUDA kernel defined above * * The queue and buffers are already constructed and initialized. * */ sycl::device myCUDADevice{CUDADeviceSelector().select_device()}; sycl::context myContext{myCUDADevice}; sycl::queue myQueue{myContext, myCUDADevice}; // Size of vectors int N = 10000; { sycl::buffer<double> bA{sycl::range<1>(N)}; sycl::buffer<double> bB{sycl::range<1>(N)}; sycl::buffer<double> bC{sycl::range<1>(N)}; { auto hA = bA.get_access<sycl::access::mode::write>(); auto hB = bB.get_access<sycl::access::mode::write>(); // Initialize vectors on host for (int i = 0; i < N; i++) { hA[i] = sin(i) * sin(i); hB[i] = cos(i) * cos(i); } } myQueue.submit([&](sycl::handler &h) { /** * Exercise */ auto accA = bA.get_access<sycl::access::mode::read>(h); auto accB = bB.get_access<sycl::access::mode::read>(h); auto accC = bC.get_access<sycl::access::mode::write>(h); h.interop_task([=](sycl::interop_handler ih) { auto dA = reinterpret_cast<double *>(ih.get_mem<sycl::backend::cuda>(accA)); auto dB = reinterpret_cast<double *>(ih.get_mem<sycl::backend::cuda>(accB)); auto dC = reinterpret_cast<double *>(ih.get_mem<sycl::backend::cuda>(accC)); int blockSize, gridSize; // Number of threads in each thread block blockSize = 1024; // Number of thread blocks in grid gridSize = static_cast<int>(ceil(static_cast<float>(N) / blockSize)); // Call the CUDA kernel directly from SYCL hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, dA, dB, dC, N); }); }); myQueue.wait_and_throw(); /** * Verification code provided */ { auto hC = bC.get_access<sycl::access::mode::read>(); // Sum up vector c and print result divided by n, this should equal 1 // within error double sum = 0; for (int i = 0; i < N; i++) { sum += hC[i]; } std::cout << "Final result " << sum / N << " : Expected result 1" << std::endl; } } return 0; }
80a1c8024af4a0dbfbc1a1fe3ec1fd798df90eb9.cu
// Original source reproduced unmodified here from: // https://github.com/olcf/vector_addition_tutorials/blob/master/CUDA/vecAdd.cu #include <algorithm> #include <iostream> #include <vector> #include <CL/sycl.hpp> #include <CL/sycl/backend/cuda.hpp> /** * The CUDA device selector is reused from the previous exercise */ class CUDADeviceSelector : public sycl::device_selector { public: int operator()(const sycl::device &device) const override { if (device.get_platform().get_backend() == sycl::backend::cuda) return 1; else return -1; } }; /** * A simple CUDA vector addition kernel is provided, which we will later call * from within a SYCL command group. Each thread takes care of one element of c */ __global__ void vecAdd(double *a, double *b, double *c, int n) { // Get our global thread ID int id = blockIdx.x * blockDim.x + threadIdx.x; // Make sure we do not go out of bounds if (id < n) { c[id] = a[id] + b[id]; } } int main(int argc, char *argv[]) { /** * Exercise: * * Implement an interop_task that calls the "vecAdd" CUDA kernel defined above * * The queue and buffers are already constructed and initialized. * */ sycl::device myCUDADevice{CUDADeviceSelector().select_device()}; sycl::context myContext{myCUDADevice}; sycl::queue myQueue{myContext, myCUDADevice}; // Size of vectors int N = 10000; { sycl::buffer<double> bA{sycl::range<1>(N)}; sycl::buffer<double> bB{sycl::range<1>(N)}; sycl::buffer<double> bC{sycl::range<1>(N)}; { auto hA = bA.get_access<sycl::access::mode::write>(); auto hB = bB.get_access<sycl::access::mode::write>(); // Initialize vectors on host for (int i = 0; i < N; i++) { hA[i] = sin(i) * sin(i); hB[i] = cos(i) * cos(i); } } myQueue.submit([&](sycl::handler &h) { /** * Exercise */ auto accA = bA.get_access<sycl::access::mode::read>(h); auto accB = bB.get_access<sycl::access::mode::read>(h); auto accC = bC.get_access<sycl::access::mode::write>(h); h.interop_task([=](sycl::interop_handler ih) { auto dA = reinterpret_cast<double *>(ih.get_mem<sycl::backend::cuda>(accA)); auto dB = reinterpret_cast<double *>(ih.get_mem<sycl::backend::cuda>(accB)); auto dC = reinterpret_cast<double *>(ih.get_mem<sycl::backend::cuda>(accC)); int blockSize, gridSize; // Number of threads in each thread block blockSize = 1024; // Number of thread blocks in grid gridSize = static_cast<int>(ceil(static_cast<float>(N) / blockSize)); // Call the CUDA kernel directly from SYCL vecAdd<<<gridSize, blockSize>>>(dA, dB, dC, N); }); }); myQueue.wait_and_throw(); /** * Verification code provided */ { auto hC = bC.get_access<sycl::access::mode::read>(); // Sum up vector c and print result divided by n, this should equal 1 // within error double sum = 0; for (int i = 0; i < N; i++) { sum += hC[i]; } std::cout << "Final result " << sum / N << " : Expected result 1" << std::endl; } } return 0; }
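For reference, the same vecAdd kernel driven by plain CUDA runtime calls, i.e. roughly what the interop_task above delegates to; this is a sketch with unchecked error codes, not part of the exercise:

#include <cuda_runtime.h>
#include <cmath>
#include <cstdio>

__global__ void vecAdd(double* a, double* b, double* c, int n) {
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) c[id] = a[id] + b[id];
}

int main() {
    const int N = 10000;
    double *hA = new double[N], *hB = new double[N], *hC = new double[N];
    for (int i = 0; i < N; ++i) { hA[i] = sin(i) * sin(i); hB[i] = cos(i) * cos(i); }
    double *dA, *dB, *dC;
    cudaMalloc(&dA, N * sizeof(double));
    cudaMalloc(&dB, N * sizeof(double));
    cudaMalloc(&dC, N * sizeof(double));
    cudaMemcpy(dA, hA, N * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, N * sizeof(double), cudaMemcpyHostToDevice);
    const int blockSize = 1024;
    const int gridSize = (N + blockSize - 1) / blockSize;
    vecAdd<<<gridSize, blockSize>>>(dA, dB, dC, N);
    cudaMemcpy(hC, dC, N * sizeof(double), cudaMemcpyDeviceToHost);
    double sum = 0;
    for (int i = 0; i < N; ++i) sum += hC[i];
    std::printf("Final result %f : Expected result 1\n", sum / N);  // sin^2 + cos^2 = 1
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    delete[] hA; delete[] hB; delete[] hC;
    return 0;
}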
a99e364c277e5abdc334a46d317e4ce5d52890ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Prerequisites.cuh" namespace gtom { //////////////////////////// //CUDA kernel declarations// //////////////////////////// template<int mode, int ndims> __global__ void WindowMaskKernel(tfloat* d_input, tfloat* d_output, int3 dims, tfloat radius, tfloat3 center, int batch); template<int mode, int ndims> __global__ void WindowMaskBorderDistanceKernel(tfloat* d_input, tfloat* d_output, int3 dims, int falloff, int batch); //////////////// //Host methods// //////////////// void d_HannMask(tfloat* d_input, tfloat* d_output, int3 dims, tfloat* radius, tfloat3* center, int batch) { tfloat _radius = radius != NULL ? *radius : tmin(dims.z > 1 ? tmin(dims.x, dims.z) : dims.x, dims.y) / 2 - 1; tfloat3 _center = center != NULL ? *center : tfloat3(dims.x / 2, dims.y / 2, dims.z / 2); int TpB = tmin(NextMultipleOf(dims.x, 32), 256); dim3 grid = dim3(dims.y, dims.z, 1); if (DimensionCount(dims) == 1) WindowMaskKernel<0, 1> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); if (DimensionCount(dims) == 2) WindowMaskKernel<0, 2> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); if (DimensionCount(dims) == 3) WindowMaskKernel<0, 3> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); } void d_HammingMask(tfloat* d_input, tfloat* d_output, int3 dims, tfloat* radius, tfloat3* center, int batch) { tfloat _radius = radius != NULL ? *radius : tmin(dims.z > 1 ? tmin(dims.x, dims.z) : dims.x, dims.y) / 2 - 1; tfloat3 _center = center != NULL ? *center : tfloat3(dims.x / 2, dims.y / 2, dims.z / 2); int TpB = tmin(NextMultipleOf(dims.x, 32), 256); dim3 grid = dim3(dims.y, dims.z, 1); if (DimensionCount(dims) == 1) WindowMaskKernel<1, 1> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); if (DimensionCount(dims) == 2) WindowMaskKernel<1, 2> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); if (DimensionCount(dims) == 3) WindowMaskKernel<1, 3> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); } void d_GaussianMask(tfloat* d_input, tfloat* d_output, int3 dims, tfloat* sigma, tfloat3* center, int batch) { tfloat _sigma = sigma != NULL ? *sigma : (tfloat)1; tfloat3 _center = center != NULL ? 
*center : tfloat3(dims.x / 2, dims.y / 2, dims.z / 2); int TpB = tmin(NextMultipleOf(dims.x, 32), 256); dim3 grid = dim3(dims.y, dims.z, 1); if (DimensionCount(dims) == 1) WindowMaskKernel<2, 1> << <grid, TpB >> > (d_input, d_output, dims, (tfloat)2 * _sigma * _sigma, _center, batch); if (DimensionCount(dims) == 2) WindowMaskKernel<2, 2> << <grid, TpB >> > (d_input, d_output, dims, (tfloat)2 * _sigma * _sigma, _center, batch); if (DimensionCount(dims) == 3) WindowMaskKernel<2, 3> << <grid, TpB >> > (d_input, d_output, dims, (tfloat)2 * _sigma * _sigma, _center, batch); } void d_HannMaskBorderDistance(tfloat* d_input, tfloat* d_output, int3 dims, int falloff, int batch) { int TpB = tmin(NextMultipleOf(dims.x, 32), 256); dim3 grid = dim3(dims.y, dims.z, 1); if (DimensionCount(dims) == 1) WindowMaskBorderDistanceKernel<0, 1> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); if (DimensionCount(dims) == 2) WindowMaskBorderDistanceKernel<0, 2> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); if (DimensionCount(dims) == 3) WindowMaskBorderDistanceKernel<0, 3> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); } void d_HammingMaskBorderDistance(tfloat* d_input, tfloat* d_output, int3 dims, int falloff, int batch) { int TpB = tmin(NextMultipleOf(dims.x, 32), 256); dim3 grid = dim3(dims.y, dims.z, 1); if (DimensionCount(dims) == 1) WindowMaskBorderDistanceKernel<1, 1> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); if (DimensionCount(dims) == 2) WindowMaskBorderDistanceKernel<1, 2> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); if (DimensionCount(dims) == 3) WindowMaskBorderDistanceKernel<1, 3> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); } //////////////// //CUDA kernels// //////////////// template<int mode, int ndims> __global__ void WindowMaskKernel(tfloat* d_input, tfloat* d_output, int3 dims, tfloat radius, tfloat3 center, int batch) { tfloat xsq, ysq, zsq, length; if (ndims > 1) { ysq = (tfloat)blockIdx.x - center.y; ysq *= ysq; } else ysq = 0; if (ndims > 2) { zsq = (tfloat)blockIdx.y - center.z; zsq *= zsq; } else zsq = 0; for (int x = threadIdx.x; x < dims.x; x += blockDim.x) { xsq = (tfloat)x - center.x; xsq *= xsq; length = sqrt(xsq + ysq + zsq); tfloat val = 0; //Hann if (mode == 0) val = (tfloat)0.5 * ((tfloat)1 + cos(min(length / radius, (tfloat)1) * PI)); //Hamming else if (mode == 1) val = (tfloat)0.54 - (tfloat)0.46 * cos(((tfloat)1 - min(length / radius, (tfloat)1)) * PI); //Gaussian else if (mode == 2) val = exp(-(pow(length, (tfloat)2) / radius)); for (int b = 0; b < batch; b++) { if (ndims > 2) d_output[Elements(dims) * b + (blockIdx.y * dims.y + blockIdx.x) * dims.x + x] = val * d_input[Elements(dims) * b + (blockIdx.y * dims.y + blockIdx.x) * dims.x + x]; else d_output[Elements(dims) * b + blockIdx.x * dims.x + x] = val * d_input[Elements(dims) * b + blockIdx.x * dims.x + x]; } } } template<int mode, int ndims> __global__ void WindowMaskBorderDistanceKernel(tfloat* d_input, tfloat* d_output, int3 dims, int falloff, int batch) { int distx = 0, disty = 0, distz = 0; if (ndims > 1) { int y = blockIdx.x; int fromtop = max(0, falloff - y); int frombottom = max(0, falloff - (dims.y - 1 - y)); disty = max(fromtop, frombottom); } if (ndims > 2) { int z = blockIdx.y; int fromback = max(0, falloff - z); int fromfront = max(0, falloff - (dims.z - 1 - z)); distz = max(fromback, fromfront); } for (int idx = threadIdx.x; idx < dims.x; idx += blockDim.x) { int fromleft = max(0, falloff - idx); int fromright = 
max(0, falloff - (dims.x - 1 - idx)); distx = max(fromleft, fromright); float dist; if (ndims == 3) dist = sqrt(float(distx * distx + disty * disty + distz * distz)); else if (ndims == 2) dist = sqrt(float(distx * distx + disty * disty)); else dist = (float)distx; tfloat val = 0; //Hann if (mode == 0) { val = 0.5f * (1.0f + cos(min(dist / (float)falloff, 1.0f) * PI)); } //Hamming else if (mode == 1) { val = 0.54f - 0.46f * cos((1.0f - min(dist / (float)falloff, 1.0f)) * PI); } for (int b = 0; b < batch; b++) { if (ndims == 3) d_output[Elements(dims) * b + (blockIdx.y * dims.y + blockIdx.x) * dims.x + idx] = val * d_input[Elements(dims) * b + (blockIdx.y * dims.y + blockIdx.x) * dims.x + idx]; else d_output[Elements(dims) * b + blockIdx.x * dims.x + idx] = val * d_input[Elements(dims) * b + blockIdx.x * dims.x + idx]; } } } }
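WindowMaskBorderDistanceKernel keys the window on distance to the nearest volume face rather than to a center point. A host-side sketch of the per-axis distance term it computes (hypothetical helper name):

#include <algorithm>

// Distance (in voxels) that position idx sits inside the falloff band of either
// border along one axis of length dim; 0 means it lies outside both bands.
int borderBandDistance(int idx, int dim, int falloff) {
    const int fromLow  = std::max(0, falloff - idx);              // near the low edge
    const int fromHigh = std::max(0, falloff - (dim - 1 - idx));  // near the high edge
    return std::max(fromLow, fromHigh);
}
// The kernel combines the per-axis terms as sqrt(dx*dx + dy*dy + dz*dz) and feeds
// that distance into the same Hann/Hamming weights used by the radial masks.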
a99e364c277e5abdc334a46d317e4ce5d52890ca.cu
#include "Prerequisites.cuh" namespace gtom { //////////////////////////// //CUDA kernel declarations// //////////////////////////// template<int mode, int ndims> __global__ void WindowMaskKernel(tfloat* d_input, tfloat* d_output, int3 dims, tfloat radius, tfloat3 center, int batch); template<int mode, int ndims> __global__ void WindowMaskBorderDistanceKernel(tfloat* d_input, tfloat* d_output, int3 dims, int falloff, int batch); //////////////// //Host methods// //////////////// void d_HannMask(tfloat* d_input, tfloat* d_output, int3 dims, tfloat* radius, tfloat3* center, int batch) { tfloat _radius = radius != NULL ? *radius : tmin(dims.z > 1 ? tmin(dims.x, dims.z) : dims.x, dims.y) / 2 - 1; tfloat3 _center = center != NULL ? *center : tfloat3(dims.x / 2, dims.y / 2, dims.z / 2); int TpB = tmin(NextMultipleOf(dims.x, 32), 256); dim3 grid = dim3(dims.y, dims.z, 1); if (DimensionCount(dims) == 1) WindowMaskKernel<0, 1> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); if (DimensionCount(dims) == 2) WindowMaskKernel<0, 2> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); if (DimensionCount(dims) == 3) WindowMaskKernel<0, 3> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); } void d_HammingMask(tfloat* d_input, tfloat* d_output, int3 dims, tfloat* radius, tfloat3* center, int batch) { tfloat _radius = radius != NULL ? *radius : tmin(dims.z > 1 ? tmin(dims.x, dims.z) : dims.x, dims.y) / 2 - 1; tfloat3 _center = center != NULL ? *center : tfloat3(dims.x / 2, dims.y / 2, dims.z / 2); int TpB = tmin(NextMultipleOf(dims.x, 32), 256); dim3 grid = dim3(dims.y, dims.z, 1); if (DimensionCount(dims) == 1) WindowMaskKernel<1, 1> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); if (DimensionCount(dims) == 2) WindowMaskKernel<1, 2> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); if (DimensionCount(dims) == 3) WindowMaskKernel<1, 3> << <grid, TpB >> > (d_input, d_output, dims, _radius, _center, batch); } void d_GaussianMask(tfloat* d_input, tfloat* d_output, int3 dims, tfloat* sigma, tfloat3* center, int batch) { tfloat _sigma = sigma != NULL ? *sigma : (tfloat)1; tfloat3 _center = center != NULL ? 
*center : tfloat3(dims.x / 2, dims.y / 2, dims.z / 2); int TpB = tmin(NextMultipleOf(dims.x, 32), 256); dim3 grid = dim3(dims.y, dims.z, 1); if (DimensionCount(dims) == 1) WindowMaskKernel<2, 1> << <grid, TpB >> > (d_input, d_output, dims, (tfloat)2 * _sigma * _sigma, _center, batch); if (DimensionCount(dims) == 2) WindowMaskKernel<2, 2> << <grid, TpB >> > (d_input, d_output, dims, (tfloat)2 * _sigma * _sigma, _center, batch); if (DimensionCount(dims) == 3) WindowMaskKernel<2, 3> << <grid, TpB >> > (d_input, d_output, dims, (tfloat)2 * _sigma * _sigma, _center, batch); } void d_HannMaskBorderDistance(tfloat* d_input, tfloat* d_output, int3 dims, int falloff, int batch) { int TpB = tmin(NextMultipleOf(dims.x, 32), 256); dim3 grid = dim3(dims.y, dims.z, 1); if (DimensionCount(dims) == 1) WindowMaskBorderDistanceKernel<0, 1> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); if (DimensionCount(dims) == 2) WindowMaskBorderDistanceKernel<0, 2> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); if (DimensionCount(dims) == 3) WindowMaskBorderDistanceKernel<0, 3> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); } void d_HammingMaskBorderDistance(tfloat* d_input, tfloat* d_output, int3 dims, int falloff, int batch) { int TpB = tmin(NextMultipleOf(dims.x, 32), 256); dim3 grid = dim3(dims.y, dims.z, 1); if (DimensionCount(dims) == 1) WindowMaskBorderDistanceKernel<1, 1> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); if (DimensionCount(dims) == 2) WindowMaskBorderDistanceKernel<1, 2> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); if (DimensionCount(dims) == 3) WindowMaskBorderDistanceKernel<1, 3> << <grid, TpB >> > (d_input, d_output, dims, falloff, batch); } //////////////// //CUDA kernels// //////////////// template<int mode, int ndims> __global__ void WindowMaskKernel(tfloat* d_input, tfloat* d_output, int3 dims, tfloat radius, tfloat3 center, int batch) { tfloat xsq, ysq, zsq, length; if (ndims > 1) { ysq = (tfloat)blockIdx.x - center.y; ysq *= ysq; } else ysq = 0; if (ndims > 2) { zsq = (tfloat)blockIdx.y - center.z; zsq *= zsq; } else zsq = 0; for (int x = threadIdx.x; x < dims.x; x += blockDim.x) { xsq = (tfloat)x - center.x; xsq *= xsq; length = sqrt(xsq + ysq + zsq); tfloat val = 0; //Hann if (mode == 0) val = (tfloat)0.5 * ((tfloat)1 + cos(min(length / radius, (tfloat)1) * PI)); //Hamming else if (mode == 1) val = (tfloat)0.54 - (tfloat)0.46 * cos(((tfloat)1 - min(length / radius, (tfloat)1)) * PI); //Gaussian else if (mode == 2) val = exp(-(pow(length, (tfloat)2) / radius)); for (int b = 0; b < batch; b++) { if (ndims > 2) d_output[Elements(dims) * b + (blockIdx.y * dims.y + blockIdx.x) * dims.x + x] = val * d_input[Elements(dims) * b + (blockIdx.y * dims.y + blockIdx.x) * dims.x + x]; else d_output[Elements(dims) * b + blockIdx.x * dims.x + x] = val * d_input[Elements(dims) * b + blockIdx.x * dims.x + x]; } } } template<int mode, int ndims> __global__ void WindowMaskBorderDistanceKernel(tfloat* d_input, tfloat* d_output, int3 dims, int falloff, int batch) { int distx = 0, disty = 0, distz = 0; if (ndims > 1) { int y = blockIdx.x; int fromtop = max(0, falloff - y); int frombottom = max(0, falloff - (dims.y - 1 - y)); disty = max(fromtop, frombottom); } if (ndims > 2) { int z = blockIdx.y; int fromback = max(0, falloff - z); int fromfront = max(0, falloff - (dims.z - 1 - z)); distz = max(fromback, fromfront); } for (int idx = threadIdx.x; idx < dims.x; idx += blockDim.x) { int fromleft = max(0, falloff - idx); int fromright = 
max(0, falloff - (dims.x - 1 - idx)); distx = max(fromleft, fromright); float dist; if (ndims == 3) dist = sqrt(float(distx * distx + disty * disty + distz * distz)); else if (ndims == 2) dist = sqrt(float(distx * distx + disty * disty)); else dist = (float)distx; tfloat val = 0; //Hann if (mode == 0) { val = 0.5f * (1.0f + cos(min(dist / (float)falloff, 1.0f) * PI)); } //Hamming else if (mode == 1) { val = 0.54f - 0.46f * cos((1.0f - min(dist / (float)falloff, 1.0f)) * PI); } for (int b = 0; b < batch; b++) { if (ndims == 3) d_output[Elements(dims) * b + (blockIdx.y * dims.y + blockIdx.x) * dims.x + idx] = val * d_input[Elements(dims) * b + (blockIdx.y * dims.y + blockIdx.x) * dims.x + idx]; else d_output[Elements(dims) * b + blockIdx.x * dims.x + idx] = val * d_input[Elements(dims) * b + blockIdx.x * dims.x + idx]; } } } }
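The three radial masks differ only in the weight applied at distance r from the center; a compact host-side restatement of those formulas (hypothetical helper; note that for the Gaussian case the radius argument arrives as 2*sigma*sigma from d_GaussianMask):

#include <algorithm>
#include <cmath>

// Weight applied to a voxel at radial distance r from the mask center.
// mode 0: Hann, mode 1: Hamming (both clamp r/radius to 1), mode 2: Gaussian.
float windowWeight(int mode, float r, float radius) {
    const float PI_F = 3.14159265358979f;
    const float t = std::min(r / radius, 1.0f);
    if (mode == 0) return 0.5f * (1.0f + std::cos(t * PI_F));
    if (mode == 1) return 0.54f - 0.46f * std::cos((1.0f - t) * PI_F);
    return std::exp(-(r * r) / radius);   // Gaussian: radius arrives as 2*sigma*sigma
}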
8e68af641f04cd39ef3f2d4fb56399cfec7ff754.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cudaKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cudaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cudaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cudaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8e68af641f04cd39ef3f2d4fb56399cfec7ff754.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cudaKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cudaKernel<<<gridBlock,threadBlock>>>(); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cudaKernel<<<gridBlock,threadBlock>>>(); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cudaKernel<<<gridBlock,threadBlock>>>(); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
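The pair above is a kernel-launch micro-benchmark: the .cu original sweeps twenty block shapes against up to seven square problem sizes (the count comes from argv[1]), pads each grid dimension until it divides evenly by the block size, performs ten warm-up launches, then times one thousand launches of cudaKernel with std::chrono and prints the elapsed microseconds per configuration. The .hip file is the hipify output of the same harness: the runtime calls gain the hip prefix (hipSetDevice, hipFree, hipDeviceSynchronize) and each triple-chevron launch becomes a hipLaunchKernelGGL call, with the shared-memory size and stream passed explicitly as the fourth and fifth arguments. Note that neither version synchronises between the timed loop and the end timestamp, so the printed figure mostly reflects launch/enqueue overhead rather than kernel execution time. The sketch below reproduces the same timing pattern on the HIP side in a self-contained form; the no-op kernel stands in for cudaKernel (defined in cudaKernel.cu, not shown here), and the synchronisation before the end timestamp is an addition, not part of the harness above.

#include <hip/hip_runtime.h>
#include <chrono>
#include <iostream>

__global__ void noopKernel() {}

int main()
{
    hipSetDevice(0);
    dim3 grid(32, 32), block(16, 16);

    // Warm-up launches, as in the harness above.
    for (int i = 0; i < 10; ++i)
        hipLaunchKernelGGL(noopKernel, grid, block, 0, 0);
    hipDeviceSynchronize();

    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < 1000; ++i)
        hipLaunchKernelGGL(noopKernel, grid, block, 0, 0);
    hipDeviceSynchronize();   // not in the original harness: includes execution time
    auto end = std::chrono::steady_clock::now();

    std::cout << std::chrono::duration<float, std::micro>(end - start).count()
              << " us for 1000 launches" << std::endl;
    return 0;
}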
90d146bd766d78d7604ef2c3f7733b791365b725.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudapars.h" #include "paramssteeringtest1.h" ///////////////////////////////////// // standard imports ///////////////////////////////////// #include <stdio.h> #include <math.h> #include "step.h" ///////////////////////////////////// // kernel function (CUDA device) ///////////////////////////////////// __device__ __host__ int encode (struct params *dp,int ix, int iy) { //int kSizeX=(dp)->ni; //int kSizeY=(dp)->nj; return ( iy * ((dp)->ni) + ix); } __device__ __host__ int fencode (struct params *dp,int ix, int iy, int field) { //int kSizeX=(dp)->ni; //int kSizeY=(dp)->nj; return ( (iy * ((dp)->ni) + ix)+(field*((dp)->ni)*((dp)->nj))); } __device__ __host__ float evalgrad(float fi, float fim1, float fip2, float fim2,struct params *p,int dir) { //float valgrad; if(dir == 0) { //valgrad=(2.0/(3.0*(p->dx)))*(fi-fim1)-(1.0/(12.0*(p->dx)))*(fip2-fim2); return((1.0/(1.0*(p->dx)))*(fi-fim1)); } else if(dir == 1) { // valgrad=(2.0/(3.0*(p->dy)))*(fi-fim1)-(1.0/(12.0*(p->dy)))*(fip2-fim2); return((1.0/(1.0*(p->dy)))*(fi-fim1)); } return -1; } __device__ __host__ float grad(float *wmod,struct params *p,int i,int j,int field,int dir) { //float valgrad; if(dir == 0) { // valgrad=(2.0/(3.0*(p->dx)))*(wmod[fencode(p,i,j,field)]-wmod[fencode(p,i-1,j,field)])-(1.0/(12.0*(p->dx)))*(wmod[fencode(p,i+2,j,field)]-wmod[fencode(p,i-2,j,field)]); return((1.0/(1.0*(p->dx)))*(wmod[fencode(p,i+1,j,field)]-wmod[fencode(p,i-1,j,field)])); } else if(dir == 1) { // valgrad=(2.0/(3.0*(p->dy)))*(wmod[fencode(p,i,j,field)]-wmod[fencode(p,i,j-1,field)])-(1.0/(12.0*(p->dy)))*(wmod[fencode(p,i,j+2,field)]-wmod[fencode(p,i,j-2,field)]); return((1.0/(1.0*(p->dy)))*(wmod[fencode(p,i,j+1,field)]-wmod[fencode(p,i,j-1,field)])); } return -1; } __device__ __host__ void computej(float *wmod,float *wd,struct params *p,int i,int j) { // int status=0; // float dbzdy, dbydz; // float dbzdx, dbxdz; // float dbydx, dbxdy; // dbzdy=grad(wmod,p,i,j,b3,1); // dbydz=0.0; // dbzdx=grad(wmod,p,i,j,b3,0); // dbxdz=0.0; // dbydx=grad(wmod,p,i,j,b2,0); // dbxdy=grad(wmod,p,i,j,b1,1); wd[fencode(p,i,j,0)]=(grad(wmod,p,i,j,b3,1))/(p->mu); wd[fencode(p,i,j,1)]=(grad(wmod,p,i,j,b3,0))/(p->mu); wd[fencode(p,i,j,2)]=(grad(wmod,p,i,j,b2,0)-grad(wmod,p,i,j,b1,1))/(p->mu); //return ( status); } __device__ __host__ void computebdotv(float *wmod,float *wd,struct params *p,int i,int j) { // int status=0; //float bsq=wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]; // wd[fencode(p,i,j,4)]= wd[fencode(p,i,j,3)]+0.5*(wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]); wd[fencode(p,i,j,bdotv)]=(wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,mom1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,mom2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,mom3)])/wmod[fencode(p,i,j,rho)]; // return ( status); } __device__ __host__ void computepk(float *wmod,float *wd,struct params *p,int i,int j) { // int status=0; //float bsq=wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]; wd[fencode(p,i,j,4)]= wd[fencode(p,i,j,3)]+0.5*(wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]); // return ( status); } __device__ __host__ 
void computept(float *wmod,float *wd,struct params *p,int i,int j) { //int status=0; //float momsq=wmod[fencode(p,i,j,mom1)]*wmod[fencode(p,i,j,mom1)]+wmod[fencode(p,i,j,mom2)]*wmod[fencode(p,i,j,mom2)]+wmod[fencode(p,i,j,mom3)]*wmod[fencode(p,i,j,mom3)]; //float bsq=wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]; wd[fencode(p,i,j,3)]=((p->gamma)-1)*(wmod[fencode(p,i,j,energy)]- 0.5*(wmod[fencode(p,i,j,mom1)]*wmod[fencode(p,i,j,mom1)]+wmod[fencode(p,i,j,mom2)]*wmod[fencode(p,i,j,mom2)]+wmod[fencode(p,i,j,mom3)]*wmod[fencode(p,i,j,mom3)])/wmod[fencode(p,i,j,rho)]-0.5*(wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]) ); //return ( status); } __device__ __host__ float sourcerho (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { // float src=0; // int field=rho; return 0; } __device__ __host__ float sourcemom (float *dw, float *wd, float *w, struct params *p,int ix, int iy,int field, int direction) { //float src=0; switch(direction) { case 0: return(w[fencode(p,ix,iy,rho)]*(p->g1))-grad(wd,p,ix,iy,pressuret,0); break; case 1: return(w[fencode(p,ix,iy,rho)]*(p->g2))-grad(wd,p,ix,iy,pressuret,1); break; case 2: return(w[fencode(p,ix,iy,rho)]*(p->g3))-grad(wd,p,ix,iy,pressuret,2); break; } return 0; } __device__ __host__ float sourceb (float *dw, float *wd, float *w, struct params *p,int ix, int iy,int field, int direction) { //float src=0; switch(direction) { case 0: return(p->eta)*grad(wd,p,ix,iy,current3,1); break; case 1: return -(p->eta)*grad(wd,p,ix,iy,current3,0); break; case 2: return (p->eta)*(grad(wd,p,ix,iy,current2,0)-grad(wd,p,ix,iy,current1,1)); break; } return 0; } __device__ __host__ float sourceenergy (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { // float src=0; float srcg,srcb; int field=energy; float ddcx,ddcy; float fi,fim1;//fip2,fim2; srcg=(p->g1)*w[fencode(p,ix,iy,mom1)]+(p->g2)*w[fencode(p,ix,iy,mom2)]+(p->g3)*w[fencode(p,ix,iy,mom3)]; fi=(w[fencode(p,ix+1,iy,b2)]*wd[fencode(p,ix+1,iy,current3)]-w[fencode(p,ix+1,iy,b3)]*wd[fencode(p,ix+1,iy,current2)]); fim1=(w[fencode(p,ix-1,iy,b2)]*wd[fencode(p,ix-1,iy,current3)]-w[fencode(p,ix-1,iy,b3)]*wd[fencode(p,ix-1,iy,current2)]); // fip2=(w[fencode(p,ix+2,iy,b2)]*wd[fencode(p,ix+2,iy,current3)]-w[fencode(p,ix+2,iy,b3)]*wd[fencode(p,ix+2,iy,current2)]); // fim2=(w[fencode(p,ix-2,iy,b2)]*wd[fencode(p,ix-2,iy,current3)]-w[fencode(p,ix-2,iy,b3)]*wd[fencode(p,ix-2,iy,current2)]); // ddcx=evalgrad(fi,fim1,fip2,fim2,p,0); ddcx=evalgrad(fi,fim1,0,0,p,0); fi=(w[fencode(p,ix+1,iy,b3)]*wd[fencode(p,ix+1,iy,current1)]-w[fencode(p,ix+1,iy,b1)]*wd[fencode(p,ix+1,iy,current3)]); fim1=(w[fencode(p,ix,iy-1,b3)]*wd[fencode(p,ix,iy-1,current1)]-w[fencode(p,ix,iy-1,b1)]*wd[fencode(p,ix,iy-1,current3)]); // fip2=(w[fencode(p,ix,iy+2,b3)]*wd[fencode(p,ix,iy+2,current1)]-w[fencode(p,ix,iy+2,b1)]*wd[fencode(p,ix,iy+2,current3)]); // fim2=(w[fencode(p,ix,iy-2,b3)]*wd[fencode(p,ix,iy-2,current1)]-w[fencode(p,ix,iy-2,b1)]*wd[fencode(p,ix,iy-2,current3)]); // ddcx=evalgrad(fi,fim1,fip2,fim2,p,0); ddcy=evalgrad(fi,fim1,0,0,p,1); srcb=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); // src=srcg+srcb; return ( srcg+srcb); } __device__ __host__ float ddotcurrentrho (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { float ddc=0; // int field=rho; ddc= grad(w,p,ix,iy,mom1,0)+grad(w,p,ix,iy,mom2,1); return ( 
isnan(ddc)?0:ddc); } __device__ __host__ float ddotcurrentmom (float *dw, float *wd, float *w, struct params *p,int ix, int iy,int field, int direction) { float ddc=0; float fi, fim1; //float fip2=0, fim2=0; float ddc1,ddc2; float ddcx,ddcy; // ddc= grad(w,p,ix,iy,mom1,0)+grad(w,p,ix,iy,mom2,1); //evalgrad(float fi, float fim1, float fip2, float fim2,struct params *p,int dir) //fi=w(fencode(p,ix,iy,rho)) //calculate momentum current //w[fencode(p,ix,iy,rho)])=1; //w[fencode(p,ix-1,iy,rho)])=1; //w[fencode(p,ix+2,iy,rho)])=1; //w[fencode(p,ix-2,iy,rho)])=1; //w[fencode(p,ix,iy,rho)])=1; //w[fencode(p,ix,iy-1,rho)])=1; //w[fencode(p,ix,iy+2,rho)])=1; //w[fencode(p,ix,iy-2,rho)])=1; switch(direction) { case 0: fi=(w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix+1,iy,rho)])*w[fencode(p,ix+1,iy,mom1)]; fim1=(w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)])*w[fencode(p,ix-1,iy,mom1)]; // fip2=(w[fencode(p,ix+2,iy,mom1)]/w[fencode(p,ix+2,iy,rho)])*w[fencode(p,ix+2,iy,mom1)]; // fim2=(w[fencode(p,ix-2,iy,mom1)]/w[fencode(p,ix-2,iy,rho)])*w[fencode(p,ix-2,iy,mom1)]; // ddcx=evalgrad(fi,fim1,fip2,fim2,p,0); ddcx=evalgrad(fi,fim1,0,0,p,0); //ddcx=fi-fim1; fi=(w[fencode(p,ix,iy+1,mom1)]/w[fencode(p,ix,iy+1,rho)])*w[fencode(p,ix,iy+1,mom2)]; fim1=(w[fencode(p,ix,iy-1,mom1)]/w[fencode(p,ix,iy-1,rho)])*w[fencode(p,ix,iy-1,mom2)]; // fip2=(w[fencode(p,ix,iy+2,mom1)]/w[fencode(p,ix,iy+2,rho)])*w[fencode(p,ix,iy+2,mom2)]; // fim2=(w[fencode(p,ix,iy-2,mom1)]/w[fencode(p,ix,iy-2,rho)])*w[fencode(p,ix,iy-2,mom2)]; //ddcy=fi; ddcy=evalgrad(fi,fim1,0,0,p,1); //ddcy=evalgrad(0,0,fip2,fim2,p,1); break; case 1: fi=(w[fencode(p,ix+1,iy,mom2)]/w[fencode(p,ix+1,iy,rho)])*w[fencode(p,ix+1,iy,mom1)]; fim1=(w[fencode(p,ix-1,iy,mom2)]/w[fencode(p,ix-1,iy,rho)])*w[fencode(p,ix-1,iy,mom1)]; // fip2=(w[fencode(p,ix+2,iy,mom2)]/w[fencode(p,ix+2,iy,rho)])*w[fencode(p,ix+2,iy,mom1)]; // fim2=(w[fencode(p,ix-2,iy,mom2)]/w[fencode(p,ix-2,iy,rho)])*w[fencode(p,ix-2,iy,mom1)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=(w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)])*w[fencode(p,ix,iy+1,mom2)]; fim1=(w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)])*w[fencode(p,ix,iy-1,mom2)]; // fip2=(w[fencode(p,ix,iy+2,mom2)]/w[fencode(p,ix,iy+2,rho)])*w[fencode(p,ix,iy+2,mom2)]; // fim2=(w[fencode(p,ix,iy-2,mom2)]/w[fencode(p,ix,iy-2,rho)])*w[fencode(p,ix,iy-2,mom2)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 2: fi=(w[fencode(p,ix+1,iy,mom3)]/w[fencode(p,ix+1,iy,rho)])*w[fencode(p,ix+1,iy,mom1)]; fim1=(w[fencode(p,ix-1,iy,mom3)]/w[fencode(p,ix-1,iy,rho)])*w[fencode(p,ix-1,iy,mom1)]; // fip2=(w[fencode(p,ix+2,iy,mom3)]/w[fencode(p,ix+2,iy,rho)])*w[fencode(p,ix+2,iy,mom1)]; // fim2=(w[fencode(p,ix-2,iy,mom3)]/w[fencode(p,ix-2,iy,rho)])*w[fencode(p,ix-2,iy,mom1)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=(w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)])*w[fencode(p,ix,iy+1,mom2)]; fim1=(w[fencode(p,ix,iy-1,mom3)]/w[fencode(p,ix,iy-1,rho)])*w[fencode(p,ix,iy-1,mom2)]; // fip2=(w[fencode(p,ix,iy+2,mom3)]/w[fencode(p,ix,iy+2,rho)])*w[fencode(p,ix,iy+2,mom2)]; // fim2=(w[fencode(p,ix,iy-2,mom3)]/w[fencode(p,ix,iy-2,rho)])*w[fencode(p,ix,iy-2,mom2)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; } ddc1=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); //fip2=0, fim2=0; //calculate bfield current switch(direction) { case 0: fi=w[fencode(p,ix+1,iy,b1)]*w[fencode(p,ix+1,iy,b1)]; fim1=w[fencode(p,ix-1,iy,b1)]*w[fencode(p,ix-1,iy,b1)]; // fip2=w[fencode(p,ix+2,iy,b1)]*w[fencode(p,ix+2,iy,b1)]; // fim2=w[fencode(p,ix-2,iy,b1)]*w[fencode(p,ix-2,iy,b1)]; 
ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b1)]*w[fencode(p,ix,iy+1,b2)]; fim1=w[fencode(p,ix,iy-1,b1)]*w[fencode(p,ix,iy-1,b2)]; // fip2=w[fencode(p,ix,iy+2,b1)]*w[fencode(p,ix,iy+2,b2)]; // fim2=w[fencode(p,ix,iy-2,b1)]*w[fencode(p,ix,iy-2,b2)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 1: fi=w[fencode(p,ix+1,iy,b2)]*w[fencode(p,ix+1,iy,b1)]; fim1=w[fencode(p,ix-1,iy,b2)]*w[fencode(p,ix-1,iy,b1)]; // fip2=w[fencode(p,ix+2,iy,b2)]*w[fencode(p,ix+2,iy,b1)]; // fim2=w[fencode(p,ix-2,iy,b2)]*w[fencode(p,ix-2,iy,b1)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b2)]*w[fencode(p,ix,iy+1,b2)]; fim1=w[fencode(p,ix,iy-1,b2)]*w[fencode(p,ix,iy-1,b2)]; // fip2=w[fencode(p,ix,iy+2,b2)]*w[fencode(p,ix,iy+2,b2)]; // fim2=w[fencode(p,ix,iy-2,b2)]*w[fencode(p,ix,iy-2,b2)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 2: fi=w[fencode(p,ix+1,iy,b3)]*w[fencode(p,ix+1,iy,b1)]; fim1=w[fencode(p,ix-1,iy,b3)]*w[fencode(p,ix-1,iy,b1)]; // fip2=w[fencode(p,ix+2,iy,b3)]*w[fencode(p,ix+2,iy,b1)]; // fim2=w[fencode(p,ix-2,iy,b3)]*w[fencode(p,ix-2,iy,b1)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b3)]*w[fencode(p,ix,iy+1,b2)]; fim1=w[fencode(p,ix,iy-1,b3)]*w[fencode(p,ix,iy-1,b2)]; // fip2=w[fencode(p,ix,iy+2,b3)]*w[fencode(p,ix,iy+2,b2)]; // fim2=w[fencode(p,ix,iy-2,b3)]*w[fencode(p,ix,iy-2,b2)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; } //ddc2=ddcx+ddcy; ddc2=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); //ddc=ddc1-ddc2; return ( ddc1-ddc2); } __device__ __host__ float ddotcurrentb (float *dw, float *wd, float *w, struct params *p,int ix, int iy,int field, int direction) { //float ddc=0; float fi, fim1;// fip2=0, fim2=0; float ddc1,ddc2; float ddcx,ddcy; switch(direction) { case 0: fi=w[fencode(p,ix+1,iy,mom1)]*w[fencode(p,ix+1,iy,b1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,mom1)]*w[fencode(p,ix-1,iy,b1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,mom1)]*w[fencode(p,ix+2,iy,b1)]/w[fencode(p,ix+2,iy,rho)]; //fim2=w[fencode(p,ix-2,iy,mom1)]*w[fencode(p,ix-2,iy,b1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,mom1)]*w[fencode(p,ix,iy+1,b2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,mom1)]*w[fencode(p,ix,iy-1,b2)]/w[fencode(p,ix,iy-1,rho)]; //fip2=w[fencode(p,ix,iy+2,mom1)]*w[fencode(p,ix,iy+2,b2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,mom1)]*w[fencode(p,ix,iy-2,b2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 1: fi=w[fencode(p,ix+1,iy,mom2)]*w[fencode(p,ix+1,iy,b1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,mom2)]*w[fencode(p,ix-1,iy,b1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,mom2)]*w[fencode(p,ix+2,iy,b1)]/w[fencode(p,ix+2,iy,rho)]; //fim2=w[fencode(p,ix-2,iy,mom2)]*w[fencode(p,ix-2,iy,b1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,mom2)]*w[fencode(p,ix,iy+1,b2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,mom2)]*w[fencode(p,ix,iy-1,b2)]/w[fencode(p,ix,iy-1,rho)]; //fip2=w[fencode(p,ix,iy+2,mom2)]*w[fencode(p,ix,iy+2,b2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,mom2)]*w[fencode(p,ix,iy-2,b2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 2: fi=w[fencode(p,ix+1,iy,mom3)]*w[fencode(p,ix+1,iy,b1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,mom3)]*w[fencode(p,ix-1,iy,b1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,mom3)]*w[fencode(p,ix+2,iy,b1)]/w[fencode(p,ix+2,iy,rho)]; 
//fim2=w[fencode(p,ix-2,iy,mom3)]*w[fencode(p,ix-2,iy,b1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,mom3)]*w[fencode(p,ix,iy+1,b2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,mom3)]*w[fencode(p,ix,iy-1,b2)]/w[fencode(p,ix,iy-1,rho)]; //fip2=w[fencode(p,ix,iy+2,mom3)]*w[fencode(p,ix,iy+2,b2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,mom3)]*w[fencode(p,ix,iy-2,b2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; } ddc1=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); switch(direction) { case 0: fi=w[fencode(p,ix+1,iy,b1)]*w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,b1)]*w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,b1)]*w[fencode(p,ix+2,iy,mom1)]/w[fencode(p,ix+2,iy,rho)]; // fim2=w[fencode(p,ix-2,iy,b1)]*w[fencode(p,ix-2,iy,mom1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b1)]*w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,b1)]*w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)]; //fip2=w[fencode(p,ix,iy+2,b1)]*w[fencode(p,ix,iy+2,mom2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,b1)]*w[fencode(p,ix,iy-2,mom2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 1: fi=w[fencode(p,ix+1,iy,b2)]*w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,b2)]*w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,b2)]*w[fencode(p,ix+2,iy,mom1)]/w[fencode(p,ix+2,iy,rho)]; // fim2=w[fencode(p,ix-2,iy,b2)]*w[fencode(p,ix-2,iy,mom1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b2)]*w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,b2)]*w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)]; // fip2=w[fencode(p,ix,iy+2,b2)]*w[fencode(p,ix,iy+2,mom2)]/w[fencode(p,ix,iy+2,rho)]; // fim2=w[fencode(p,ix,iy-2,b2)]*w[fencode(p,ix,iy-2,mom2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 2: fi=w[fencode(p,ix+1,iy,b3)]*w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,b3)]*w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,b3)]*w[fencode(p,ix+2,iy,mom1)]/w[fencode(p,ix+2,iy,rho)]; //fim2=w[fencode(p,ix-2,iy,b3)]*w[fencode(p,ix-2,iy,mom1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b3)]*w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,b3)]*w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)]; //fip2=w[fencode(p,ix,iy+2,b3)]*w[fencode(p,ix,iy+2,mom2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,b3)]*w[fencode(p,ix,iy-2,mom2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; } ddc2=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); return(ddc1-ddc2); } __device__ __host__ float ddotcurrentenergy (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { // float ddc=0; float dd1,dd2,dd3; float ddcx,ddcy; //float fi, fim1;//fip2=0, fim2=0; //float dpi, dpim1;//, dpip2=0, dpim2=0; //int field=energy; //fi=w[fencode(p,ix+1,iy,energy)]*w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix,iy,rho)]; //fim1=w[fencode(p,ix-1,iy,energy)]*w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,energy)]*w[fencode(p,ix+2,iy,mom1)]/w[fencode(p,ix+2,iy,rho)]; // fim2=w[fencode(p,ix-2,iy,energy)]*w[fencode(p,ix-2,iy,mom1)]/w[fencode(p,ix-2,iy,rho)]; // 
ddcx=evalgrad(fi,fim1,0,0,p,0); ddcx=evalgrad(w[fencode(p,ix+1,iy,energy)]*w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix,iy,rho)],w[fencode(p,ix-1,iy,energy)]*w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)],0,0,p,0); // fi=w[fencode(p,ix,iy+1,energy)]*w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)]; // fim1=w[fencode(p,ix,iy-1,energy)]*w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)]; // fip2=w[fencode(p,ix,iy+2,energy)]*w[fencode(p,ix,iy+2,mom2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,energy)]*w[fencode(p,ix,iy-2,mom2)]/w[fencode(p,ix,iy-2,rho)]; //ddcy=evalgrad(fi,fim1,0,0,p,1); ddcy=evalgrad(w[fencode(p,ix,iy+1,energy)]*w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)],w[fencode(p,ix,iy-1,energy)]*w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)],0,0,p,1); dd1=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); // dpi=(w[fencode(p,ix+1,iy,b1)]*w[fencode(p,ix+1,iy,mom1)]+w[fencode(p,ix+1,iy,b2)]*w[fencode(p,ix+1,iy,mom2)]+w[fencode(p,ix+1,iy,b3)]*w[fencode(p,ix+1,iy,mom3)])/w[fencode(p,ix+1,iy,rho)]; // dpim1=(w[fencode(p,ix-1,iy,b1)]*w[fencode(p,ix-1,iy,mom1)]+w[fencode(p,ix-1,iy,b2)]*w[fencode(p,ix-1,iy,mom2)]+w[fencode(p,ix-1,iy,b3)]*w[fencode(p,ix-1,iy,mom3)])/w[fencode(p,ix-1,iy,rho)]; //dpip2=(w[fencode(p,ix+2,iy,b1)]*w[fencode(p,ix+2,iy,mom1)]+w[fencode(p,ix+2,iy,b2)]*w[fencode(p,ix+2,iy,mom2)]+w[fencode(p,ix+2,iy,b3)]*w[fencode(p,ix+2,iy,mom3)])/w[fencode(p,ix+2,iy,rho)]; // dpim2=(w[fencode(p,ix-2,iy,b1)]*w[fencode(p,ix-2,iy,mom1)]+w[fencode(p,ix-2,iy,b2)]*w[fencode(p,ix-2,iy,mom2)]+w[fencode(p,ix-2,iy,b3)]*w[fencode(p,ix-2,iy,mom3)])/w[fencode(p,ix-2,iy,rho)]; // fi=dpi*w[fencode(p,ix+1,iy,b1)]; // fim1=dpim1*w[fencode(p,ix-1,iy,b1)]; //fip2=dpip2*w[fencode(p,ix+2,iy,b1)]; // fim2=dpim2*w[fencode(p,ix-2,iy,b1)]; // ddcx=evalgrad(fi,fim1,0,0,p,0); // ddcx=evalgrad(((w[fencode(p,ix+1,iy,b1)]*w[fencode(p,ix+1,iy,mom1)]+w[fencode(p,ix+1,iy,b2)]*w[fencode(p,ix+1,iy,mom2)]+w[fencode(p,ix+1,iy,b3)]*w[fencode(p,ix+1,iy,mom3)])/w[fencode(p,ix+1,iy,rho)])*w[fencode(p,ix+1,iy,b1)],((w[fencode(p,ix-1,iy,b1)]*w[fencode(p,ix-1,iy,mom1)]+w[fencode(p,ix-1,iy,b2)]*w[fencode(p,ix-1,iy,mom2)]+w[fencode(p,ix-1,iy,b3)]*w[fencode(p,ix-1,iy,mom3)])/w[fencode(p,ix-1,iy,rho)])*w[fencode(p,ix-1,iy,b1)],0,0,p,0); ddcx=evalgrad(wd[fencode(p,ix+1,iy,bdotv)]*w[fencode(p,ix+1,iy,b1)],wd[fencode(p,ix-1,iy,bdotv)]*w[fencode(p,ix-1,iy,b1)],0,0,p,1); // dpi=(w[fencode(p,ix,iy+1,b1)]*w[fencode(p,ix,iy+1,mom1)]+w[fencode(p,ix,iy+1,b2)]*w[fencode(p,ix,iy+1,mom2)]+w[fencode(p,ix,iy+1,b3)]*w[fencode(p,ix,iy+1,mom3)])/w[fencode(p,ix,iy+1,rho)]; // dpim1=(w[fencode(p,ix,iy-1,b1)]*w[fencode(p,ix,iy-1,mom1)]+w[fencode(p,ix,iy-1,b2)]*w[fencode(p,ix,iy-1,mom2)]+w[fencode(p,ix,iy-1,b3)]*w[fencode(p,ix,iy-1,mom3)])/w[fencode(p,ix,iy-1,rho)]; //dpip2=(w[fencode(p,ix,iy+2,b1)]*w[fencode(p,ix,iy+2,mom1)]+w[fencode(p,ix,iy+2,b2)]*w[fencode(p,ix,iy+2,mom2)]+w[fencode(p,ix,iy+2,b3)]*w[fencode(p,ix,iy+2,mom3)])/w[fencode(p,ix,iy+2,rho)]; //dpim2=(w[fencode(p,ix,iy-2,b1)]*w[fencode(p,ix,iy-2,mom1)]+w[fencode(p,ix,iy-2,b2)]*w[fencode(p,ix,iy-2,mom2)]+w[fencode(p,ix,iy-2,b3)]*w[fencode(p,ix,iy-2,mom3)])/w[fencode(p,ix,iy-2,rho)]; // fi=dpi*w[fencode(p,ix,iy+1,b2)]; // fim1=dpim1*w[fencode(p,ix,iy-1,b2)]; //fip2=dpip2*w[fencode(p,ix,iy+2,b2)]; //fim2=dpim2*w[fencode(p,ix,iy-2,b2)]; //fi=w[fencode(p,ix,iy+1,b2)]; // fim1=w[fencode(p,ix,iy-1,b2)]; ddcy=evalgrad(wd[fencode(p,ix,iy+1,bdotv)]*w[fencode(p,ix,iy+1,b2)],wd[fencode(p,ix,iy-1,bdotv)]*w[fencode(p,ix,iy-1,b2)],0,0,p,1); //ddcx=0; 
//ddcy=evalgrad(((w[fencode(p,ix,iy+1,b1)]*w[fencode(p,ix,iy+1,mom1)]+w[fencode(p,ix,iy+1,b2)]*w[fencode(p,ix,iy+1,mom2)]+w[fencode(p,ix,iy+1,b3)]*w[fencode(p,ix,iy+1,mom3)])/w[fencode(p,ix,iy+1,rho)])*w[fencode(p,ix,iy+1,b2)],((w[fencode(p,ix,iy-1,b1)]*w[fencode(p,ix,iy-1,mom1)]+w[fencode(p,ix,iy-1,b2)]*w[fencode(p,ix,iy-1,mom2)]+w[fencode(p,ix,iy-1,b3)]*w[fencode(p,ix,iy-1,mom3)])/w[fencode(p,ix,iy-1,rho)])*w[fencode(p,ix,iy-1,b2)],0,0,p,1); dd2=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); ddcx=wd[fencode(p,ix,iy,pressuret)]*grad(w,p,ix,iy,mom1,0)/w[fencode(p,ix,iy,rho)]; ddcy=wd[fencode(p,ix,iy,pressuret)]*grad(w,p,ix,iy,mom2,1)/w[fencode(p,ix,iy,rho)]; dd3=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); return(dd1+dd2+dd3); //return dd1; // return ( ddc); } __device__ __host__ int derivrho (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { int status=0; int field=rho; dw[fencode(p,ix,iy,field)]=sourcerho(dw,wd,w,p,ix,iy)-ddotcurrentrho(dw,wd,w,p,ix,iy); //dw[fencode(p,ix,iy,field)]=w[fencode(p,ix,iy,field)]+10; return ( status); } __device__ __host__ int derivmom (float *dw, float *wd, float *w, struct params *p,int ix, int iy,int field, int direction) { int status=0; //dw[fencode(p,ix,iy,field)]=w[fencode(p,ix,iy,field)]+20+5*(2*direction+1); dw[fencode(p,ix,iy,field)]=sourcemom(dw,wd,w,p,ix,iy,field,direction)-ddotcurrentmom(dw,wd,w,p,ix,iy,field,direction); //dw[fencode(p,ix,iy,field)]=-ddotcurrentmom(dw,wd,w,p,ix,iy,field,direction); return ( status); } __device__ __host__ int derivb (float *dw, float *wd, float *w, struct params *p,int ix, int iy, int field, int direction) { int status=0; dw[fencode(p,ix,iy,field)]=sourceb(dw,wd,w,p,ix,iy,field,direction)-ddotcurrentb(dw,wd,w,p,ix,iy,field,direction); return ( status); } __device__ __host__ int derivenergy (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { int status=0; int field=energy; dw[fencode(p,ix,iy,field)]=sourceenergy(dw,wd,w,p,ix,iy)-ddotcurrentenergy(dw,wd,w,p,ix,iy); return ( status); } //rho, mom1, mom2, mom3, energy, b1, b2, b3 __device__ __host__ void deriv (float *dw, float *wd, float *w, struct params *p,int ix, int iy, int field) { //int status=0; switch(field) { case rho: derivrho(dw,wd,w,p,ix,iy); break; case mom1: derivmom(dw,wd,w,p,ix,iy,field,0); break; case mom2: derivmom(dw,wd,w,p,ix,iy,field,1); break; case mom3: derivmom(dw,wd,w,p,ix,iy,field,2); break; case energy: derivenergy(dw,wd,w,p,ix,iy); break; case b1: derivb(dw,wd,w,p,ix,iy,field,0); break; case b2: derivb(dw,wd,w,p,ix,iy,field,1); break; case b3: derivb(dw,wd,w,p,ix,iy,field,2); break; } //return ( status); } __global__ void derivcurrent_parallel(struct params *p, float *w, float *wnew, float *wmod, float *dwn1, float *wd) { // compute the global index in the vector from // the number of the current block, blockIdx, // the number of threads per block, blockDim, // and the number of the current thread within the block, threadIdx //int i = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.y * blockDim.y + threadIdx.y; int iindex = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int index,k; int ni=p->ni; int nj=p->nj; float dt=p->dt; float dy=p->dy; float dx=p->dx; float g=p->g; // dt=1.0; //dt=0.05; //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3; j=iindex/ni; //i=iindex-j*(iindex/ni); i=iindex-(j*ni); if(i>1 && j >1 && i<((p->ni)-2) && j<((p->nj)-2)) { for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); 
computebdotv(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) { deriv(dwn1,wd,wmod,p,i,j,f); //dwn1[fencode(p,i,j,f)]=1.0; __syncthreads(); } /*for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+0.5*dt*dwn1[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn2,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+0.5*dt*dwn2[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn3,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+dt*dwn3[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn4,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) { wnew[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+(dt/6.0)*( dwn1[fencode(p,i,j,f)]+2.0*dwn2[fencode(p,i,j,f)] +2.0*dwn3[fencode(p,i,j,f)]+dwn4[fencode(p,i,j,f)]); }*/ __syncthreads(); /* for(int f=rho; f<=b3; f++) wnew[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+dt*dwn1[fencode(p,i,j,f)]; computej(wnew,wd,p,i,j); computepk(wnew,wd,p,i,j); computept(wnew,wd,p,i,j);*/ } __syncthreads(); } __global__ void derivsource_parallel(struct params *p, float *w, float *wnew, float *wmod, float *dwn1, float *wd) { // compute the global index in the vector from // the number of the current block, blockIdx, // the number of threads per block, blockDim, // and the number of the current thread within the block, threadIdx //int i = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.y * blockDim.y + threadIdx.y; int iindex = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int index,k; int ni=p->ni; int nj=p->nj; float dt=p->dt; float dy=p->dy; float dx=p->dx; float g=p->g; // dt=1.0; //dt=0.05; //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3; j=iindex/ni; //i=iindex-j*(iindex/ni); i=iindex-(j*ni); if(i>1 && j >1 && i<((p->ni)-2) && j<((p->nj)-2)) { for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); computebdotv(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) { deriv(dwn1,wd,wmod,p,i,j,f); //dwn1[fencode(p,i,j,f)]=1.0; __syncthreads(); } /*for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+0.5*dt*dwn1[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn2,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+0.5*dt*dwn2[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn3,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+dt*dwn3[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn4,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) { wnew[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+(dt/6.0)*( dwn1[fencode(p,i,j,f)]+2.0*dwn2[fencode(p,i,j,f)] +2.0*dwn3[fencode(p,i,j,f)]+dwn4[fencode(p,i,j,f)]); }*/ __syncthreads(); /* for(int f=rho; f<=b3; f++) wnew[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+dt*dwn1[fencode(p,i,j,f)]; computej(wnew,wd,p,i,j); computepk(wnew,wd,p,i,j); computept(wnew,wd,p,i,j);*/ } __syncthreads(); } ///////////////////////////////////// // error checking routine ///////////////////////////////////// void checkErrors(char *label) { // we need to 
synchronise first to catch errors due to // asynchroneous operations that would otherwise // potentially go unnoticed hipError_t err; err = hipDeviceSynchronize(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } err = hipGetLastError(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } } int cuderivcurrent(struct params **p, float **w, float **wnew, struct params **d_p, float **d_w, float **d_wnew, float **d_wmod, float **d_dwn1, float **d_wd) { //printf("calling propagate solution\n"); //dim3 dimBlock(blocksize, blocksize); //dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); dim3 dimBlock(dimblock, 1); //dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); int numBlocks = (((*p)->ni)*((*p)->nj)+numThreadsPerBlock-1) / numThreadsPerBlock; //__global__ void prop_parallel(struct params *p, float *b, float *w, float *wnew, float *wmod, // float *dwn1, float *dwn2, float *dwn3, float *dwn4, float *wd) //init_parallel(struct params *p, float *b, float *u, float *v, float *h) hipLaunchKernelGGL(( derivcurrent_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd); //prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h); //printf("called prop\n"); hipDeviceSynchronize(); //boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called boundary\n"); //hipDeviceSynchronize(); //update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called update\n"); // hipDeviceSynchronize(); // hipMemcpy(*w, *d_w, 8*((*p)->ni)* ((*p)->nj)*sizeof(float), hipMemcpyDeviceToHost); //hipMemcpy(*wnew, *d_wnew, 8*((*p)->ni)* ((*p)->nj)*sizeof(float), hipMemcpyDeviceToHost); //hipMemcpy(*b, *d_b, (((*p)->ni)* ((*p)->nj))*sizeof(float), hipMemcpyDeviceToHost); //checkErrors("copy data from device"); } int cuderivsource(struct params **p, float **w, float **wnew,struct params **d_p, float **d_w, float **d_wnew, float **d_wmod, float **d_dwn1, float **d_wd) { //printf("calling propagate solution\n"); //dim3 dimBlock(blocksize, blocksize); //dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); dim3 dimBlock(dimblock, 1); //dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); int numBlocks = (((*p)->ni)*((*p)->nj)+numThreadsPerBlock-1) / numThreadsPerBlock; //__global__ void prop_parallel(struct params *p, float *b, float *w, float *wnew, float *wmod, // float *dwn1, float *dwn2, float *dwn3, float *dwn4, float *wd) //init_parallel(struct params *p, float *b, float *u, float *v, float *h) hipLaunchKernelGGL(( derivsource_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd); //prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h); //printf("called prop\n"); hipDeviceSynchronize(); //boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called boundary\n"); //hipDeviceSynchronize(); //update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called update\n"); // hipDeviceSynchronize(); // hipMemcpy(*w, *d_w, 8*((*p)->ni)* ((*p)->nj)*sizeof(float), hipMemcpyDeviceToHost); //hipMemcpy(*wnew, *d_wnew, 8*((*p)->ni)* ((*p)->nj)*sizeof(float), hipMemcpyDeviceToHost); //hipMemcpy(*b, *d_b, (((*p)->ni)* 
((*p)->nj))*sizeof(float), hipMemcpyDeviceToHost); //checkErrors("copy data from device"); }
90d146bd766d78d7604ef2c3f7733b791365b725.cu
#include "cudapars.h" #include "paramssteeringtest1.h" ///////////////////////////////////// // standard imports ///////////////////////////////////// #include <stdio.h> #include <math.h> #include "step.h" ///////////////////////////////////// // kernel function (CUDA device) ///////////////////////////////////// __device__ __host__ int encode (struct params *dp,int ix, int iy) { //int kSizeX=(dp)->ni; //int kSizeY=(dp)->nj; return ( iy * ((dp)->ni) + ix); } __device__ __host__ int fencode (struct params *dp,int ix, int iy, int field) { //int kSizeX=(dp)->ni; //int kSizeY=(dp)->nj; return ( (iy * ((dp)->ni) + ix)+(field*((dp)->ni)*((dp)->nj))); } __device__ __host__ float evalgrad(float fi, float fim1, float fip2, float fim2,struct params *p,int dir) { //float valgrad; if(dir == 0) { //valgrad=(2.0/(3.0*(p->dx)))*(fi-fim1)-(1.0/(12.0*(p->dx)))*(fip2-fim2); return((1.0/(1.0*(p->dx)))*(fi-fim1)); } else if(dir == 1) { // valgrad=(2.0/(3.0*(p->dy)))*(fi-fim1)-(1.0/(12.0*(p->dy)))*(fip2-fim2); return((1.0/(1.0*(p->dy)))*(fi-fim1)); } return -1; } __device__ __host__ float grad(float *wmod,struct params *p,int i,int j,int field,int dir) { //float valgrad; if(dir == 0) { // valgrad=(2.0/(3.0*(p->dx)))*(wmod[fencode(p,i,j,field)]-wmod[fencode(p,i-1,j,field)])-(1.0/(12.0*(p->dx)))*(wmod[fencode(p,i+2,j,field)]-wmod[fencode(p,i-2,j,field)]); return((1.0/(1.0*(p->dx)))*(wmod[fencode(p,i+1,j,field)]-wmod[fencode(p,i-1,j,field)])); } else if(dir == 1) { // valgrad=(2.0/(3.0*(p->dy)))*(wmod[fencode(p,i,j,field)]-wmod[fencode(p,i,j-1,field)])-(1.0/(12.0*(p->dy)))*(wmod[fencode(p,i,j+2,field)]-wmod[fencode(p,i,j-2,field)]); return((1.0/(1.0*(p->dy)))*(wmod[fencode(p,i,j+1,field)]-wmod[fencode(p,i,j-1,field)])); } return -1; } __device__ __host__ void computej(float *wmod,float *wd,struct params *p,int i,int j) { // int status=0; // float dbzdy, dbydz; // float dbzdx, dbxdz; // float dbydx, dbxdy; // dbzdy=grad(wmod,p,i,j,b3,1); // dbydz=0.0; // dbzdx=grad(wmod,p,i,j,b3,0); // dbxdz=0.0; // dbydx=grad(wmod,p,i,j,b2,0); // dbxdy=grad(wmod,p,i,j,b1,1); wd[fencode(p,i,j,0)]=(grad(wmod,p,i,j,b3,1))/(p->mu); wd[fencode(p,i,j,1)]=(grad(wmod,p,i,j,b3,0))/(p->mu); wd[fencode(p,i,j,2)]=(grad(wmod,p,i,j,b2,0)-grad(wmod,p,i,j,b1,1))/(p->mu); //return ( status); } __device__ __host__ void computebdotv(float *wmod,float *wd,struct params *p,int i,int j) { // int status=0; //float bsq=wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]; // wd[fencode(p,i,j,4)]= wd[fencode(p,i,j,3)]+0.5*(wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]); wd[fencode(p,i,j,bdotv)]=(wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,mom1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,mom2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,mom3)])/wmod[fencode(p,i,j,rho)]; // return ( status); } __device__ __host__ void computepk(float *wmod,float *wd,struct params *p,int i,int j) { // int status=0; //float bsq=wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]; wd[fencode(p,i,j,4)]= wd[fencode(p,i,j,3)]+0.5*(wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]); // return ( status); } __device__ __host__ void computept(float *wmod,float *wd,struct params *p,int i,int j) { //int status=0; 
//float momsq=wmod[fencode(p,i,j,mom1)]*wmod[fencode(p,i,j,mom1)]+wmod[fencode(p,i,j,mom2)]*wmod[fencode(p,i,j,mom2)]+wmod[fencode(p,i,j,mom3)]*wmod[fencode(p,i,j,mom3)]; //float bsq=wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]; wd[fencode(p,i,j,3)]=((p->gamma)-1)*(wmod[fencode(p,i,j,energy)]- 0.5*(wmod[fencode(p,i,j,mom1)]*wmod[fencode(p,i,j,mom1)]+wmod[fencode(p,i,j,mom2)]*wmod[fencode(p,i,j,mom2)]+wmod[fencode(p,i,j,mom3)]*wmod[fencode(p,i,j,mom3)])/wmod[fencode(p,i,j,rho)]-0.5*(wmod[fencode(p,i,j,b1)]*wmod[fencode(p,i,j,b1)]+wmod[fencode(p,i,j,b2)]*wmod[fencode(p,i,j,b2)]+wmod[fencode(p,i,j,b3)]*wmod[fencode(p,i,j,b3)]) ); //return ( status); } __device__ __host__ float sourcerho (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { // float src=0; // int field=rho; return 0; } __device__ __host__ float sourcemom (float *dw, float *wd, float *w, struct params *p,int ix, int iy,int field, int direction) { //float src=0; switch(direction) { case 0: return(w[fencode(p,ix,iy,rho)]*(p->g1))-grad(wd,p,ix,iy,pressuret,0); break; case 1: return(w[fencode(p,ix,iy,rho)]*(p->g2))-grad(wd,p,ix,iy,pressuret,1); break; case 2: return(w[fencode(p,ix,iy,rho)]*(p->g3))-grad(wd,p,ix,iy,pressuret,2); break; } return 0; } __device__ __host__ float sourceb (float *dw, float *wd, float *w, struct params *p,int ix, int iy,int field, int direction) { //float src=0; switch(direction) { case 0: return(p->eta)*grad(wd,p,ix,iy,current3,1); break; case 1: return -(p->eta)*grad(wd,p,ix,iy,current3,0); break; case 2: return (p->eta)*(grad(wd,p,ix,iy,current2,0)-grad(wd,p,ix,iy,current1,1)); break; } return 0; } __device__ __host__ float sourceenergy (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { // float src=0; float srcg,srcb; int field=energy; float ddcx,ddcy; float fi,fim1;//fip2,fim2; srcg=(p->g1)*w[fencode(p,ix,iy,mom1)]+(p->g2)*w[fencode(p,ix,iy,mom2)]+(p->g3)*w[fencode(p,ix,iy,mom3)]; fi=(w[fencode(p,ix+1,iy,b2)]*wd[fencode(p,ix+1,iy,current3)]-w[fencode(p,ix+1,iy,b3)]*wd[fencode(p,ix+1,iy,current2)]); fim1=(w[fencode(p,ix-1,iy,b2)]*wd[fencode(p,ix-1,iy,current3)]-w[fencode(p,ix-1,iy,b3)]*wd[fencode(p,ix-1,iy,current2)]); // fip2=(w[fencode(p,ix+2,iy,b2)]*wd[fencode(p,ix+2,iy,current3)]-w[fencode(p,ix+2,iy,b3)]*wd[fencode(p,ix+2,iy,current2)]); // fim2=(w[fencode(p,ix-2,iy,b2)]*wd[fencode(p,ix-2,iy,current3)]-w[fencode(p,ix-2,iy,b3)]*wd[fencode(p,ix-2,iy,current2)]); // ddcx=evalgrad(fi,fim1,fip2,fim2,p,0); ddcx=evalgrad(fi,fim1,0,0,p,0); fi=(w[fencode(p,ix+1,iy,b3)]*wd[fencode(p,ix+1,iy,current1)]-w[fencode(p,ix+1,iy,b1)]*wd[fencode(p,ix+1,iy,current3)]); fim1=(w[fencode(p,ix,iy-1,b3)]*wd[fencode(p,ix,iy-1,current1)]-w[fencode(p,ix,iy-1,b1)]*wd[fencode(p,ix,iy-1,current3)]); // fip2=(w[fencode(p,ix,iy+2,b3)]*wd[fencode(p,ix,iy+2,current1)]-w[fencode(p,ix,iy+2,b1)]*wd[fencode(p,ix,iy+2,current3)]); // fim2=(w[fencode(p,ix,iy-2,b3)]*wd[fencode(p,ix,iy-2,current1)]-w[fencode(p,ix,iy-2,b1)]*wd[fencode(p,ix,iy-2,current3)]); // ddcx=evalgrad(fi,fim1,fip2,fim2,p,0); ddcy=evalgrad(fi,fim1,0,0,p,1); srcb=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); // src=srcg+srcb; return ( srcg+srcb); } __device__ __host__ float ddotcurrentrho (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { float ddc=0; // int field=rho; ddc= grad(w,p,ix,iy,mom1,0)+grad(w,p,ix,iy,mom2,1); return ( isnan(ddc)?0:ddc); } __device__ __host__ float ddotcurrentmom (float *dw, float *wd, float *w, struct 
params *p,int ix, int iy,int field, int direction) { float ddc=0; float fi, fim1; //float fip2=0, fim2=0; float ddc1,ddc2; float ddcx,ddcy; // ddc= grad(w,p,ix,iy,mom1,0)+grad(w,p,ix,iy,mom2,1); //evalgrad(float fi, float fim1, float fip2, float fim2,struct params *p,int dir) //fi=w(fencode(p,ix,iy,rho)) //calculate momentum current //w[fencode(p,ix,iy,rho)])=1; //w[fencode(p,ix-1,iy,rho)])=1; //w[fencode(p,ix+2,iy,rho)])=1; //w[fencode(p,ix-2,iy,rho)])=1; //w[fencode(p,ix,iy,rho)])=1; //w[fencode(p,ix,iy-1,rho)])=1; //w[fencode(p,ix,iy+2,rho)])=1; //w[fencode(p,ix,iy-2,rho)])=1; switch(direction) { case 0: fi=(w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix+1,iy,rho)])*w[fencode(p,ix+1,iy,mom1)]; fim1=(w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)])*w[fencode(p,ix-1,iy,mom1)]; // fip2=(w[fencode(p,ix+2,iy,mom1)]/w[fencode(p,ix+2,iy,rho)])*w[fencode(p,ix+2,iy,mom1)]; // fim2=(w[fencode(p,ix-2,iy,mom1)]/w[fencode(p,ix-2,iy,rho)])*w[fencode(p,ix-2,iy,mom1)]; // ddcx=evalgrad(fi,fim1,fip2,fim2,p,0); ddcx=evalgrad(fi,fim1,0,0,p,0); //ddcx=fi-fim1; fi=(w[fencode(p,ix,iy+1,mom1)]/w[fencode(p,ix,iy+1,rho)])*w[fencode(p,ix,iy+1,mom2)]; fim1=(w[fencode(p,ix,iy-1,mom1)]/w[fencode(p,ix,iy-1,rho)])*w[fencode(p,ix,iy-1,mom2)]; // fip2=(w[fencode(p,ix,iy+2,mom1)]/w[fencode(p,ix,iy+2,rho)])*w[fencode(p,ix,iy+2,mom2)]; // fim2=(w[fencode(p,ix,iy-2,mom1)]/w[fencode(p,ix,iy-2,rho)])*w[fencode(p,ix,iy-2,mom2)]; //ddcy=fi; ddcy=evalgrad(fi,fim1,0,0,p,1); //ddcy=evalgrad(0,0,fip2,fim2,p,1); break; case 1: fi=(w[fencode(p,ix+1,iy,mom2)]/w[fencode(p,ix+1,iy,rho)])*w[fencode(p,ix+1,iy,mom1)]; fim1=(w[fencode(p,ix-1,iy,mom2)]/w[fencode(p,ix-1,iy,rho)])*w[fencode(p,ix-1,iy,mom1)]; // fip2=(w[fencode(p,ix+2,iy,mom2)]/w[fencode(p,ix+2,iy,rho)])*w[fencode(p,ix+2,iy,mom1)]; // fim2=(w[fencode(p,ix-2,iy,mom2)]/w[fencode(p,ix-2,iy,rho)])*w[fencode(p,ix-2,iy,mom1)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=(w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)])*w[fencode(p,ix,iy+1,mom2)]; fim1=(w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)])*w[fencode(p,ix,iy-1,mom2)]; // fip2=(w[fencode(p,ix,iy+2,mom2)]/w[fencode(p,ix,iy+2,rho)])*w[fencode(p,ix,iy+2,mom2)]; // fim2=(w[fencode(p,ix,iy-2,mom2)]/w[fencode(p,ix,iy-2,rho)])*w[fencode(p,ix,iy-2,mom2)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 2: fi=(w[fencode(p,ix+1,iy,mom3)]/w[fencode(p,ix+1,iy,rho)])*w[fencode(p,ix+1,iy,mom1)]; fim1=(w[fencode(p,ix-1,iy,mom3)]/w[fencode(p,ix-1,iy,rho)])*w[fencode(p,ix-1,iy,mom1)]; // fip2=(w[fencode(p,ix+2,iy,mom3)]/w[fencode(p,ix+2,iy,rho)])*w[fencode(p,ix+2,iy,mom1)]; // fim2=(w[fencode(p,ix-2,iy,mom3)]/w[fencode(p,ix-2,iy,rho)])*w[fencode(p,ix-2,iy,mom1)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=(w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)])*w[fencode(p,ix,iy+1,mom2)]; fim1=(w[fencode(p,ix,iy-1,mom3)]/w[fencode(p,ix,iy-1,rho)])*w[fencode(p,ix,iy-1,mom2)]; // fip2=(w[fencode(p,ix,iy+2,mom3)]/w[fencode(p,ix,iy+2,rho)])*w[fencode(p,ix,iy+2,mom2)]; // fim2=(w[fencode(p,ix,iy-2,mom3)]/w[fencode(p,ix,iy-2,rho)])*w[fencode(p,ix,iy-2,mom2)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; } ddc1=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); //fip2=0, fim2=0; //calculate bfield current switch(direction) { case 0: fi=w[fencode(p,ix+1,iy,b1)]*w[fencode(p,ix+1,iy,b1)]; fim1=w[fencode(p,ix-1,iy,b1)]*w[fencode(p,ix-1,iy,b1)]; // fip2=w[fencode(p,ix+2,iy,b1)]*w[fencode(p,ix+2,iy,b1)]; // fim2=w[fencode(p,ix-2,iy,b1)]*w[fencode(p,ix-2,iy,b1)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b1)]*w[fencode(p,ix,iy+1,b2)]; 
fim1=w[fencode(p,ix,iy-1,b1)]*w[fencode(p,ix,iy-1,b2)]; // fip2=w[fencode(p,ix,iy+2,b1)]*w[fencode(p,ix,iy+2,b2)]; // fim2=w[fencode(p,ix,iy-2,b1)]*w[fencode(p,ix,iy-2,b2)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 1: fi=w[fencode(p,ix+1,iy,b2)]*w[fencode(p,ix+1,iy,b1)]; fim1=w[fencode(p,ix-1,iy,b2)]*w[fencode(p,ix-1,iy,b1)]; // fip2=w[fencode(p,ix+2,iy,b2)]*w[fencode(p,ix+2,iy,b1)]; // fim2=w[fencode(p,ix-2,iy,b2)]*w[fencode(p,ix-2,iy,b1)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b2)]*w[fencode(p,ix,iy+1,b2)]; fim1=w[fencode(p,ix,iy-1,b2)]*w[fencode(p,ix,iy-1,b2)]; // fip2=w[fencode(p,ix,iy+2,b2)]*w[fencode(p,ix,iy+2,b2)]; // fim2=w[fencode(p,ix,iy-2,b2)]*w[fencode(p,ix,iy-2,b2)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 2: fi=w[fencode(p,ix+1,iy,b3)]*w[fencode(p,ix+1,iy,b1)]; fim1=w[fencode(p,ix-1,iy,b3)]*w[fencode(p,ix-1,iy,b1)]; // fip2=w[fencode(p,ix+2,iy,b3)]*w[fencode(p,ix+2,iy,b1)]; // fim2=w[fencode(p,ix-2,iy,b3)]*w[fencode(p,ix-2,iy,b1)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b3)]*w[fencode(p,ix,iy+1,b2)]; fim1=w[fencode(p,ix,iy-1,b3)]*w[fencode(p,ix,iy-1,b2)]; // fip2=w[fencode(p,ix,iy+2,b3)]*w[fencode(p,ix,iy+2,b2)]; // fim2=w[fencode(p,ix,iy-2,b3)]*w[fencode(p,ix,iy-2,b2)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; } //ddc2=ddcx+ddcy; ddc2=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); //ddc=ddc1-ddc2; return ( ddc1-ddc2); } __device__ __host__ float ddotcurrentb (float *dw, float *wd, float *w, struct params *p,int ix, int iy,int field, int direction) { //float ddc=0; float fi, fim1;// fip2=0, fim2=0; float ddc1,ddc2; float ddcx,ddcy; switch(direction) { case 0: fi=w[fencode(p,ix+1,iy,mom1)]*w[fencode(p,ix+1,iy,b1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,mom1)]*w[fencode(p,ix-1,iy,b1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,mom1)]*w[fencode(p,ix+2,iy,b1)]/w[fencode(p,ix+2,iy,rho)]; //fim2=w[fencode(p,ix-2,iy,mom1)]*w[fencode(p,ix-2,iy,b1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,mom1)]*w[fencode(p,ix,iy+1,b2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,mom1)]*w[fencode(p,ix,iy-1,b2)]/w[fencode(p,ix,iy-1,rho)]; //fip2=w[fencode(p,ix,iy+2,mom1)]*w[fencode(p,ix,iy+2,b2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,mom1)]*w[fencode(p,ix,iy-2,b2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 1: fi=w[fencode(p,ix+1,iy,mom2)]*w[fencode(p,ix+1,iy,b1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,mom2)]*w[fencode(p,ix-1,iy,b1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,mom2)]*w[fencode(p,ix+2,iy,b1)]/w[fencode(p,ix+2,iy,rho)]; //fim2=w[fencode(p,ix-2,iy,mom2)]*w[fencode(p,ix-2,iy,b1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,mom2)]*w[fencode(p,ix,iy+1,b2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,mom2)]*w[fencode(p,ix,iy-1,b2)]/w[fencode(p,ix,iy-1,rho)]; //fip2=w[fencode(p,ix,iy+2,mom2)]*w[fencode(p,ix,iy+2,b2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,mom2)]*w[fencode(p,ix,iy-2,b2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 2: fi=w[fencode(p,ix+1,iy,mom3)]*w[fencode(p,ix+1,iy,b1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,mom3)]*w[fencode(p,ix-1,iy,b1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,mom3)]*w[fencode(p,ix+2,iy,b1)]/w[fencode(p,ix+2,iy,rho)]; //fim2=w[fencode(p,ix-2,iy,mom3)]*w[fencode(p,ix-2,iy,b1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); 
fi=w[fencode(p,ix,iy+1,mom3)]*w[fencode(p,ix,iy+1,b2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,mom3)]*w[fencode(p,ix,iy-1,b2)]/w[fencode(p,ix,iy-1,rho)]; //fip2=w[fencode(p,ix,iy+2,mom3)]*w[fencode(p,ix,iy+2,b2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,mom3)]*w[fencode(p,ix,iy-2,b2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; } ddc1=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); switch(direction) { case 0: fi=w[fencode(p,ix+1,iy,b1)]*w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,b1)]*w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,b1)]*w[fencode(p,ix+2,iy,mom1)]/w[fencode(p,ix+2,iy,rho)]; // fim2=w[fencode(p,ix-2,iy,b1)]*w[fencode(p,ix-2,iy,mom1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b1)]*w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,b1)]*w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)]; //fip2=w[fencode(p,ix,iy+2,b1)]*w[fencode(p,ix,iy+2,mom2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,b1)]*w[fencode(p,ix,iy-2,mom2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 1: fi=w[fencode(p,ix+1,iy,b2)]*w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,b2)]*w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,b2)]*w[fencode(p,ix+2,iy,mom1)]/w[fencode(p,ix+2,iy,rho)]; // fim2=w[fencode(p,ix-2,iy,b2)]*w[fencode(p,ix-2,iy,mom1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b2)]*w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,b2)]*w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)]; // fip2=w[fencode(p,ix,iy+2,b2)]*w[fencode(p,ix,iy+2,mom2)]/w[fencode(p,ix,iy+2,rho)]; // fim2=w[fencode(p,ix,iy-2,b2)]*w[fencode(p,ix,iy-2,mom2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; case 2: fi=w[fencode(p,ix+1,iy,b3)]*w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix+1,iy,rho)]; fim1=w[fencode(p,ix-1,iy,b3)]*w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,b3)]*w[fencode(p,ix+2,iy,mom1)]/w[fencode(p,ix+2,iy,rho)]; //fim2=w[fencode(p,ix-2,iy,b3)]*w[fencode(p,ix-2,iy,mom1)]/w[fencode(p,ix-2,iy,rho)]; ddcx=evalgrad(fi,fim1,0,0,p,0); fi=w[fencode(p,ix,iy+1,b3)]*w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)]; fim1=w[fencode(p,ix,iy-1,b3)]*w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)]; //fip2=w[fencode(p,ix,iy+2,b3)]*w[fencode(p,ix,iy+2,mom2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,b3)]*w[fencode(p,ix,iy-2,mom2)]/w[fencode(p,ix,iy-2,rho)]; ddcy=evalgrad(fi,fim1,0,0,p,1); break; } ddc2=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); return(ddc1-ddc2); } __device__ __host__ float ddotcurrentenergy (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { // float ddc=0; float dd1,dd2,dd3; float ddcx,ddcy; //float fi, fim1;//fip2=0, fim2=0; //float dpi, dpim1;//, dpip2=0, dpim2=0; //int field=energy; //fi=w[fencode(p,ix+1,iy,energy)]*w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix,iy,rho)]; //fim1=w[fencode(p,ix-1,iy,energy)]*w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)]; //fip2=w[fencode(p,ix+2,iy,energy)]*w[fencode(p,ix+2,iy,mom1)]/w[fencode(p,ix+2,iy,rho)]; // fim2=w[fencode(p,ix-2,iy,energy)]*w[fencode(p,ix-2,iy,mom1)]/w[fencode(p,ix-2,iy,rho)]; // ddcx=evalgrad(fi,fim1,0,0,p,0); 
ddcx=evalgrad(w[fencode(p,ix+1,iy,energy)]*w[fencode(p,ix+1,iy,mom1)]/w[fencode(p,ix,iy,rho)],w[fencode(p,ix-1,iy,energy)]*w[fencode(p,ix-1,iy,mom1)]/w[fencode(p,ix-1,iy,rho)],0,0,p,0); // fi=w[fencode(p,ix,iy+1,energy)]*w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)]; // fim1=w[fencode(p,ix,iy-1,energy)]*w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)]; // fip2=w[fencode(p,ix,iy+2,energy)]*w[fencode(p,ix,iy+2,mom2)]/w[fencode(p,ix,iy+2,rho)]; //fim2=w[fencode(p,ix,iy-2,energy)]*w[fencode(p,ix,iy-2,mom2)]/w[fencode(p,ix,iy-2,rho)]; //ddcy=evalgrad(fi,fim1,0,0,p,1); ddcy=evalgrad(w[fencode(p,ix,iy+1,energy)]*w[fencode(p,ix,iy+1,mom2)]/w[fencode(p,ix,iy+1,rho)],w[fencode(p,ix,iy-1,energy)]*w[fencode(p,ix,iy-1,mom2)]/w[fencode(p,ix,iy-1,rho)],0,0,p,1); dd1=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); // dpi=(w[fencode(p,ix+1,iy,b1)]*w[fencode(p,ix+1,iy,mom1)]+w[fencode(p,ix+1,iy,b2)]*w[fencode(p,ix+1,iy,mom2)]+w[fencode(p,ix+1,iy,b3)]*w[fencode(p,ix+1,iy,mom3)])/w[fencode(p,ix+1,iy,rho)]; // dpim1=(w[fencode(p,ix-1,iy,b1)]*w[fencode(p,ix-1,iy,mom1)]+w[fencode(p,ix-1,iy,b2)]*w[fencode(p,ix-1,iy,mom2)]+w[fencode(p,ix-1,iy,b3)]*w[fencode(p,ix-1,iy,mom3)])/w[fencode(p,ix-1,iy,rho)]; //dpip2=(w[fencode(p,ix+2,iy,b1)]*w[fencode(p,ix+2,iy,mom1)]+w[fencode(p,ix+2,iy,b2)]*w[fencode(p,ix+2,iy,mom2)]+w[fencode(p,ix+2,iy,b3)]*w[fencode(p,ix+2,iy,mom3)])/w[fencode(p,ix+2,iy,rho)]; // dpim2=(w[fencode(p,ix-2,iy,b1)]*w[fencode(p,ix-2,iy,mom1)]+w[fencode(p,ix-2,iy,b2)]*w[fencode(p,ix-2,iy,mom2)]+w[fencode(p,ix-2,iy,b3)]*w[fencode(p,ix-2,iy,mom3)])/w[fencode(p,ix-2,iy,rho)]; // fi=dpi*w[fencode(p,ix+1,iy,b1)]; // fim1=dpim1*w[fencode(p,ix-1,iy,b1)]; //fip2=dpip2*w[fencode(p,ix+2,iy,b1)]; // fim2=dpim2*w[fencode(p,ix-2,iy,b1)]; // ddcx=evalgrad(fi,fim1,0,0,p,0); // ddcx=evalgrad(((w[fencode(p,ix+1,iy,b1)]*w[fencode(p,ix+1,iy,mom1)]+w[fencode(p,ix+1,iy,b2)]*w[fencode(p,ix+1,iy,mom2)]+w[fencode(p,ix+1,iy,b3)]*w[fencode(p,ix+1,iy,mom3)])/w[fencode(p,ix+1,iy,rho)])*w[fencode(p,ix+1,iy,b1)],((w[fencode(p,ix-1,iy,b1)]*w[fencode(p,ix-1,iy,mom1)]+w[fencode(p,ix-1,iy,b2)]*w[fencode(p,ix-1,iy,mom2)]+w[fencode(p,ix-1,iy,b3)]*w[fencode(p,ix-1,iy,mom3)])/w[fencode(p,ix-1,iy,rho)])*w[fencode(p,ix-1,iy,b1)],0,0,p,0); ddcx=evalgrad(wd[fencode(p,ix+1,iy,bdotv)]*w[fencode(p,ix+1,iy,b1)],wd[fencode(p,ix-1,iy,bdotv)]*w[fencode(p,ix-1,iy,b1)],0,0,p,1); // dpi=(w[fencode(p,ix,iy+1,b1)]*w[fencode(p,ix,iy+1,mom1)]+w[fencode(p,ix,iy+1,b2)]*w[fencode(p,ix,iy+1,mom2)]+w[fencode(p,ix,iy+1,b3)]*w[fencode(p,ix,iy+1,mom3)])/w[fencode(p,ix,iy+1,rho)]; // dpim1=(w[fencode(p,ix,iy-1,b1)]*w[fencode(p,ix,iy-1,mom1)]+w[fencode(p,ix,iy-1,b2)]*w[fencode(p,ix,iy-1,mom2)]+w[fencode(p,ix,iy-1,b3)]*w[fencode(p,ix,iy-1,mom3)])/w[fencode(p,ix,iy-1,rho)]; //dpip2=(w[fencode(p,ix,iy+2,b1)]*w[fencode(p,ix,iy+2,mom1)]+w[fencode(p,ix,iy+2,b2)]*w[fencode(p,ix,iy+2,mom2)]+w[fencode(p,ix,iy+2,b3)]*w[fencode(p,ix,iy+2,mom3)])/w[fencode(p,ix,iy+2,rho)]; //dpim2=(w[fencode(p,ix,iy-2,b1)]*w[fencode(p,ix,iy-2,mom1)]+w[fencode(p,ix,iy-2,b2)]*w[fencode(p,ix,iy-2,mom2)]+w[fencode(p,ix,iy-2,b3)]*w[fencode(p,ix,iy-2,mom3)])/w[fencode(p,ix,iy-2,rho)]; // fi=dpi*w[fencode(p,ix,iy+1,b2)]; // fim1=dpim1*w[fencode(p,ix,iy-1,b2)]; //fip2=dpip2*w[fencode(p,ix,iy+2,b2)]; //fim2=dpim2*w[fencode(p,ix,iy-2,b2)]; //fi=w[fencode(p,ix,iy+1,b2)]; // fim1=w[fencode(p,ix,iy-1,b2)]; ddcy=evalgrad(wd[fencode(p,ix,iy+1,bdotv)]*w[fencode(p,ix,iy+1,b2)],wd[fencode(p,ix,iy-1,bdotv)]*w[fencode(p,ix,iy-1,b2)],0,0,p,1); //ddcx=0; 
//ddcy=evalgrad(((w[fencode(p,ix,iy+1,b1)]*w[fencode(p,ix,iy+1,mom1)]+w[fencode(p,ix,iy+1,b2)]*w[fencode(p,ix,iy+1,mom2)]+w[fencode(p,ix,iy+1,b3)]*w[fencode(p,ix,iy+1,mom3)])/w[fencode(p,ix,iy+1,rho)])*w[fencode(p,ix,iy+1,b2)],((w[fencode(p,ix,iy-1,b1)]*w[fencode(p,ix,iy-1,mom1)]+w[fencode(p,ix,iy-1,b2)]*w[fencode(p,ix,iy-1,mom2)]+w[fencode(p,ix,iy-1,b3)]*w[fencode(p,ix,iy-1,mom3)])/w[fencode(p,ix,iy-1,rho)])*w[fencode(p,ix,iy-1,b2)],0,0,p,1); dd2=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); ddcx=wd[fencode(p,ix,iy,pressuret)]*grad(w,p,ix,iy,mom1,0)/w[fencode(p,ix,iy,rho)]; ddcy=wd[fencode(p,ix,iy,pressuret)]*grad(w,p,ix,iy,mom2,1)/w[fencode(p,ix,iy,rho)]; dd3=(isnan(ddcx)?0:ddcx)+(isnan(ddcy)?0:ddcy); return(dd1+dd2+dd3); //return dd1; // return ( ddc); } __device__ __host__ int derivrho (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { int status=0; int field=rho; dw[fencode(p,ix,iy,field)]=sourcerho(dw,wd,w,p,ix,iy)-ddotcurrentrho(dw,wd,w,p,ix,iy); //dw[fencode(p,ix,iy,field)]=w[fencode(p,ix,iy,field)]+10; return ( status); } __device__ __host__ int derivmom (float *dw, float *wd, float *w, struct params *p,int ix, int iy,int field, int direction) { int status=0; //dw[fencode(p,ix,iy,field)]=w[fencode(p,ix,iy,field)]+20+5*(2*direction+1); dw[fencode(p,ix,iy,field)]=sourcemom(dw,wd,w,p,ix,iy,field,direction)-ddotcurrentmom(dw,wd,w,p,ix,iy,field,direction); //dw[fencode(p,ix,iy,field)]=-ddotcurrentmom(dw,wd,w,p,ix,iy,field,direction); return ( status); } __device__ __host__ int derivb (float *dw, float *wd, float *w, struct params *p,int ix, int iy, int field, int direction) { int status=0; dw[fencode(p,ix,iy,field)]=sourceb(dw,wd,w,p,ix,iy,field,direction)-ddotcurrentb(dw,wd,w,p,ix,iy,field,direction); return ( status); } __device__ __host__ int derivenergy (float *dw, float *wd, float *w, struct params *p,int ix, int iy) { int status=0; int field=energy; dw[fencode(p,ix,iy,field)]=sourceenergy(dw,wd,w,p,ix,iy)-ddotcurrentenergy(dw,wd,w,p,ix,iy); return ( status); } //rho, mom1, mom2, mom3, energy, b1, b2, b3 __device__ __host__ void deriv (float *dw, float *wd, float *w, struct params *p,int ix, int iy, int field) { //int status=0; switch(field) { case rho: derivrho(dw,wd,w,p,ix,iy); break; case mom1: derivmom(dw,wd,w,p,ix,iy,field,0); break; case mom2: derivmom(dw,wd,w,p,ix,iy,field,1); break; case mom3: derivmom(dw,wd,w,p,ix,iy,field,2); break; case energy: derivenergy(dw,wd,w,p,ix,iy); break; case b1: derivb(dw,wd,w,p,ix,iy,field,0); break; case b2: derivb(dw,wd,w,p,ix,iy,field,1); break; case b3: derivb(dw,wd,w,p,ix,iy,field,2); break; } //return ( status); } __global__ void derivcurrent_parallel(struct params *p, float *w, float *wnew, float *wmod, float *dwn1, float *wd) { // compute the global index in the vector from // the number of the current block, blockIdx, // the number of threads per block, blockDim, // and the number of the current thread within the block, threadIdx //int i = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.y * blockDim.y + threadIdx.y; int iindex = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int index,k; int ni=p->ni; int nj=p->nj; float dt=p->dt; float dy=p->dy; float dx=p->dx; float g=p->g; // dt=1.0; //dt=0.05; //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3; j=iindex/ni; //i=iindex-j*(iindex/ni); i=iindex-(j*ni); if(i>1 && j >1 && i<((p->ni)-2) && j<((p->nj)-2)) { for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); 
computebdotv(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) { deriv(dwn1,wd,wmod,p,i,j,f); //dwn1[fencode(p,i,j,f)]=1.0; __syncthreads(); } /*for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+0.5*dt*dwn1[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn2,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+0.5*dt*dwn2[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn3,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+dt*dwn3[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn4,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) { wnew[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+(dt/6.0)*( dwn1[fencode(p,i,j,f)]+2.0*dwn2[fencode(p,i,j,f)] +2.0*dwn3[fencode(p,i,j,f)]+dwn4[fencode(p,i,j,f)]); }*/ __syncthreads(); /* for(int f=rho; f<=b3; f++) wnew[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+dt*dwn1[fencode(p,i,j,f)]; computej(wnew,wd,p,i,j); computepk(wnew,wd,p,i,j); computept(wnew,wd,p,i,j);*/ } __syncthreads(); } __global__ void derivsource_parallel(struct params *p, float *w, float *wnew, float *wmod, float *dwn1, float *wd) { // compute the global index in the vector from // the number of the current block, blockIdx, // the number of threads per block, blockDim, // and the number of the current thread within the block, threadIdx //int i = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.y * blockDim.y + threadIdx.y; int iindex = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int index,k; int ni=p->ni; int nj=p->nj; float dt=p->dt; float dy=p->dy; float dx=p->dx; float g=p->g; // dt=1.0; //dt=0.05; //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3; j=iindex/ni; //i=iindex-j*(iindex/ni); i=iindex-(j*ni); if(i>1 && j >1 && i<((p->ni)-2) && j<((p->nj)-2)) { for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); computebdotv(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) { deriv(dwn1,wd,wmod,p,i,j,f); //dwn1[fencode(p,i,j,f)]=1.0; __syncthreads(); } /*for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+0.5*dt*dwn1[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn2,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+0.5*dt*dwn2[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn3,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) wmod[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+dt*dwn3[fencode(p,i,j,f)]; computej(wmod,wd,p,i,j); computepk(wmod,wd,p,i,j); computept(wmod,wd,p,i,j); for(int f=rho; f<=b3; f++) deriv(dwn4,wd,wmod,p,i,j,f); for(int f=rho; f<=b3; f++) { wnew[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+(dt/6.0)*( dwn1[fencode(p,i,j,f)]+2.0*dwn2[fencode(p,i,j,f)] +2.0*dwn3[fencode(p,i,j,f)]+dwn4[fencode(p,i,j,f)]); }*/ __syncthreads(); /* for(int f=rho; f<=b3; f++) wnew[fencode(p,i,j,f)]=w[fencode(p,i,j,f)]+dt*dwn1[fencode(p,i,j,f)]; computej(wnew,wd,p,i,j); computepk(wnew,wd,p,i,j); computept(wnew,wd,p,i,j);*/ } __syncthreads(); } ///////////////////////////////////// // error checking routine ///////////////////////////////////// void checkErrors(char *label) { // we need to 
synchronise first to catch errors due to // asynchronous operations that would otherwise // potentially go unnoticed cudaError_t err; err = cudaThreadSynchronize(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } err = cudaGetLastError(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } } int cuderivcurrent(struct params **p, float **w, float **wnew, struct params **d_p, float **d_w, float **d_wnew, float **d_wmod, float **d_dwn1, float **d_wd) { //printf("calling propagate solution\n"); //dim3 dimBlock(blocksize, blocksize); //dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); dim3 dimBlock(dimblock, 1); //dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); int numBlocks = (((*p)->ni)*((*p)->nj)+numThreadsPerBlock-1) / numThreadsPerBlock; //__global__ void prop_parallel(struct params *p, float *b, float *w, float *wnew, float *wmod, // float *dwn1, float *dwn2, float *dwn3, float *dwn4, float *wd) //init_parallel(struct params *p, float *b, float *u, float *v, float *h) derivcurrent_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd); //prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h); //printf("called prop\n"); cudaThreadSynchronize(); //boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called boundary\n"); //cudaThreadSynchronize(); //update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called update\n"); // cudaThreadSynchronize(); // cudaMemcpy(*w, *d_w, 8*((*p)->ni)* ((*p)->nj)*sizeof(float), cudaMemcpyDeviceToHost); //cudaMemcpy(*wnew, *d_wnew, 8*((*p)->ni)* ((*p)->nj)*sizeof(float), cudaMemcpyDeviceToHost); //cudaMemcpy(*b, *d_b, (((*p)->ni)* ((*p)->nj))*sizeof(float), cudaMemcpyDeviceToHost); //checkErrors("copy data from device"); return 0; } int cuderivsource(struct params **p, float **w, float **wnew,struct params **d_p, float **d_w, float **d_wnew, float **d_wmod, float **d_dwn1, float **d_wd) { //printf("calling propagate solution\n"); //dim3 dimBlock(blocksize, blocksize); //dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); dim3 dimBlock(dimblock, 1); //dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); dim3 dimGrid(((*p)->ni)/dimBlock.x,((*p)->nj)/dimBlock.y); int numBlocks = (((*p)->ni)*((*p)->nj)+numThreadsPerBlock-1) / numThreadsPerBlock; //__global__ void prop_parallel(struct params *p, float *b, float *w, float *wnew, float *wmod, // float *dwn1, float *dwn2, float *dwn3, float *dwn4, float *wd) //init_parallel(struct params *p, float *b, float *u, float *v, float *h) derivsource_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd); //prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h); //printf("called prop\n"); cudaThreadSynchronize(); //boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called boundary\n"); //cudaThreadSynchronize(); //update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called update\n"); // cudaThreadSynchronize(); // cudaMemcpy(*w, *d_w, 8*((*p)->ni)* ((*p)->nj)*sizeof(float), cudaMemcpyDeviceToHost); //cudaMemcpy(*wnew, *d_wnew, 8*((*p)->ni)* ((*p)->nj)*sizeof(float), cudaMemcpyDeviceToHost); //cudaMemcpy(*b, *d_b, (((*p)->ni)* ((*p)->nj))*sizeof(float), cudaMemcpyDeviceToHost);
//checkErrors("copy data from device"); return 0; }
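// Illustrative sketch, not part of the original solver file above: a launch-error
// check using only standard CUDA runtime calls (cudaGetLastError, cudaDeviceSynchronize,
// cudaGetErrorString). The macro name CHECK_LAUNCH is a placeholder. Note that
// cudaThreadSynchronize(), used by checkErrors() above, is deprecated in favour of
// cudaDeviceSynchronize().
#include <cstdio>
#include <cuda_runtime.h>

#define CHECK_LAUNCH(label)                                                     \
  do {                                                                          \
    cudaError_t e = cudaGetLastError();           /* launch/config errors */    \
    if (e != cudaSuccess)                                                       \
      fprintf(stderr, "CUDA Error: %s (at %s)\n", cudaGetErrorString(e), (label)); \
    e = cudaDeviceSynchronize();                  /* async execution errors */  \
    if (e != cudaSuccess)                                                       \
      fprintf(stderr, "CUDA Error: %s (at %s)\n", cudaGetErrorString(e), (label)); \
  } while (0)

// Hypothetical usage: some_kernel<<<blocks, threads>>>(...); CHECK_LAUNCH("some_kernel");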
024cffe89452f220df19e3947afd05ab9f5d193c.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hipfft.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <iostream> #include "gettime.h" #define CUDA_CALL(x) do { if((x) != hipSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ exit(-1);}hipDeviceSynchronize();} while(0) #define PI 3.1415926535897932384626433832795 my_time t; int inline n_blocks(int dim, int block_size) { return dim / block_size + ((dim % block_size == 0)? 0 : 1); } template<typename Real> __global__ static void _my_addvat2(Real *AP, Real *x, int dim, Real alpha) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j && j < dim) { AP[j * (j + 1) / 2 + i] = x[j] * x[i] * alpha; __syncthreads(); } } template<typename Real> __global__ static void _scale(Real *A, int dim, Real alpha) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) { A[i] = A[i] * alpha; } } template<typename Real> __global__ static void _my_addvat3(Real *AP, int numA, Real *x, int cols, Real alpha) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.z * blockDim.z + threadIdx.z; int stride = cols * (cols + 1) / 2; if (i < cols && j < cols && k < numA) { if(i <= j) { AP[k * stride + j * (j + 1) / 2 + i] = x[k * cols + j] * x[k * cols + i] * alpha; } Real scal = 0.1; hipLaunchKernelGGL(( _scale), dim3(stride / 256 + 1), dim3(256), 0, 0, AP, stride, scal); __syncthreads(); } } template<typename Real> __host__ void scale(Real *A, int dim, Real alpha) { Real *devA; CUDA_CALL(hipMalloc((void **)&devA, dim * sizeof(Real))); CUDA_CALL(hipMemcpy(devA, A, dim * sizeof(Real), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( _scale), dim3(n_blocks(dim, 256)), dim3(256), 0, 0, devA, dim, float(0.5)); CUDA_CALL(hipMemcpy(A, devA, dim * sizeof(Real), hipMemcpyDeviceToHost)); CUDA_CALL(hipFree(devA)); } template<typename Real> __host__ static void addvat2(Real *A, int dimA, Real *x, int dimx, Real alpha) { Real *devA; Real *devx; CUDA_CALL(hipMalloc((void **)&devA, dimA * sizeof(Real))); CUDA_CALL(hipMalloc((void **)&devx, dimx * sizeof(Real))); CUDA_CALL(hipMemcpy(devA, A, dimA * sizeof(Real), hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpy(devx, x, dimx * sizeof(Real), hipMemcpyHostToDevice)); t.start(); hipLaunchKernelGGL(( _my_addvat2), dim3(dim3(n_blocks(dimx * dimx, 16))), dim3(dim3(16, 16)), 0, 0, devA, devx, dimx, alpha); t.end(); printf("Compute addvat2 time: %lldms\n", t.used_time()); CUDA_CALL(hipMemcpy(A, devA, dimA * sizeof(Real), hipMemcpyDeviceToHost)); CUDA_CALL(hipFree(devA)); CUDA_CALL(hipFree(devx)); } template<typename Real> __host__ static void addvat3(Real *A,int numA, int dimA, Real *x, int rowsx, int colsx, Real alpha) { Real *devA; Real *devx; CUDA_CALL(hipMalloc((void **)&devA, numA * dimA * sizeof(Real))); CUDA_CALL(hipMalloc((void **)&devx, rowsx * colsx * sizeof(Real))); CUDA_CALL(hipMemcpy(devA, A, numA * dimA * sizeof(Real), hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpy(devx, x, rowsx * colsx * sizeof(Real), hipMemcpyHostToDevice)); t.start(); hipLaunchKernelGGL(( _my_addvat3), dim3(dim3(n_blocks(numA * dimA, 512))), dim3(dim3(8, 8, 8)), 0, 0, devA, numA, devx, colsx, alpha); t.end(); printf("Compute addvat3 time: %lldms\n", t.used_time()); CUDA_CALL(hipMemcpy(A, devA, numA * dimA * sizeof(Real), hipMemcpyDeviceToHost)); CUDA_CALL(hipFree(devA)); CUDA_CALL(hipFree(devx)); } int main() { t.start(); CUDA_CALL(hipSetDevice(4)); t.end(); 
printf("Init Gpu time: %lldms\n", t.used_time()); // compute addvat3 int NA = 5; int NUMA = 5; float *A; int lensq = NA * (NA + 1) / 2; A = (float *)malloc(NUMA * lensq * sizeof(float)); for (int i = 0; i < NUMA * lensq;i++) A[i] = 0; float *x; x = (float *)malloc(NA * NUMA * sizeof(float)); for (int i = 0; i < NUMA; i++) for (int j = 0; j < NA; j++) x[i * NA + j] = float(j + 1); addvat3(A, NUMA, lensq, x, NUMA, NA, float(1.0)); scale(A, NUMA * lensq, float(0.5)); for (int i = 0; i < NUMA; i++) { std::cout << i << " line:"; for (int j = 0; j < lensq; j++) std::cout << " " << A[i * lensq + j]; std::cout << std::endl; } //compute addvat2 float *B; int N = 2600; int lensqB = N * (N + 1) / 2; B = (float *)malloc(lensqB * sizeof(float)); for (int i = 0; i < lensqB;i++) B[i] = 0; float *y; y = (float *)malloc(N * sizeof(float)); for (int i = 0; i < N; i++) y[i] = float(i + 1); addvat2(B, lensqB, y, N, float(1.0)); /* std::cout << "data:"; */ /* for (int j = 0; j < lensq; j++) */ /* std::cout << " " << B[j]; */ /* std::cout << std::endl; */ return 0; }
024cffe89452f220df19e3947afd05ab9f5d193c.cu
#include <cuda_runtime.h> #include <cufft.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <iostream> #include "gettime.h" #define CUDA_CALL(x) do { if((x) != cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ exit(-1);}cudaThreadSynchronize();} while(0) #define PI 3.1415926535897932384626433832795 my_time t; int inline n_blocks(int dim, int block_size) { return dim / block_size + ((dim % block_size == 0)? 0 : 1); } template<typename Real> __global__ static void _my_addvat2(Real *AP, Real *x, int dim, Real alpha) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j && j < dim) { AP[j * (j + 1) / 2 + i] = x[j] * x[i] * alpha; __syncthreads(); } } template<typename Real> __global__ static void _scale(Real *A, int dim, Real alpha) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) { A[i] = A[i] * alpha; } } template<typename Real> __global__ static void _my_addvat3(Real *AP, int numA, Real *x, int cols, Real alpha) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.z * blockDim.z + threadIdx.z; int stride = cols * (cols + 1) / 2; if (i < cols && j < cols && k < numA) { if(i <= j) { AP[k * stride + j * (j + 1) / 2 + i] = x[k * cols + j] * x[k * cols + i] * alpha; } Real scal = 0.1; _scale<<<stride / 256 + 1, 256>>>(AP, stride, scal); __syncthreads(); } } template<typename Real> __host__ void scale(Real *A, int dim, Real alpha) { Real *devA; CUDA_CALL(cudaMalloc((void **)&devA, dim * sizeof(Real))); CUDA_CALL(cudaMemcpy(devA, A, dim * sizeof(Real), cudaMemcpyHostToDevice)); _scale<<<n_blocks(dim, 256), 256>>>(devA, dim, float(0.5)); CUDA_CALL(cudaMemcpy(A, devA, dim * sizeof(Real), cudaMemcpyDeviceToHost)); CUDA_CALL(cudaFree(devA)); } template<typename Real> __host__ static void addvat2(Real *A, int dimA, Real *x, int dimx, Real alpha) { Real *devA; Real *devx; CUDA_CALL(cudaMalloc((void **)&devA, dimA * sizeof(Real))); CUDA_CALL(cudaMalloc((void **)&devx, dimx * sizeof(Real))); CUDA_CALL(cudaMemcpy(devA, A, dimA * sizeof(Real), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(devx, x, dimx * sizeof(Real), cudaMemcpyHostToDevice)); t.start(); _my_addvat2<<<dim3(n_blocks(dimx * dimx, 16)), dim3(16, 16)>>>(devA, devx, dimx, alpha); t.end(); printf("Compute addvat2 time: %lldms\n", t.used_time()); CUDA_CALL(cudaMemcpy(A, devA, dimA * sizeof(Real), cudaMemcpyDeviceToHost)); CUDA_CALL(cudaFree(devA)); CUDA_CALL(cudaFree(devx)); } template<typename Real> __host__ static void addvat3(Real *A,int numA, int dimA, Real *x, int rowsx, int colsx, Real alpha) { Real *devA; Real *devx; CUDA_CALL(cudaMalloc((void **)&devA, numA * dimA * sizeof(Real))); CUDA_CALL(cudaMalloc((void **)&devx, rowsx * colsx * sizeof(Real))); CUDA_CALL(cudaMemcpy(devA, A, numA * dimA * sizeof(Real), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(devx, x, rowsx * colsx * sizeof(Real), cudaMemcpyHostToDevice)); t.start(); _my_addvat3<<<dim3(n_blocks(numA * dimA, 512)), dim3(8, 8, 8)>>>(devA, numA, devx, colsx, alpha); t.end(); printf("Compute addvat3 time: %lldms\n", t.used_time()); CUDA_CALL(cudaMemcpy(A, devA, numA * dimA * sizeof(Real), cudaMemcpyDeviceToHost)); CUDA_CALL(cudaFree(devA)); CUDA_CALL(cudaFree(devx)); } int main() { t.start(); CUDA_CALL(cudaSetDevice(4)); t.end(); printf("Init Gpu time: %lldms\n", t.used_time()); // compute addvat3 int NA = 5; int NUMA = 5; float *A; int lensq = NA * (NA + 1) / 2; A = (float *)malloc(NUMA * lensq * sizeof(float)); for (int i = 
0; i < NUMA * lensq;i++) A[i] = 0; float *x; x = (float *)malloc(NA * NUMA * sizeof(float)); for (int i = 0; i < NUMA; i++) for (int j = 0; j < NA; j++) x[i * NA + j] = float(j + 1); addvat3(A, NUMA, lensq, x, NUMA, NA, float(1.0)); scale(A, NUMA * lensq, float(0.5)); for (int i = 0; i < NUMA; i++) { std::cout << i << " line:"; for (int j = 0; j < lensq; j++) std::cout << " " << A[i * lensq + j]; std::cout << std::endl; } //compute addvat2 float *B; int N = 2600; int lensqB = N * (N + 1) / 2; B = (float *)malloc(lensqB * sizeof(float)); for (int i = 0; i < lensqB;i++) B[i] = 0; float *y; y = (float *)malloc(N * sizeof(float)); for (int i = 0; i < N; i++) y[i] = float(i + 1); addvat2(B, lensqB, y, N, float(1.0)); /* std::cout << "data:"; */ /* for (int j = 0; j < lensq; j++) */ /* std::cout << " " << B[j]; */ /* std::cout << std::endl; */ return 0; }
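// Illustrative sketch, not part of the addvat benchmark above: a host-side reference
// for the packed layout that _my_addvat2/_my_addvat3 write into. The upper triangle
// (i <= j) of x*x^T*alpha is stored at AP[j*(j+1)/2 + i]. Function and variable names
// are placeholders; this is only a correctness check one might run against the kernels.
#include <vector>
#include <cassert>

static void addvat2_reference(std::vector<float>& AP, const std::vector<float>& x, float alpha) {
  const int dim = static_cast<int>(x.size());
  assert(AP.size() == static_cast<size_t>(dim * (dim + 1) / 2));
  for (int j = 0; j < dim; ++j)
    for (int i = 0; i <= j; ++i)
      AP[j * (j + 1) / 2 + i] = x[j] * x[i] * alpha;   // same indexing as the device kernel
}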
711899b8611923d0a7a64bccf442f5148d3c8432.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "print_gpu.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( print_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( print_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( print_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
711899b8611923d0a7a64bccf442f5148d3c8432.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "print_gpu.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); print_gpu<<<gridBlock,threadBlock>>>(); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { print_gpu<<<gridBlock,threadBlock>>>(); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { print_gpu<<<gridBlock,threadBlock>>>(); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
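// Illustrative sketch, not part of the benchmark harness above: timing asynchronous
// kernel launches with CUDA events rather than a host clock. Because launches return
// immediately, a host timer around a launch loop with no synchronization mostly
// measures launch overhead. Only the kernel name print_gpu is taken from the files
// above; the helper name and structure are assumptions.
#include <cuda_runtime.h>

static float time_launches_ms(dim3 grid, dim3 block, int iters) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iters; ++i)
    print_gpu<<<grid, block>>>();            // kernel declared in print_gpu.cu above
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);                // wait until all launches have finished
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);    // elapsed time in milliseconds
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}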
d6b6216f72aceae2ff56cb2e34183ad6c1678858.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2015 by Contributors * \file count_sketch.cu * \brief count_sketch op * \author Chen Zhu, Yang Shi */ #include "./count_sketch-inl.h" #include <mshadow/tensor.h> #include <stdio.h> #include <algorithm> #define WARPS_PER_BLOCK 1 #define THREADS_PER_BLOCK 512 #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { // wrappers to deal with atomic add // supporting only single precision __device__ void atomic_add(float* dst, float val) { atomicAdd(dst, val); } // for double precision __device__ void atomic_add(double* address, double val) { // code example in the official document at: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html // #atomic-functions // NOLINT_NEXT_LINE(runtime/int) unsigned long long int* address_as_ull = (unsigned long long int*) address; // NOLINT(*) unsigned long long int old = *address_as_ull, assumed; // NOLINT(*) do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN // (since NaN != NaN) } while (assumed != old); } template <typename DType> __global__ void sketch_forward_kernel(const int nthreads, DType *out, const DType *h, const DType *s, const DType *in, const int n_smaples, const int in_dim, const int out_dim) { // input: n_smaples * in_dim // output: n_smaples * out_dim const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= nthreads) { return; } // nthreads is the maximum of thread indices, should be equal to in_dim // index is point index const int i_indim = index % in_dim; const int i_sample = index / in_dim; // get the target location in the output const int target = i_sample*out_dim + h[i_indim]; atomic_add(out + target, s[i_indim] * in[index]); } template <typename DType> __global__ void sketch_backward_kernel(const int nthreads, DType *in_grad, const DType *h, const DType *s, const DType *out_grad, const int n_smaples, const int in_dim, const int out_dim) { // only calculate gradient regarding x // can also calculate gradient regarding s if needed const int index = blockIdx.x * blockDim.x + threadIdx.x; const int i_indim = index % in_dim; const int i_sample = index / in_dim; const int i_outdim = i_sample*out_dim + h[i_indim]; in_grad[index] = out_grad[i_outdim] * s[i_indim]; } } // namespace cuda // CountSketch Forward template <typename DType> inline void CountSketchForward(const Tensor<gpu, 2, DType> &out, const Tensor<gpu, 2, DType> &in, const Tensor<gpu, 1, DType> &h, const Tensor<gpu, 1, DType> &s, const int n_samples, const int processing_batch_size, const int in_dim, const int out_dim) { DType *out_ptr = out.dptr_; const DType *in_ptr = in.dptr_; const DType *h_ptr = h.dptr_; const DType *s_ptr = s.dptr_; int upper_bound = n_samples/processing_batch_size; if (n_samples%processing_batch_size == 0) { upper_bound = upper_bound-1; } // guarantee there are at least one iteration upper_bound = upper_bound > 0? 
upper_bound:0; int bstart = 0; for ( int i = 0; i <= upper_bound; i++ ) { const int batchlen = min(processing_batch_size, n_samples - bstart); const int nthreads = batchlen * in_dim; // to make number of threads the same as input const int threads_per_block = min(THREADS_PER_BLOCK, nthreads); int nblocks = (nthreads + threads_per_block - 1) / threads_per_block; hipLaunchKernelGGL(( cuda::sketch_forward_kernel<DType>), dim3(nblocks), dim3(threads_per_block), 0, 0, nthreads, out_ptr+bstart*out_dim, h_ptr, s_ptr, in_ptr+bstart*in_dim, batchlen, in_dim, out_dim); // hipDeviceSynchronize(); bstart = (i+1)*batchlen; } } template<typename DType> inline void CountSketchBackward(const Tensor<gpu, 2, DType> &in_grad, const Tensor<gpu, 2, DType> &out_grad, const Tensor<gpu, 1, DType> &h, const Tensor<gpu, 1, DType> &s, const int n_samples, const int processing_batch_size, const int in_dim, const int out_dim) { DType *in_grad_ptr = in_grad.dptr_; const DType *out_grad_ptr = out_grad.dptr_; const DType *h_ptr = h.dptr_; const DType *s_ptr = s.dptr_; int upper_bound = n_samples/processing_batch_size; if (n_samples%processing_batch_size == 0) { upper_bound = upper_bound-1; } // guarantee there are at least one iteration upper_bound = upper_bound > 0? upper_bound:0; int bstart = 0; for ( int i = 0; i <= upper_bound; i++ ) { const int batchlen = min(processing_batch_size, n_samples - bstart); const int nthreads = batchlen * in_dim; // to make number of threads the same as input const int threads_per_block = min(THREADS_PER_BLOCK, nthreads); int nblocks = (nthreads + threads_per_block - 1) / threads_per_block; hipLaunchKernelGGL(( cuda::sketch_backward_kernel<DType>), dim3(nblocks), dim3(threads_per_block), 0, 0, nthreads, in_grad_ptr+bstart*in_dim, h_ptr, s_ptr, out_grad_ptr+bstart*out_dim, batchlen, in_dim, out_dim); bstart = (i+1)*batchlen; } } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(CountSketchParam param, int dtype) { Operator *op = NULL; switch (dtype) { case mshadow::kFloat32: op = new CountSketchOp<gpu, float>(param); break; case mshadow::kFloat64: op = new CountSketchOp<gpu, double>(param); break; case mshadow::kFloat16: LOG(FATAL) << "float16 count sketch layer is currently" "not supported."; break; default: LOG(FATAL) << "Unsupported type " << dtype; } return op; } } // namespace op } // namespace mxnet
d6b6216f72aceae2ff56cb2e34183ad6c1678858.cu
/*! * Copyright (c) 2015 by Contributors * \file count_sketch.cu * \brief count_sketch op * \author Chen Zhu, Yang Shi */ #include "./count_sketch-inl.h" #include <mshadow/tensor.h> #include <stdio.h> #include <algorithm> #define WARPS_PER_BLOCK 1 #define THREADS_PER_BLOCK 512 #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { // wrappers to deal with atomic add // supporting only single precision __device__ void atomic_add(float* dst, float val) { atomicAdd(dst, val); } // for double precision __device__ void atomic_add(double* address, double val) { // code example in the official document at: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html // #atomic-functions // NOLINT_NEXT_LINE(runtime/int) unsigned long long int* address_as_ull = (unsigned long long int*) address; // NOLINT(*) unsigned long long int old = *address_as_ull, assumed; // NOLINT(*) do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN // (since NaN != NaN) } while (assumed != old); } template <typename DType> __global__ void sketch_forward_kernel(const int nthreads, DType *out, const DType *h, const DType *s, const DType *in, const int n_smaples, const int in_dim, const int out_dim) { // input: n_smaples * in_dim // output: n_smaples * out_dim const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= nthreads) { return; } // nthreads is the maximum of thread indices, should be equal to in_dim // index is point index const int i_indim = index % in_dim; const int i_sample = index / in_dim; // get the target location in the output const int target = i_sample*out_dim + h[i_indim]; atomic_add(out + target, s[i_indim] * in[index]); } template <typename DType> __global__ void sketch_backward_kernel(const int nthreads, DType *in_grad, const DType *h, const DType *s, const DType *out_grad, const int n_smaples, const int in_dim, const int out_dim) { // only calculate gradient regarding x // can also calculate gradient regarding s if needed const int index = blockIdx.x * blockDim.x + threadIdx.x; const int i_indim = index % in_dim; const int i_sample = index / in_dim; const int i_outdim = i_sample*out_dim + h[i_indim]; in_grad[index] = out_grad[i_outdim] * s[i_indim]; } } // namespace cuda // CountSketch Forward template <typename DType> inline void CountSketchForward(const Tensor<gpu, 2, DType> &out, const Tensor<gpu, 2, DType> &in, const Tensor<gpu, 1, DType> &h, const Tensor<gpu, 1, DType> &s, const int n_samples, const int processing_batch_size, const int in_dim, const int out_dim) { DType *out_ptr = out.dptr_; const DType *in_ptr = in.dptr_; const DType *h_ptr = h.dptr_; const DType *s_ptr = s.dptr_; int upper_bound = n_samples/processing_batch_size; if (n_samples%processing_batch_size == 0) { upper_bound = upper_bound-1; } // guarantee there are at least one iteration upper_bound = upper_bound > 0? 
upper_bound:0; int bstart = 0; for ( int i = 0; i <= upper_bound; i++ ) { const int batchlen = min(processing_batch_size, n_samples - bstart); const int nthreads = batchlen * in_dim; // to make number of threads the same as input const int threads_per_block = min(THREADS_PER_BLOCK, nthreads); int nblocks = (nthreads + threads_per_block - 1) / threads_per_block; cuda::sketch_forward_kernel<DType><<<nblocks, threads_per_block>>>( nthreads, out_ptr+bstart*out_dim, h_ptr, s_ptr, in_ptr+bstart*in_dim, batchlen, in_dim, out_dim); // cudaThreadSynchronize(); bstart = (i+1)*batchlen; } } template<typename DType> inline void CountSketchBackward(const Tensor<gpu, 2, DType> &in_grad, const Tensor<gpu, 2, DType> &out_grad, const Tensor<gpu, 1, DType> &h, const Tensor<gpu, 1, DType> &s, const int n_samples, const int processing_batch_size, const int in_dim, const int out_dim) { DType *in_grad_ptr = in_grad.dptr_; const DType *out_grad_ptr = out_grad.dptr_; const DType *h_ptr = h.dptr_; const DType *s_ptr = s.dptr_; int upper_bound = n_samples/processing_batch_size; if (n_samples%processing_batch_size == 0) { upper_bound = upper_bound-1; } // guarantee there are at least one iteration upper_bound = upper_bound > 0? upper_bound:0; int bstart = 0; for ( int i = 0; i <= upper_bound; i++ ) { const int batchlen = min(processing_batch_size, n_samples - bstart); const int nthreads = batchlen * in_dim; // to make number of threads the same as input const int threads_per_block = min(THREADS_PER_BLOCK, nthreads); int nblocks = (nthreads + threads_per_block - 1) / threads_per_block; cuda::sketch_backward_kernel<DType><<<nblocks, threads_per_block>>>( nthreads, in_grad_ptr+bstart*in_dim, h_ptr, s_ptr, out_grad_ptr+bstart*out_dim, batchlen, in_dim, out_dim); bstart = (i+1)*batchlen; } } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(CountSketchParam param, int dtype) { Operator *op = NULL; switch (dtype) { case mshadow::kFloat32: op = new CountSketchOp<gpu, float>(param); break; case mshadow::kFloat64: op = new CountSketchOp<gpu, double>(param); break; case mshadow::kFloat16: LOG(FATAL) << "float16 count sketch layer is currently" "not supported."; break; default: LOG(FATAL) << "Unsupported type " << dtype; } return op; } } // namespace op } // namespace mxnet
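// Illustrative sketch, not part of the MXNet operator above: a host reference for what
// sketch_forward_kernel accumulates. Per sample, every input feature j contributes
// s[j] * in[j] to output bucket h[j]. The operator stores h and s as floating-point
// tensors; integer/float vectors are used here purely for clarity, and the function
// name is a placeholder.
#include <vector>

static void count_sketch_forward_ref(const std::vector<float>& in,   // n_samples * in_dim
                                     const std::vector<int>& h,      // in_dim, values in [0, out_dim)
                                     const std::vector<float>& s,    // in_dim, values in {-1, +1}
                                     std::vector<float>& out,        // n_samples * out_dim, zero-initialised
                                     int n_samples, int in_dim, int out_dim) {
  for (int n = 0; n < n_samples; ++n)
    for (int j = 0; j < in_dim; ++j)
      out[n * out_dim + h[j]] += s[j] * in[n * in_dim + j];
}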
ac6dd111d905b14a7384120c73764839dc2c143a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=64 --blockDim=256 --warp-sync=32 template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n); template __global__ void reduce6<int,256,false>(int *g_idata, int *g_odata, unsigned int n); #include "common.h" template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile T *smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
ac6dd111d905b14a7384120c73764839dc2c143a.cu
//pass //--gridDim=64 --blockDim=256 --warp-sync=32 template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n); template __global__ void reduce6<int,256,false>(int *g_idata, int *g_odata, unsigned int n); #include "common.h" template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile T *smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
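// Illustrative sketch, not a patch to reduce6 above: on CUDA 9+ the warp-synchronous,
// volatile shared-memory tail of the reduction is usually replaced with
// __shfl_down_sync, which does not rely on implicit warp synchrony. Shown for int
// only; reduce6 itself is templated on T.
__device__ inline int warp_reduce_sum(int val) {
  for (int offset = 16; offset > 0; offset >>= 1)
    val += __shfl_down_sync(0xffffffffu, val, offset);  // full-warp mask
  return val;  // lane 0 ends up holding the warp's sum
}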
d93663d34eb49a5168b2765535da0d857c659978.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * JCuda - Java bindings for NVIDIA CUDA driver and runtime API * http://www.jcuda.org * * * This code is based on the NVIDIA 'reduction' CUDA sample, * Copyright 1993-2010 NVIDIA Corporation. */ extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" __global__ void backwardError(int n, double *actual, double *target, double* out) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { out[i] += (actual[i] - target[i]); } }
d93663d34eb49a5168b2765535da0d857c659978.cu
#include "includes.h" /* * JCuda - Java bindings for NVIDIA CUDA driver and runtime API * http://www.jcuda.org * * * This code is based on the NVIDIA 'reduction' CUDA sample, * Copyright 1993-2010 NVIDIA Corporation. */ extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" __global__ void backwardError(int n, double *actual, double *target, double* out) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { out[i] += (actual[i] - target[i]); } }
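// Illustrative sketch only: the backwardError kernel above is normally launched from
// JCuda, so this C++ launcher is just an assumed illustration of its indexing contract
// (one thread per element, guarded by i < n) with the usual ceiling-division grid size.
// The helper name and block size are placeholders, not part of the original bindings.
static void launch_backward_error(int n, double* d_actual, double* d_target, double* d_out) {
  const int block = 256;
  const int grid = (n + block - 1) / block;
  backwardError<<<grid, block>>>(n, d_actual, d_target, d_out);
}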
9c63c6f5be4197433e97a19b22f8fe56e9359224.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <iostream> #include <cstdio> #include <fstream> #include "utils.h" #include "timer.h" #include <cstdio> #if defined(_WIN16) || defined(_WIN32) || defined(_WIN64) #include <Windows.h> #else #include <sys/time.h> #endif #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/normal_distribution.h> #include <thrust/random/uniform_int_distribution.h> #include "reference_calc.h" void computeHistogram(const unsigned int *const d_vals, unsigned int* const d_histo, const unsigned int numBins, const unsigned int numElems); int main(void) { const unsigned int numBins = 1024; const unsigned int numElems = 10000 * numBins; const float stddev = 100.f; unsigned int *vals = new unsigned int[numElems]; unsigned int *h_vals = new unsigned int[numElems]; unsigned int *h_studentHisto = new unsigned int[numBins]; unsigned int *h_refHisto = new unsigned int[numBins]; #if defined(_WIN16) || defined(_WIN32) || defined(_WIN64) srand(GetTickCount()); #else timeval tv; gettimeofday(&tv, NULL); srand(tv.tv_usec); #endif //make the mean unpredictable, but close enough to the middle //so that timings are unaffected unsigned int mean = rand() % 100 + 462; //Output mean so that grading can happen with the same inputs std::cout << mean << std::endl; thrust::minstd_rand rng; // random::experimental went away in thrust 1.7.0 #if (THRUST_MAJOR_VERSION <= 1) && (THRUST_MINOR_VERSION < 7) thrust::random::experimental::normal_distribution<float> normalDist((float)mean, stddev); #else thrust::random::normal_distribution<float> normalDist((float)mean, stddev); #endif // Generate the random values for (size_t i = 0; i < numElems; ++i) { vals[i] = ::min((unsigned int) ::max((int)normalDist(rng), 0), numBins - 1); } unsigned int *d_vals, *d_histo; GpuTimer timer; checkCudaErrors(hipMalloc(&d_vals, sizeof(unsigned int) * numElems)); checkCudaErrors(hipMalloc(&d_histo, sizeof(unsigned int) * numBins)); checkCudaErrors(hipMemset(d_histo, 0, sizeof(unsigned int) * numBins)); checkCudaErrors(hipMemcpy(d_vals, vals, sizeof(unsigned int) * numElems, hipMemcpyHostToDevice)); timer.Start(); computeHistogram(d_vals, d_histo, numBins, numElems); timer.Stop(); int err = printf("Your code ran in: %f msecs.\n", timer.Elapsed()); if (err < 0) { //Couldn't print! Probably the student closed stdout - bad news std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl; exit(1); } // copy the student-computed histogram back to the host checkCudaErrors(hipMemcpy(h_studentHisto, d_histo, sizeof(unsigned int) * numBins, hipMemcpyDeviceToHost)); //generate reference for the given mean reference_calculation(vals, h_refHisto, numBins, numElems); //Now do the comparison checkResultsExact(h_refHisto, h_studentHisto, numBins); delete[] h_vals; delete[] h_refHisto; delete[] h_studentHisto; hipFree(d_vals); hipFree(d_histo); return 0; }
9c63c6f5be4197433e97a19b22f8fe56e9359224.cu
#include <cstdlib> #include <iostream> #include <cstdio> #include <fstream> #include "utils.h" #include "timer.h" #include <cstdio> #if defined(_WIN16) || defined(_WIN32) || defined(_WIN64) #include <Windows.h> #else #include <sys/time.h> #endif #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/normal_distribution.h> #include <thrust/random/uniform_int_distribution.h> #include "reference_calc.h" void computeHistogram(const unsigned int *const d_vals, unsigned int* const d_histo, const unsigned int numBins, const unsigned int numElems); int main(void) { const unsigned int numBins = 1024; const unsigned int numElems = 10000 * numBins; const float stddev = 100.f; unsigned int *vals = new unsigned int[numElems]; unsigned int *h_vals = new unsigned int[numElems]; unsigned int *h_studentHisto = new unsigned int[numBins]; unsigned int *h_refHisto = new unsigned int[numBins]; #if defined(_WIN16) || defined(_WIN32) || defined(_WIN64) srand(GetTickCount()); #else timeval tv; gettimeofday(&tv, NULL); srand(tv.tv_usec); #endif //make the mean unpredictable, but close enough to the middle //so that timings are unaffected unsigned int mean = rand() % 100 + 462; //Output mean so that grading can happen with the same inputs std::cout << mean << std::endl; thrust::minstd_rand rng; // random::experimental went away in thrust 1.7.0 #if (THRUST_MAJOR_VERSION <= 1) && (THRUST_MINOR_VERSION < 7) thrust::random::experimental::normal_distribution<float> normalDist((float)mean, stddev); #else thrust::random::normal_distribution<float> normalDist((float)mean, stddev); #endif // Generate the random values for (size_t i = 0; i < numElems; ++i) { vals[i] = std::min((unsigned int) std::max((int)normalDist(rng), 0), numBins - 1); } unsigned int *d_vals, *d_histo; GpuTimer timer; checkCudaErrors(cudaMalloc(&d_vals, sizeof(unsigned int) * numElems)); checkCudaErrors(cudaMalloc(&d_histo, sizeof(unsigned int) * numBins)); checkCudaErrors(cudaMemset(d_histo, 0, sizeof(unsigned int) * numBins)); checkCudaErrors(cudaMemcpy(d_vals, vals, sizeof(unsigned int) * numElems, cudaMemcpyHostToDevice)); timer.Start(); computeHistogram(d_vals, d_histo, numBins, numElems); timer.Stop(); int err = printf("Your code ran in: %f msecs.\n", timer.Elapsed()); if (err < 0) { //Couldn't print! Probably the student closed stdout - bad news std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl; exit(1); } // copy the student-computed histogram back to the host checkCudaErrors(cudaMemcpy(h_studentHisto, d_histo, sizeof(unsigned int) * numBins, cudaMemcpyDeviceToHost)); //generate reference for the given mean reference_calculation(vals, h_refHisto, numBins, numElems); //Now do the comparison checkResultsExact(h_refHisto, h_studentHisto, numBins); delete[] h_vals; delete[] h_refHisto; delete[] h_studentHisto; cudaFree(d_vals); cudaFree(d_histo); return 0; }
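// Illustrative sketch only: computeHistogram is merely declared in the harness above
// and implemented elsewhere. The kernel below (placeholder name naive_histo) is one
// minimal atomicAdd-based possibility, shown for illustration; it is neither the
// reference nor the student solution referenced by the harness.
__global__ void naive_histo(const unsigned int* vals, unsigned int* histo,
                            unsigned int numElems) {
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < numElems)
    atomicAdd(&histo[vals[i]], 1u);   // vals are already clamped to [0, numBins) by the harness
}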
cf6a2d78bf1c4c6f20f28cb37fedf92968fffaf8.hip
// !!! This is a file automatically generated by hipify!!! // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE #define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/allocators.cu", line) #include "awkward/kernel-utils.h" void* awkward_malloc(int64_t bytelength) { if (bytelength == 0) { // std::cout << "CUDA malloc at nullptr (0 bytes)" << std::endl; return nullptr; } else { void* out = nullptr; hipError_t err = hipMallocManaged(&out, bytelength); if (err != hipError_t::hipSuccess) { // std::cout << "CUDA malloc failed (" << bytelength << " bytes)" << std::endl; return nullptr; } // std::cout << "CUDA malloc at " << out << " (" << bytelength << " bytes)" << std::endl; return out; } } void awkward_free(void const *ptr) { // std::cout << "CUDA free at " << ptr << std::endl; hipFree((void*)ptr); }
cf6a2d78bf1c4c6f20f28cb37fedf92968fffaf8.cu
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE #define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/allocators.cu", line) #include "awkward/kernel-utils.h" void* awkward_malloc(int64_t bytelength) { if (bytelength == 0) { // std::cout << "CUDA malloc at nullptr (0 bytes)" << std::endl; return nullptr; } else { void* out = nullptr; cudaError_t err = cudaMallocManaged(&out, bytelength); if (err != cudaError::cudaSuccess) { // std::cout << "CUDA malloc failed (" << bytelength << " bytes)" << std::endl; return nullptr; } // std::cout << "CUDA malloc at " << out << " (" << bytelength << " bytes)" << std::endl; return out; } } void awkward_free(void const *ptr) { // std::cout << "CUDA free at " << ptr << std::endl; cudaFree((void*)ptr); }
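// Illustrative usage sketch, not part of the awkward allocators file above: because
// awkward_malloc uses cudaMallocManaged, the returned pointer is directly addressable
// from host code (no explicit cudaMemcpy needed). The declarations are copied from the
// definitions above; the helper name is a placeholder and error handling is reduced to
// a null check (awkward_malloc returns nullptr for zero bytes or on failure).
#include <cstdint>

void* awkward_malloc(int64_t bytelength);    // declared as defined in the file above
void awkward_free(void const* ptr);

static int64_t* make_iota(int64_t length) {
  int64_t* data = reinterpret_cast<int64_t*>(awkward_malloc(length * sizeof(int64_t)));
  if (data == nullptr) return nullptr;       // zero-length request or allocation failure
  for (int64_t i = 0; i < length; i++)
    data[i] = i;                             // host write into managed memory
  return data;                               // release later with awkward_free(data)
}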
d0b8ca01b002df0d93cb16e13064c9ae655534c5.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cm.h" struct cmp_functor_str { const char * source; const char *str; bool * dest; const unsigned int * len; cmp_functor_str(const char * _source, const char * _str, bool * _dest, const unsigned int * _len): source(_source), str(_str), dest(_dest), len(_len) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int length = len[0]; unsigned int start = i*length; for(unsigned int z = 0; z < length ; z++) { if(source[start+z] != str[z]) { dest[i] = 0; return; }; }; dest[i] = 1; } }; struct cmp_functor_str_like_left { const char * source; const char *str; bool * dest; const unsigned int * len; cmp_functor_str_like_left(const char * _source, const char * _str, bool * _dest, const unsigned int * _len): source(_source), str(_str), dest(_dest), len(_len) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int length = len[0]; unsigned int str_length = len[1]; unsigned int start = i*length; for(unsigned int z = 0; z < str_length ; z++) { if(source[start+z] != str[z]) { dest[i] = 0; return; }; }; dest[i] = 1; } }; struct cmp_functor_str_like_right { const char * source; const char *str; bool * dest; const unsigned int * len; cmp_functor_str_like_right(const char * _source, const char * _str, bool * _dest, const unsigned int * _len): source(_source), str(_str), dest(_dest), len(_len) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int length = len[0]; unsigned int str_length = len[1]; unsigned int start = i*length; unsigned int j = 1; while(source[(start+length)-j] == 0) j++; for(unsigned int z = 0; z < str_length ; z++) { if(source[((start+length)-j)-z] != str[str_length-z-1]) { dest[i] = 0; return; }; }; dest[i] = 1; } }; void filter(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, CudaSet* b, unsigned int segment) { stack<string> exe_type; stack<string> exe_value; stack<int_type*> exe_vectors; stack<float_type*> exe_vectors_f; stack<int_type> exe_nums; stack<bool*> bool_vectors; stack<float_type> exe_nums_f; string s1, s2, s1_val, s2_val; int_type n1, n2, res; float_type n1_f, n2_f, res_f; for(int i=0; !op_type.empty(); ++i, op_type.pop()) { string ss = op_type.front(); if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("VECTOR") == 0 || ss.compare("FLOAT") == 0 || ss.compare("STRING") == 0) { exe_type.push(ss); if (ss.compare("NUMBER") == 0) { exe_nums.push(op_nums.front()); op_nums.pop(); } else if (ss.compare("NAME") == 0 || ss.compare("STRING") == 0) { exe_value.push(op_value.front()); op_value.pop(); } if (ss.compare("FLOAT") == 0) { exe_nums_f.push(op_nums_f.front()); op_nums_f.pop(); } } else { if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) { // get 2 values from the stack s1 = exe_type.top(); 
exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); if (ss.compare("ADD") == 0 ) res = n1+n2; else if (ss.compare("MUL") == 0 ) res = n1*n2; else if (ss.compare("DIV") == 0 ) res = n1/n2; else res = n1-n2; thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount); thrust::sequence(p, p+(a->mRecCount),res,(int_type)0); exe_type.push("VECTOR"); exe_vectors.push(thrust::raw_pointer_cast(p)); } else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2_f = exe_nums_f.top(); exe_nums_f.pop(); if (ss.compare("ADD") == 0 ) res_f = n1_f+n2_f; else if (ss.compare("MUL") == 0 ) res_f = n1_f*n2_f; else if (ss.compare("DIV") == 0 ) res_f = n1_f/n2_f; else res_f = n1_f-n2_f; thrust::device_ptr<float_type> p = thrust::device_malloc<float_type>(a->mRecCount); thrust::sequence(p, p+(a->mRecCount),res_f,(float_type)0); exe_type.push("VECTOR F"); exe_vectors_f.push(thrust::raw_pointer_cast(p)); } else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) { s1_val = exe_value.top(); exe_value.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR F"); if (a->type[(a->columnNames)[s1_val]] == 1) { float_type* t = a->get_float_type_by_name(s1_val); exe_vectors_f.push(a->op(t,n1_f,ss,1)); } else { int_type* t = a->get_int_by_name(s1_val); exe_vectors_f.push(a->op(t,n1_f,ss,1)); }; } else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR F"); if (a->type[(a->columnNames)[s2_val]] == 1) { float_type* t = a->get_float_type_by_name(s2_val); exe_vectors_f.push(a->op(t,n1_f,ss,0)); } else { int_type* t = a->get_int_by_name(s2_val); exe_vectors_f.push(a->op(t,n1_f,ss,0)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { s1_val = exe_value.top(); exe_value.pop(); n1 = exe_nums.top(); exe_nums.pop(); if (a->type[(a->columnNames)[s1_val]] == 1) { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,(float_type)n1,ss,1)); } else { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,n1,ss,1)); }; } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 1) { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,(float_type)n1,ss,0)); } else { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,n1,ss,0)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t1 = a->get_int_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,t1,ss,0)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t1,t,ss,0)); }; } else { float_type* t = a->get_float_type_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t1,t,ss,0)); } else { float_type* t1 = 
a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,t1,ss,0)); }; } } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) { s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,s3,ss,0)); //free s3 hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,0)); hipFree(s3); } } else { float_type* t = a->get_float_type_by_name(s2_val); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,t, ss,0)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,0)); hipFree(s3); } }; } else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,s3,ss,1)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,1)); hipFree(s3); } } else { float_type* t = a->get_float_type_by_name(s1_val); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,t,ss,1)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,1)); hipFree(s3); } }; } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3,n1, ss,1)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,(float_type)n1, ss,1)); hipFree(s3); } } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") || s2.compare("VECTOR F") == 0) { n1 = exe_nums.top(); exe_nums.pop(); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3,n1, ss,0)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,(float_type)n1, ss,0)); hipFree(s3); } } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,1)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,1)); hipFree(s3); } } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) { n1_f = exe_nums_f.top(); exe_nums.pop(); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR 
F"); exe_vectors_f.push(a->op(s3,n1_f, ss,0)); hipFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,0)); hipFree(s3); } } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); int_type* s4 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3, s4,ss,1)); hipFree(s3); hipFree(s4); } else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,1)); hipFree(s3); hipFree(s4); } else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,0)); hipFree(s3); hipFree(s4); } else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,1)); hipFree(s3); hipFree(s4); } } else if (ss.compare("CMP") == 0) { int_type cmp_type = op_nums.front(); op_nums.pop(); s1 = exe_type.top(); exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1,n2,cmp_type)); } else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,n2_f,cmp_type)); } else if (s1.compare("FLOAT") == 0 && s2.compare("NUMBER") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,float_type(n2),cmp_type)); } else if (s1.compare("NUMBER") == 0 && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,float_type(n2),cmp_type)); } else if (s1.compare("STRING") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); unsigned int like_start = 0; if(s1_val[0] == '%') { like_start = 1; s1_val.erase(0,1); }; exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); unsigned int colIndex1 = (a->columnNames).find(s2_val)->second; void* d_v; hipMalloc((void **) &d_v, 8); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v); dd_v[0] = a->char_size[a->type_index[colIndex1]]; dd_v[1] = (unsigned int)s1_val.length(); void* d_res; hipMalloc((void **) &d_res, a->mRecCount); void* d_str; thrust::counting_iterator<unsigned int> begin(0); if(!like_start) { hipMalloc((void **) &d_str, a->char_size[a->type_index[colIndex1]]); hipMemset(d_str,0,a->char_size[a->type_index[colIndex1]]); hipMemcpy( d_str, (void *) s1_val.c_str(), s1_val.length(), hipMemcpyHostToDevice); cmp_functor_str ff(a->d_columns_char[a->type_index[colIndex1]], (char*)d_str, (bool*)d_res, (unsigned int*)d_v); thrust::for_each(begin, begin + a->mRecCount, ff); } else { hipMalloc((void **) &d_str, s1_val.length()); hipMemcpy( d_str, (void *) s1_val.c_str(), s1_val.length(), hipMemcpyHostToDevice); 
cmp_functor_str_like_right ff(a->d_columns_char[a->type_index[colIndex1]], (char*)d_str, (bool*)d_res, (unsigned int*)d_v); thrust::for_each(begin, begin + a->mRecCount, ff); }; exe_type.push("VECTOR"); bool_vectors.push((bool*)d_res); hipFree(d_v); hipFree(d_str); } else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); bool like_start = 0; if(s1_val[0] == '%') { like_start = 1; s1_val.erase(0,1); }; unsigned int colIndex1 = (a->columnNames).find(s1_val)->second; void* d_v; hipMalloc((void **) &d_v, 4); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v); dd_v[0] = a->char_size[a->type_index[colIndex1]]; void* d_res; hipMalloc((void **) &d_res, a->mRecCount); void* d_str; hipMalloc((void **) &d_str, a->char_size[a->type_index[colIndex1]]); hipMemset(d_str,0,a->char_size[a->type_index[colIndex1]]); hipMemcpy( d_str, (void *) s1_val.c_str(), s1_val.length(), hipMemcpyHostToDevice); thrust::counting_iterator<unsigned int> begin(0); if(!like_start) { cmp_functor_str ff(a->d_columns_char[a->type_index[colIndex1]], (char*)d_str, (bool*)d_res, (unsigned int*)d_v); thrust::for_each(begin, begin + a->mRecCount, ff); } else { cmp_functor_str_like_right ff(a->d_columns_char[a->type_index[colIndex1]], (char*)d_str, (bool*)d_res, (unsigned int*)d_v); thrust::for_each(begin, begin + a->mRecCount, ff); }; exe_type.push("VECTOR"); bool_vectors.push((bool*)d_res); hipFree(d_v); hipFree(d_str); } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(float_type)n1,cmp_type)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(float_type)n1,cmp_type)); }; } else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(int_type)n1_f,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); }; } else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); n1_f = exe_nums_f.top(); exe_nums_f.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(int_type)n1_f,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); }; } else if (s1.compare("VECTOR F") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); 
n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,(float_type)n1,cmp_type)); hipFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); hipFree(s3); } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,(float_type)n1,cmp_type)); hipFree(s3); } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); hipFree(s3); } else if (s1.compare("VECTOR F") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); hipFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,(int_type)n1_f,cmp_type)); hipFree(s3); } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); hipFree(s3); } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,(int_type)n1_f,cmp_type)); hipFree(s3); } else if (s1.compare("VECTOR F") == 0 && s2.compare("NAME") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(s3,t,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; hipFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("NAME") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; hipFree(s3); } else if (s1.compare("NAME") == 0 && s2.compare("VECTOR F") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(s3,t,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; hipFree(s3); } else if (s1.compare("NAME") == 0 && s2.compare("VECTOR") == 0) { cmp_type = reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); 
exe_vectors.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; hipFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s2,s3,cmp_type)); hipFree(s3); hipFree(s2); } else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); float_type* s2 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s2,s3,cmp_type)); hipFree(s3); hipFree(s2); } else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,s2,cmp_type)); hipFree(s3); hipFree(s2); } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,s2,cmp_type)); hipFree(s3); hipFree(s2); } else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t1,t,cmp_type)); } else { float_type* t1 = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t1,t,cmp_type)); }; } else { cmp_type = reverse_op(cmp_type); float_type* t = a->get_float_type_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t,t1,cmp_type)); } else { float_type* t1 = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,t1,cmp_type)); }; } } } else if (ss.compare("AND") == 0) { bool* s3 = bool_vectors.top(); bool_vectors.pop(); bool* s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->logical_and(s2,s3)); } else if (ss.compare("OR") == 0) { bool* s3 = bool_vectors.top(); bool_vectors.pop(); bool* s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->logical_or(s2,s3)); } else { cout << "found nothing " << endl; } }; }; thrust::device_ptr<bool> bp((bool*)bool_vectors.top()); b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1); b->prm_index = 'R'; thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount), bp, b->prm_d.begin(), thrust::identity<bool>()); //hipMemcpy((void**)b->prm[segment], (void**)(thrust::raw_pointer_cast(dev_p.data())), 4*count, hipMemcpyDeviceToHost); if(segment == a->segCount-1) b->type_index = a->type_index; hipFree(bool_vectors.top()); return; }
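The hipified filter above ends by turning the device-side boolean match vector into a compacted list of surviving row indices. The following standalone sketch isolates that pattern — thrust::count sizes the result, then thrust::copy_if over a counting iterator gathers the indices of the rows whose flag passed the WHERE evaluation. All names and the sample mask are illustrative, not taken from the file; only Thrust is used, so it builds with either nvcc or hipcc.

#include <thrust/device_vector.h>
#include <thrust/count.h>
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstdio>

int main()
{
    // Boolean mask as the comparison step would leave it: one flag per row.
    bool host_mask[] = { true, false, true, true, false, false, true, false };
    const unsigned int row_count = sizeof(host_mask) / sizeof(host_mask[0]);
    thrust::device_vector<bool> mask(host_mask, host_mask + row_count);

    // Counterpart of b->mRecCount = thrust::count(bp, bp + mRecCount, 1).
    unsigned int selected = thrust::count(mask.begin(), mask.end(), true);

    // Counterpart of the copy_if over a counting iterator: keep the indices
    // of the rows whose flag is set.
    thrust::device_vector<unsigned int> row_index(selected);
    thrust::copy_if(thrust::make_counting_iterator(0u),
                    thrust::make_counting_iterator(row_count),
                    mask.begin(),
                    row_index.begin(),
                    thrust::identity<bool>());

    printf("%u of %u rows selected\n", selected, row_count);
    for (unsigned int i = 0; i < selected; ++i)
        printf("  row %u\n", (unsigned int)row_index[i]);
    return 0;
}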
d0b8ca01b002df0d93cb16e13064c9ae655534c5.cu
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cm.h" struct cmp_functor_str { const char * source; const char *str; bool * dest; const unsigned int * len; cmp_functor_str(const char * _source, const char * _str, bool * _dest, const unsigned int * _len): source(_source), str(_str), dest(_dest), len(_len) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int length = len[0]; unsigned int start = i*length; for(unsigned int z = 0; z < length ; z++) { if(source[start+z] != str[z]) { dest[i] = 0; return; }; }; dest[i] = 1; } }; struct cmp_functor_str_like_left { const char * source; const char *str; bool * dest; const unsigned int * len; cmp_functor_str_like_left(const char * _source, const char * _str, bool * _dest, const unsigned int * _len): source(_source), str(_str), dest(_dest), len(_len) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int length = len[0]; unsigned int str_length = len[1]; unsigned int start = i*length; for(unsigned int z = 0; z < str_length ; z++) { if(source[start+z] != str[z]) { dest[i] = 0; return; }; }; dest[i] = 1; } }; struct cmp_functor_str_like_right { const char * source; const char *str; bool * dest; const unsigned int * len; cmp_functor_str_like_right(const char * _source, const char * _str, bool * _dest, const unsigned int * _len): source(_source), str(_str), dest(_dest), len(_len) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int length = len[0]; unsigned int str_length = len[1]; unsigned int start = i*length; unsigned int j = 1; while(source[(start+length)-j] == 0) j++; for(unsigned int z = 0; z < str_length ; z++) { if(source[((start+length)-j)-z] != str[str_length-z-1]) { dest[i] = 0; return; }; }; dest[i] = 1; } }; void filter(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, CudaSet* b, unsigned int segment) { stack<string> exe_type; stack<string> exe_value; stack<int_type*> exe_vectors; stack<float_type*> exe_vectors_f; stack<int_type> exe_nums; stack<bool*> bool_vectors; stack<float_type> exe_nums_f; string s1, s2, s1_val, s2_val; int_type n1, n2, res; float_type n1_f, n2_f, res_f; for(int i=0; !op_type.empty(); ++i, op_type.pop()) { string ss = op_type.front(); if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("VECTOR") == 0 || ss.compare("FLOAT") == 0 || ss.compare("STRING") == 0) { exe_type.push(ss); if (ss.compare("NUMBER") == 0) { exe_nums.push(op_nums.front()); op_nums.pop(); } else if (ss.compare("NAME") == 0 || ss.compare("STRING") == 0) { exe_value.push(op_value.front()); op_value.pop(); } if (ss.compare("FLOAT") == 0) { exe_nums_f.push(op_nums_f.front()); op_nums_f.pop(); } } else { if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) { // get 2 values from the stack s1 = exe_type.top(); exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if 
(s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); if (ss.compare("ADD") == 0 ) res = n1+n2; else if (ss.compare("MUL") == 0 ) res = n1*n2; else if (ss.compare("DIV") == 0 ) res = n1/n2; else res = n1-n2; thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount); thrust::sequence(p, p+(a->mRecCount),res,(int_type)0); exe_type.push("VECTOR"); exe_vectors.push(thrust::raw_pointer_cast(p)); } else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2_f = exe_nums_f.top(); exe_nums_f.pop(); if (ss.compare("ADD") == 0 ) res_f = n1_f+n2_f; else if (ss.compare("MUL") == 0 ) res_f = n1_f*n2_f; else if (ss.compare("DIV") == 0 ) res_f = n1_f/n2_f; else res_f = n1_f-n2_f; thrust::device_ptr<float_type> p = thrust::device_malloc<float_type>(a->mRecCount); thrust::sequence(p, p+(a->mRecCount),res_f,(float_type)0); exe_type.push("VECTOR F"); exe_vectors_f.push(thrust::raw_pointer_cast(p)); } else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) { s1_val = exe_value.top(); exe_value.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR F"); if (a->type[(a->columnNames)[s1_val]] == 1) { float_type* t = a->get_float_type_by_name(s1_val); exe_vectors_f.push(a->op(t,n1_f,ss,1)); } else { int_type* t = a->get_int_by_name(s1_val); exe_vectors_f.push(a->op(t,n1_f,ss,1)); }; } else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR F"); if (a->type[(a->columnNames)[s2_val]] == 1) { float_type* t = a->get_float_type_by_name(s2_val); exe_vectors_f.push(a->op(t,n1_f,ss,0)); } else { int_type* t = a->get_int_by_name(s2_val); exe_vectors_f.push(a->op(t,n1_f,ss,0)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { s1_val = exe_value.top(); exe_value.pop(); n1 = exe_nums.top(); exe_nums.pop(); if (a->type[(a->columnNames)[s1_val]] == 1) { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,(float_type)n1,ss,1)); } else { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,n1,ss,1)); }; } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 1) { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,(float_type)n1,ss,0)); } else { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,n1,ss,0)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t1 = a->get_int_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,t1,ss,0)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t1,t,ss,0)); }; } else { float_type* t = a->get_float_type_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t1,t,ss,0)); } else { float_type* t1 = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR F"); 
exe_vectors_f.push(a->op(t,t1,ss,0)); }; } } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) { s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,s3,ss,0)); //free s3 cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,0)); cudaFree(s3); } } else { float_type* t = a->get_float_type_by_name(s2_val); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,t, ss,0)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,0)); cudaFree(s3); } }; } else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(t,s3,ss,1)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,1)); cudaFree(s3); } } else { float_type* t = a->get_float_type_by_name(s1_val); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,t,ss,1)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(t,s3,ss,1)); cudaFree(s3); } }; } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3,n1, ss,1)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,(float_type)n1, ss,1)); cudaFree(s3); } } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") || s2.compare("VECTOR F") == 0) { n1 = exe_nums.top(); exe_nums.pop(); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3,n1, ss,0)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,(float_type)n1, ss,0)); cudaFree(s3); } } else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); if (s1.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,1)); cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,1)); cudaFree(s3); } } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) { n1_f = exe_nums_f.top(); exe_nums.pop(); if (s2.compare("VECTOR") == 0 ) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,0)); 
cudaFree(s3); } else { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3,n1_f, ss,0)); cudaFree(s3); } } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); int_type* s4 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); exe_vectors.push(a->op(s3, s4,ss,1)); cudaFree(s3); cudaFree(s4); } else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,1)); cudaFree(s3); cudaFree(s4); } else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,0)); cudaFree(s3); cudaFree(s4); } else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); float_type* s4 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR F"); exe_vectors_f.push(a->op(s3, s4,ss,1)); cudaFree(s3); cudaFree(s4); } } else if (ss.compare("CMP") == 0) { int_type cmp_type = op_nums.front(); op_nums.pop(); s1 = exe_type.top(); exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1,n2,cmp_type)); } else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,n2_f,cmp_type)); } else if (s1.compare("FLOAT") == 0 && s2.compare("NUMBER") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,float_type(n2),cmp_type)); } else if (s1.compare("NUMBER") == 0 && s2.compare("FLOAT") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); n2 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(n1_f,float_type(n2),cmp_type)); } else if (s1.compare("STRING") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); unsigned int like_start = 0; if(s1_val[0] == '%') { like_start = 1; s1_val.erase(0,1); }; exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); unsigned int colIndex1 = (a->columnNames).find(s2_val)->second; void* d_v; cudaMalloc((void **) &d_v, 8); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v); dd_v[0] = a->char_size[a->type_index[colIndex1]]; dd_v[1] = (unsigned int)s1_val.length(); void* d_res; cudaMalloc((void **) &d_res, a->mRecCount); void* d_str; thrust::counting_iterator<unsigned int> begin(0); if(!like_start) { cudaMalloc((void **) &d_str, a->char_size[a->type_index[colIndex1]]); cudaMemset(d_str,0,a->char_size[a->type_index[colIndex1]]); cudaMemcpy( d_str, (void *) s1_val.c_str(), s1_val.length(), cudaMemcpyHostToDevice); cmp_functor_str ff(a->d_columns_char[a->type_index[colIndex1]], (char*)d_str, (bool*)d_res, (unsigned int*)d_v); thrust::for_each(begin, begin + a->mRecCount, ff); } else { cudaMalloc((void **) &d_str, s1_val.length()); cudaMemcpy( d_str, (void *) s1_val.c_str(), s1_val.length(), cudaMemcpyHostToDevice); cmp_functor_str_like_right 
ff(a->d_columns_char[a->type_index[colIndex1]], (char*)d_str, (bool*)d_res, (unsigned int*)d_v); thrust::for_each(begin, begin + a->mRecCount, ff); }; exe_type.push("VECTOR"); bool_vectors.push((bool*)d_res); cudaFree(d_v); cudaFree(d_str); } else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); bool like_start = 0; if(s1_val[0] == '%') { like_start = 1; s1_val.erase(0,1); }; unsigned int colIndex1 = (a->columnNames).find(s1_val)->second; void* d_v; cudaMalloc((void **) &d_v, 4); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v); dd_v[0] = a->char_size[a->type_index[colIndex1]]; void* d_res; cudaMalloc((void **) &d_res, a->mRecCount); void* d_str; cudaMalloc((void **) &d_str, a->char_size[a->type_index[colIndex1]]); cudaMemset(d_str,0,a->char_size[a->type_index[colIndex1]]); cudaMemcpy( d_str, (void *) s1_val.c_str(), s1_val.length(), cudaMemcpyHostToDevice); thrust::counting_iterator<unsigned int> begin(0); if(!like_start) { cmp_functor_str ff(a->d_columns_char[a->type_index[colIndex1]], (char*)d_str, (bool*)d_res, (unsigned int*)d_v); thrust::for_each(begin, begin + a->mRecCount, ff); } else { cmp_functor_str_like_right ff(a->d_columns_char[a->type_index[colIndex1]], (char*)d_str, (bool*)d_res, (unsigned int*)d_v); thrust::for_each(begin, begin + a->mRecCount, ff); }; exe_type.push("VECTOR"); bool_vectors.push((bool*)d_res); cudaFree(d_v); cudaFree(d_str); } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(float_type)n1,cmp_type)); }; } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); n1 = exe_nums.top(); exe_nums.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(float_type)n1,cmp_type)); }; } else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) { n1_f = exe_nums_f.top(); exe_nums_f.pop(); s1_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(int_type)n1_f,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s1_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); }; } else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); n1_f = exe_nums_f.top(); exe_nums_f.pop(); s2_val = exe_value.top(); exe_value.pop(); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,(int_type)n1_f,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); exe_type.push("VECTOR"); bool_vectors.push(a->compare(t,n1_f,cmp_type)); }; } else if (s1.compare("VECTOR F") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1 = 
exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,(float_type)n1,cmp_type)); cudaFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("NUMBER") == 0) { cmp_type = reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); cudaFree(s3); } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,(float_type)n1,cmp_type)); cudaFree(s3); } else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1 = exe_nums.top(); exe_nums.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1,cmp_type)); cudaFree(s3); } else if (s1.compare("VECTOR F") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); cudaFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("FLOAT") == 0) { cmp_type = reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,(int_type)n1_f,cmp_type)); cudaFree(s3); } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,n1_f,cmp_type)); cudaFree(s3); } else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); n1_f = exe_nums_f.top(); exe_nums_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,(int_type)n1_f,cmp_type)); cudaFree(s3); } else if (s1.compare("VECTOR F") == 0 && s2.compare("NAME") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(s3,t,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; cudaFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("NAME") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; cudaFree(s3); } else if (s1.compare("NAME") == 0 && s2.compare("VECTOR F") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(s3,t,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; cudaFree(s3); } else if (s1.compare("NAME") == 0 && s2.compare("VECTOR") == 0) { cmp_type = reverse_op(cmp_type); int_type* s3 = exe_vectors.top(); 
exe_vectors.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); } else { float_type* t = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,s3,cmp_type)); }; cudaFree(s3); } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s2,s3,cmp_type)); cudaFree(s3); cudaFree(s2); } else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); float_type* s2 = exe_vectors_f.top(); exe_vectors_f.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s2,s3,cmp_type)); cudaFree(s3); cudaFree(s2); } else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) { cmp_type = reverse_op(cmp_type); float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,s2,cmp_type)); cudaFree(s3); cudaFree(s2); } else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) { float_type* s3 = exe_vectors_f.top(); exe_vectors_f.pop(); int_type* s2 = exe_vectors.top(); exe_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->compare(s3,s2,cmp_type)); cudaFree(s3); cudaFree(s2); } else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); exe_type.push("VECTOR"); if (a->type[(a->columnNames)[s1_val]] == 0) { int_type* t = a->get_int_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t1,t,cmp_type)); } else { float_type* t1 = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t1,t,cmp_type)); }; } else { cmp_type = reverse_op(cmp_type); float_type* t = a->get_float_type_by_name(s1_val); if (a->type[(a->columnNames)[s2_val]] == 0) { int_type* t1 = a->get_int_by_name(s2_val); bool_vectors.push(a->compare(t,t1,cmp_type)); } else { float_type* t1 = a->get_float_type_by_name(s2_val); bool_vectors.push(a->compare(t,t1,cmp_type)); }; } } } else if (ss.compare("AND") == 0) { bool* s3 = bool_vectors.top(); bool_vectors.pop(); bool* s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->logical_and(s2,s3)); } else if (ss.compare("OR") == 0) { bool* s3 = bool_vectors.top(); bool_vectors.pop(); bool* s2 = bool_vectors.top(); bool_vectors.pop(); exe_type.push("VECTOR"); bool_vectors.push(a->logical_or(s2,s3)); } else { cout << "found nothing " << endl; } }; }; thrust::device_ptr<bool> bp((bool*)bool_vectors.top()); b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1); b->prm_index = 'R'; thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount), bp, b->prm_d.begin(), thrust::identity<bool>()); //cudaMemcpy((void**)b->prm[segment], (void**)(thrust::raw_pointer_cast(dev_p.data())), 4*count, cudaMemcpyDeviceToHost); if(segment == a->segCount-1) b->type_index = a->type_index; cudaFree(bool_vectors.top()); return; }
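The string predicates in the file above operate on fixed-width, zero-padded character columns: the column is one flat char array, the small d_v buffer carries the field width (plus the pattern length for the LIKE variants), and a comparison functor is mapped over row indices with thrust::for_each. The sketch below reproduces that layout with a simplified equality functor in the spirit of cmp_functor_str; the functor name, field width, and three-row sample column are made up for illustration.

#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstdio>
#include <cstring>

// Compares every fixed-width row of a flat char column against one pattern.
struct fixed_width_equals
{
    const char* column;   // row-major, 'width' bytes per row, zero padded
    const char* pattern;  // zero-padded to 'width' as well
    bool* result;
    unsigned int width;

    __host__ __device__ void operator()(unsigned int row) const
    {
        const char* src = column + row * width;
        for (unsigned int i = 0; i < width; ++i)
            if (src[i] != pattern[i]) { result[row] = false; return; }
        result[row] = true;
    }
};

int main()
{
    const unsigned int width = 8, rows = 3;
    char host_column[rows * width] = {};                 // zero-padded rows
    std::memcpy(host_column + 0 * width, "apple", 5);
    std::memcpy(host_column + 1 * width, "pear", 4);
    std::memcpy(host_column + 2 * width, "apple", 5);
    char host_pattern[width] = {};
    std::memcpy(host_pattern, "apple", 5);

    thrust::device_vector<char> column(host_column, host_column + rows * width);
    thrust::device_vector<char> pattern(host_pattern, host_pattern + width);
    thrust::device_vector<bool> result(rows);

    fixed_width_equals pred;
    pred.column  = thrust::raw_pointer_cast(column.data());
    pred.pattern = thrust::raw_pointer_cast(pattern.data());
    pred.result  = thrust::raw_pointer_cast(result.data());
    pred.width   = width;

    // Same dispatch shape as the file: one functor invocation per row index.
    thrust::for_each(thrust::make_counting_iterator(0u),
                     thrust::make_counting_iterator(rows),
                     pred);

    for (unsigned int r = 0; r < rows; ++r)
        printf("row %u matches: %d\n", r, result[r] ? 1 : 0);
    return 0;
}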
a21932830d00e672059b130d7d5c4968d31fdfb4.hip
// !!! This is a file automatically generated by hipify!!! //====================================== // // // GPU //====================================== #include"stdafx.h" #include"Residual_DATA.hpp" #include"Residual_FUNC.hpp" #include"Residual_Base.h" #include"Residual_GPU.cuh" #include"Residual_LayerData_GPU.cuh" using namespace Gravisbell; using namespace Gravisbell::Layer::NeuralNetwork; namespace Gravisbell { namespace Layer { namespace NeuralNetwork { /** */ Residual_GPU::Residual_GPU(Gravisbell::GUID guid, Residual_LayerData_GPU& i_layerData, const std::vector<IODataStruct>& i_lpInputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager) : Residual_Base (guid, i_lpInputDataStruct, i_layerData.GetOutputDataStruct(&i_lpInputDataStruct[0], (U32)i_lpInputDataStruct.size())) , layerData (i_layerData) /**< */ , outputBufferCount (0) /**< */ { hipblasCreate(&cublasHandle); } /** */ Residual_GPU::~Residual_GPU() { hipblasDestroy(cublasHandle); } //================================ // //================================ /** */ U32 Residual_GPU::GetLayerKind()const { return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase(); } /** . @return 0 */ ErrorCode Residual_GPU::Initialize(void) { return this->layerData.Initialize(); } //=========================== // //=========================== /** */ Residual_LayerData_Base& Residual_GPU::GetLayerData() { return this->layerData; } const Residual_LayerData_Base& Residual_GPU::GetLayerData()const { return this->layerData; } //================================ // //================================ /** .() @param batchSize . NN. PreProcessLearnLoop. */ ErrorCode Residual_GPU::PreProcessLearn() { ErrorCode errorCode = this->PreProcessCalculate(); if(errorCode != ErrorCode::ERROR_CODE_NONE) return errorCode; // this->m_lppDInputBuffer.resize(this->GetInputDataCount()); return ErrorCode::ERROR_CODE_NONE; } /** .() @param batchSize . NN. Calculate. */ ErrorCode Residual_GPU::PreProcessCalculate() { // this->lpInputBufferCount.resize(this->GetInputDataCount()); for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++) { this->lpInputBufferCount[inputNum] = this->GetInputBufferCount(inputNum); if(this->lpInputBufferCount[inputNum] == 0) return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT; } // this->outputBufferCount = this->GetOutputBufferCount(); if(this->outputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT; return ErrorCode::ERROR_CODE_NONE; } /** . Calculate. */ ErrorCode Residual_GPU::PreProcessLoop() { return ErrorCode::ERROR_CODE_NONE; } //================================ // //================================ /** . @param lpInputBuffer . 
GetInputBufferCount @return 0 */ ErrorCode Residual_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppOutputBuffer) { // hipMemset(thrust::raw_pointer_cast(&o_lppOutputBuffer[0]), 0, sizeof(F32)*this->outputBufferCount*this->GetBatchSize()); F32 alpha = 1.0f; for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++) { for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++) { hipblasStatus_t err = hipblasSaxpy( this->cublasHandle, this->lpInputBufferCount[inputNum], &alpha, &i_lppInputBuffer[inputNum][batchNum * this->lpInputBufferCount[inputNum]], 1, thrust::raw_pointer_cast(&o_lppOutputBuffer[batchNum*this->outputBufferCount]), 1); if(err != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } } hipDeviceSynchronize(); return ErrorCode::ERROR_CODE_NONE; } //================================ // //================================ /** .. Calculate. @param o_lppDInputBuffer . [GetBatchSize()][GetInputBufferCount()]. @param i_lppDOutputBuffer =. [GetBatchSize()][GetOutputBufferCount()]. */ ErrorCode Residual_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppDInputBuffer[], CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { if(o_lppDInputBuffer) { // for(U32 inputNum=0; inputNum<this->GetInputDataCount(); inputNum++) { this->m_lppDInputBuffer[inputNum] = o_lppDInputBuffer[inputNum]; } for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++) { for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++) { hipError_t err = hipMemcpyAsync( &this->m_lppDInputBuffer[inputNum][batchNum*this->lpInputBufferCount[inputNum]], &i_lppDOutputBuffer[batchNum*this->outputBufferCount], sizeof(F32) * this->lpInputBufferCount[inputNum], hipMemcpyDeviceToDevice); if(err != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } } hipDeviceSynchronize(); } #ifdef _DEBUG std::vector<float> lpTmpInputBuffer0(this->GetBatchSize() * this->lpInputBufferCount[0]); hipMemcpy(&lpTmpInputBuffer0[0], i_lppInputBuffer[0], sizeof(float)*lpTmpInputBuffer0.size(), hipMemcpyDeviceToHost); std::vector<float> lpTmpInputBuffer1(this->GetBatchSize() * this->lpInputBufferCount[1]); hipMemcpy(&lpTmpInputBuffer1[0], i_lppInputBuffer[1], sizeof(float)*lpTmpInputBuffer1.size(), hipMemcpyDeviceToHost); std::vector<float> lpTmpOutputBuffer(this->GetBatchSize() * this->outputBufferCount); hipMemcpy(&lpTmpOutputBuffer[0], i_lppOutputBuffer, sizeof(float)*lpTmpOutputBuffer.size(), hipMemcpyDeviceToHost); std::vector<float> lpTmpDOutputBuffer(this->GetBatchSize() * this->outputBufferCount); hipMemcpy(&lpTmpDOutputBuffer[0], i_lppDOutputBuffer, sizeof(float)*lpTmpDOutputBuffer.size(), hipMemcpyDeviceToHost); std::vector<float> lpTmpDInputBuffer0(this->GetBatchSize() * this->lpInputBufferCount[0]); hipMemcpy(&lpTmpDInputBuffer0[0], o_lppDInputBuffer[0], sizeof(float)*lpTmpDInputBuffer0.size(), hipMemcpyDeviceToHost); std::vector<float> lpTmpDInputBuffer1(this->GetBatchSize() * this->lpInputBufferCount[1]); hipMemcpy(&lpTmpDInputBuffer1[0], o_lppDInputBuffer[1], sizeof(float)*lpTmpDInputBuffer1.size(), hipMemcpyDeviceToHost); #endif return ErrorCode::ERROR_CODE_NONE; } /** . Calculate. @param i_lppDOutputBuffer =. [GetBatchSize()][GetOutputBufferCount()]. 
*/ ErrorCode Residual_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppDInputBuffer[], CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer); } } // Gravisbell; } // Layer; } // NeuralNetwork;
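Residual_GPU::Calculate_device above implements the skip connection by zeroing the output buffer and then accumulating every input buffer into it, one saxpy per input per batch element. The stand-alone sketch below shows the same accumulation for a single batch element with a plain element-wise kernel instead of the BLAS call. It is an illustrative program with made-up sizes, not code from the layer; the CUDA runtime API is shown, and the hipified build would use the corresponding hip* calls.

#include <cuda_runtime.h>
#include <cstdio>

// out[i] += in[i]; launched once per input buffer, this reproduces the
// y = x + F(x) accumulation of a residual connection.
__global__ void accumulate(const float* in, float* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] += in[i];
}

int main()
{
    const int n = 8;
    float h_a[n], h_b[n], h_out[n];
    for (int i = 0; i < n; ++i) { h_a[i] = (float)i; h_b[i] = 10.0f * i; }

    float *d_a, *d_b, *d_out;
    cudaMalloc(&d_a, n * sizeof(float));
    cudaMalloc(&d_b, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemcpy(d_a, h_a, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, n * sizeof(float));   // like the memset at the top of Calculate_device

    accumulate<<<1, 256>>>(d_a, d_out, n);     // first input (e.g. the identity path)
    accumulate<<<1, 256>>>(d_b, d_out, n);     // second input (e.g. the residual branch)
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; ++i) printf("%g ", h_out[i]);
    printf("\n");
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_out);
    return 0;
}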
a21932830d00e672059b130d7d5c4968d31fdfb4.cu
//====================================== // フィードフォワードニューラルネットワークの統合処理レイヤー // 結合、活性化 // GPU処理用 //====================================== #include"stdafx.h" #include"Residual_DATA.hpp" #include"Residual_FUNC.hpp" #include"Residual_Base.h" #include"Residual_GPU.cuh" #include"Residual_LayerData_GPU.cuh" using namespace Gravisbell; using namespace Gravisbell::Layer::NeuralNetwork; namespace Gravisbell { namespace Layer { namespace NeuralNetwork { /** コンストラクタ */ Residual_GPU::Residual_GPU(Gravisbell::GUID guid, Residual_LayerData_GPU& i_layerData, const std::vector<IODataStruct>& i_lpInputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager) : Residual_Base (guid, i_lpInputDataStruct, i_layerData.GetOutputDataStruct(&i_lpInputDataStruct[0], (U32)i_lpInputDataStruct.size())) , layerData (i_layerData) /**< レイヤーデータ */ , outputBufferCount (0) /**< 出力バッファ数 */ { cublasCreate(&cublasHandle); } /** デストラクタ */ Residual_GPU::~Residual_GPU() { cublasDestroy(cublasHandle); } //================================ // 基本処理 //================================ /** レイヤー種別の取得 */ U32 Residual_GPU::GetLayerKind()const { return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase(); } /** 初期化. 各ニューロンの値をランダムに初期化 @return 成功した場合0 */ ErrorCode Residual_GPU::Initialize(void) { return this->layerData.Initialize(); } //=========================== // レイヤーデータ関連 //=========================== /** レイヤーデータを取得する */ Residual_LayerData_Base& Residual_GPU::GetLayerData() { return this->layerData; } const Residual_LayerData_Base& Residual_GPU::GetLayerData()const { return this->layerData; } //================================ // 演算処理 //================================ /** 演算前処理を実行する.(学習用) @param batchSize 同時に演算を行うバッチのサイズ. NN作成後、演算処理を実行する前に一度だけ必ず実行すること。データごとに実行する必要はない. 失敗した場合はPreProcessLearnLoop以降の処理は実行不可. */ ErrorCode Residual_GPU::PreProcessLearn() { ErrorCode errorCode = this->PreProcessCalculate(); if(errorCode != ErrorCode::ERROR_CODE_NONE) return errorCode; // 入力誤差バッファ格納用のアドレス配列を作成 this->m_lppDInputBuffer.resize(this->GetInputDataCount()); return ErrorCode::ERROR_CODE_NONE; } /** 演算前処理を実行する.(演算用) @param batchSize 同時に演算を行うバッチのサイズ. NN作成後、演算処理を実行する前に一度だけ必ず実行すること。データごとに実行する必要はない. 失敗した場合はCalculate以降の処理は実行不可. */ ErrorCode Residual_GPU::PreProcessCalculate() { // 入力バッファ数を確認 this->lpInputBufferCount.resize(this->GetInputDataCount()); for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++) { this->lpInputBufferCount[inputNum] = this->GetInputBufferCount(inputNum); if(this->lpInputBufferCount[inputNum] == 0) return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT; } // 出力バッファ数を確認 this->outputBufferCount = this->GetOutputBufferCount(); if(this->outputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT; return ErrorCode::ERROR_CODE_NONE; } /** ループの初期化処理.データセットの実行開始前に実行する 失敗した場合はCalculate以降の処理は実行不可. */ ErrorCode Residual_GPU::PreProcessLoop() { return ErrorCode::ERROR_CODE_NONE; } //================================ // 演算処理 //================================ /** 演算処理を実行する. @param lpInputBuffer 入力データバッファ. 
GetInputBufferCountで取得した値の要素数が必要 @return 成功した場合0が返る */ ErrorCode Residual_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppOutputBuffer) { // 出力バッファを初期化 cudaMemset(thrust::raw_pointer_cast(&o_lppOutputBuffer[0]), 0, sizeof(F32)*this->outputBufferCount*this->GetBatchSize()); F32 alpha = 1.0f; for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++) { for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++) { cublasStatus_t err = cublasSaxpy_v2( this->cublasHandle, this->lpInputBufferCount[inputNum], &alpha, &i_lppInputBuffer[inputNum][batchNum * this->lpInputBufferCount[inputNum]], 1, thrust::raw_pointer_cast(&o_lppOutputBuffer[batchNum*this->outputBufferCount]), 1); if(err != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } } cudaThreadSynchronize(); return ErrorCode::ERROR_CODE_NONE; } //================================ // 学習処理 //================================ /** 入力誤差計算をを実行する.学習せずに入力誤差を取得したい場合に使用する. 入力信号、出力信号は直前のCalculateの値を参照する. @param o_lppDInputBuffer 入力誤差差分格納先レイヤー. [GetBatchSize()の戻り値][GetInputBufferCount()の戻り値]の要素数が必要. @param i_lppDOutputBuffer 出力誤差差分=次レイヤーの入力誤差差分. [GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要. 直前の計算結果を使用する */ ErrorCode Residual_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppDInputBuffer[], CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { if(o_lppDInputBuffer) { // 入力誤差バッファのアドレスを配列に格納 for(U32 inputNum=0; inputNum<this->GetInputDataCount(); inputNum++) { this->m_lppDInputBuffer[inputNum] = o_lppDInputBuffer[inputNum]; } for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++) { for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++) { cudaError_t err = cudaMemcpyAsync( &this->m_lppDInputBuffer[inputNum][batchNum*this->lpInputBufferCount[inputNum]], &i_lppDOutputBuffer[batchNum*this->outputBufferCount], sizeof(F32) * this->lpInputBufferCount[inputNum], cudaMemcpyDeviceToDevice); if(err != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } } cudaThreadSynchronize(); } #ifdef _DEBUG std::vector<float> lpTmpInputBuffer0(this->GetBatchSize() * this->lpInputBufferCount[0]); cudaMemcpy(&lpTmpInputBuffer0[0], i_lppInputBuffer[0], sizeof(float)*lpTmpInputBuffer0.size(), cudaMemcpyDeviceToHost); std::vector<float> lpTmpInputBuffer1(this->GetBatchSize() * this->lpInputBufferCount[1]); cudaMemcpy(&lpTmpInputBuffer1[0], i_lppInputBuffer[1], sizeof(float)*lpTmpInputBuffer1.size(), cudaMemcpyDeviceToHost); std::vector<float> lpTmpOutputBuffer(this->GetBatchSize() * this->outputBufferCount); cudaMemcpy(&lpTmpOutputBuffer[0], i_lppOutputBuffer, sizeof(float)*lpTmpOutputBuffer.size(), cudaMemcpyDeviceToHost); std::vector<float> lpTmpDOutputBuffer(this->GetBatchSize() * this->outputBufferCount); cudaMemcpy(&lpTmpDOutputBuffer[0], i_lppDOutputBuffer, sizeof(float)*lpTmpDOutputBuffer.size(), cudaMemcpyDeviceToHost); std::vector<float> lpTmpDInputBuffer0(this->GetBatchSize() * this->lpInputBufferCount[0]); cudaMemcpy(&lpTmpDInputBuffer0[0], o_lppDInputBuffer[0], sizeof(float)*lpTmpDInputBuffer0.size(), cudaMemcpyDeviceToHost); std::vector<float> lpTmpDInputBuffer1(this->GetBatchSize() * this->lpInputBufferCount[1]); cudaMemcpy(&lpTmpDInputBuffer1[0], o_lppDInputBuffer[1], sizeof(float)*lpTmpDInputBuffer1.size(), cudaMemcpyDeviceToHost); #endif return ErrorCode::ERROR_CODE_NONE; } /** 学習処理を実行する. 入力信号、出力信号は直前のCalculateの値を参照する. @param i_lppDOutputBuffer 出力誤差差分=次レイヤーの入力誤差差分. 
[GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要. 直前の計算結果を使用する */ ErrorCode Residual_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppDInputBuffer[], CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer); } } // Gravisbell; } // Layer; } // NeuralNetwork;
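CalculateDInput_device above relies on the gradient of a sum being the identity: the incoming output gradient is copied, truncated to each input's length, into every input-gradient buffer with device-to-device cudaMemcpyAsync calls. A minimal stand-alone version of that step is sketched below with made-up buffer sizes; it synchronizes with cudaDeviceSynchronize, the non-deprecated replacement for the cudaThreadSynchronize call used in the file (the hipified half already uses hipDeviceSynchronize).

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const int output_len = 6;          // length of the output-gradient buffer
    const int input_len[2] = { 6, 4 }; // per-input lengths; each <= output_len

    float h_dout[output_len] = { 1, 2, 3, 4, 5, 6 };
    float* d_dout;
    cudaMalloc(&d_dout, output_len * sizeof(float));
    cudaMemcpy(d_dout, h_dout, output_len * sizeof(float), cudaMemcpyHostToDevice);

    float* d_dinput[2];
    for (int k = 0; k < 2; ++k) {
        cudaMalloc(&d_dinput[k], input_len[k] * sizeof(float));
        // dInput_k = dOutput (first input_len[k] elements), as in CalculateDInput_device.
        cudaMemcpyAsync(d_dinput[k], d_dout, input_len[k] * sizeof(float),
                        cudaMemcpyDeviceToDevice);
    }
    cudaDeviceSynchronize();   // preferred over the deprecated cudaThreadSynchronize

    for (int k = 0; k < 2; ++k) {
        float h[6];
        cudaMemcpy(h, d_dinput[k], input_len[k] * sizeof(float), cudaMemcpyDeviceToHost);
        printf("dInput[%d]:", k);
        for (int i = 0; i < input_len[k]; ++i) printf(" %g", h[i]);
        printf("\n");
        cudaFree(d_dinput[k]);
    }
    cudaFree(d_dout);
    return 0;
}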
86d00cd2650dc8a4d345e6e218b1f1205d40e434.hip
// !!! This is a file automatically generated by hipify!!! // // Created by zeyi on 1/9/19. // #include <fstream> #include <thundergbm/tree.h> #include <thundergbm/updater/exact_updater.h> #include <thundergbm/updater/hist_updater.h> #include <thundergbm/trainer.h> #include <thundergbm/metric/metric.h> #include "thundergbm/util/device_lambda.cuh" #include "thrust/reduce.h" float_type TreeTrainer::compute_rmse(const InsStat &stats) { TIMED_FUNC(timerObj); SyncArray<float_type> sq_err(stats.n_instances); auto sq_err_data = sq_err.device_data(); const float_type *y_data = stats.y.device_data(); const float_type *y_predict_data = stats.y_predict.device_data(); device_loop(stats.n_instances, [=] __device__(int i) { float_type e = y_predict_data[i] - y_data[i]; sq_err_data[i] = e * e; }); float_type rmse = sqrt(thrust::reduce(thrust::hip::par, sq_err.device_data(), sq_err.device_end()) / stats.n_instances); return rmse; } void TreeTrainer::save_trees(GBMParam &param, vector<Tree> &trees) { std::ofstream out(param.out_model_name); int round = 0; for (Tree &tree:trees) { string str_tree = string_format("booster[%d]:", round) + tree.dump(param.depth); //LOG(INFO) << "\n" << str_tree; out << str_tree; round++; } out.close(); } float_type TreeTrainer::train(GBMParam &param) { dataSet.load_from_file(param.path, param); float_type rmse; if (param.tree_method.compare("exact") == 0) rmse = train_exact(param); else if (param.tree_method.compare("hist") == 0) rmse = train_hist(param); else { bool exact_sp_producer = false; if (dataSet.n_features() > 20000)//#TODO: use data set density ratio exact_sp_producer = true; if (exact_sp_producer == true) rmse = train_exact(param); else rmse = train_hist(param); } return rmse; } float_type TreeTrainer::train_exact(GBMParam &param) { LOG(INFO) << "using exact split to train the trees"; int n_instances = dataSet.n_instances(); vector<Tree> trees; trees.resize(param.n_trees); ExactUpdater updater(param); updater.init(dataSet); int round = 0; float_type rmse = 0; SyncMem::clear_cache(); { TIMED_SCOPE(timerObj, "construct tree"); for (Tree &tree:trees) { updater.grow(tree); //next round round++; rmse = compute_rmse(updater.shards.front()->stats); LOG(INFO) << "rmse = " << rmse; } save_trees(param, trees); } return rmse; } ///// upgrading //float_type TreeTrainer::train_exact(GBMParam &param) { // DataSet dataSet; // dataSet.load_from_file(param.path, param); // int n_instances = dataSet.n_instances(); // vector<Tree> trees; // trees.resize(param.n_trees); // // ExactUpdater updater(param); // updater.init(dataSet); // int round = 0; // float_type rmse = 0; // SyncMem::clear_cache(); // { // TIMED_SCOPE(timerObj, "construct tree"); // for (Tree &tree:trees) { // updater.grow(tree); // //next round // round++; // rmse = compute_rmse(updater.shards.front()->stats); // LOG(INFO) << "rmse = " << rmse; // } // save_trees(param, trees); // } // return rmse; //} float_type TreeTrainer::train_hist(GBMParam &param) { LOG(INFO) << "using histogram based approach to find split"; SyncMem::clear_cache(); vector<vector<Tree>> trees; vector<HistUpdater::ShardT> shards(param.n_device); //TODO refactor these SparseColumns columns; columns.from_dataset(dataSet); vector<std::unique_ptr<SparseColumns>> v_columns(param.n_device); for (int i = 0; i < param.n_device; ++i) { v_columns[i].reset(&shards[i].columns); } columns.to_multi_devices(v_columns); HistUpdater updater(param); HistUpdater::for_each_shard(shards, [&](Shard &shard) { int n_instances = shard.columns.n_row; 
shard.stats.resize(n_instances); shard.stats.y.copy_from(dataSet.y.data(), n_instances); shard.stats.obj.reset(ObjectiveFunction::create(param.objective)); shard.stats.obj->configure(param, dataSet); shard.param = param; shard.param.learning_rate /= param.n_parallel_trees;//average trees in one iteration }); updater.init(shards); SyncMem::clear_cache(); std::unique_ptr<Metric> metric; metric.reset(Metric::create(shards.front().stats.obj->default_metric())); metric->configure(param, dataSet); int round = 0; float_type score = 0; { TIMED_SCOPE(timerObj, "construct tree"); int n_instances = shards.front().stats.n_instances; SyncArray<GHPair> all_gh_pair(n_instances * param.num_class); SyncArray<float_type> all_y(n_instances * param.num_class); for (int iter = 0; iter < param.n_trees; iter++) { //one boosting iteration trees.emplace_back(); vector<Tree> &tree = trees.back(); tree.resize(param.n_parallel_trees); if (param.num_class == 1) { //update gradient HistUpdater::for_each_shard(shards, [&](Shard &shard) { shard.stats.update_gradient(); LOG(DEBUG) << "gh = " << shard.stats.gh_pair; if (updater.param.bagging) { shard.stats.gh_pair_backup.resize(shard.stats.n_instances); shard.stats.gh_pair_backup.copy_from(shard.stats.gh_pair); } }); updater.grow(tree, shards); //next round round++; score = metric->get_score(shards.front().stats.y_predict); } else { shards.front().stats.obj->get_gradient(shards.front().stats.y, all_y, all_gh_pair); for (int i = 0; i < param.num_class; ++i) { trees.emplace_back(); vector<Tree> &tree = trees.back(); tree.resize(param.n_parallel_trees); HistUpdater::for_each_shard(shards, [&](Shard &shard) { shard.stats.gh_pair.copy_from(all_gh_pair.device_data() + i * n_instances, n_instances); shard.stats.y_predict.copy_from(all_y.device_data() + i * n_instances, n_instances); }); updater.grow(tree, shards); CUDA_CHECK(hipMemcpy(all_y.device_data() + i * n_instances, shards.front().stats.y_predict.device_data(), sizeof(float_type) * n_instances, hipMemcpyDefault)); } score = metric->get_score(all_y); } LOG(INFO) << metric->get_name() << " = " << score; } // LOG(INFO) << trees.back().back().dump(param.depth); } for (int i = 0; i < param.n_device; ++i) { v_columns[i].release(); } return score; }
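compute_rmse above squares the per-instance prediction error in a device loop and then reduces the squared errors with thrust::reduce before taking the square root. The same quantity can be computed in a single fused pass with thrust::inner_product, as in the self-contained sketch below; the label/prediction values are illustrative, and because only Thrust is used the sketch compiles with either nvcc or hipcc.

#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/functional.h>
#include <cmath>
#include <cstdio>

// binary_op2 for inner_product: squared difference of prediction and label.
struct squared_error
{
    __host__ __device__ float operator()(float predicted, float truth) const
    {
        float e = predicted - truth;
        return e * e;
    }
};

int main()
{
    const int n = 4;
    float h_y[n]      = { 1.0f, 2.0f, 3.0f, 4.0f };
    float h_y_pred[n] = { 1.5f, 1.5f, 3.0f, 5.0f };
    thrust::device_vector<float> y(h_y, h_y + n);
    thrust::device_vector<float> y_pred(h_y_pred, h_y_pred + n);

    // sum over i of (y_pred[i] - y[i])^2, accumulated on the device with thrust::plus.
    float sq_sum = thrust::inner_product(y_pred.begin(), y_pred.end(), y.begin(),
                                         0.0f, thrust::plus<float>(), squared_error());
    float rmse = std::sqrt(sq_sum / n);
    printf("rmse = %f\n", rmse);
    return 0;
}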
86d00cd2650dc8a4d345e6e218b1f1205d40e434.cu
// // Created by zeyi on 1/9/19. // #include <fstream> #include <thundergbm/tree.h> #include <thundergbm/updater/exact_updater.h> #include <thundergbm/updater/hist_updater.h> #include <thundergbm/trainer.h> #include <thundergbm/metric/metric.h> #include "thundergbm/util/device_lambda.cuh" #include "thrust/reduce.h" float_type TreeTrainer::compute_rmse(const InsStat &stats) { TIMED_FUNC(timerObj); SyncArray<float_type> sq_err(stats.n_instances); auto sq_err_data = sq_err.device_data(); const float_type *y_data = stats.y.device_data(); const float_type *y_predict_data = stats.y_predict.device_data(); device_loop(stats.n_instances, [=] __device__(int i) { float_type e = y_predict_data[i] - y_data[i]; sq_err_data[i] = e * e; }); float_type rmse = sqrt(thrust::reduce(thrust::cuda::par, sq_err.device_data(), sq_err.device_end()) / stats.n_instances); return rmse; } void TreeTrainer::save_trees(GBMParam &param, vector<Tree> &trees) { std::ofstream out(param.out_model_name); int round = 0; for (Tree &tree:trees) { string str_tree = string_format("booster[%d]:", round) + tree.dump(param.depth); //LOG(INFO) << "\n" << str_tree; out << str_tree; round++; } out.close(); } float_type TreeTrainer::train(GBMParam &param) { dataSet.load_from_file(param.path, param); float_type rmse; if (param.tree_method.compare("exact") == 0) rmse = train_exact(param); else if (param.tree_method.compare("hist") == 0) rmse = train_hist(param); else { bool exact_sp_producer = false; if (dataSet.n_features() > 20000)//#TODO: use data set density ratio exact_sp_producer = true; if (exact_sp_producer == true) rmse = train_exact(param); else rmse = train_hist(param); } return rmse; } float_type TreeTrainer::train_exact(GBMParam &param) { LOG(INFO) << "using exact split to train the trees"; int n_instances = dataSet.n_instances(); vector<Tree> trees; trees.resize(param.n_trees); ExactUpdater updater(param); updater.init(dataSet); int round = 0; float_type rmse = 0; SyncMem::clear_cache(); { TIMED_SCOPE(timerObj, "construct tree"); for (Tree &tree:trees) { updater.grow(tree); //next round round++; rmse = compute_rmse(updater.shards.front()->stats); LOG(INFO) << "rmse = " << rmse; } save_trees(param, trees); } return rmse; } ///// upgrading //float_type TreeTrainer::train_exact(GBMParam &param) { // DataSet dataSet; // dataSet.load_from_file(param.path, param); // int n_instances = dataSet.n_instances(); // vector<Tree> trees; // trees.resize(param.n_trees); // // ExactUpdater updater(param); // updater.init(dataSet); // int round = 0; // float_type rmse = 0; // SyncMem::clear_cache(); // { // TIMED_SCOPE(timerObj, "construct tree"); // for (Tree &tree:trees) { // updater.grow(tree); // //next round // round++; // rmse = compute_rmse(updater.shards.front()->stats); // LOG(INFO) << "rmse = " << rmse; // } // save_trees(param, trees); // } // return rmse; //} float_type TreeTrainer::train_hist(GBMParam &param) { LOG(INFO) << "using histogram based approach to find split"; SyncMem::clear_cache(); vector<vector<Tree>> trees; vector<HistUpdater::ShardT> shards(param.n_device); //TODO refactor these SparseColumns columns; columns.from_dataset(dataSet); vector<std::unique_ptr<SparseColumns>> v_columns(param.n_device); for (int i = 0; i < param.n_device; ++i) { v_columns[i].reset(&shards[i].columns); } columns.to_multi_devices(v_columns); HistUpdater updater(param); HistUpdater::for_each_shard(shards, [&](Shard &shard) { int n_instances = shard.columns.n_row; shard.stats.resize(n_instances); shard.stats.y.copy_from(dataSet.y.data(), 
n_instances); shard.stats.obj.reset(ObjectiveFunction::create(param.objective)); shard.stats.obj->configure(param, dataSet); shard.param = param; shard.param.learning_rate /= param.n_parallel_trees;//average trees in one iteration }); updater.init(shards); SyncMem::clear_cache(); std::unique_ptr<Metric> metric; metric.reset(Metric::create(shards.front().stats.obj->default_metric())); metric->configure(param, dataSet); int round = 0; float_type score = 0; { TIMED_SCOPE(timerObj, "construct tree"); int n_instances = shards.front().stats.n_instances; SyncArray<GHPair> all_gh_pair(n_instances * param.num_class); SyncArray<float_type> all_y(n_instances * param.num_class); for (int iter = 0; iter < param.n_trees; iter++) { //one boosting iteration trees.emplace_back(); vector<Tree> &tree = trees.back(); tree.resize(param.n_parallel_trees); if (param.num_class == 1) { //update gradient HistUpdater::for_each_shard(shards, [&](Shard &shard) { shard.stats.update_gradient(); LOG(DEBUG) << "gh = " << shard.stats.gh_pair; if (updater.param.bagging) { shard.stats.gh_pair_backup.resize(shard.stats.n_instances); shard.stats.gh_pair_backup.copy_from(shard.stats.gh_pair); } }); updater.grow(tree, shards); //next round round++; score = metric->get_score(shards.front().stats.y_predict); } else { shards.front().stats.obj->get_gradient(shards.front().stats.y, all_y, all_gh_pair); for (int i = 0; i < param.num_class; ++i) { trees.emplace_back(); vector<Tree> &tree = trees.back(); tree.resize(param.n_parallel_trees); HistUpdater::for_each_shard(shards, [&](Shard &shard) { shard.stats.gh_pair.copy_from(all_gh_pair.device_data() + i * n_instances, n_instances); shard.stats.y_predict.copy_from(all_y.device_data() + i * n_instances, n_instances); }); updater.grow(tree, shards); CUDA_CHECK(cudaMemcpy(all_y.device_data() + i * n_instances, shards.front().stats.y_predict.device_data(), sizeof(float_type) * n_instances, cudaMemcpyDefault)); } score = metric->get_score(all_y); } LOG(INFO) << metric->get_name() << " = " << score; } // LOG(INFO) << trees.back().back().dump(param.depth); } for (int i = 0; i < param.n_device; ++i) { v_columns[i].release(); } return score; }
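// A minimal standalone sketch of the compute_rmse pattern used above (elementwise
// squared-error kernel followed by a thrust reduction). This is not ThunderGBM code;
// names such as squared_error are illustrative assumptions.
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <cmath>
#include <cstdio>

__global__ void squared_error(const float *y, const float *y_pred, float *sq_err, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float e = y_pred[i] - y[i];
        sq_err[i] = e * e;  // per-instance squared error
    }
}

int main() {
    const int n = 1 << 20;
    thrust::device_vector<float> y(n, 1.0f), y_pred(n, 1.5f), sq(n);
    squared_error<<<(n + 255) / 256, 256>>>(thrust::raw_pointer_cast(y.data()),
                                            thrust::raw_pointer_cast(y_pred.data()),
                                            thrust::raw_pointer_cast(sq.data()), n);
    // Sum the squared errors on the device, then finish the RMSE on the host.
    float rmse = std::sqrt(thrust::reduce(sq.begin(), sq.end(), 0.0f) / n);
    printf("rmse = %f\n", rmse);  // expected: 0.5
    return 0;
}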
fac607990d595ea3582c13784487bef2f2ee7559.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <stdint.h> #include "config.h" #include "helpers.h" #define STATE_SIZE 4 #define TOTAL_ROUNDS 10 #define RIGHT 1 #define LEFT 2 #define ENCRYPTION 1 #define DECRYPTION 2 // SubBytes, . #ifdef __CUDA_ARCH__ __device__ #endif static const uint8_t SBOX[256] = { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; __device__ static const uint8_t RSBOX[256] = { 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d }; // 
Used when generating the round keys (KeyExpansion) static const uint8_t RCON[TOTAL_ROUNDS] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 }; // Multipliers for the MixColumns operation. // This matrix could be computed, but for clarity // it is given as a constant. __device__ static const int MIX_COLUMNS_MULTIPLIERS[STATE_SIZE][STATE_SIZE] = { {2, 3, 1, 1}, {1, 2, 3, 1}, {1, 1, 2, 3}, {3, 1, 1, 2} }; // Multipliers for the inverse transformation (RevMixColumns) __device__ static const int REV_MIX_COLUMNS_MULTIPLIERS[STATE_SIZE][STATE_SIZE] = { {14, 11, 13, 9}, { 9, 14, 11, 13}, {13, 9, 14, 11}, {11, 13, 9, 14} }; /** * Replaces the bytes of the matrix according to the S-box. * In AES terms - SubBytes. */ __device__ static inline void sub_bytes(uint8_t state[STATE_SIZE][STATE_SIZE], const uint8_t *sbox) { for (int i = 0; i < STATE_SIZE; ++i) { for (int j = 0; j < STATE_SIZE; ++j) { state[i][j] = sbox[state[i][j]]; } } } /** * Shifts the rows of the matrix to the left or right. * Row 0 by 0 bytes, row 1 by one, row 2 by 2, row 3 by 3. * In AES terms - ShiftRows. */ __device__ static inline void shift_rows(uint8_t state[STATE_SIZE][STATE_SIZE], int direction) { uint8_t tmp[STATE_SIZE]; for (int row = 1; row < STATE_SIZE; ++row) { for (int i = 0; i < STATE_SIZE; ++i) { if (direction == LEFT) { tmp[i] = state[row][(i + row) % STATE_SIZE]; } else if (direction == RIGHT) { tmp[(i + row) % STATE_SIZE] = state[row][i]; } } for (int i = 0; i < STATE_SIZE; ++i) { state[row][i] = tmp[i]; } } } /** * Multiplication in the Galois field */ __device__ static inline uint8_t gmul(uint8_t a, uint8_t b) { uint8_t p = 0; while (b) { if (b & 1) { p ^= a; } if (a & 0x80) { a = (a << 1) ^ 0x11b; } else { a *= 2; } b /= 2; } return p; } /** * Multiplies a column of the state matrix by a row of MIX_COLUMNS_MULTIPLIERS. * The result is placed in new_column. */ __device__ static inline void multiply_column( uint8_t *new_column, const uint8_t *column_copy, int encr_or_decr) { for (int row = 0; row < STATE_SIZE; ++row) { uint8_t sum = 0; for (int pos = 0; pos < STATE_SIZE; ++pos) { uint8_t a = column_copy[pos]; uint8_t b; if (encr_or_decr == ENCRYPTION) { b = MIX_COLUMNS_MULTIPLIERS[row][pos]; } else if (encr_or_decr == DECRYPTION) { b = REV_MIX_COLUMNS_MULTIPLIERS[row][pos]; } sum ^= gmul(a, b); } new_column[row] = sum; } } /** * Transforms the columns of the matrix, multiplying each * column by the coefficient matrix. * In AES terms - MixColumns */ __device__ static void mix_columns(uint8_t state[STATE_SIZE][STATE_SIZE], int encr_or_decr) { for (int col = 0; col < STATE_SIZE; ++col) { uint8_t column_copy[STATE_SIZE] = {0}; uint8_t new_column[STATE_SIZE] = {0}; // Copy the matrix column into a separate array and multiply // it by the coefficient matrix. // This is inefficient, but obvious. for (int i = 0; i < STATE_SIZE; ++i) { column_copy[i] = state[i][col]; } multiply_column(new_column, column_copy, encr_or_decr); for (int i = 0; i < STATE_SIZE; ++i) { state[i][col] = new_column[i]; } } } /** * XORs all elements of the two matrices together. * The result is placed in first. */ __device__ static inline void add_round_key( uint8_t first[STATE_SIZE][STATE_SIZE], uint8_t second[STATE_SIZE][STATE_SIZE]) { for (int i = 0; i < STATE_SIZE; ++i) { for (int j = 0; j < STATE_SIZE; ++j) { first[i][j] ^= second[i][j]; } } } /** * Rotates (shifts) the array one byte to the left. */ static inline void key_rot_word_left(uint8_t *word) { uint8_t tmp = word[0]; for (int i = 0; i < STATE_SIZE - 1; ++i) { word[i] = word[i + 1]; } word[STATE_SIZE - 1] = tmp; } /** * Generates the next round key (expands the key). * In AES terminology - KeyExpansion. * key - matrix for the result (the new round key), * prev_key - the key of the previous round, * round_num - the round number. * * Key expansion works as follows: * - Take the last (fourth) column of the previous key, * - Apply to it: * - RotWord, * - SubBytes, * - XOR with the first column of the previous key, * - XOR of the first byte with an element of the RCON vector (the element * index corresponds to the current round). * - Each of the next three columns is the XOR of the corresponding column * of the previous key and the previous column of the new key. 
*/ static void key_expansion(uint8_t key[STATE_SIZE][STATE_SIZE], uint8_t prev_key[STATE_SIZE][STATE_SIZE], int round_num) { uint8_t first_col[STATE_SIZE] = {0}; for (int i = 0; i < STATE_SIZE; ++i) { first_col[i] = prev_key[i][STATE_SIZE - 1]; } key_rot_word_left(first_col); for (int i = 0; i < STATE_SIZE; ++i) { first_col[i] = SBOX[first_col[i]]; } for (int i = 0; i < STATE_SIZE; ++i) { first_col[i] ^= prev_key[i][0]; } first_col[0] ^= RCON[round_num]; for (int i = 0; i < STATE_SIZE; ++i) { key[i][0] = first_col[i]; } for (int col = 1; col < STATE_SIZE; ++col) { for (int i = 0; i < STATE_SIZE; ++i) { key[i][col] = key[i][col - 1] ^ prev_key[i][col]; } } } /** * Generates the round keys. * aes_key - the key bytes, 128-bit, * round_keys - array[11/4/4] that will receive the keys */ static void gen_round_keys( const uint8_t *aes_key, uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE]) { // The first key is taken directly from aes_key. for (int i = 0; i < STATE_SIZE; ++i) { for (int j = 0; j < STATE_SIZE; ++j) { round_keys[0][j][i] = aes_key[i * STATE_SIZE + j]; } } // The remaining keys are generated with KeyExpansion for (int round_num = 0; round_num < TOTAL_ROUNDS; ++round_num) { key_expansion( round_keys[round_num + 1], round_keys[round_num], round_num); } } /** * Encrypts a single block of data. * state - matrix with the data to encrypt. The result is placed * in the same matrix. * round_keys - array of key matrices (one key per round). * * Encryption is performed as follows: * - AddRoundKey with the initial key is applied to the data. * - 9 rounds of SubBytes, ShiftRows, MixColumns, AddRoundKey. * - A 10th round in which MixColumns is skipped. */ __device__ static void aes_encrypt_block( uint8_t state[STATE_SIZE][STATE_SIZE], uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE]) { add_round_key(state, round_keys[0]); for (int round_num = 0; round_num < TOTAL_ROUNDS - 1; ++round_num) { sub_bytes(state, SBOX); shift_rows(state, LEFT); mix_columns(state, ENCRYPTION); add_round_key(state, round_keys[round_num + 1]); } sub_bytes(state, SBOX); shift_rows(state, LEFT); add_round_key(state, round_keys[TOTAL_ROUNDS]); } /** * Decrypts a single block of data. * state - matrix with the ciphertext. The result is placed * in the same matrix. * round_keys - array of key matrices (one key per round). * * Decryption is performed in the reverse order of encryption, * see aes_encrypt_block(). */ __device__ static void aes_decrypt_block( uint8_t state[STATE_SIZE][STATE_SIZE], uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE]) { add_round_key(state, round_keys[TOTAL_ROUNDS]); shift_rows(state, RIGHT); sub_bytes(state, RSBOX); for (int round_num = TOTAL_ROUNDS - 2; round_num >= 0; --round_num) { add_round_key(state, round_keys[round_num + 1]); mix_columns(state, DECRYPTION); shift_rows(state, RIGHT); sub_bytes(state, RSBOX); } add_round_key(state, round_keys[0]); } /** * Encrypts or decrypts an array of data. * all_data - the data array, * len - the number of blocks, * encr_or_decr - encryption or decryption. */ __global__ static void map_aes( uint8_t *all_data, int len, uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE], int encr_or_decr, int device_loops) { for (int iteration = 0; iteration < device_loops; ++iteration) { int worker_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; int offset = THREADS_PER_BLOCK * BLOCKS_PER_GRID * iteration; int idx = offset + worker_idx; if (idx < len) { int bytes_offset = idx * STATE_SIZE * STATE_SIZE; // TODO: avoid copying the data, try casting to an array instead. uint8_t state[STATE_SIZE][STATE_SIZE] = {0}; for (int i = 0; i < STATE_SIZE; ++i) { for (int j = 0; j < STATE_SIZE; ++j) { state[i][j] = all_data[bytes_offset + i * STATE_SIZE + j]; } } if (encr_or_decr == ENCRYPTION) { aes_encrypt_block(state, round_keys); } else if (encr_or_decr == DECRYPTION) { aes_decrypt_block(state, round_keys); } for (int i = 0; i < STATE_SIZE; ++i) { for (int j = 0; j < STATE_SIZE; ++j) { all_data[bytes_offset + i * STATE_SIZE + j] = state[i][j]; } } } } } /** * Launches the encryption process on the GPU. * data - the data array to process, * len - the amount of data (4x4 blocks), * round_keys - the round keys, * encr_or_decr - encryption or decryption. * Starts the computation, allocates and frees memory, etc. 
*/ static void aes_run_device( uint8_t *data, size_t len, uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE], int encr_or_decr) { uint8_t *data_dev = NULL; GPU_CHECK_ERROR( hipHostGetDevicePointer(&data_dev, data, 0) ); uint8_t ***round_keys_dev = NULL; // NOTE: sizeof(round_keys_dev) is the size of a pointer, not the // (TOTAL_ROUNDS + 1) * STATE_SIZE * STATE_SIZE bytes of the full key schedule, // so only part of the round keys is allocated and copied here. GPU_CHECK_ERROR( hipMalloc((void **) &round_keys_dev, sizeof(round_keys_dev)) ); GPU_CHECK_ERROR( hipMemcpy( round_keys_dev, round_keys, sizeof(round_keys_dev), hipMemcpyHostToDevice ) ); // Launch the processing on the device int grid_size = THREADS_PER_BLOCK * BLOCKS_PER_GRID; int device_loops = (len + grid_size - 1) / grid_size; hipLaunchKernelGGL(( map_aes), dim3(BLOCKS_PER_GRID), dim3(THREADS_PER_BLOCK), 0, 0, data_dev, len, (uint8_t (*)[4][4]) round_keys_dev, encr_or_decr, device_loops ); GPU_CHECK_ERROR_STATE(); GPU_CHECK_ERROR( hipFree(round_keys_dev) ); GPU_CHECK_ERROR( hipDeviceSynchronize() ); } /** * Encrypts data with the AES algorithm. * data - data array (a multiple of 16 bytes, the size of one block), * len - the number of blocks (4x4 bytes), * key - the key (128-bit, a 16-byte array). */ void aes_encrypt(void *data, size_t len, const void *key) { uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE]; gen_round_keys((const uint8_t *) key, round_keys); aes_run_device((uint8_t *) data, len, round_keys, ENCRYPTION); } /** * Decrypts data with the AES algorithm. * cipher - ciphertext array (a multiple of 16 bytes, the size of one block), * len - the number of blocks (4x4 bytes), * key - the key (128-bit, a 16-byte array). */ void aes_decrypt(void *cipher, size_t len, const void *key) { uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE]; gen_round_keys((const uint8_t *) key, round_keys); aes_run_device((uint8_t *) cipher, len, round_keys, DECRYPTION); }
fac607990d595ea3582c13784487bef2f2ee7559.cu
#include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <stdint.h> #include "config.h" #include "helpers.h" #define STATE_SIZE 4 #define TOTAL_ROUNDS 10 #define RIGHT 1 #define LEFT 2 #define ENCRYPTION 1 #define DECRYPTION 2 // Таблицы трансформации для SubBytes, прямая и обратная. #ifdef __CUDA_ARCH__ __device__ #endif static const uint8_t SBOX[256] = { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; __device__ static const uint8_t RSBOX[256] = { 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d }; // Используется при генерации ключей раундов (KeyExpansion) 
static const uint8_t RCON[TOTAL_ROUNDS] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 }; // Множители при операции MixColumns. // Данная матрица может быть вычислена, однако для // ясности задана в виде константы. __device__ static const int MIX_COLUMNS_MULTIPLIERS[STATE_SIZE][STATE_SIZE] = { {2, 3, 1, 1}, {1, 2, 3, 1}, {1, 1, 2, 3}, {3, 1, 1, 2} }; // При обратном преобразовании (RevMixColumns) __device__ static const int REV_MIX_COLUMNS_MULTIPLIERS[STATE_SIZE][STATE_SIZE] = { {14, 11, 13, 9}, { 9, 14, 11, 13}, {13, 9, 14, 11}, {11, 13, 9, 14} }; /** * Заменяет байты в матрице согласно S-боксу. * В терминах AES - SubBytes. */ __device__ static inline void sub_bytes(uint8_t state[STATE_SIZE][STATE_SIZE], const uint8_t *sbox) { for (int i = 0; i < STATE_SIZE; ++i) { for (int j = 0; j < STATE_SIZE; ++j) { state[i][j] = sbox[state[i][j]]; } } } /** * Сдвигает строки матрицы влево или вправо. * 0 строку на 0 байт, 1 - на один, 2 - на 2, 3 - на 3. * В терминах AES - ShiftRows. */ __device__ static inline void shift_rows(uint8_t state[STATE_SIZE][STATE_SIZE], int direction) { uint8_t tmp[STATE_SIZE]; for (int row = 1; row < STATE_SIZE; ++row) { for (int i = 0; i < STATE_SIZE; ++i) { if (direction == LEFT) { tmp[i] = state[row][(i + row) % STATE_SIZE]; } else if (direction == RIGHT) { tmp[(i + row) % STATE_SIZE] = state[row][i]; } } for (int i = 0; i < STATE_SIZE; ++i) { state[row][i] = tmp[i]; } } } /** * Умножение в поле Галуа */ __device__ static inline uint8_t gmul(uint8_t a, uint8_t b) { uint8_t p = 0; while (b) { if (b & 1) { p ^= a; } if (a & 0x80) { a = (a << 1) ^ 0x11b; } else { a *= 2; } b /= 2; } return p; } /** * Умножает столбец матрицы state на строку из MIX_COLUMNS_MULTIPLIERS. * Результат помещает в new_column. */ __device__ static inline void multiply_column( uint8_t *new_column, const uint8_t *column_copy, int encr_or_decr) { for (int row = 0; row < STATE_SIZE; ++row) { uint8_t sum = 0; for (int pos = 0; pos < STATE_SIZE; ++pos) { uint8_t a = column_copy[pos]; uint8_t b; if (encr_or_decr == ENCRYPTION) { b = MIX_COLUMNS_MULTIPLIERS[row][pos]; } else if (encr_or_decr == DECRYPTION) { b = REV_MIX_COLUMNS_MULTIPLIERS[row][pos]; } sum ^= gmul(a, b); } new_column[row] = sum; } } /** * Выполняет преобразование столбцов матрицы, умножая каждый * столбец на матрицу коэффициентов. * В терминах AES - MixColumns */ __device__ static void mix_columns(uint8_t state[STATE_SIZE][STATE_SIZE], int encr_or_decr) { for (int col = 0; col < STATE_SIZE; ++col) { uint8_t column_copy[STATE_SIZE] = {0}; uint8_t new_column[STATE_SIZE] = {0}; // Копируем столбец матрицы в отдельный массив и выполняем // его умножение на матрицу с коэффициентами. // Это неэффективно, зато очевидно. for (int i = 0; i < STATE_SIZE; ++i) { column_copy[i] = state[i][col]; } multiply_column(new_column, column_copy, encr_or_decr); for (int i = 0; i < STATE_SIZE; ++i) { state[i][col] = new_column[i]; } } } /** * XOR'ит все элементы двух матриц между собой. * Результат помещает в first. */ __device__ static inline void add_round_key( uint8_t first[STATE_SIZE][STATE_SIZE], uint8_t second[STATE_SIZE][STATE_SIZE]) { for (int i = 0; i < STATE_SIZE; ++i) { for (int j = 0; j < STATE_SIZE; ++j) { first[i][j] ^= second[i][j]; } } } /** * Вращает (сдвигает) массив на один байт влево. */ static inline void key_rot_word_left(uint8_t *word) { uint8_t tmp = word[0]; for (int i = 0; i < STATE_SIZE - 1; ++i) { word[i] = word[i + 1]; } word[STATE_SIZE - 1] = tmp; } /** * Генерирует очередной ключ раунда (расширяет ключ). 
* В терминологии AES - KeyExpansion. * key - матрица для результата (нового ключа раунда), * prev_key - ключ предыдущего раунда, * round_num - номер раунда. * * Расширение ключа выполняется следующим образом: * - Берется последний (четвертый) столбец предыдущего ключа, * - Над ним выполняются: * - RotWord * - SubBytes, * - XOR с первым столбцом предыдущего ключа, * - XOR первого байта с элементов вектора RCON (номер элемента * соответствует текущему раунду). * - Следующие три столбца получаются XORом между соответствующим * столбцом в предыдущем ключе и предыдущим столбцом в новом. */ static void key_expansion(uint8_t key[STATE_SIZE][STATE_SIZE], uint8_t prev_key[STATE_SIZE][STATE_SIZE], int round_num) { uint8_t first_col[STATE_SIZE] = {0}; for (int i = 0; i < STATE_SIZE; ++i) { first_col[i] = prev_key[i][STATE_SIZE - 1]; } key_rot_word_left(first_col); for (int i = 0; i < STATE_SIZE; ++i) { first_col[i] = SBOX[first_col[i]]; } for (int i = 0; i < STATE_SIZE; ++i) { first_col[i] ^= prev_key[i][0]; } first_col[0] ^= RCON[round_num]; for (int i = 0; i < STATE_SIZE; ++i) { key[i][0] = first_col[i]; } for (int col = 1; col < STATE_SIZE; ++col) { for (int i = 0; i < STATE_SIZE; ++i) { key[i][col] = key[i][col - 1] ^ prev_key[i][col]; } } } /** * Генерирует ключи раундов. * aes_key - байты ключа, 128-бит, * round_keys - массив[11/4/4], в который будут помещены ключи */ static void gen_round_keys( const uint8_t *aes_key, uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE]) { // Первый ключ просто вытаскиваем из aes_key. for (int i = 0; i < STATE_SIZE; ++i) { for (int j = 0; j < STATE_SIZE; ++j) { round_keys[0][j][i] = aes_key[i * STATE_SIZE + j]; } } // Остальные ключи генерируются методом KeyExpansion for (int round_num = 0; round_num < TOTAL_ROUNDS; ++round_num) { key_expansion( round_keys[round_num + 1], round_keys[round_num], round_num); } } /** * Выполняет шифрование одного блока данных. * state - матрица с данными для шифрования. В ней же и будет размещен * результат. * round_keys - массив матриц ключей (для каждого раунда свой ключ). * * Шифрование выполняется следующим образом: * - К данным применяется AddRoundKey с начальным ключом. * - 9 раундов с операциями SubBytes, ShiftRows, MixColumns, AddRoundKey. * - 10 раунд, в котором не выполняется MixColumns. */ __device__ static void aes_encrypt_block( uint8_t state[STATE_SIZE][STATE_SIZE], uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE]) { add_round_key(state, round_keys[0]); for (int round_num = 0; round_num < TOTAL_ROUNDS - 1; ++round_num) { sub_bytes(state, SBOX); shift_rows(state, LEFT); mix_columns(state, ENCRYPTION); add_round_key(state, round_keys[round_num + 1]); } sub_bytes(state, SBOX); shift_rows(state, LEFT); add_round_key(state, round_keys[TOTAL_ROUNDS]); } /** * Выполняет расшифрование одного блока данных. * state - матрица с шифротекстом. В ней же и будет размещен * результат. * round_keys - массив матриц ключей (для каждого раунда свой ключ). * * Расшифровка выполняется способом обратным шифрованию, * см. aes_encrypt_block(). 
*/ __device__ static void aes_decrypt_block( uint8_t state[STATE_SIZE][STATE_SIZE], uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE]) { add_round_key(state, round_keys[TOTAL_ROUNDS]); shift_rows(state, RIGHT); sub_bytes(state, RSBOX); for (int round_num = TOTAL_ROUNDS - 2; round_num >= 0; --round_num) { add_round_key(state, round_keys[round_num + 1]); mix_columns(state, DECRYPTION); shift_rows(state, RIGHT); sub_bytes(state, RSBOX); } add_round_key(state, round_keys[0]); } /** * Выполняет шифрование или расшифровку массива данных. * all_data - массив данных, * len - количество блоков, * encr_or_decr - шифрование или расшифровка. */ __global__ static void map_aes( uint8_t *all_data, int len, uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE], int encr_or_decr, int device_loops) { for (int iteration = 0; iteration < device_loops; ++iteration) { int worker_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; int offset = THREADS_PER_BLOCK * BLOCKS_PER_GRID * iteration; int idx = offset + worker_idx; if (idx < len) { int bytes_offset = idx * STATE_SIZE * STATE_SIZE; // TODO: не копировать данные, попробовать привести к массиву. uint8_t state[STATE_SIZE][STATE_SIZE] = {0}; for (int i = 0; i < STATE_SIZE; ++i) { for (int j = 0; j < STATE_SIZE; ++j) { state[i][j] = all_data[bytes_offset + i * STATE_SIZE + j]; } } if (encr_or_decr == ENCRYPTION) { aes_encrypt_block(state, round_keys); } else if (encr_or_decr == DECRYPTION) { aes_decrypt_block(state, round_keys); } for (int i = 0; i < STATE_SIZE; ++i) { for (int j = 0; j < STATE_SIZE; ++j) { all_data[bytes_offset + i * STATE_SIZE + j] = state[i][j]; } } } } } /** * Запускает процесс шифрования на GPU. * data - массив данных для обработки, * len - количество данных (блоков 4x4), * round_keys - раундовые ключи, * encr_or_decr - шифрование или расшифровка. * Запускает процесс вычисления, аллоцирует и освобождает память и т.д. */ static void aes_run_device( uint8_t *data, size_t len, uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE], int encr_or_decr) { uint8_t *data_dev = NULL; GPU_CHECK_ERROR( cudaHostGetDevicePointer(&data_dev, data, 0) ); uint8_t ***round_keys_dev = NULL; GPU_CHECK_ERROR( cudaMalloc((void **) &round_keys_dev, sizeof(round_keys_dev)) ); GPU_CHECK_ERROR( cudaMemcpy( round_keys_dev, round_keys, sizeof(round_keys_dev), cudaMemcpyHostToDevice ) ); // Запускаем обработку на девайсе int grid_size = THREADS_PER_BLOCK * BLOCKS_PER_GRID; int device_loops = (len + grid_size - 1) / grid_size; map_aes<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>( data_dev, len, (uint8_t (*)[4][4]) round_keys_dev, encr_or_decr, device_loops ); GPU_CHECK_ERROR_STATE(); GPU_CHECK_ERROR( cudaFree(round_keys_dev) ); GPU_CHECK_ERROR( cudaThreadSynchronize() ); } /** * Выполняет шифрование данных по алгоритму AES. * data - массив данных (кратен 16 байтам, размеру одного блока), * len - количество блоков (4x4 байта), * key - ключ (128-бит, 16-байтный массив). */ void aes_encrypt(void *data, size_t len, const void *key) { uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE]; gen_round_keys((const uint8_t *) key, round_keys); aes_run_device((uint8_t *) data, len, round_keys, ENCRYPTION); } /** * Выполняет расшифровку данных по алгоритму AES. * cipher - массив шифротекста (кратен 16 байтам, размеру одного блока), * len - количество блоков (4x4 байта), * key - ключ (128-бит, 16-байтный массив). 
*/ void aes_decrypt(void *cipher, size_t len, const void *key) { uint8_t round_keys[TOTAL_ROUNDS + 1][STATE_SIZE][STATE_SIZE]; gen_round_keys((const uint8_t *) key, round_keys); aes_run_device((uint8_t *) cipher, len, round_keys, DECRYPTION); }
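// A minimal host-side sketch (not part of the files above) checking the Galois-field
// multiply that gmul()/mix_columns() rely on, using the worked example from FIPS-197:
// {57} * {13} = {fe} over GF(2^8) with reduction polynomial x^8 + x^4 + x^3 + x + 1.
#include <cstdint>
#include <cstdio>

static uint8_t gmul_host(uint8_t a, uint8_t b) {
    uint8_t p = 0;
    while (b) {
        if (b & 1) p ^= a;      // add (XOR) a when the low bit of b is set
        uint8_t hi = a & 0x80;
        a <<= 1;                // multiply a by x
        if (hi) a ^= 0x1b;      // reduce modulo the AES polynomial
        b >>= 1;
    }
    return p;
}

int main() {
    printf("{57}*{13} = %02x (expected fe)\n", gmul_host(0x57, 0x13));
    printf("{57}*{02} = %02x (expected ae)\n", gmul_host(0x57, 0x02));
    return 0;
}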
cff6a5889b24b3f2a76aec78a65f5de6ef332eba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2018-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #define CUB_STDERR #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include "caffe2/core/common_gpu.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/pow_op.h" #include "caffe2/utils/conversions.h" namespace caffe2 { // pow, log and other math functions are defined in // CUDA math library in header file math.h #define CUDA_POW(x, y) (pow(x, y)) template <int b_is_scalar, typename T1, typename T2, typename R> __global__ void PowKernel(const T1* a, const T2* b, T2 e, R* out, int n) { CUDA_1D_KERNEL_LOOP(i, n) { out[i] = CUDA_POW(a[i], ((b == NULL) ? e : b[b_is_scalar ? 0 : i])); } } template <typename T1, typename T2, typename R> __global__ void PowBroadcastKernel(const T1* a, const T2* b, R* out, int pre, int n) { CUDA_1D_KERNEL_LOOP(i, pre * n) { out[i] = CUDA_POW(a[i], b[i % n]); } } template <typename T1, typename T2, typename R> __global__ void PowBroadcast2Kernel( const T1* a, const T2* b, R* out, int pre, int n, int post) { CUDA_1D_KERNEL_LOOP(i, pre * n * post) { out[i] = CUDA_POW(a[i], b[(i / post) % n]); } } struct CudaPowFunctor { template <bool b_is_scalar, typename T1, typename T2, typename R> inline void Run(size_t n, const T1* a, const T2* b, T2 e, R* out, CUDAContext* context) { hipLaunchKernelGGL(( PowKernel<b_is_scalar, T1, T2, R>) , dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), a, b, e, out, n); } template <typename T1, typename T2, typename R> void RunWithBroadcast( const T1* a, const T2* b, R* out, size_t pre, size_t n, CUDAContext* context) { hipLaunchKernelGGL(( PowBroadcastKernel<T1, T2, R>) , dim3(CAFFE_GET_BLOCKS(pre * n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), a, b, out, pre, n); } template <typename T1, typename T2, typename R> void RunWithBroadcast2( const T1* a, const T2* b, R* out, size_t pre, size_t n, size_t post, CUDAContext* context) { hipLaunchKernelGGL(( PowBroadcast2Kernel<T1, T2, R>) , dim3(CAFFE_GET_BLOCKS(pre * n * post)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), a, b, out, pre, n, post); } }; REGISTER_CUDA_OPERATOR( Pow, PowOp< TensorTypes<float> /*NumericTypes*/, CUDAContext, CudaPowFunctor, SameTypeAsInput>) } // namespace caffe2
cff6a5889b24b3f2a76aec78a65f5de6ef332eba.cu
/** * Copyright (c) 2018-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #define CUB_STDERR #include <cub/block/block_load.cuh> #include <cub/block/block_reduce.cuh> #include <cub/device/device_reduce.cuh> #include "caffe2/core/common_gpu.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/pow_op.h" #include "caffe2/utils/conversions.h" namespace caffe2 { // pow, log and other math functions are defined in // CUDA math library in header file math.h #define CUDA_POW(x, y) (pow(x, y)) template <int b_is_scalar, typename T1, typename T2, typename R> __global__ void PowKernel(const T1* a, const T2* b, T2 e, R* out, int n) { CUDA_1D_KERNEL_LOOP(i, n) { out[i] = CUDA_POW(a[i], ((b == NULL) ? e : b[b_is_scalar ? 0 : i])); } } template <typename T1, typename T2, typename R> __global__ void PowBroadcastKernel(const T1* a, const T2* b, R* out, int pre, int n) { CUDA_1D_KERNEL_LOOP(i, pre * n) { out[i] = CUDA_POW(a[i], b[i % n]); } } template <typename T1, typename T2, typename R> __global__ void PowBroadcast2Kernel( const T1* a, const T2* b, R* out, int pre, int n, int post) { CUDA_1D_KERNEL_LOOP(i, pre * n * post) { out[i] = CUDA_POW(a[i], b[(i / post) % n]); } } struct CudaPowFunctor { template <bool b_is_scalar, typename T1, typename T2, typename R> inline void Run(size_t n, const T1* a, const T2* b, T2 e, R* out, CUDAContext* context) { PowKernel<b_is_scalar, T1, T2, R> <<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(a, b, e, out, n); } template <typename T1, typename T2, typename R> void RunWithBroadcast( const T1* a, const T2* b, R* out, size_t pre, size_t n, CUDAContext* context) { PowBroadcastKernel<T1, T2, R> <<<CAFFE_GET_BLOCKS(pre * n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(a, b, out, pre, n); } template <typename T1, typename T2, typename R> void RunWithBroadcast2( const T1* a, const T2* b, R* out, size_t pre, size_t n, size_t post, CUDAContext* context) { PowBroadcast2Kernel<T1, T2, R> <<<CAFFE_GET_BLOCKS(pre * n * post), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(a, b, out, pre, n, post); } }; REGISTER_CUDA_OPERATOR( Pow, PowOp< TensorTypes<float> /*NumericTypes*/, CUDAContext, CudaPowFunctor, SameTypeAsInput>) } // namespace caffe2
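// A short sketch of the grid-stride loop that CUDA_1D_KERNEL_LOOP expands to in the
// kernels above, written as a standalone float kernel (caffe2's macros, CUDAContext
// and the launch helpers are not available here and are treated as assumptions).
__global__ void pow_gridstride(const float *a, const float *b, float *out, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        out[i] = powf(a[i], b[i]);  // elementwise pow, as CUDA_POW does for float
    }
}
// Example launch: pow_gridstride<<<num_blocks, 128, 0, stream>>>(a, b, out, n);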
fa1f78eb8bc06d6d3fd56b9f86431d9166199516.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> __global__ void reduce0(int *d_in, int *d_out){ extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = d_in[i]; __syncthreads(); for (unsigned int s=1; s<blockDim.x; s*=2){ if (tid % (2*s) == 0){ sdata[tid] += sdata[tid+s]; } __syncthreads(); } if (tid == 0){ d_out[blockIdx.x] = sdata[0]; } } int reduce0(int n, int numThread, int *d_in){ int ret = 0; int *d_out; hipMallocManaged(&d_out, n*sizeof(int)); int blockSize = numThread; if (n < blockSize){ blockSize = n; } int numBlocks = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( reduce0), dim3(numBlocks), dim3(blockSize), blockSize*sizeof(int), 0, d_in, d_out); hipDeviceSynchronize(); // std::cout << "Reduce 0: " << d_out[0] << std::endl; if (numBlocks > 1){ ret += reduce0(numBlocks, numThread, d_out); } else { ret += d_out[0]; } hipFree(d_out); return ret; } __global__ void reduce1(int *d_in, int *d_out){ extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = d_in[i]; __syncthreads(); for (unsigned int s=1; s<blockDim.x; s*=2){ int index = 2 * s * tid; if (index < blockDim.x){ sdata[index] += sdata[index+s]; } __syncthreads(); } if (tid == 0){ d_out[blockIdx.x] = sdata[0]; } } int reduce1(int n, int numThread, int *d_in){ int ret = 0; int *d_out; hipMallocManaged(&d_out, n*sizeof(int)); int blockSize = numThread; if (n < blockSize){ blockSize = n; } int numBlocks = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( reduce1), dim3(numBlocks), dim3(blockSize), blockSize*sizeof(int), 0, d_in, d_out); hipDeviceSynchronize(); // std::cout << "Reduce 0: " << d_out[0] << std::endl; if (numBlocks > 1){ ret += reduce1(numBlocks, numThread, d_out); } else { ret += d_out[0]; } hipFree(d_out); return ret; } __global__ void reduce2(int *d_in, int *d_out){ extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = d_in[i]; __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1){ if (tid < s){ sdata[tid] += sdata[tid+s]; } __syncthreads(); } if (tid == 0){ d_out[blockIdx.x] = sdata[0]; } } int reduce2(int n, int numThread, int *d_in){ int ret = 0; int *d_out; hipMallocManaged(&d_out, n*sizeof(int)); int blockSize = numThread; if (n < blockSize){ blockSize = n; } int numBlocks = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( reduce2), dim3(numBlocks), dim3(blockSize), blockSize*sizeof(int), 0, d_in, d_out); hipDeviceSynchronize(); // std::cout << "Reduce 0: " << d_out[0] << std::endl; if (numBlocks > 1){ ret += reduce2(numBlocks, numThread, d_out); } else { ret += d_out[0]; } hipFree(d_out); return ret; } __global__ void reduce3(int *d_in, int *d_out){ extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x*2) + threadIdx.x; sdata[tid] = d_in[i] + d_in[i+blockDim.x]; __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1){ if (tid < s){ sdata[tid] += sdata[tid+s]; } __syncthreads(); } if (tid == 0){ d_out[blockIdx.x] = sdata[0]; } } int reduce3(int n, int numThread, int *d_in){ int ret = 0; int *d_out; hipMallocManaged(&d_out, n*sizeof(int)); int blockSize = numThread; if (n < blockSize){ blockSize = n; } int numBlocks = (n + blockSize - 1) / blockSize; if (numBlocks > 1){ hipLaunchKernelGGL(( reduce3), dim3(numBlocks/2), 
dim3(blockSize), blockSize*sizeof(int), 0, d_in, d_out); } else { hipLaunchKernelGGL(( reduce3), dim3(numBlocks), dim3(blockSize/2), (blockSize/2)*sizeof(int), 0, d_in, d_out); } hipDeviceSynchronize(); // std::cout << "Reduce 0: " << d_out[0] << std::endl; if (numBlocks > 2){ ret += reduce3(numBlocks, numThread, d_out); } else { ret += d_out[0]; } hipFree(d_out); return ret; } int main(){ int N = 1<<26; int blockSize = 512; int *d_in; hipMallocManaged(&d_in, N*sizeof(int)); for (int i=0; i<N; i++){ d_in[i] = 10; } std::cout << "Reduce 0: " << reduce0(N, blockSize, d_in) << std::endl; std::cout << "Reduce 1: " << reduce1(N, blockSize, d_in) << std::endl; std::cout << "Reduce 2: " << reduce2(N, blockSize, d_in) << std::endl; std::cout << "Reduce 3: " << reduce3(N, blockSize, d_in) << std::endl; /* GPU activities: 46.31% 22.244ms 3 7.4145ms 4.5440us 22.190ms reduce0(int*, int*) 24.47% 11.752ms 3 3.9173ms 3.4880us 11.721ms reduce1(int*, int*) 19.71% 9.4677ms 3 3.1559ms 3.2640us 9.4417ms reduce2(int*, int*) 9.51% 4.5690ms 3 1.5230ms 3.2000us 4.5509ms reduce3(int*, int*) */ hipFree(d_in); return 0; }
fa1f78eb8bc06d6d3fd56b9f86431d9166199516.cu
#include <iostream> #include <math.h> __global__ void reduce0(int *d_in, int *d_out){ extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = d_in[i]; __syncthreads(); for (unsigned int s=1; s<blockDim.x; s*=2){ if (tid % (2*s) == 0){ sdata[tid] += sdata[tid+s]; } __syncthreads(); } if (tid == 0){ d_out[blockIdx.x] = sdata[0]; } } int reduce0(int n, int numThread, int *d_in){ int ret = 0; int *d_out; cudaMallocManaged(&d_out, n*sizeof(int)); int blockSize = numThread; if (n < blockSize){ blockSize = n; } int numBlocks = (n + blockSize - 1) / blockSize; reduce0<<<numBlocks, blockSize, blockSize*sizeof(int)>>>(d_in, d_out); cudaDeviceSynchronize(); // std::cout << "Reduce 0: " << d_out[0] << std::endl; if (numBlocks > 1){ ret += reduce0(numBlocks, numThread, d_out); } else { ret += d_out[0]; } cudaFree(d_out); return ret; } __global__ void reduce1(int *d_in, int *d_out){ extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = d_in[i]; __syncthreads(); for (unsigned int s=1; s<blockDim.x; s*=2){ int index = 2 * s * tid; if (index < blockDim.x){ sdata[index] += sdata[index+s]; } __syncthreads(); } if (tid == 0){ d_out[blockIdx.x] = sdata[0]; } } int reduce1(int n, int numThread, int *d_in){ int ret = 0; int *d_out; cudaMallocManaged(&d_out, n*sizeof(int)); int blockSize = numThread; if (n < blockSize){ blockSize = n; } int numBlocks = (n + blockSize - 1) / blockSize; reduce1<<<numBlocks, blockSize, blockSize*sizeof(int)>>>(d_in, d_out); cudaDeviceSynchronize(); // std::cout << "Reduce 0: " << d_out[0] << std::endl; if (numBlocks > 1){ ret += reduce1(numBlocks, numThread, d_out); } else { ret += d_out[0]; } cudaFree(d_out); return ret; } __global__ void reduce2(int *d_in, int *d_out){ extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = d_in[i]; __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1){ if (tid < s){ sdata[tid] += sdata[tid+s]; } __syncthreads(); } if (tid == 0){ d_out[blockIdx.x] = sdata[0]; } } int reduce2(int n, int numThread, int *d_in){ int ret = 0; int *d_out; cudaMallocManaged(&d_out, n*sizeof(int)); int blockSize = numThread; if (n < blockSize){ blockSize = n; } int numBlocks = (n + blockSize - 1) / blockSize; reduce2<<<numBlocks, blockSize, blockSize*sizeof(int)>>>(d_in, d_out); cudaDeviceSynchronize(); // std::cout << "Reduce 0: " << d_out[0] << std::endl; if (numBlocks > 1){ ret += reduce2(numBlocks, numThread, d_out); } else { ret += d_out[0]; } cudaFree(d_out); return ret; } __global__ void reduce3(int *d_in, int *d_out){ extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x*2) + threadIdx.x; sdata[tid] = d_in[i] + d_in[i+blockDim.x]; __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1){ if (tid < s){ sdata[tid] += sdata[tid+s]; } __syncthreads(); } if (tid == 0){ d_out[blockIdx.x] = sdata[0]; } } int reduce3(int n, int numThread, int *d_in){ int ret = 0; int *d_out; cudaMallocManaged(&d_out, n*sizeof(int)); int blockSize = numThread; if (n < blockSize){ blockSize = n; } int numBlocks = (n + blockSize - 1) / blockSize; if (numBlocks > 1){ reduce3<<<numBlocks/2, blockSize, blockSize*sizeof(int)>>>(d_in, d_out); } else { reduce3<<<numBlocks, blockSize/2, (blockSize/2)*sizeof(int)>>>(d_in, d_out); } cudaDeviceSynchronize(); // std::cout << "Reduce 0: " << d_out[0] << 
std::endl; if (numBlocks > 2){ ret += reduce3(numBlocks, numThread, d_out); } else { ret += d_out[0]; } cudaFree(d_out); return ret; } int main(){ int N = 1<<26; int blockSize = 512; int *d_in; cudaMallocManaged(&d_in, N*sizeof(int)); for (int i=0; i<N; i++){ d_in[i] = 10; } std::cout << "Reduce 0: " << reduce0(N, blockSize, d_in) << std::endl; std::cout << "Reduce 1: " << reduce1(N, blockSize, d_in) << std::endl; std::cout << "Reduce 2: " << reduce2(N, blockSize, d_in) << std::endl; std::cout << "Reduce 3: " << reduce3(N, blockSize, d_in) << std::endl; /* GPU activities: 46.31% 22.244ms 3 7.4145ms 4.5440us 22.190ms reduce0(int*, int*) 24.47% 11.752ms 3 3.9173ms 3.4880us 11.721ms reduce1(int*, int*) 19.71% 9.4677ms 3 3.1559ms 3.2640us 9.4417ms reduce2(int*, int*) 9.51% 4.5690ms 3 1.5230ms 3.2000us 4.5509ms reduce3(int*, int*) */ cudaFree(d_in); return 0; }
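// A possible next optimisation step beyond reduce3 above (not part of the original
// file): finish each block with warp shuffles instead of further shared-memory
// iterations. Sketch only; the block size is assumed to be a multiple of 32 and
// d_out must be zeroed before launch.
__inline__ __device__ int warp_reduce_sum(int val) {
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);  // lane i += lane i+offset
    return val;
}

__global__ void reduce_warp(const int *d_in, int *d_out, int n) {
    int sum = 0;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        sum += d_in[i];                        // grid-stride accumulation
    __shared__ int warp_sums[32];
    int lane = threadIdx.x & 31;
    int wid = threadIdx.x >> 5;
    sum = warp_reduce_sum(sum);                // reduce within each warp
    if (lane == 0) warp_sums[wid] = sum;       // one partial sum per warp
    __syncthreads();
    if (wid == 0) {
        sum = (lane < blockDim.x / 32) ? warp_sums[lane] : 0;
        sum = warp_reduce_sum(sum);            // reduce the per-warp partials
        if (lane == 0) atomicAdd(d_out, sum);  // single pass: accumulate block totals
    }
}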
464c524abb05d706a979a715a7340b8af7c8a8a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "RiemannFitOnGPU.h" #include "CUDACore/device_unique_ptr.h" void HelixFitOnGPU::launchRiemannKernels(HitsView const *hv, uint32_t nhits, uint32_t maxNumberOfTuples, hipStream_t stream) { assert(tuples_d); auto blockSize = 64; auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize; // Fit internals auto hitsGPU_ = cms::hip::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix3xNd<4>) / sizeof(double), stream); auto hits_geGPU_ = cms::hip::make_device_unique<float[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix6x4f) / sizeof(float), stream); auto fast_fit_resultsGPU_ = cms::hip::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Vector4d) / sizeof(double), stream); auto circle_fit_resultsGPU_holder = cms::hip::make_device_unique<char[]>(maxNumberOfConcurrentFits_ * sizeof(Rfit::circle_fit), stream); Rfit::circle_fit *circle_fit_resultsGPU_ = (Rfit::circle_fit *)(circle_fit_resultsGPU_holder.get()); for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) { // triplets hipLaunchKernelGGL(( kernelFastFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tuples_d, tupleMultiplicity_d, 3, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelCircleFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tupleMultiplicity_d, 3, bField_, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelLineFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tupleMultiplicity_d, 3, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); // quads hipLaunchKernelGGL(( kernelFastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_d, tupleMultiplicity_d, 4, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelCircleFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d, 4, bField_, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelLineFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d, 4, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); if (fit5as4_) { // penta hipLaunchKernelGGL(( kernelFastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_d, tupleMultiplicity_d, 5, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelCircleFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d, 5, bField_, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelLineFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d, 5, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); } else { // penta all 5 hipLaunchKernelGGL(( kernelFastFit<5>), 
dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_d, tupleMultiplicity_d, 5, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelCircleFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d, 5, bField_, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelLineFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d, 5, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); } } }
464c524abb05d706a979a715a7340b8af7c8a8a4.cu
#include "hip/hip_runtime.h" #include "RiemannFitOnGPU.h" #include "CUDACore/device_unique_ptr.h" void HelixFitOnGPU::launchRiemannKernels(HitsView const *hv, uint32_t nhits, uint32_t maxNumberOfTuples, hipStream_t stream) { assert(tuples_d); auto blockSize = 64; auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize; // Fit internals auto hitsGPU_ = cms::hip::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix3xNd<4>) / sizeof(double), stream); auto hits_geGPU_ = cms::hip::make_device_unique<float[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix6x4f) / sizeof(float), stream); auto fast_fit_resultsGPU_ = cms::hip::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Vector4d) / sizeof(double), stream); auto circle_fit_resultsGPU_holder = cms::hip::make_device_unique<char[]>(maxNumberOfConcurrentFits_ * sizeof(Rfit::circle_fit), stream); Rfit::circle_fit *circle_fit_resultsGPU_ = (Rfit::circle_fit *)(circle_fit_resultsGPU_holder.get()); for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) { // triplets kernelFastFit<3><<<numberOfBlocks, blockSize, 0, stream>>>( tuples_d, tupleMultiplicity_d, 3, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), offset); cudaCheck(hipGetLastError()); kernelCircleFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(tupleMultiplicity_d, 3, bField_, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); kernelLineFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(tupleMultiplicity_d, 3, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); // quads kernelFastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>( tuples_d, tupleMultiplicity_d, 4, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), offset); cudaCheck(hipGetLastError()); kernelCircleFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d, 4, bField_, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); kernelLineFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d, 4, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); if (fit5as4_) { // penta kernelFastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>( tuples_d, tupleMultiplicity_d, 5, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), offset); cudaCheck(hipGetLastError()); kernelCircleFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d, 5, bField_, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); kernelLineFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d, 5, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); } else { // penta all 5 kernelFastFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>( tuples_d, tupleMultiplicity_d, 5, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), offset); cudaCheck(hipGetLastError()); kernelCircleFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d, 5, bField_, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, 
offset); cudaCheck(hipGetLastError()); kernelLineFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d, 5, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); } } }
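// A hedged illustration (placeholder saxpy kernel, not from RiemannFitOnGPU) of the
// launch-syntax rewrite that is the only difference between the .hip and .cu files
// in this pair.
__global__ void saxpy(int n, float a, const float *x, float *y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}
// CUDA launch:
//   saxpy<<<blocks, threads, 0, stream>>>(n, a, x, y);
// Equivalent HIP launch as emitted by hipify:
//   hipLaunchKernelGGL(saxpy, dim3(blocks), dim3(threads), 0, stream, n, a, x, y);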
c466102c0132b055250310c8007a5e293bad7129.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void MatrixMulKernel(float *d_M, float *d_N, float *d_P,int width){ int Row = blockIdx.y*blockDim.y + threadIdx.y; int Col = blockIdx.x*blockDim.x + threadIdx.x; if ((Row < width)&&(Col < width)){ float Pvalue = 0; for (int i = 0; i < width; ++i){ Pvalue += d_M[Row*width+i]*d_N[i*width+Col]; } d_P[Row*width + Col] = Pvalue; } }
c466102c0132b055250310c8007a5e293bad7129.cu
#include "includes.h" __global__ void MatrixMulKernel(float *d_M, float *d_N, float *d_P,int width){ int Row = blockIdx.y*blockDim.y + threadIdx.y; int Col = blockIdx.x*blockDim.x + threadIdx.x; if ((Row < width)&&(Col < width)){ float Pvalue = 0; for (int i = 0; i < width; ++i){ Pvalue += d_M[Row*width+i]*d_N[i*width+Col]; } d_P[Row*width + Col] = Pvalue; } }
ec0c93e4c00ba0f8a4b6ebe9a5fc0109629a0e6f.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* Example of integrating CUDA functions into an existing * application / framework. * Host part of the device code. * Compiled with Cuda compiler. */ // System includes #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // helper functions and utilities to work with CUDA #include "helper_cuda.h" #include "helper_functions.h" #ifndef MAX #define MAX(a,b) (a > b ? a : b) #endif //////////////////////////////////////////////////////////////////////////////// // declaration, forward extern "C" void computeGold(char *reference, char *idata, const unsigned int len); extern "C" void computeGold2(int2 *reference, int2 *idata, const unsigned int len); /////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_odata memory to process (in and out) /////////////////////////////////////////////////////////////////////////////// __global__ void kernel(int *g_data) { // write data to global memory const unsigned int tid = threadIdx.x; int data = g_data[tid]; // use integer arithmetic to process all four bytes with one thread // this serializes the execution, but is the simplest solutions to avoid // bank conflicts for this very low number of threads // in general it is more efficient to process each byte by a separate thread, // to avoid bank conflicts the access pattern should be // g_data[4 * wtid + wid], where wtid is the thread id within the half warp // and wid is the warp id // see also the programming guide for a more in depth discussion. g_data[tid] = ((((data << 0) >> 24) - 10) << 24) | ((((data << 8) >> 24) - 10) << 16) | ((((data << 16) >> 24) - 10) << 8) | ((((data << 24) >> 24) - 10) << 0); } /////////////////////////////////////////////////////////////////////////////// //! Demonstration that int2 data can be used in the cpp code //! @param g_odata memory to process (in and out) /////////////////////////////////////////////////////////////////////////////// __global__ void kernel2(int2 *g_data) { // write data to global memory const unsigned int tid = threadIdx.x; int2 data = g_data[tid]; // use integer arithmetic to process all four bytes with one thread // this serializes the execution, but is the simplest solutions to avoid // bank conflicts for this very low number of threads // in general it is more efficient to process each byte by a separate thread, // to avoid bank conflicts the access pattern should be // g_data[4 * wtid + wid], where wtid is the thread id within the half warp // and wid is the warp id // see also the programming guide for a more in depth discussion. g_data[tid].x = data.x - data.y; } //////////////////////////////////////////////////////////////////////////////// //! Entry point for Cuda functionality on host side //! @param argc command line argument count //! 
@param argv command line arguments //! @param data data to process on the device //! @param len len of \a data //////////////////////////////////////////////////////////////////////////////// extern "C" bool runTest(const int argc, const char **argv, char *data, int2 *data_int2, unsigned int len) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s findCudaDevice(argc, (const char **)argv); const unsigned int num_threads = len / 4; assert(0 == (len % 4)); const unsigned int mem_size = sizeof(char) * len; const unsigned int mem_size_int2 = sizeof(int2) * len; // allocate device memory char *d_data; checkCudaErrors(hipMalloc((void **) &d_data, mem_size)); // copy host memory to device checkCudaErrors(hipMemcpy(d_data, data, mem_size, hipMemcpyHostToDevice)); // allocate device memory for int2 version int2 *d_data_int2; checkCudaErrors(hipMalloc((void **) &d_data_int2, mem_size_int2)); // copy host memory to device checkCudaErrors(hipMemcpy(d_data_int2, data_int2, mem_size_int2, hipMemcpyHostToDevice)); // setup execution parameters dim3 grid(1, 1, 1); dim3 threads(num_threads, 1, 1); dim3 threads2(len, 1, 1); // more threads needed fir separate int2 version // execute the kernel hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads) , 0, 0, (int *) d_data); hipLaunchKernelGGL(( kernel2), dim3(grid), dim3(threads2) , 0, 0, d_data_int2); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); // compute reference solutions char *reference = (char *) malloc(mem_size); computeGold(reference, data, len); int2 *reference2 = (int2 *) malloc(mem_size_int2); computeGold2(reference2, data_int2, len); // copy results from device to host checkCudaErrors(hipMemcpy(data, d_data, mem_size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(data_int2, d_data_int2, mem_size_int2, hipMemcpyDeviceToHost)); // check result bool success = true; for (unsigned int i = 0; i < len; i++) { if (reference[i] != data[i] || reference2[i].x != data_int2[i].x || reference2[i].y != data_int2[i].y) { success = false; } } // cleanup memory checkCudaErrors(hipFree(d_data)); checkCudaErrors(hipFree(d_data_int2)); free(reference); free(reference2); return success; }
ec0c93e4c00ba0f8a4b6ebe9a5fc0109629a0e6f.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* Example of integrating CUDA functions into an existing * application / framework. * Host part of the device code. * Compiled with Cuda compiler. */ // System includes #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // helper functions and utilities to work with CUDA #include "helper_cuda.h" #include "helper_functions.h" #ifndef MAX #define MAX(a,b) (a > b ? a : b) #endif //////////////////////////////////////////////////////////////////////////////// // declaration, forward extern "C" void computeGold(char *reference, char *idata, const unsigned int len); extern "C" void computeGold2(int2 *reference, int2 *idata, const unsigned int len); /////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_odata memory to process (in and out) /////////////////////////////////////////////////////////////////////////////// __global__ void kernel(int *g_data) { // write data to global memory const unsigned int tid = threadIdx.x; int data = g_data[tid]; // use integer arithmetic to process all four bytes with one thread // this serializes the execution, but is the simplest solutions to avoid // bank conflicts for this very low number of threads // in general it is more efficient to process each byte by a separate thread, // to avoid bank conflicts the access pattern should be // g_data[4 * wtid + wid], where wtid is the thread id within the half warp // and wid is the warp id // see also the programming guide for a more in depth discussion. g_data[tid] = ((((data << 0) >> 24) - 10) << 24) | ((((data << 8) >> 24) - 10) << 16) | ((((data << 16) >> 24) - 10) << 8) | ((((data << 24) >> 24) - 10) << 0); } /////////////////////////////////////////////////////////////////////////////// //! Demonstration that int2 data can be used in the cpp code //! @param g_odata memory to process (in and out) /////////////////////////////////////////////////////////////////////////////// __global__ void kernel2(int2 *g_data) { // write data to global memory const unsigned int tid = threadIdx.x; int2 data = g_data[tid]; // use integer arithmetic to process all four bytes with one thread // this serializes the execution, but is the simplest solutions to avoid // bank conflicts for this very low number of threads // in general it is more efficient to process each byte by a separate thread, // to avoid bank conflicts the access pattern should be // g_data[4 * wtid + wid], where wtid is the thread id within the half warp // and wid is the warp id // see also the programming guide for a more in depth discussion. g_data[tid].x = data.x - data.y; } //////////////////////////////////////////////////////////////////////////////// //! Entry point for Cuda functionality on host side //! @param argc command line argument count //! @param argv command line arguments //! @param data data to process on the device //! 
@param len len of \a data //////////////////////////////////////////////////////////////////////////////// extern "C" bool runTest(const int argc, const char **argv, char *data, int2 *data_int2, unsigned int len) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s findCudaDevice(argc, (const char **)argv); const unsigned int num_threads = len / 4; assert(0 == (len % 4)); const unsigned int mem_size = sizeof(char) * len; const unsigned int mem_size_int2 = sizeof(int2) * len; // allocate device memory char *d_data; checkCudaErrors(cudaMalloc((void **) &d_data, mem_size)); // copy host memory to device checkCudaErrors(cudaMemcpy(d_data, data, mem_size, cudaMemcpyHostToDevice)); // allocate device memory for int2 version int2 *d_data_int2; checkCudaErrors(cudaMalloc((void **) &d_data_int2, mem_size_int2)); // copy host memory to device checkCudaErrors(cudaMemcpy(d_data_int2, data_int2, mem_size_int2, cudaMemcpyHostToDevice)); // setup execution parameters dim3 grid(1, 1, 1); dim3 threads(num_threads, 1, 1); dim3 threads2(len, 1, 1); // more threads needed fir separate int2 version // execute the kernel kernel<<< grid, threads >>>((int *) d_data); kernel2<<< grid, threads2 >>>(d_data_int2); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); // compute reference solutions char *reference = (char *) malloc(mem_size); computeGold(reference, data, len); int2 *reference2 = (int2 *) malloc(mem_size_int2); computeGold2(reference2, data_int2, len); // copy results from device to host checkCudaErrors(cudaMemcpy(data, d_data, mem_size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(data_int2, d_data_int2, mem_size_int2, cudaMemcpyDeviceToHost)); // check result bool success = true; for (unsigned int i = 0; i < len; i++) { if (reference[i] != data[i] || reference2[i].x != data_int2[i].x || reference2[i].y != data_int2[i].y) { success = false; } } // cleanup memory checkCudaErrors(cudaFree(d_data)); checkCudaErrors(cudaFree(d_data_int2)); free(reference); free(reference2); return success; }
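// ---------------------------------------------------------------------------
// Note on the sample above: the comment inside kernel() describes a
// byte-per-thread alternative to the shifted-int arithmetic. A minimal sketch
// of that variant follows, assuming the same "subtract 10 from every byte"
// semantics as the original kernel; the kernel name and launch shown here are
// illustrative and are not part of the original sample.
__global__ void kernel_byte_per_thread(char *g_data)
{
    // one thread per byte instead of one thread per 4-byte word
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    g_data[tid] = g_data[tid] - 10;
}
// Possible launch for a buffer of len bytes (len assumed divisible by 256):
//   kernel_byte_per_thread<<<len / 256, 256>>>((char *) d_data);
// ---------------------------------------------------------------------------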
77aa7a3b5f278fd532d98aec1872aa5e1bd050b5.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <float.h> #include "Population.h" #include "PopulationKernels.cuh" #include "Utility.h" /** * Skales a matrix in the size of the population filled with random values towards the bounds * of the solution. * * @param solutions The solutions/particles the populations is made of. * @param solutionsPitch The pitch indicating how the matrix of solutions is aligned. * @param bounds The bounds that limit the solutions space. * @param boundsPitch The pitch indicating how the matrix of bounds is aligned. * @param dimensions The number of dimensions of the problem. * @param numberSolutions The number of particles in the population. */ void AdjustRandomMatrixToBounds(double *solutions, size_t solutionsPitch, double *bounds, size_t boundsPitch, int dimensions, int numberSolutions) { hipLaunchKernelGGL(( AdjustMatrixToBoundsKernel) , dim3(numberSolutions), dim3(dimensions), 0, 0, solutions, solutionsPitch, bounds, boundsPitch); CUDA_METHOD_CALL(hipGetLastError()); CUDA_METHOD_CALL(hipDeviceSynchronize()); } /** * Calculates the new function costs for all particles/solutions of the population. /PARALLEL/ * @param population The populations whose fuction costs will be evaluated. * @param parameters The parameters that describe the input given by the user to unfluence the * simulation. */ void EvaluatePopulationCost(Population *population, Parameters *parameters, bool initialEvaluation){ hipLaunchKernelGGL(( EvaluatePopulationCostKernel) , dim3(1), dim3(parameters->numberSolutions), 0, 0, population->dev_solutions, population->solutionsPitch, population->dev_costs, parameters->numberSolutions, parameters->dimensions, parameters->problemId); CUDA_METHOD_CALL(hipGetLastError()); CUDA_METHOD_CALL(hipDeviceSynchronize()); if (initialEvaluation){ CUDA_METHOD_CALL(hipMemcpy(population->dev_pBestCosts, population->dev_costs, parameters->numberSolutions*sizeof(double), hipMemcpyDeviceToDevice)); CUDA_METHOD_CALL(hipGetLastError()); CUDA_METHOD_CALL(hipDeviceSynchronize()); } } /** * Cleans up the memory used to store the population. /SERIAL/ * @param population The populations that will be cleaned up. */ void FreePopulation(Population *population) { CUDA_METHOD_CALL(hipFree(population->dev_bestCosts)); CUDA_METHOD_CALL(hipFree(population->dev_bestSolutions)); CUDA_METHOD_CALL(hipFree(population->dev_costs)); CUDA_METHOD_CALL(hipFree(population->dev_gBestCost)); CUDA_METHOD_CALL(hipFree(population->dev_gBestSolution)); CUDA_METHOD_CALL(hipFree(population->dev_pBestCosts)); CUDA_METHOD_CALL(hipFree(population->dev_pBestSolutions)); CUDA_METHOD_CALL(hipFree(population->dev_solutions)); CUDA_METHOD_CALL(hipFree(population->dev_velocity)); } /** * Creates and Initializes a new random population. /SERIAL/ * @param parameters The parameters that describe the input given by the user to unfluence the * simulation. * @return The initialized population. */ Population *GeneratePopulation(Parameters* parameter) { Population *population = (Population*)calloc(1, sizeof(Population)); //! 
create solutions an fill them with random start values and repair population to make sure it is within its bounds population->solutionsPitch = CreateRandomDoubleMatrix(&(population->dev_solutions), parameter->numberSolutions, parameter->dimensions); AdjustMatrixToBoundsKernel << <parameter->numberSolutions, parameter->dimensions >> >(population->dev_solutions, population->solutionsPitch, parameter->dev_bounds, parameter->boundsPitch); CUDA_METHOD_CALL(hipGetLastError()); CUDA_METHOD_CALL(hipDeviceSynchronize()); CUDA_METHOD_CALL(hipMallocPitch<double>(&(population->dev_bestSolutions), &(population->bestSolutionsPitch), parameter->dimensions*sizeof(double), parameter->iterations)); //! create the personal best for all solutions and initialize it with the start position of all solutions CUDA_METHOD_CALL(hipMallocPitch<double>(&(population->dev_pBestSolutions), &(population->pBestSolutionsPitch), parameter->dimensions*sizeof(double), parameter->numberSolutions)); CUDA_METHOD_CALL(hipMemcpy2D( population->dev_pBestSolutions, population->pBestSolutionsPitch, population->dev_solutions, population->solutionsPitch, parameter->dimensions*sizeof(double), parameter->numberSolutions, hipMemcpyDeviceToDevice)); CUDA_METHOD_CALL(hipMallocPitch<double>(&(population->dev_velocity), &(population->velocityPitch), parameter->dimensions*sizeof(double), parameter->numberSolutions)); CUDA_METHOD_CALL(hipMemset2D(population->dev_velocity, population->velocityPitch, 0, parameter->dimensions*sizeof(double), parameter->numberSolutions)); CUDA_METHOD_CALL(hipMalloc<double>(&(population->dev_gBestSolution), parameter->dimensions*sizeof(double))); CUDA_METHOD_CALL(hipMalloc<double>(&(population->dev_costs), parameter->numberSolutions*sizeof(double))); CUDA_METHOD_CALL(hipMalloc<double>(&(population->dev_bestCosts), parameter->iterations*sizeof(double))); CUDA_METHOD_CALL(hipMalloc<double>(&(population->dev_pBestCosts), parameter->numberSolutions*sizeof(double))); CUDA_METHOD_CALL(hipMemcpy(population->dev_pBestCosts, population->dev_costs, parameter->numberSolutions*sizeof(double), hipMemcpyDeviceToDevice)); CUDA_METHOD_CALL(hipMalloc<double>(&(population->dev_gBestCost), sizeof(double))); CUDA_METHOD_CALL(hipMemset(population->dev_gBestCost, DBL_MAX, sizeof(double))); return population; } /** * Copys the array of best solutions over time to the host and prints them. * @param population The populations whose best solutions will be printed. * @param parameters The parameters that describe the input given by the user to unfluence the * simulation. 
*/ void PrintBestSolutions(Population* population, Parameters *parameters, FILE *out) { FILE * file = fopen("PSO.csv", "w"); size_t pitch = population->bestSolutionsPitch; double *bestSolutions = (double*)malloc(sizeof(double)*(parameters->iterations)*(parameters->dimensions)); double *bestCosts = (double*)malloc(sizeof(double)*parameters->iterations); int i, j; CUDA_METHOD_CALL(hipMemcpy2D( bestSolutions, parameters->dimensions*sizeof(double), population->dev_bestSolutions, pitch, parameters->dimensions*sizeof(double), parameters->iterations, hipMemcpyDeviceToHost)); CUDA_METHOD_CALL(hipMemcpy(bestCosts, population->dev_bestCosts, parameters->iterations*sizeof(double), hipMemcpyDeviceToHost)); for (i = 0; i < parameters->iterations; i++){ fprintf(file, "%lf;", bestCosts[i]); for (j = 0; j < parameters->dimensions; j++){ double value = bestSolutions[i * parameters->dimensions + j]; fprintf(file, "%lf;", value); } fprintf(file, "\n"); } fclose(file); } /** * Repairs the population and makes sure that the solution does not leav the bounds specified * by the user. * @param population The populations that will be repaired. * @param parameters The parameters that describe the input given by the user to unfluence the * simulation. */ void RepairPopulation(Population* population, Parameters *parameters) { hipLaunchKernelGGL(( RepairPopulationKernel) , dim3(parameters->numberSolutions), dim3(parameters->dimensions), 0, 0, population->dev_solutions, population->solutionsPitch, parameters->dev_bounds, parameters->boundsPitch); CUDA_METHOD_CALL(hipGetLastError()); CUDA_METHOD_CALL(hipDeviceSynchronize()); }
77aa7a3b5f278fd532d98aec1872aa5e1bd050b5.cu
#include <stdio.h> #include <stdlib.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <float.h> #include "Population.h" #include "PopulationKernels.cuh" #include "Utility.h" /** * Skales a matrix in the size of the population filled with random values towards the bounds * of the solution. * * @param solutions The solutions/particles the populations is made of. * @param solutionsPitch The pitch indicating how the matrix of solutions is aligned. * @param bounds The bounds that limit the solutions space. * @param boundsPitch The pitch indicating how the matrix of bounds is aligned. * @param dimensions The number of dimensions of the problem. * @param numberSolutions The number of particles in the population. */ void AdjustRandomMatrixToBounds(double *solutions, size_t solutionsPitch, double *bounds, size_t boundsPitch, int dimensions, int numberSolutions) { AdjustMatrixToBoundsKernel <<<numberSolutions, dimensions>>>(solutions, solutionsPitch, bounds, boundsPitch); CUDA_METHOD_CALL(cudaGetLastError()); CUDA_METHOD_CALL(cudaDeviceSynchronize()); } /** * Calculates the new function costs for all particles/solutions of the population. /PARALLEL/ * @param population The populations whose fuction costs will be evaluated. * @param parameters The parameters that describe the input given by the user to unfluence the * simulation. */ void EvaluatePopulationCost(Population *population, Parameters *parameters, bool initialEvaluation){ EvaluatePopulationCostKernel <<<1, parameters->numberSolutions>>>( population->dev_solutions, population->solutionsPitch, population->dev_costs, parameters->numberSolutions, parameters->dimensions, parameters->problemId); CUDA_METHOD_CALL(cudaGetLastError()); CUDA_METHOD_CALL(cudaDeviceSynchronize()); if (initialEvaluation){ CUDA_METHOD_CALL(cudaMemcpy(population->dev_pBestCosts, population->dev_costs, parameters->numberSolutions*sizeof(double), cudaMemcpyDeviceToDevice)); CUDA_METHOD_CALL(cudaGetLastError()); CUDA_METHOD_CALL(cudaDeviceSynchronize()); } } /** * Cleans up the memory used to store the population. /SERIAL/ * @param population The populations that will be cleaned up. */ void FreePopulation(Population *population) { CUDA_METHOD_CALL(cudaFree(population->dev_bestCosts)); CUDA_METHOD_CALL(cudaFree(population->dev_bestSolutions)); CUDA_METHOD_CALL(cudaFree(population->dev_costs)); CUDA_METHOD_CALL(cudaFree(population->dev_gBestCost)); CUDA_METHOD_CALL(cudaFree(population->dev_gBestSolution)); CUDA_METHOD_CALL(cudaFree(population->dev_pBestCosts)); CUDA_METHOD_CALL(cudaFree(population->dev_pBestSolutions)); CUDA_METHOD_CALL(cudaFree(population->dev_solutions)); CUDA_METHOD_CALL(cudaFree(population->dev_velocity)); } /** * Creates and Initializes a new random population. /SERIAL/ * @param parameters The parameters that describe the input given by the user to unfluence the * simulation. * @return The initialized population. */ Population *GeneratePopulation(Parameters* parameter) { Population *population = (Population*)calloc(1, sizeof(Population)); //! 
create solutions an fill them with random start values and repair population to make sure it is within its bounds population->solutionsPitch = CreateRandomDoubleMatrix(&(population->dev_solutions), parameter->numberSolutions, parameter->dimensions); AdjustMatrixToBoundsKernel << <parameter->numberSolutions, parameter->dimensions >> >(population->dev_solutions, population->solutionsPitch, parameter->dev_bounds, parameter->boundsPitch); CUDA_METHOD_CALL(cudaGetLastError()); CUDA_METHOD_CALL(cudaDeviceSynchronize()); CUDA_METHOD_CALL(cudaMallocPitch<double>(&(population->dev_bestSolutions), &(population->bestSolutionsPitch), parameter->dimensions*sizeof(double), parameter->iterations)); //! create the personal best for all solutions and initialize it with the start position of all solutions CUDA_METHOD_CALL(cudaMallocPitch<double>(&(population->dev_pBestSolutions), &(population->pBestSolutionsPitch), parameter->dimensions*sizeof(double), parameter->numberSolutions)); CUDA_METHOD_CALL(cudaMemcpy2D( population->dev_pBestSolutions, population->pBestSolutionsPitch, population->dev_solutions, population->solutionsPitch, parameter->dimensions*sizeof(double), parameter->numberSolutions, cudaMemcpyDeviceToDevice)); CUDA_METHOD_CALL(cudaMallocPitch<double>(&(population->dev_velocity), &(population->velocityPitch), parameter->dimensions*sizeof(double), parameter->numberSolutions)); CUDA_METHOD_CALL(cudaMemset2D(population->dev_velocity, population->velocityPitch, 0, parameter->dimensions*sizeof(double), parameter->numberSolutions)); CUDA_METHOD_CALL(cudaMalloc<double>(&(population->dev_gBestSolution), parameter->dimensions*sizeof(double))); CUDA_METHOD_CALL(cudaMalloc<double>(&(population->dev_costs), parameter->numberSolutions*sizeof(double))); CUDA_METHOD_CALL(cudaMalloc<double>(&(population->dev_bestCosts), parameter->iterations*sizeof(double))); CUDA_METHOD_CALL(cudaMalloc<double>(&(population->dev_pBestCosts), parameter->numberSolutions*sizeof(double))); CUDA_METHOD_CALL(cudaMemcpy(population->dev_pBestCosts, population->dev_costs, parameter->numberSolutions*sizeof(double), cudaMemcpyDeviceToDevice)); CUDA_METHOD_CALL(cudaMalloc<double>(&(population->dev_gBestCost), sizeof(double))); CUDA_METHOD_CALL(cudaMemset(population->dev_gBestCost, DBL_MAX, sizeof(double))); return population; } /** * Copys the array of best solutions over time to the host and prints them. * @param population The populations whose best solutions will be printed. * @param parameters The parameters that describe the input given by the user to unfluence the * simulation. 
*/ void PrintBestSolutions(Population* population, Parameters *parameters, FILE *out) { FILE * file = fopen("PSO.csv", "w"); size_t pitch = population->bestSolutionsPitch; double *bestSolutions = (double*)malloc(sizeof(double)*(parameters->iterations)*(parameters->dimensions)); double *bestCosts = (double*)malloc(sizeof(double)*parameters->iterations); int i, j; CUDA_METHOD_CALL(cudaMemcpy2D( bestSolutions, parameters->dimensions*sizeof(double), population->dev_bestSolutions, pitch, parameters->dimensions*sizeof(double), parameters->iterations, cudaMemcpyDeviceToHost)); CUDA_METHOD_CALL(cudaMemcpy(bestCosts, population->dev_bestCosts, parameters->iterations*sizeof(double), cudaMemcpyDeviceToHost)); for (i = 0; i < parameters->iterations; i++){ fprintf(file, "%lf;", bestCosts[i]); for (j = 0; j < parameters->dimensions; j++){ double value = bestSolutions[i * parameters->dimensions + j]; fprintf(file, "%lf;", value); } fprintf(file, "\n"); } fclose(file); } /** * Repairs the population and makes sure that the solution does not leav the bounds specified * by the user. * @param population The populations that will be repaired. * @param parameters The parameters that describe the input given by the user to unfluence the * simulation. */ void RepairPopulation(Population* population, Parameters *parameters) { RepairPopulationKernel <<<parameters->numberSolutions, parameters->dimensions>>>( population->dev_solutions, population->solutionsPitch, parameters->dev_bounds, parameters->boundsPitch); CUDA_METHOD_CALL(cudaGetLastError()); CUDA_METHOD_CALL(cudaDeviceSynchronize()); }
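// ---------------------------------------------------------------------------
// Note on GeneratePopulation() above: cudaMemset(population->dev_gBestCost,
// DBL_MAX, sizeof(double)) sets individual bytes, so it does not initialize the
// global-best cost to DBL_MAX. A minimal sketch of an initialization that does,
// assuming dev_gBestCost should start at the worst possible cost; this is a
// suggested correction, not code from the original project:
//   double worst = DBL_MAX;
//   CUDA_METHOD_CALL(cudaMemcpy(population->dev_gBestCost, &worst,
//                               sizeof(double), cudaMemcpyHostToDevice));
// The same applies to the hipMemset call in the HIP copy of this file.
// ---------------------------------------------------------------------------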
1a214963c9afc803c1c841149e37099491d4b996.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include "colour-convert.h" // AM: empy kernel to fire up GPU __global__ void empyKernel(void) { } __global__ void rgb2yuvKernel(unsigned char *r, unsigned char *g, unsigned char *b, unsigned char *y, unsigned char *u, unsigned char *v) { unsigned char ny, cb, cr; // ny = (unsigned char)(__fadd_rn(__fadd_rn(__fmul_rn(0.299, r[blockIdx.x]), // __fmul_rn(0.587, g[blockIdx.x])), // __fmul_rn(0.114, b[blockIdx.x]))); // cb = (unsigned char)(__fadd_rn(__fadd_rn(__fadd_rn(__fmul_rn(-0.169, r[blockIdx.x]), // __fmul_rn(-0.331, g[blockIdx.x])), // __fmul_rn(0.499, b[blockIdx.x])), 128)); // cr = (unsigned char) (__fadd_rn(__fadd_rn(__fadd_rn(__fmul_rn(0.499, r[blockIdx.x]), // __fmul_rn(-0.418, g[blockIdx.x])), // __fmul_rn(-0.0813, b[blockIdx.x])), 128)); ny = (unsigned char)( 0.299*r[blockIdx.x] + 0.587*g[blockIdx.x] + 0.114*b[blockIdx.x]); cb = (unsigned char)(-0.169*r[blockIdx.x] - 0.331*g[blockIdx.x] + 0.499*b[blockIdx.x] + 128); cr = (unsigned char)( 0.499*r[blockIdx.x] - 0.418*g[blockIdx.x] - 0.0813*b[blockIdx.x] + 128); y[blockIdx.x] = ny; u[blockIdx.x] = cb; v[blockIdx.x] = cr; } __global__ void yuv2rgbKernel(unsigned char *r, unsigned char *g, unsigned char *b, unsigned char *y, unsigned char *u, unsigned char *v) { int ny = (int)y[blockIdx.x]; int cb = (int)u[blockIdx.x] - 128; int cr = (int)v[blockIdx.x] - 128; int rt = (int)(ny + 1.402*cr); int gt = (int)(ny - 0.344*cb - 0.714*cr); int bt = (int)(ny + 1.772*cb); r[blockIdx.x] = (rt < 255) ? rt: 255; r[blockIdx.x] = (rt > 0) ? rt: 0; //g[blockIdx.x] = (int)( y[blockIdx.x] - 0.344*u[blockIdx.x] - 0.714*v[blockIdx.x]); g[blockIdx.x] = (gt < 255) ? gt: 255; g[blockIdx.x] = (gt > 0) ? gt : 0; //b[blockIdx.x] = (int)(y[blockIdx.x]+ 1.772*u[blockIdx.x]); b[blockIdx.x] = (bt < 255 )? bt: 255; b[blockIdx.x] = (bt > 0) ? 
bt : 0; } // AM copy image to device void copyToDevice(PPM_IMG img_in) { } void copyToHost(PPM_IMG img_in) { unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; // AM: Allocate memory for the PPM_IMG struct on the device int size = img_in.w * img_in.h * sizeof(unsigned char); hipMalloc((void **) &img_r_d, size); hipMalloc((void **) &img_g_d, size); hipMalloc((void **) &img_b_d, size); } YUV_IMG rgb2yuvGPU(PPM_IMG img_in) { YUV_IMG img_out; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); // AM: Allocate memory for the PPM_IMG & YUV_IMG on the device unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; unsigned char * img_y_d; unsigned char * img_u_d; unsigned char * img_v_d; int size = img_in.w * img_in.h * sizeof(unsigned char); hipMalloc((void **) &img_r_d, size); hipMalloc((void **) &img_g_d, size); hipMalloc((void **) &img_b_d, size); hipMalloc((void **) &img_y_d, size); hipMalloc((void **) &img_u_d, size); hipMalloc((void **) &img_v_d, size); // Copy PPM to device hipMemcpy(img_r_d, img_in.img_r, size, hipMemcpyHostToDevice); hipMemcpy(img_g_d, img_in.img_g, size, hipMemcpyHostToDevice); hipMemcpy(img_b_d, img_in.img_b, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( rgb2yuvKernel), dim3(img_in.w*img_in.h), dim3(1), 0, 0, img_r_d, img_g_d, img_b_d, img_y_d, img_u_d, img_v_d);//Launch the Kernel // Copy from device to host hipMemcpy(img_out.img_y, img_y_d, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_u, img_u_d, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_v, img_v_d, size, hipMemcpyDeviceToHost); return img_out; } PPM_IMG yuv2rgbGPU(YUV_IMG img_in) { PPM_IMG img_out; //Put you CUDA setup code here. 
img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); // AM: Allocate memory for the PPM_IMG & YUV_IMG on the device unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; unsigned char * img_y_d; unsigned char * img_u_d; unsigned char * img_v_d; int size = img_in.w * img_in.h * sizeof(unsigned char); hipMalloc((void **) &img_r_d, size); hipMalloc((void **) &img_g_d, size); hipMalloc((void **) &img_b_d, size); hipMalloc((void **) &img_y_d, size); hipMalloc((void **) &img_u_d, size); hipMalloc((void **) &img_v_d, size); // Copy YUV to device hipMemcpy(img_y_d, img_in.img_y, size, hipMemcpyHostToDevice); hipMemcpy(img_u_d, img_in.img_u, size, hipMemcpyHostToDevice); hipMemcpy(img_v_d, img_in.img_v, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( yuv2rgbKernel), dim3(img_in.w*img_in.h), dim3(1), 0, 0, img_r_d, img_g_d, img_b_d, img_y_d, img_u_d, img_v_d);//Launch the Kernel // Copy from device to host hipMemcpy(img_out.img_r, img_r_d, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_g, img_g_d, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_b, img_b_d, size, hipMemcpyDeviceToHost); return img_out; } //Convert RGB to YUV444, all components in [0, 255] YUV_IMG rgb2yuv(PPM_IMG img_in) { YUV_IMG img_out; int i;//, j; unsigned char r, g, b; unsigned char y, cb, cr; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); for(i = 0; i < img_out.w*img_out.h; i ++){ r = img_in.img_r[i]; g = img_in.img_g[i]; b = img_in.img_b[i]; y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b); cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128); cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128); img_out.img_y[i] = y; img_out.img_u[i] = cb; img_out.img_v[i] = cr; } return img_out; } unsigned char clip_rgb(int x) { if(x > 255) return 255; if(x < 0) return 0; return (unsigned char)x; } //Convert YUV to RGB, all components in [0, 255] PPM_IMG yuv2rgb(YUV_IMG img_in) { PPM_IMG img_out; int i; int rt,gt,bt; int y, cb, cr; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); for(i = 0; i < img_out.w*img_out.h; i ++){ y = (int)img_in.img_y[i]; cb = (int)img_in.img_u[i] - 128; cr = (int)img_in.img_v[i] - 128; rt = (int)( y + 1.402*cr); gt = (int)( y - 0.344*cb - 0.714*cr); bt = (int)( y + 1.772*cb); img_out.img_r[i] = clip_rgb(rt); img_out.img_g[i] = clip_rgb(gt); img_out.img_b[i] = clip_rgb(bt); } return img_out; }
1a214963c9afc803c1c841149e37099491d4b996.cu
#include <stdio.h> #include <string.h> #include <stdlib.h> #include "colour-convert.h" // AM: empy kernel to fire up GPU __global__ void empyKernel(void) { } __global__ void rgb2yuvKernel(unsigned char *r, unsigned char *g, unsigned char *b, unsigned char *y, unsigned char *u, unsigned char *v) { unsigned char ny, cb, cr; // ny = (unsigned char)(__fadd_rn(__fadd_rn(__fmul_rn(0.299, r[blockIdx.x]), // __fmul_rn(0.587, g[blockIdx.x])), // __fmul_rn(0.114, b[blockIdx.x]))); // cb = (unsigned char)(__fadd_rn(__fadd_rn(__fadd_rn(__fmul_rn(-0.169, r[blockIdx.x]), // __fmul_rn(-0.331, g[blockIdx.x])), // __fmul_rn(0.499, b[blockIdx.x])), 128)); // cr = (unsigned char) (__fadd_rn(__fadd_rn(__fadd_rn(__fmul_rn(0.499, r[blockIdx.x]), // __fmul_rn(-0.418, g[blockIdx.x])), // __fmul_rn(-0.0813, b[blockIdx.x])), 128)); ny = (unsigned char)( 0.299*r[blockIdx.x] + 0.587*g[blockIdx.x] + 0.114*b[blockIdx.x]); cb = (unsigned char)(-0.169*r[blockIdx.x] - 0.331*g[blockIdx.x] + 0.499*b[blockIdx.x] + 128); cr = (unsigned char)( 0.499*r[blockIdx.x] - 0.418*g[blockIdx.x] - 0.0813*b[blockIdx.x] + 128); y[blockIdx.x] = ny; u[blockIdx.x] = cb; v[blockIdx.x] = cr; } __global__ void yuv2rgbKernel(unsigned char *r, unsigned char *g, unsigned char *b, unsigned char *y, unsigned char *u, unsigned char *v) { int ny = (int)y[blockIdx.x]; int cb = (int)u[blockIdx.x] - 128; int cr = (int)v[blockIdx.x] - 128; int rt = (int)(ny + 1.402*cr); int gt = (int)(ny - 0.344*cb - 0.714*cr); int bt = (int)(ny + 1.772*cb); r[blockIdx.x] = (rt < 255) ? rt: 255; r[blockIdx.x] = (rt > 0) ? rt: 0; //g[blockIdx.x] = (int)( y[blockIdx.x] - 0.344*u[blockIdx.x] - 0.714*v[blockIdx.x]); g[blockIdx.x] = (gt < 255) ? gt: 255; g[blockIdx.x] = (gt > 0) ? gt : 0; //b[blockIdx.x] = (int)(y[blockIdx.x]+ 1.772*u[blockIdx.x]); b[blockIdx.x] = (bt < 255 )? bt: 255; b[blockIdx.x] = (bt > 0) ? 
bt : 0; } // AM copy image to device void copyToDevice(PPM_IMG img_in) { } void copyToHost(PPM_IMG img_in) { unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; // AM: Allocate memory for the PPM_IMG struct on the device int size = img_in.w * img_in.h * sizeof(unsigned char); cudaMalloc((void **) &img_r_d, size); cudaMalloc((void **) &img_g_d, size); cudaMalloc((void **) &img_b_d, size); } YUV_IMG rgb2yuvGPU(PPM_IMG img_in) { YUV_IMG img_out; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); // AM: Allocate memory for the PPM_IMG & YUV_IMG on the device unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; unsigned char * img_y_d; unsigned char * img_u_d; unsigned char * img_v_d; int size = img_in.w * img_in.h * sizeof(unsigned char); cudaMalloc((void **) &img_r_d, size); cudaMalloc((void **) &img_g_d, size); cudaMalloc((void **) &img_b_d, size); cudaMalloc((void **) &img_y_d, size); cudaMalloc((void **) &img_u_d, size); cudaMalloc((void **) &img_v_d, size); // Copy PPM to device cudaMemcpy(img_r_d, img_in.img_r, size, cudaMemcpyHostToDevice); cudaMemcpy(img_g_d, img_in.img_g, size, cudaMemcpyHostToDevice); cudaMemcpy(img_b_d, img_in.img_b, size, cudaMemcpyHostToDevice); rgb2yuvKernel<<<img_in.w*img_in.h, 1>>>(img_r_d, img_g_d, img_b_d, img_y_d, img_u_d, img_v_d);//Launch the Kernel // Copy from device to host cudaMemcpy(img_out.img_y, img_y_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_u, img_u_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_v, img_v_d, size, cudaMemcpyDeviceToHost); return img_out; } PPM_IMG yuv2rgbGPU(YUV_IMG img_in) { PPM_IMG img_out; //Put you CUDA setup code here. 
img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); // AM: Allocate memory for the PPM_IMG & YUV_IMG on the device unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; unsigned char * img_y_d; unsigned char * img_u_d; unsigned char * img_v_d; int size = img_in.w * img_in.h * sizeof(unsigned char); cudaMalloc((void **) &img_r_d, size); cudaMalloc((void **) &img_g_d, size); cudaMalloc((void **) &img_b_d, size); cudaMalloc((void **) &img_y_d, size); cudaMalloc((void **) &img_u_d, size); cudaMalloc((void **) &img_v_d, size); // Copy YUV to device cudaMemcpy(img_y_d, img_in.img_y, size, cudaMemcpyHostToDevice); cudaMemcpy(img_u_d, img_in.img_u, size, cudaMemcpyHostToDevice); cudaMemcpy(img_v_d, img_in.img_v, size, cudaMemcpyHostToDevice); yuv2rgbKernel<<<img_in.w*img_in.h, 1>>>(img_r_d, img_g_d, img_b_d, img_y_d, img_u_d, img_v_d);//Launch the Kernel // Copy from device to host cudaMemcpy(img_out.img_r, img_r_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_g, img_g_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_b, img_b_d, size, cudaMemcpyDeviceToHost); return img_out; } //Convert RGB to YUV444, all components in [0, 255] YUV_IMG rgb2yuv(PPM_IMG img_in) { YUV_IMG img_out; int i;//, j; unsigned char r, g, b; unsigned char y, cb, cr; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); for(i = 0; i < img_out.w*img_out.h; i ++){ r = img_in.img_r[i]; g = img_in.img_g[i]; b = img_in.img_b[i]; y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b); cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128); cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128); img_out.img_y[i] = y; img_out.img_u[i] = cb; img_out.img_v[i] = cr; } return img_out; } unsigned char clip_rgb(int x) { if(x > 255) return 255; if(x < 0) return 0; return (unsigned char)x; } //Convert YUV to RGB, all components in [0, 255] PPM_IMG yuv2rgb(YUV_IMG img_in) { PPM_IMG img_out; int i; int rt,gt,bt; int y, cb, cr; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); for(i = 0; i < img_out.w*img_out.h; i ++){ y = (int)img_in.img_y[i]; cb = (int)img_in.img_u[i] - 128; cr = (int)img_in.img_v[i] - 128; rt = (int)( y + 1.402*cr); gt = (int)( y - 0.344*cb - 0.714*cr); bt = (int)( y + 1.772*cb); img_out.img_r[i] = clip_rgb(rt); img_out.img_g[i] = clip_rgb(gt); img_out.img_b[i] = clip_rgb(bt); } return img_out; }
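// ---------------------------------------------------------------------------
// Note on yuv2rgbKernel() above (both the HIP and CUDA copies): each channel is
// written twice and the second store overwrites the first, so values above 255
// are never clamped before the implicit conversion to unsigned char. A minimal
// corrected sketch, mirroring the host-side clip_rgb(); the device helper name
// is illustrative and not part of the original file.
__device__ unsigned char clip_rgb_device(int x)
{
    if (x > 255) return 255;
    if (x < 0)   return 0;
    return (unsigned char)x;
}
// Inside the kernel the three stores would then read:
//   r[blockIdx.x] = clip_rgb_device(rt);
//   g[blockIdx.x] = clip_rgb_device(gt);
//   b[blockIdx.x] = clip_rgb_device(bt);
// ---------------------------------------------------------------------------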
422cdab9ac273254a2617aaff952fdfbdb8bd678.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "total_power.h" __global__ void total_power_kernel1(unsigned char * input, float * output, int start_t) { // Declare dynamic shared memory __shared__ float power[nblocks1]; // Get indicies int c = blockIdx.x; int f = blockIdx.y; int t = threadIdx.x; // Get internal index (internal to block) int sid = t; // Get absolute index int idx = f + 8*NF*c + 8*NF*NC*t; if (sid + start_t < NM*NT) { // Extract real and imaginary components; float real = (float)input[2*idx]; float imag = (float)input[2*idx + 1]; // Compute instantaneous power power[sid] = real*real + imag*imag; } else { power[sid] = 0.0; } // Complete power computation before moving on __syncthreads(); // Perform reduction for (int s = blockDim.x/2; s > 0; s>>=1) { if (sid < s) { power[sid] += power[sid + s]; } __syncthreads(); } // Save sum to output if (sid == 0) { output[f + 8*NF*c] = power[0]; } } __global__ void total_power_kernel2(float * input, float * output) { // Declare dynamic shared memory __shared__ float power[nblocks2]; // Get indicies int c = blockIdx.x; int f = blockIdx.y; int t = threadIdx.x; // Get internal index (internal to block) int sid = t; // Get absolute index int idx = f + 8*NF*c + 8*NF*NC*t; power[sid] = input[idx]; // Complete power computation before moving on __syncthreads(); // Perform reduction for (int s = blockDim.x/2; s > 0; s>>=1) { if (sid < s) { power[sid] += power[sid + s]; } __syncthreads(); } // Save sum to output if (sid == 0) { output[f + 8*NF*c] = power[0]; } } __global__ void total_power_kernel3(float * input, float * output) { // Declare dynamic shared memory __shared__ float power[pow2]; // Get indices int f = blockIdx.x; int c = threadIdx.x; // Get internal index (internal to block) int sid = c; // Get absolute index int idx = f + 8*NF*c; if (sid < NC) { // Copy input to shared memory power[sid] = input[idx]; } else { power[sid] = 0.0; } // Finish copy before proceeding __syncthreads(); // Perform reduction for (int s = blockDim.x/2; s > 0; s >>= 1) { if (sid < s) { power[sid] += power[sid + s]; } __syncthreads(); } // Save sum to input if (sid == 0) { output[f] = power[0]; } } static unsigned char * d_input; static float * d_output1; static float * d_output2; static float * d_output3; void initTotalPower() { hipMalloc((void **) &d_input, 8*NF*NC*NT*NM*sizeof(unsigned char)*2); hipMalloc((void **) &d_output1, 8*NF*NC*nblocks2*sizeof(float)); hipMalloc((void **) &d_output2, 8*NF*NC*sizeof(float)); hipMalloc((void **) &d_output3, 8*NF*sizeof(float)); } void getTotalPower(unsigned char * input, float * output) { hipMemcpy(d_input, input, 8*NF*NC*NT*NM*2*sizeof(unsigned char), hipMemcpyHostToDevice); /********************************************** * Reduce over 1024 time samples **********************************************/ dim3 gridSize1(NC,8*NF,1); dim3 blockSize1(nblocks1,1,1); hipStream_t s[nblocks2]; for (int i = 0; i < nblocks2; i++) { hipStreamCreate(&s[i]); } for (int i = 0; i < nblocks2; i++) { // int in_off = 8*NF*NC*nblocks1*i; // int out_off = 8*NF*NC*i; hipLaunchKernelGGL(( total_power_kernel1), dim3(gridSize1), dim3(blockSize1), 0, s[i], d_input+NA*NC*nblocks1*i, d_output1+NA*NC*i, nblocks1*i); } hipDeviceSynchronize(); hipError_t ret = hipGetLastError(); if (ret != hipSuccess) { printf("ERROR: total_power_kernel1 - %s\n", hipGetErrorString(ret)); } /********************************************** * Reduce over remaining time samples **********************************************/ 
dim3 gridSize2(NC,8*NF,1); dim3 blockSize2(nblocks2,1,1); hipLaunchKernelGGL(( total_power_kernel2), dim3(gridSize2), dim3(blockSize2), 0, 0, d_output1, d_output2); ret = hipGetLastError(); if (ret != hipSuccess) { printf("ERROR: total_power_kernel2 - %s\n", hipGetErrorString(ret)); } /********************************************** * Reduce over frequency channels **********************************************/ dim3 gridSize3(8*NF,1,1); dim3 blockSize3(pow2,1,1); hipLaunchKernelGGL(( total_power_kernel3), dim3(gridSize3), dim3(blockSize3), 0, 0, d_output2, d_output3); ret = hipGetLastError(); if (ret != hipSuccess) { printf("ERROR: total_power_kernel3 - %s\n", hipGetErrorString(ret)); } hipMemcpy(output, d_output3, 8*NF*sizeof(float), hipMemcpyDeviceToHost); }
422cdab9ac273254a2617aaff952fdfbdb8bd678.cu
#include "total_power.h" __global__ void total_power_kernel1(unsigned char * input, float * output, int start_t) { // Declare dynamic shared memory __shared__ float power[nblocks1]; // Get indicies int c = blockIdx.x; int f = blockIdx.y; int t = threadIdx.x; // Get internal index (internal to block) int sid = t; // Get absolute index int idx = f + 8*NF*c + 8*NF*NC*t; if (sid + start_t < NM*NT) { // Extract real and imaginary components; float real = (float)input[2*idx]; float imag = (float)input[2*idx + 1]; // Compute instantaneous power power[sid] = real*real + imag*imag; } else { power[sid] = 0.0; } // Complete power computation before moving on __syncthreads(); // Perform reduction for (int s = blockDim.x/2; s > 0; s>>=1) { if (sid < s) { power[sid] += power[sid + s]; } __syncthreads(); } // Save sum to output if (sid == 0) { output[f + 8*NF*c] = power[0]; } } __global__ void total_power_kernel2(float * input, float * output) { // Declare dynamic shared memory __shared__ float power[nblocks2]; // Get indicies int c = blockIdx.x; int f = blockIdx.y; int t = threadIdx.x; // Get internal index (internal to block) int sid = t; // Get absolute index int idx = f + 8*NF*c + 8*NF*NC*t; power[sid] = input[idx]; // Complete power computation before moving on __syncthreads(); // Perform reduction for (int s = blockDim.x/2; s > 0; s>>=1) { if (sid < s) { power[sid] += power[sid + s]; } __syncthreads(); } // Save sum to output if (sid == 0) { output[f + 8*NF*c] = power[0]; } } __global__ void total_power_kernel3(float * input, float * output) { // Declare dynamic shared memory __shared__ float power[pow2]; // Get indices int f = blockIdx.x; int c = threadIdx.x; // Get internal index (internal to block) int sid = c; // Get absolute index int idx = f + 8*NF*c; if (sid < NC) { // Copy input to shared memory power[sid] = input[idx]; } else { power[sid] = 0.0; } // Finish copy before proceeding __syncthreads(); // Perform reduction for (int s = blockDim.x/2; s > 0; s >>= 1) { if (sid < s) { power[sid] += power[sid + s]; } __syncthreads(); } // Save sum to input if (sid == 0) { output[f] = power[0]; } } static unsigned char * d_input; static float * d_output1; static float * d_output2; static float * d_output3; void initTotalPower() { cudaMalloc((void **) &d_input, 8*NF*NC*NT*NM*sizeof(unsigned char)*2); cudaMalloc((void **) &d_output1, 8*NF*NC*nblocks2*sizeof(float)); cudaMalloc((void **) &d_output2, 8*NF*NC*sizeof(float)); cudaMalloc((void **) &d_output3, 8*NF*sizeof(float)); } void getTotalPower(unsigned char * input, float * output) { cudaMemcpy(d_input, input, 8*NF*NC*NT*NM*2*sizeof(unsigned char), cudaMemcpyHostToDevice); /********************************************** * Reduce over 1024 time samples **********************************************/ dim3 gridSize1(NC,8*NF,1); dim3 blockSize1(nblocks1,1,1); cudaStream_t s[nblocks2]; for (int i = 0; i < nblocks2; i++) { cudaStreamCreate(&s[i]); } for (int i = 0; i < nblocks2; i++) { // int in_off = 8*NF*NC*nblocks1*i; // int out_off = 8*NF*NC*i; total_power_kernel1<<<gridSize1, blockSize1, 0, s[i]>>>(d_input+NA*NC*nblocks1*i, d_output1+NA*NC*i, nblocks1*i); } cudaDeviceSynchronize(); cudaError_t ret = cudaGetLastError(); if (ret != cudaSuccess) { printf("ERROR: total_power_kernel1 - %s\n", cudaGetErrorString(ret)); } /********************************************** * Reduce over remaining time samples **********************************************/ dim3 gridSize2(NC,8*NF,1); dim3 blockSize2(nblocks2,1,1); total_power_kernel2<<<gridSize2, 
blockSize2>>>(d_output1, d_output2); ret = cudaGetLastError(); if (ret != cudaSuccess) { printf("ERROR: total_power_kernel2 - %s\n", cudaGetErrorString(ret)); } /********************************************** * Reduce over frequency channels **********************************************/ dim3 gridSize3(8*NF,1,1); dim3 blockSize3(pow2,1,1); total_power_kernel3<<<gridSize3, blockSize3>>>(d_output2, d_output3); ret = cudaGetLastError(); if (ret != cudaSuccess) { printf("ERROR: total_power_kernel3 - %s\n", cudaGetErrorString(ret)); } cudaMemcpy(output, d_output3, 8*NF*sizeof(float), cudaMemcpyDeviceToHost); }
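// ---------------------------------------------------------------------------
// Note on the three reduction kernels above: the shared-memory tree reduction
// (s = blockDim.x/2; s >>= 1) only sums every element when the block size is a
// power of two, which is why kernel3 pads its shared array to pow2. For
// reference, a warp-level alternative using shuffle intrinsics (CUDA 9+) is
// sketched below; it is illustrative, not part of total_power.h, and blocks
// wider than one warp would still need a cross-warp combine step.
__inline__ __device__ float warpReduceSum(float val)
{
    // tree reduction within a full 32-thread warp; lane 0 ends up with the sum
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffffu, val, offset);
    return val;
}
// ---------------------------------------------------------------------------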
8358b980e42e62283e4287cfbf801f6d917227d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 template<bool betazero> __global__ void zmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { extern __shared__ magmaDoubleComplex dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ) { for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; magmaDoubleComplex val = dval [ num_rows * n + row ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } for( int i=0; i<num_vecs; i++ ) { if (betazero) { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha; } else { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i*num_cols ]; } } } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors if (beta == MAGMA_Z_ZERO) { hipLaunchKernelGGL(( zmgeelltmv_kernel<true>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() , m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } else { hipLaunchKernelGGL(( zmgeelltmv_kernel<false>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() , m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } return MAGMA_SUCCESS; }
8358b980e42e62283e4287cfbf801f6d917227d9.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 template<bool betazero> __global__ void zmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { extern __shared__ magmaDoubleComplex dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ) { for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; magmaDoubleComplex val = dval [ num_rows * n + row ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } for( int i=0; i<num_vecs; i++ ) { if (betazero) { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha; } else { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i*num_cols ]; } } } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors if (beta == MAGMA_Z_ZERO) { zmgeelltmv_kernel<true><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } else { zmgeelltmv_kernel<false><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } return MAGMA_SUCCESS; }
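// ---------------------------------------------------------------------------
// Note on the ELL (ELLPACK) layout assumed by zmgeelltmv_kernel above: entry n
// of row `row` lives at index [num_rows * n + row], i.e. the padded slots are
// stored column-major. A minimal host-side sketch of that packing for a generic
// scalar type; the helper is illustrative and not part of MAGMA.
#include <vector>
#include <utility>
template <typename T>
void pack_ell(const std::vector<std::vector<std::pair<int, T> > > &rows, // (col, value) pairs per row
              int num_rows, int num_cols_per_row,
              std::vector<T> &val, std::vector<int> &colind)
{
    // padded entries keep value 0 and point at column 0, which the kernel then
    // multiplies by zero
    val.assign((size_t)num_rows * num_cols_per_row, T(0));
    colind.assign((size_t)num_rows * num_cols_per_row, 0);
    for (int r = 0; r < num_rows && r < (int)rows.size(); ++r)
        for (int n = 0; n < (int)rows[r].size() && n < num_cols_per_row; ++n) {
            val[(size_t)num_rows * n + r]    = rows[r][n].second;
            colind[(size_t)num_rows * n + r] = rows[r][n].first;
        }
}
// ---------------------------------------------------------------------------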
15c754283318656ea3b10ac4727e6e34aa44280e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements Mersenne Twister random number generator * and Cartesian Box-Muller transformation on the GPU. * See supplied whitepaper for more explanations. */ // Utilities and system includes #include <shrUtils.h> #include <shrQATest.h> #include <cutil_inline.h> #include "MersenneTwister.h" /////////////////////////////////////////////////////////////////////////////// // Common host and device function /////////////////////////////////////////////////////////////////////////////// //ceil(a / b) extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } //floor(a / b) extern "C" int iDivDown(int a, int b){ return a / b; } //Align a to nearest higher multiple of b extern "C" int iAlignUp(int a, int b){ return ((a % b) != 0) ? (a - a % b + b) : a; } //Align a to nearest lower multiple of b extern "C" int iAlignDown(int a, int b){ return a - a % b; } /////////////////////////////////////////////////////////////////////////////// // Reference MT front-end and Box-Muller transform /////////////////////////////////////////////////////////////////////////////// extern "C" void initMTRef(const char *fname); extern "C" void RandomRef(float *h_Random, int NPerRng, unsigned int seed); extern "C" void BoxMullerRef(float *h_Random, int NPerRng); /////////////////////////////////////////////////////////////////////////////// // Fast GPU random number generator and Box-Muller transform /////////////////////////////////////////////////////////////////////////////// #include "MersenneTwister_kernel.cu" /////////////////////////////////////////////////////////////////////////////// // Data configuration /////////////////////////////////////////////////////////////////////////////// const int PATH_N = 24000000; const int N_PER_RNG = iAlignUp(iDivUp(PATH_N, MT_RNG_COUNT), 2); const int RAND_N = MT_RNG_COUNT * N_PER_RNG; const unsigned int SEED = 777; #define DO_BOXMULLER /////////////////////////////////////////////////////////////////////////////// // Main program /////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // Start logs shrQAStart(argc, argv); shrSetLogFileName ("MersenneTwister.txt"); shrLog("%s Starting...\n\n", argv[0]); float *d_Rand, *h_RandCPU, *h_RandGPU; double rCPU, rGPU, delta, sum_delta, max_delta, sum_ref, L1norm, gpuTime; int i, j; unsigned int hTimer; if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else hipSetDevice( cutGetMaxGflopsDeviceId() ); cutilCheckError( cutCreateTimer(&hTimer) ); shrLog("Initializing data for %i samples...\n", PATH_N); h_RandCPU = (float *)malloc(RAND_N * sizeof(float)); h_RandGPU = (float *)malloc(RAND_N * sizeof(float)); cutilSafeCall( hipMalloc((void **)&d_Rand, RAND_N * sizeof(float)) ); shrLog("Loading CPU and GPU twisters configurations...\n"); const char *raw_path = shrFindFilePath("MersenneTwister.raw", argv[0]); const char *dat_path = shrFindFilePath("MersenneTwister.dat", argv[0]); initMTRef(raw_path); loadMTGPU(dat_path); 
seedMTGPU(SEED); shrLog("Generating random numbers on GPU...\n\n"); int numIterations = 10; for (i = -1; i < numIterations; i++) { if (i == 0) { cutilSafeCall( cutilDeviceSynchronize() ); cutilCheckError( cutResetTimer(hTimer) ); cutilCheckError( cutStartTimer(hTimer) ); } hipLaunchKernelGGL(( RandomGPU), dim3(32), dim3(128), 0, 0, d_Rand, N_PER_RNG); cutilCheckMsg("RandomGPU() execution failed\n"); #ifdef DO_BOXMULLER hipLaunchKernelGGL(( BoxMullerGPU), dim3(32), dim3(128), 0, 0, d_Rand, N_PER_RNG); cutilCheckMsg("BoxMullerGPU() execution failed\n"); #endif } cutilSafeCall( cutilDeviceSynchronize() ); cutilCheckError( cutStopTimer(hTimer) ); gpuTime = 1.0e-3 * cutGetTimerValue(hTimer)/(double)numIterations; shrLogEx(LOGBOTH | MASTER, 0, "MersenneTwister, Throughput = %.4f GNumbers/s, Time = %.5f s, Size = %u Numbers, NumDevsUsed = %u, Workgroup = %u\n", 1.0e-9 * RAND_N / gpuTime, gpuTime, RAND_N, 1, 128); shrLog("\nReading back the results...\n"); cutilSafeCall( hipMemcpy(h_RandGPU, d_Rand, RAND_N * sizeof(float), hipMemcpyDeviceToHost) ); shrLog("Checking GPU results...\n"); shrLog(" ...generating random numbers on CPU using reference generator\n"); RandomRef(h_RandCPU, N_PER_RNG, SEED); #ifdef DO_BOXMULLER shrLog(" ...applying Box-Muller transformation on CPU\n"); BoxMullerRef(h_RandCPU, N_PER_RNG); #endif shrLog(" ...comparing the results\n\n"); max_delta = 0; sum_delta = 0; sum_ref = 0; for(i = 0; i < MT_RNG_COUNT; i++) for(j = 0; j < N_PER_RNG; j++){ rCPU = h_RandCPU[i * N_PER_RNG + j]; rGPU = h_RandGPU[i + j * MT_RNG_COUNT]; delta = fabs(rCPU - rGPU); sum_delta += delta; sum_ref += fabs(rCPU); if(delta >= max_delta) max_delta = delta; } L1norm = (float)(sum_delta / sum_ref); shrLog("Max absolute error: %E\n", max_delta); shrLog("L1 norm: %E\n\n", L1norm); shrLog("Shutting down...\n"); cutilSafeCall( hipFree(d_Rand) ); free(h_RandGPU); free(h_RandCPU); cutilCheckError( cutDeleteTimer( hTimer) ); cutilDeviceReset(); shrQAFinishExit(argc, (const char**)argv, (L1norm < 1e-6) ? QA_PASSED : QA_FAILED); }
15c754283318656ea3b10ac4727e6e34aa44280e.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements Mersenne Twister random number generator * and Cartesian Box-Muller transformation on the GPU. * See supplied whitepaper for more explanations. */ // Utilities and system includes #include <shrUtils.h> #include <shrQATest.h> #include <cutil_inline.h> #include "MersenneTwister.h" /////////////////////////////////////////////////////////////////////////////// // Common host and device function /////////////////////////////////////////////////////////////////////////////// //ceil(a / b) extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } //floor(a / b) extern "C" int iDivDown(int a, int b){ return a / b; } //Align a to nearest higher multiple of b extern "C" int iAlignUp(int a, int b){ return ((a % b) != 0) ? (a - a % b + b) : a; } //Align a to nearest lower multiple of b extern "C" int iAlignDown(int a, int b){ return a - a % b; } /////////////////////////////////////////////////////////////////////////////// // Reference MT front-end and Box-Muller transform /////////////////////////////////////////////////////////////////////////////// extern "C" void initMTRef(const char *fname); extern "C" void RandomRef(float *h_Random, int NPerRng, unsigned int seed); extern "C" void BoxMullerRef(float *h_Random, int NPerRng); /////////////////////////////////////////////////////////////////////////////// // Fast GPU random number generator and Box-Muller transform /////////////////////////////////////////////////////////////////////////////// #include "MersenneTwister_kernel.cu" /////////////////////////////////////////////////////////////////////////////// // Data configuration /////////////////////////////////////////////////////////////////////////////// const int PATH_N = 24000000; const int N_PER_RNG = iAlignUp(iDivUp(PATH_N, MT_RNG_COUNT), 2); const int RAND_N = MT_RNG_COUNT * N_PER_RNG; const unsigned int SEED = 777; #define DO_BOXMULLER /////////////////////////////////////////////////////////////////////////////// // Main program /////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // Start logs shrQAStart(argc, argv); shrSetLogFileName ("MersenneTwister.txt"); shrLog("%s Starting...\n\n", argv[0]); float *d_Rand, *h_RandCPU, *h_RandGPU; double rCPU, rGPU, delta, sum_delta, max_delta, sum_ref, L1norm, gpuTime; int i, j; unsigned int hTimer; if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else cudaSetDevice( cutGetMaxGflopsDeviceId() ); cutilCheckError( cutCreateTimer(&hTimer) ); shrLog("Initializing data for %i samples...\n", PATH_N); h_RandCPU = (float *)malloc(RAND_N * sizeof(float)); h_RandGPU = (float *)malloc(RAND_N * sizeof(float)); cutilSafeCall( cudaMalloc((void **)&d_Rand, RAND_N * sizeof(float)) ); shrLog("Loading CPU and GPU twisters configurations...\n"); const char *raw_path = shrFindFilePath("MersenneTwister.raw", argv[0]); const char *dat_path = shrFindFilePath("MersenneTwister.dat", argv[0]); initMTRef(raw_path); loadMTGPU(dat_path); seedMTGPU(SEED); shrLog("Generating random numbers on GPU...\n\n"); int numIterations = 10; for (i = 
-1; i < numIterations; i++) { if (i == 0) { cutilSafeCall( cutilDeviceSynchronize() ); cutilCheckError( cutResetTimer(hTimer) ); cutilCheckError( cutStartTimer(hTimer) ); } RandomGPU<<<32, 128>>>(d_Rand, N_PER_RNG); cutilCheckMsg("RandomGPU() execution failed\n"); #ifdef DO_BOXMULLER BoxMullerGPU<<<32, 128>>>(d_Rand, N_PER_RNG); cutilCheckMsg("BoxMullerGPU() execution failed\n"); #endif } cutilSafeCall( cutilDeviceSynchronize() ); cutilCheckError( cutStopTimer(hTimer) ); gpuTime = 1.0e-3 * cutGetTimerValue(hTimer)/(double)numIterations; shrLogEx(LOGBOTH | MASTER, 0, "MersenneTwister, Throughput = %.4f GNumbers/s, Time = %.5f s, Size = %u Numbers, NumDevsUsed = %u, Workgroup = %u\n", 1.0e-9 * RAND_N / gpuTime, gpuTime, RAND_N, 1, 128); shrLog("\nReading back the results...\n"); cutilSafeCall( cudaMemcpy(h_RandGPU, d_Rand, RAND_N * sizeof(float), cudaMemcpyDeviceToHost) ); shrLog("Checking GPU results...\n"); shrLog(" ...generating random numbers on CPU using reference generator\n"); RandomRef(h_RandCPU, N_PER_RNG, SEED); #ifdef DO_BOXMULLER shrLog(" ...applying Box-Muller transformation on CPU\n"); BoxMullerRef(h_RandCPU, N_PER_RNG); #endif shrLog(" ...comparing the results\n\n"); max_delta = 0; sum_delta = 0; sum_ref = 0; for(i = 0; i < MT_RNG_COUNT; i++) for(j = 0; j < N_PER_RNG; j++){ rCPU = h_RandCPU[i * N_PER_RNG + j]; rGPU = h_RandGPU[i + j * MT_RNG_COUNT]; delta = fabs(rCPU - rGPU); sum_delta += delta; sum_ref += fabs(rCPU); if(delta >= max_delta) max_delta = delta; } L1norm = (float)(sum_delta / sum_ref); shrLog("Max absolute error: %E\n", max_delta); shrLog("L1 norm: %E\n\n", L1norm); shrLog("Shutting down...\n"); cutilSafeCall( cudaFree(d_Rand) ); free(h_RandGPU); free(h_RandCPU); cutilCheckError( cutDeleteTimer( hTimer) ); cutilDeviceReset(); shrQAFinishExit(argc, (const char**)argv, (L1norm < 1e-6) ? QA_PASSED : QA_FAILED); }
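The MersenneTwister sample above draws uniform random numbers on the GPU and then converts pairs of them to normally distributed values with a Cartesian Box-Muller transform. A minimal device-side sketch of that transform is given below; it is illustrative only — the sample's real BoxMullerGPU kernel lives in MersenneTwister_kernel.cu, and the function name and epsilon guard here are assumptions.

// Hedged sketch: Cartesian Box-Muller, turning two uniform samples in (0,1]
// into two independent standard-normal samples. Not the sample's actual kernel.
__device__ void boxMullerPair(float& u1, float& u2)
{
    const float TWO_PI = 6.28318530717959f;
    float r   = sqrtf(-2.0f * logf(fmaxf(u1, 1.0e-7f)));  // guard against log(0)
    float phi = TWO_PI * u2;
    u1 = r * cosf(phi);  // first normal deviate
    u2 = r * sinf(phi);  // second normal deviate
}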
97f7191629e6b7690637c39352744ceb2974439b.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cstdlib> #include <time.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> // Get the timer value. void get_time(double* ret) { volatile struct timespec val; clock_gettime(CLOCK_REALTIME, (struct timespec*)&val); *ret = (double)0.000000001 * val.tv_nsec + val.tv_sec; } void usage(const char* filename) { printf("Sort the random key-value data set of the given length by key.\n"); printf("Usage: %s <n>\n", filename); } using namespace thrust; int main(int argc, char* argv[]) { const int printable_n = 128; if (argc != 2) { usage(argv[0]); return 0; } int n = atoi(argv[1]); if (n <= 0) { usage(argv[0]); return 0; } // Generate the random keys and values on the host. host_vector<float> h_keys(n); host_vector<float> h_vals(n); for (int i = 0; i < n; i++) { h_keys[i] = drand48(); h_vals[i] = drand48(); } // Print out the input data if n is small. if (n <= printable_n) { printf("Input data:\n"); for (int i = 0; i < n; i++) printf("(%f, %f)\n", h_keys[i], h_vals[i]); printf("\n"); } // Strip initialization delay. double init_start, init_finish; get_time(&init_start); #ifndef CPU int count = 0; hipGetDeviceCount(&count); #endif get_time(&init_finish); // Transfer data to the device. double load_start, load_finish; get_time(&load_start); #ifndef CPU device_vector<float> d_keys = h_keys; device_vector<float> d_vals = h_vals; #endif get_time(&load_finish); double sort_start, sort_finish; get_time(&sort_start); #ifdef CPU sort_by_key(h_keys.begin(), h_keys.end(), h_vals.begin()); #else sort_by_key(d_keys.begin(), d_keys.end(), d_vals.begin()); hipDeviceSynchronize(); #endif get_time(&sort_finish); // Transfer data back to host. double save_start, save_finish; get_time(&save_start); #ifndef CPU copy(d_keys.begin(), d_keys.end(), h_keys.begin()); copy(d_vals.begin(), d_vals.end(), h_vals.begin()); #endif get_time(&save_finish); printf("Init time = %f sec\n", init_finish - init_start); printf("Load time = %f sec\n", load_finish - load_start); printf("Sort time = %f sec\n", sort_finish - sort_start); printf("Save time = %f sec\n", save_finish - save_start); // Print out the output data if n is small. if (n <= printable_n) { printf("Output data:\n"); for (int i = 0; i < n; i++) printf("(%f, %f)\n", h_keys[i], h_vals[i]); printf("\n"); } #if 1 // Show last 10 pairs. printf("Last 10 pairs:\n"); for (int i = n - 11; i < n; i++) { printf("(%f, %f)\n", h_keys[i], h_vals[i]); } printf("\n"); #endif return 0; }
97f7191629e6b7690637c39352744ceb2974439b.cu
#include <cstdio> #include <cstdlib> #include <time.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> // Get the timer value. void get_time(double* ret) { volatile struct timespec val; clock_gettime(CLOCK_REALTIME, (struct timespec*)&val); *ret = (double)0.000000001 * val.tv_nsec + val.tv_sec; } void usage(const char* filename) { printf("Sort the random key-value data set of the given length by key.\n"); printf("Usage: %s <n>\n", filename); } using namespace thrust; int main(int argc, char* argv[]) { const int printable_n = 128; if (argc != 2) { usage(argv[0]); return 0; } int n = atoi(argv[1]); if (n <= 0) { usage(argv[0]); return 0; } // Generate the random keys and values on the host. host_vector<float> h_keys(n); host_vector<float> h_vals(n); for (int i = 0; i < n; i++) { h_keys[i] = drand48(); h_vals[i] = drand48(); } // Print out the input data if n is small. if (n <= printable_n) { printf("Input data:\n"); for (int i = 0; i < n; i++) printf("(%f, %f)\n", h_keys[i], h_vals[i]); printf("\n"); } // Strip initialization delay. double init_start, init_finish; get_time(&init_start); #ifndef CPU int count = 0; cudaGetDeviceCount(&count); #endif get_time(&init_finish); // Transfer data to the device. double load_start, load_finish; get_time(&load_start); #ifndef CPU device_vector<float> d_keys = h_keys; device_vector<float> d_vals = h_vals; #endif get_time(&load_finish); double sort_start, sort_finish; get_time(&sort_start); #ifdef CPU sort_by_key(h_keys.begin(), h_keys.end(), h_vals.begin()); #else sort_by_key(d_keys.begin(), d_keys.end(), d_vals.begin()); cudaDeviceSynchronize(); #endif get_time(&sort_finish); // Transfer data back to host. double save_start, save_finish; get_time(&save_start); #ifndef CPU copy(d_keys.begin(), d_keys.end(), h_keys.begin()); copy(d_vals.begin(), d_vals.end(), h_vals.begin()); #endif get_time(&save_finish); printf("Init time = %f sec\n", init_finish - init_start); printf("Load time = %f sec\n", load_finish - load_start); printf("Sort time = %f sec\n", sort_finish - sort_start); printf("Save time = %f sec\n", save_finish - save_start); // Print out the output data if n is small. if (n <= printable_n) { printf("Output data:\n"); for (int i = 0; i < n; i++) printf("(%f, %f)\n", h_keys[i], h_vals[i]); printf("\n"); } #if 1 // Show last 10 pairs. printf("Last 10 pairs:\n"); for (int i = n - 11; i < n; i++) { printf("(%f, %f)\n", h_keys[i], h_vals[i]); } printf("\n"); #endif return 0; }
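Both variants of the sort benchmark above rely on thrust::sort_by_key and only report timings. If a correctness check is wanted after the timed section, Thrust's is_sorted predicate can be applied to the keys once they are back on the host; a hedged sketch follows (this check is not part of the original program, and the helper name is illustrative).

#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <cassert>

// Hedged sketch: after sort_by_key, the key sequence must be non-decreasing.
static void assert_keys_sorted(const thrust::host_vector<float>& h_keys)
{
    assert(thrust::is_sorted(h_keys.begin(), h_keys.end()));
}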
179de97958b4fe43a6fa5971da0a1b017afee030.hip
// !!! This is a file automatically generated by hipify!!! #include <nih/basic/types.h> #include <nih/basic/numbers.h> #include <nih/time/timer.h> #include <hip/hip_runtime_api.h> #include <PBGI.h> #include <thrust/device_vector.h> #include <thrust/detail/device/cuda/arch.h> using namespace nih; namespace pbgi { namespace gpu { template <typename T, uint32 K> struct Vec {}; template <> struct Vec<float,1> { typedef float type; __device__ static float make(const float a) {return a;} }; template <> struct Vec<float,2> { typedef float2 type; __device__ static float2 make(const float a) {return make_float2(a,a);} }; template <> struct Vec<float,3> { typedef float3 type; __device__ static float3 make(const float a) {return make_float3(a,a,a);} }; template <> struct Vec<float,4> { typedef float4 type; __device__ static float4 make(const float a) {return make_float4(a,a,a,a);} }; template <> struct Vec<uint32,1> { typedef uint32 type; }; template <> struct Vec<uint32,2> { typedef uint2 type; }; template <> struct Vec<uint32,3> { typedef uint3 type; }; template <> struct Vec<uint32,4> { typedef uint4 type; }; __device__ inline float2 operator*(const float a, const float2 b) { return make_float2( a*b.x, a*b.y ); } __device__ inline float3 operator*(const float a, const float3 b) { return make_float3( a*b.x, a*b.y, a*b.z ); } __device__ inline float4 operator*(const float a, const float4 b) { return make_float4( a*b.x, a*b.y, a*b.z, a*b.w ); } __device__ inline float2 operator*(const float2 a, const float2 b) { return make_float2( a.x*b.x, a.y*b.y ); } __device__ inline float3 operator*(const float3 a, const float3 b) { return make_float3( a.x*b.x, a.y*b.y, a.z*b.z ); } __device__ inline float4 operator*(const float4 a, const float4 b) { return make_float4( a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w ); } __device__ inline float2 operator-(const float a, const float2 b) { return make_float2( a-b.x, a-b.y ); } __device__ inline float3 operator-(const float a, const float3 b) { return make_float3( a-b.x, a-b.y, a-b.z ); } __device__ inline float4 operator-(const float a, const float4 b) { return make_float4( a-b.x, a-b.y, a-b.z, a-b.w ); } __device__ inline float2 operator-(const float2 a, const float2 b) { return make_float2( a.x-b.x, a.y-b.y ); } __device__ inline float3 operator-(const float3 a, const float3 b) { return make_float3( a.x-b.x, a.y-b.y, a.z-b.z ); } __device__ inline float4 operator-(const float4 a, const float4 b) { return make_float4( a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w ); } __device__ inline float2 operator-(const float2 a, const float b) { return make_float2( a.x-b, a.y-b ); } __device__ inline float3 operator-(const float3 a, const float b) { return make_float3( a.x-b, a.y-b, a.z-b ); } __device__ inline float4 operator-(const float4 a, const float b) { return make_float4( a.x-b, a.y-b, a.z-b, a.w-b ); } __device__ inline float2 operator+(const float2 a, const float2 b) { return make_float2( a.x+b.x, a.y+b.y ); } __device__ inline float3 operator+(const float3 a, const float3 b) { return make_float3( a.x+b.x, a.y+b.y, a.z+b.z ); } __device__ inline float4 operator+(const float4 a, const float4 b) { return make_float4( a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w ); } __device__ inline float2& operator+=(float2& a, const float2 b) { a.x += b.x; a.y += b.y; return a; } __device__ inline float3& operator+=(float3& a, const float3 b) { a.x += b.x; a.y += b.y; a.z += b.z; return a; } __device__ inline float4& operator+=(float4& a, const float4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; return 
a; } __device__ inline float abs(const float a) { return fabsf(a); } __device__ inline float2 abs(const float2 a) { return make_float2( fabsf(a.x), fabsf(a.y) ); } __device__ inline float3 abs(const float3 a) { return make_float3( fabsf(a.x), fabsf(a.y), fabsf(a.z) ); } __device__ inline float4 abs(const float4 a) { return make_float4( fabsf(a.x), fabsf(a.y), fabsf(a.z), fabsf(a.w) ); } __device__ inline float rcp(const float a) { return 1.0f / a; } __device__ inline float2 rcp(const float2 a) { return make_float2( rcp(a.x), rcp(a.y) ); } __device__ inline float3 rcp(const float3 a) { return make_float3( rcp(a.x), rcp(a.y), rcp(a.z) ); } __device__ inline float4 rcp(const float4 a) { return make_float4( rcp(a.x), rcp(a.y), rcp(a.z), rcp(a.w) ); } template <uint32 K> struct rw {}; template <> struct rw<1> { typedef float vec_type; template <uint32 OFFSET> __device__ static void read( vec_type& item, const float* __restrict__ in_keys, const uint32 limit) { item = (threadIdx.x + OFFSET < limit) ? in_keys[ threadIdx.x + OFFSET ] : 0; } template <uint32 OFFSET> __device__ static void write( const vec_type item, float* __restrict__ out_keys, const uint32 limit) { if (threadIdx.x + OFFSET < limit) out_keys[ threadIdx.x + OFFSET ] = item; } }; template <> struct rw<2> { typedef typename Vec<float,2>::type vec_type; template <uint32 OFFSET> __device__ static void read( vec_type& item, const float* __restrict__ in_keys, const uint32 limit) { item.x = (threadIdx.x*2 + OFFSET*2 < limit) ? in_keys[ threadIdx.x*2 + OFFSET*2 ] : 0; item.y = (threadIdx.x*2 + OFFSET*2+1 < limit) ? in_keys[ threadIdx.x*2 + OFFSET*2+1 ] : 0; } template <uint32 OFFSET> __device__ static void write( const vec_type item, float* __restrict__ out_keys, const uint32 limit) { if (threadIdx.x*2 + OFFSET*2 < limit) out_keys[ threadIdx.x*2 + OFFSET*2 ] = item.x; if (threadIdx.x*2 + OFFSET*2+1 < limit) out_keys[ threadIdx.x*2 + OFFSET*2+1 ] = item.y; } }; template <> struct rw<3> { typedef typename Vec<float,3>::type vec_type; template <uint32 OFFSET> __device__ static void read( vec_type& item, const float* __restrict__ in_keys, const uint32 limit) { item.x = (threadIdx.x*3 + OFFSET*3 < limit) ? in_keys[ threadIdx.x*3 + OFFSET*3 ] : 0; item.y = (threadIdx.x*3 + OFFSET*3+1 < limit) ? in_keys[ threadIdx.x*3 + OFFSET*3+1 ] : 0; item.z = (threadIdx.x*3 + OFFSET*3+2 < limit) ? in_keys[ threadIdx.x*3 + OFFSET*3+2 ] : 0; } template <uint32 OFFSET> __device__ static void write( const vec_type item, float* __restrict__ out_keys, const uint32 limit) { if (threadIdx.x*3 + OFFSET*3 < limit) out_keys[ threadIdx.x*3 + OFFSET*3 ] = item.x; if (threadIdx.x*3 + OFFSET*3+1 < limit) out_keys[ threadIdx.x*3 + OFFSET*3+1 ] = item.y; if (threadIdx.x*3 + OFFSET*3+2 < limit) out_keys[ threadIdx.x*3 + OFFSET*3+2 ] = item.z; } }; template <> struct rw<4> { typedef typename Vec<float,4>::type vec_type; template <uint32 OFFSET> __device__ static void read( vec_type& item, const float* __restrict__ in_keys, const uint32 limit) { item.x = (threadIdx.x*4 + 4*OFFSET < limit) ? in_keys[ threadIdx.x*4 + OFFSET*4 ] : 0; item.y = (threadIdx.x*4 + 4*OFFSET+1 < limit) ? in_keys[ threadIdx.x*4 + OFFSET*4+1 ] : 0; item.z = (threadIdx.x*4 + 4*OFFSET+2 < limit) ? in_keys[ threadIdx.x*4 + OFFSET*4+2 ] : 0; item.w = (threadIdx.x*4 + 4*OFFSET+3 < limit) ? 
in_keys[ threadIdx.x*4 + OFFSET*4+3 ] : 0; } template <uint32 OFFSET> __device__ static void write( const vec_type item, float* __restrict__ out_keys, const uint32 limit) { if (threadIdx.x*4 + 4*OFFSET < limit) out_keys[ threadIdx.x*4 + OFFSET*4 ] = item.x; if (threadIdx.x*4 + 4*OFFSET+1 < limit) out_keys[ threadIdx.x*4 + OFFSET*4+1 ] = item.y; if (threadIdx.x*4 + 4*OFFSET+2 < limit) out_keys[ threadIdx.x*4 + OFFSET*4+2 ] = item.z; if (threadIdx.x*4 + 4*OFFSET+3 < limit) out_keys[ threadIdx.x*4 + OFFSET*4+3 ] = item.w; } }; template <uint32 CTA_SIZE, uint32 K, bool CHECKED> __device__ void pbgi_block( typename Vec<float,K>::type* smem, const uint32 src_begin, const uint32 src_end, const uint32 block_offset, const uint32 block_end, PBGI_state state, PBGI_values in_values, PBGI_values out_values) { typedef typename Vec<float,K>::type vec_type; const uint32 thread_id = threadIdx.x; vec_type* x; x = &smem[0]; vec_type* y; y = &smem[0] + CTA_SIZE*1; vec_type* z; z = &smem[0] + CTA_SIZE*2; vec_type* nx; nx = &smem[0] + CTA_SIZE*3; vec_type* ny; ny = &smem[0] + CTA_SIZE*4; vec_type* nz; nz = &smem[0] + CTA_SIZE*5; vec_type* rec_r; rec_r = &smem[0] + CTA_SIZE*6; vec_type* rec_g; rec_g = &smem[0] + CTA_SIZE*7; vec_type* rec_b; rec_b = &smem[0] + CTA_SIZE*8; // read block in shared memory (not caring about overflows) if (thread_id < CTA_SIZE) // help the poor compiler reducing register pressure { if (CHECKED == false || block_offset + thread_id*K + K-1 < block_end) { x[ thread_id ] = reinterpret_cast<vec_type*>(state.x + block_offset)[ thread_id ]; y[ thread_id ] = reinterpret_cast<vec_type*>(state.y + block_offset)[ thread_id ]; z[ thread_id ] = reinterpret_cast<vec_type*>(state.z + block_offset)[ thread_id ]; nx[ thread_id ] = reinterpret_cast<vec_type*>(state.nx + block_offset)[ thread_id ]; ny[ thread_id ] = reinterpret_cast<vec_type*>(state.ny + block_offset)[ thread_id ]; nz[ thread_id ] = reinterpret_cast<vec_type*>(state.nz + block_offset)[ thread_id ]; rec_r[ thread_id ] = reinterpret_cast<vec_type*>(out_values.r + block_offset)[ thread_id ]; rec_g[ thread_id ] = reinterpret_cast<vec_type*>(out_values.g + block_offset)[ thread_id ]; rec_b[ thread_id ] = reinterpret_cast<vec_type*>(out_values.b + block_offset)[ thread_id ]; } else { rw<K>::read<0>( x[ thread_id ], &state.x[ block_offset ], block_end - block_offset ); rw<K>::read<0>( y[ thread_id ], &state.y[ block_offset ], block_end - block_offset ); rw<K>::read<0>( z[ thread_id ], &state.z[ block_offset ], block_end - block_offset ); rw<K>::read<0>( nx[ thread_id ], &state.nx[ block_offset ], block_end - block_offset ); rw<K>::read<0>( ny[ thread_id ], &state.ny[ block_offset ], block_end - block_offset ); rw<K>::read<0>( nz[ thread_id ], &state.nz[ block_offset ], block_end - block_offset ); rw<K>::read<0>( rec_r[ thread_id ], &in_values.r[ block_offset ], block_end - block_offset ); rw<K>::read<0>( rec_g[ thread_id ], &in_values.g[ block_offset ], block_end - block_offset ); rw<K>::read<0>( rec_b[ thread_id ], &in_values.b[ block_offset ], block_end - block_offset ); } } // process block { __syncthreads(); float* x; x = reinterpret_cast<float*>(&smem[0]); float* y; y = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*1); float* z; z = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*2); float* nx; nx = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*3); float* ny; ny = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*4); float* nz; nz = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*5); float* rec_r; rec_r = reinterpret_cast<float*>(&smem[0] + 
CTA_SIZE*6); float* rec_g; rec_g = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*7); float* rec_b; rec_b = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*8); // iterating over batches of N_SENDERS senders at a time #if __CUDA_ARCH__ < 200 const uint32 N_SENDERS = 4; #else const uint32 N_SENDERS = 64; #endif __shared__ float s_x[N_SENDERS], s_y[N_SENDERS], s_z[N_SENDERS], s_nx[N_SENDERS], s_ny[N_SENDERS], s_nz[N_SENDERS], s_r[N_SENDERS], s_g[N_SENDERS], s_b[N_SENDERS]; for (uint32 i = src_begin; i < src_end; i += N_SENDERS) { // load N_SENDERS senders in shared memory (issuing float4 loads) if (thread_id < N_SENDERS/4) { *reinterpret_cast<float4*>(s_x + thread_id*4) = *reinterpret_cast<float4*>(state.x + i + thread_id*4); *reinterpret_cast<float4*>(s_y + thread_id*4) = *reinterpret_cast<float4*>(state.y + i + thread_id*4); *reinterpret_cast<float4*>(s_z + thread_id*4) = *reinterpret_cast<float4*>(state.z + i + thread_id*4); *reinterpret_cast<float4*>(s_nx + thread_id*4) = *reinterpret_cast<float4*>(state.nx + i + thread_id*4); *reinterpret_cast<float4*>(s_ny + thread_id*4) = *reinterpret_cast<float4*>(state.ny + i + thread_id*4); *reinterpret_cast<float4*>(s_nz + thread_id*4) = *reinterpret_cast<float4*>(state.nz + i + thread_id*4); *reinterpret_cast<float4*>(s_r + thread_id*4) = *reinterpret_cast<float4*>(in_values.r + i + thread_id*4); *reinterpret_cast<float4*>(s_g + thread_id*4) = *reinterpret_cast<float4*>(in_values.g + i + thread_id*4); *reinterpret_cast<float4*>(s_b + thread_id*4) = *reinterpret_cast<float4*>(in_values.b + i + thread_id*4); } __syncthreads(); // and compute their contribution to all K receivers per thread for (uint32 k = 0; k < K; ++k) { #if __CUDA_ARCH__ < 200 #pragma unroll #endif for (uint32 c = 0; c < N_SENDERS; ++c) { const float dx = s_x[c] - x[ thread_id + CTA_SIZE*k ]; const float dy = s_y[c] - y[ thread_id + CTA_SIZE*k ]; const float dz = s_z[c] - z[ thread_id + CTA_SIZE*k ]; const float d2 = dx*dx + dy*dy + dz*dz; const float g1 = nx[ thread_id + CTA_SIZE*k ]*dx + ny[ thread_id + CTA_SIZE*k ]*dy + nz[ thread_id + CTA_SIZE*k ]*dz; const float g2 = s_nx[c]*dx + s_ny[c]*dy + s_nz[c]*dz; const float G_tmp = abs( g1*g2 ) * rcp( fmaxf( d2*d2, 1.0e-8f ) ); const float G = (i+c == block_offset + thread_id + CTA_SIZE*k) ? 
1.0f : G_tmp; // if the sender is the receiver the weight should be 1 //__syncthreads(); // helps lowering register usage for wide K rec_r[ thread_id + CTA_SIZE*k ] += s_r[c] * G; rec_g[ thread_id + CTA_SIZE*k ] += s_g[c] * G; rec_b[ thread_id + CTA_SIZE*k ] += s_b[c] * G; } } } __syncthreads(); } // write block to global memory if (thread_id < CTA_SIZE) // help the poor compiler reducing register pressure { if (CHECKED == false || block_offset + thread_id*K + K-1 < block_end) { reinterpret_cast<vec_type*>(out_values.r + block_offset)[ thread_id ] = rec_r[thread_id]; reinterpret_cast<vec_type*>(out_values.g + block_offset)[ thread_id ] = rec_g[thread_id]; reinterpret_cast<vec_type*>(out_values.b + block_offset)[ thread_id ] = rec_b[thread_id]; } else { rw<K>::write<0>( rec_r[ thread_id ], &out_values.r[ block_offset ], block_end - block_offset ); rw<K>::write<0>( rec_g[ thread_id ], &out_values.g[ block_offset ], block_end - block_offset ); rw<K>::write<0>( rec_b[ thread_id ], &out_values.b[ block_offset ], block_end - block_offset ); } } } template <uint32 CTA_SIZE, uint32 K> __global__ void pbgi_kernel( const uint32 n_points, const uint32 src_begin, const uint32 src_end, const uint32 rec_begin, const uint32 rec_end, const uint32 n_blocks, const uint32 n_elements_per_block, PBGI_state state, PBGI_values in_values, PBGI_values out_values) { const uint32 group_size = CTA_SIZE * K; // compile-time constant const uint32 block_id = blockIdx.x; // constant across CTA const uint32 block_begin = rec_begin + block_id * n_elements_per_block; // constant across CTA const uint32 block_end = nih::min( block_begin + n_elements_per_block, rec_end ); // constant across CTA //if (block_begin >= rec_end) // return; typedef typename Vec<float,K>::type vec_type; __shared__ vec_type smem[CTA_SIZE*9]; uint32 block_offset = block_begin; // process all the batches which dont need overflow checks while (block_offset + group_size <= block_end) { pbgi_block<CTA_SIZE,K,false>( smem, src_begin, src_end, block_offset, block_end, state, in_values, out_values ); block_offset += group_size; } // process the last batch if (block_offset < block_end) { pbgi_block<CTA_SIZE,K,true>( smem, src_begin, src_end, block_offset, block_end, state, in_values, out_values ); } } // check for cuda runtime errors void check_cuda_errors(const uint32 code) { hipError_t error = hipGetLastError(); if (error) { fprintf(stderr, "*** error (%u) ***\n %s\n", code, hipGetErrorString(error)); exit(1); } } template <uint32 CTA_SIZE, uint32 K> void test_pbgi_t(const uint32 n_points) { hipSetDeviceFlags( hipDeviceMapHost ); float* arena; //hipHostMalloc( &arena, sizeof(float)*12*n_points, hipHostMallocMapped ); arena = (float*)malloc( sizeof(float)*12*n_points ); float* ptr = arena; float* x = ptr; ptr += n_points; float* y = ptr; ptr += n_points; float* z = ptr; ptr += n_points; float* nx = ptr; ptr += n_points; float* ny = ptr; ptr += n_points; float* nz = ptr; ptr += n_points; float* in_r = ptr; ptr += n_points; float* in_g = ptr; ptr += n_points; float* in_b = ptr; ptr += n_points; float* out_r = ptr; ptr += n_points; float* out_g = ptr; ptr += n_points; float* out_b = ptr; ptr += n_points; for (uint32 i = 0; i < n_points; ++i) { x[i] = float(i) / float(n_points); y[i] = 1.0f - float(i) / float(n_points); z[i] = sinf( float(i) / float(n_points) * 2.0f * float(M_PI) ); nx[i] = 1.0f; ny[i] = 0.0f; nz[i] = 0.0f; in_r[i] = fabsf( sinf( float(i) / float(n_points) * 2.0f * float(M_PI) ) ); in_g[i] = fabsf( sinf( float(i) / float(n_points) * 4.0f * 
float(M_PI) ) ); in_b[i] = fabsf( sinf( float(i) / float(n_points) * 8.0f * float(M_PI) ) ); out_r[i] = 0.0f; out_g[i] = 0.0f; out_b[i] = 0.0f; } PBGI_state state; PBGI_values in_values; PBGI_values out_values; // compute the number of blocks we can launch to fill the machine const size_t max_blocks = thrust::detail::device::cuda::arch::max_active_blocks(pbgi_kernel<CTA_SIZE,K>, CTA_SIZE, 0); const uint32 group_size = CTA_SIZE * K; const uint32 n_groups = (n_points + group_size-1) / group_size; const size_t n_blocks = nih::min( max_blocks, n_groups ); // assume we can process 1 billion pairs per kernel launch avoiding timeout const float pairs_per_kernel = 1.0e9f; // compute the number of receivers and the number of senders we want to process per block: // our strategy is to process 4 times as many receivers as we can process concurrently, and // set the number of senders correspondingly to reach our quota uint32 n_receivers = nih::min( n_blocks * group_size * 4u, n_points ); uint32 n_senders = nih::max( uint32( pairs_per_kernel / float(n_receivers) ), 1u ); if (n_senders % 32) n_senders += 32 - (n_senders % 32); uint32 n_elements_per_block = (n_receivers + n_blocks-1) / n_blocks; if (n_elements_per_block % 4 != 0) n_elements_per_block += 4 - (n_elements_per_block % 4); float* cuda_arena; //hipHostGetDevicePointer( &cuda_arena, arena, 0 ); hipMalloc( &cuda_arena, sizeof(float)*12*n_points ); hipMemcpy( cuda_arena, arena, sizeof(float)*12*n_points, hipMemcpyHostToDevice ); check_cuda_errors( 0 ); ptr = cuda_arena; state.x = ptr; ptr += n_points; state.y = ptr; ptr += n_points; state.z = ptr; ptr += n_points; state.nx = ptr; ptr += n_points; state.ny = ptr; ptr += n_points; state.nz = ptr; ptr += n_points; in_values.r = ptr; ptr += n_points; in_values.g = ptr; ptr += n_points; in_values.b = ptr; ptr += n_points; out_values.r = ptr; ptr += n_points; out_values.g = ptr; ptr += n_points; out_values.b = ptr; ptr += n_points; fprintf(stderr, "test pbgi gpu\n"); fprintf(stderr," points : %u\n", n_points); fprintf(stderr," block size : %u\n", CTA_SIZE); fprintf(stderr," K : %u\n", K); fprintf(stderr," n_blocks : %u\n", n_blocks); fprintf(stderr," pts / block : %u\n", n_elements_per_block); fprintf(stderr," src / kernel : %u\n", n_senders); fprintf(stderr," rec / kernel : %u\n", n_receivers); uint32 min_pairs = uint32(-1); nih::Timer timer; timer.start(); { hipDeviceSynchronize(); uint32 rec_begin = 0; while (rec_begin < n_points) { uint32 rec_end = nih::min( rec_begin + n_receivers, n_points ); if (rec_end + n_receivers/2 > n_points) // if only a few points are missing, rec_end = n_points; // merge them in... n_elements_per_block = (rec_end - rec_begin + n_blocks-1) / n_blocks; if (n_elements_per_block % 4 != 0) n_elements_per_block += 4 - (n_elements_per_block % 4); uint32 sender_begin = 0; while (sender_begin < n_points) { uint32 sender_end = nih::min( sender_begin + n_senders, n_points ); if (sender_end + n_senders/2 > n_points) // if only a few points are missing, sender_end = n_points; // merge them in... 
min_pairs = nih::min( min_pairs, (sender_end - sender_begin)*(rec_end - rec_begin) ); hipLaunchKernelGGL(( pbgi_kernel<CTA_SIZE,K>), dim3(n_blocks),dim3(CTA_SIZE), 0, 0, n_points, sender_begin, sender_end, rec_begin, rec_end, n_blocks, n_elements_per_block, state, in_values, out_values ); hipDeviceSynchronize(); check_cuda_errors( 1 ); sender_begin = sender_end; } rec_begin = rec_end; } hipDeviceSynchronize(); } check_cuda_errors( 1 ); timer.stop(); hipMemcpy( arena, cuda_arena, sizeof(float)*12*n_points, hipMemcpyDeviceToHost ); float sum = 0.0f; for (uint32 i = 0; i < n_points; ++i) sum += (out_r[i] + out_g[i] + out_b[i]) / 3.0f; fprintf(stderr," min pairs : %u\n", min_pairs); fprintf(stderr," avg energy : %.3f\n", sum / float(n_points)); fprintf(stderr," time : %.3f s\n", float(timer.seconds())); fprintf(stderr," pairs/s : %.3f G\n", (float(n_points)/1000.0f)*(float(n_points)/1000.0f) / float(timer.seconds()*1000.0f)); //hipHostFree( arena ); hipFree( cuda_arena ); free(arena); } void test_pbgi(const uint32 n_points) { /* test_pbgi_t<32,1>( n_points ); test_pbgi_t<32,2>( n_points ); test_pbgi_t<32,4>( n_points ); test_pbgi_t<64,1>( n_points ); test_pbgi_t<64,2>( n_points ); test_pbgi_t<64,4>( n_points ); test_pbgi_t<96,1>( n_points ); test_pbgi_t<96,2>( n_points ); test_pbgi_t<96,4>( n_points ); test_pbgi_t<128,1>( n_points ); test_pbgi_t<128,2>( n_points ); test_pbgi_t<128,4>( n_points ); test_pbgi_t<256,1>( n_points ); test_pbgi_t<256,2>( n_points );*/ test_pbgi_t<512,1>( n_points ); // test_pbgi_t<512,2>( n_points ); } } // namespace gpu } // namespace pbgi
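test_pbgi_t above pads n_senders to a multiple of 32 and n_elements_per_block to a multiple of 4 using the same modular arithmetic in two places. A small host helper expressing that rounding is sketched below; it is illustrative only, since the original writes the adjustment inline rather than through such a function.

// Hedged sketch: round n up to the next multiple of m (m = 32 for the sender
// batch, m = 4 for the per-block element count in the code above).
inline unsigned int round_up(unsigned int n, unsigned int m)
{
    return (n % m != 0) ? n + (m - n % m) : n;
}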
179de97958b4fe43a6fa5971da0a1b017afee030.cu
#include <nih/basic/types.h> #include <nih/basic/numbers.h> #include <nih/time/timer.h> #include <cuda_runtime_api.h> #include <PBGI.h> #include <thrust/device_vector.h> #include <thrust/detail/device/cuda/arch.h> using namespace nih; namespace pbgi { namespace gpu { template <typename T, uint32 K> struct Vec {}; template <> struct Vec<float,1> { typedef float type; __device__ static float make(const float a) {return a;} }; template <> struct Vec<float,2> { typedef float2 type; __device__ static float2 make(const float a) {return make_float2(a,a);} }; template <> struct Vec<float,3> { typedef float3 type; __device__ static float3 make(const float a) {return make_float3(a,a,a);} }; template <> struct Vec<float,4> { typedef float4 type; __device__ static float4 make(const float a) {return make_float4(a,a,a,a);} }; template <> struct Vec<uint32,1> { typedef uint32 type; }; template <> struct Vec<uint32,2> { typedef uint2 type; }; template <> struct Vec<uint32,3> { typedef uint3 type; }; template <> struct Vec<uint32,4> { typedef uint4 type; }; __device__ inline float2 operator*(const float a, const float2 b) { return make_float2( a*b.x, a*b.y ); } __device__ inline float3 operator*(const float a, const float3 b) { return make_float3( a*b.x, a*b.y, a*b.z ); } __device__ inline float4 operator*(const float a, const float4 b) { return make_float4( a*b.x, a*b.y, a*b.z, a*b.w ); } __device__ inline float2 operator*(const float2 a, const float2 b) { return make_float2( a.x*b.x, a.y*b.y ); } __device__ inline float3 operator*(const float3 a, const float3 b) { return make_float3( a.x*b.x, a.y*b.y, a.z*b.z ); } __device__ inline float4 operator*(const float4 a, const float4 b) { return make_float4( a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w ); } __device__ inline float2 operator-(const float a, const float2 b) { return make_float2( a-b.x, a-b.y ); } __device__ inline float3 operator-(const float a, const float3 b) { return make_float3( a-b.x, a-b.y, a-b.z ); } __device__ inline float4 operator-(const float a, const float4 b) { return make_float4( a-b.x, a-b.y, a-b.z, a-b.w ); } __device__ inline float2 operator-(const float2 a, const float2 b) { return make_float2( a.x-b.x, a.y-b.y ); } __device__ inline float3 operator-(const float3 a, const float3 b) { return make_float3( a.x-b.x, a.y-b.y, a.z-b.z ); } __device__ inline float4 operator-(const float4 a, const float4 b) { return make_float4( a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w ); } __device__ inline float2 operator-(const float2 a, const float b) { return make_float2( a.x-b, a.y-b ); } __device__ inline float3 operator-(const float3 a, const float b) { return make_float3( a.x-b, a.y-b, a.z-b ); } __device__ inline float4 operator-(const float4 a, const float b) { return make_float4( a.x-b, a.y-b, a.z-b, a.w-b ); } __device__ inline float2 operator+(const float2 a, const float2 b) { return make_float2( a.x+b.x, a.y+b.y ); } __device__ inline float3 operator+(const float3 a, const float3 b) { return make_float3( a.x+b.x, a.y+b.y, a.z+b.z ); } __device__ inline float4 operator+(const float4 a, const float4 b) { return make_float4( a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w ); } __device__ inline float2& operator+=(float2& a, const float2 b) { a.x += b.x; a.y += b.y; return a; } __device__ inline float3& operator+=(float3& a, const float3 b) { a.x += b.x; a.y += b.y; a.z += b.z; return a; } __device__ inline float4& operator+=(float4& a, const float4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; return a; } __device__ inline float abs(const float a) { return 
fabsf(a); } __device__ inline float2 abs(const float2 a) { return make_float2( fabsf(a.x), fabsf(a.y) ); } __device__ inline float3 abs(const float3 a) { return make_float3( fabsf(a.x), fabsf(a.y), fabsf(a.z) ); } __device__ inline float4 abs(const float4 a) { return make_float4( fabsf(a.x), fabsf(a.y), fabsf(a.z), fabsf(a.w) ); } __device__ inline float rcp(const float a) { return 1.0f / a; } __device__ inline float2 rcp(const float2 a) { return make_float2( rcp(a.x), rcp(a.y) ); } __device__ inline float3 rcp(const float3 a) { return make_float3( rcp(a.x), rcp(a.y), rcp(a.z) ); } __device__ inline float4 rcp(const float4 a) { return make_float4( rcp(a.x), rcp(a.y), rcp(a.z), rcp(a.w) ); } template <uint32 K> struct rw {}; template <> struct rw<1> { typedef float vec_type; template <uint32 OFFSET> __device__ static void read( vec_type& item, const float* __restrict__ in_keys, const uint32 limit) { item = (threadIdx.x + OFFSET < limit) ? in_keys[ threadIdx.x + OFFSET ] : 0; } template <uint32 OFFSET> __device__ static void write( const vec_type item, float* __restrict__ out_keys, const uint32 limit) { if (threadIdx.x + OFFSET < limit) out_keys[ threadIdx.x + OFFSET ] = item; } }; template <> struct rw<2> { typedef typename Vec<float,2>::type vec_type; template <uint32 OFFSET> __device__ static void read( vec_type& item, const float* __restrict__ in_keys, const uint32 limit) { item.x = (threadIdx.x*2 + OFFSET*2 < limit) ? in_keys[ threadIdx.x*2 + OFFSET*2 ] : 0; item.y = (threadIdx.x*2 + OFFSET*2+1 < limit) ? in_keys[ threadIdx.x*2 + OFFSET*2+1 ] : 0; } template <uint32 OFFSET> __device__ static void write( const vec_type item, float* __restrict__ out_keys, const uint32 limit) { if (threadIdx.x*2 + OFFSET*2 < limit) out_keys[ threadIdx.x*2 + OFFSET*2 ] = item.x; if (threadIdx.x*2 + OFFSET*2+1 < limit) out_keys[ threadIdx.x*2 + OFFSET*2+1 ] = item.y; } }; template <> struct rw<3> { typedef typename Vec<float,3>::type vec_type; template <uint32 OFFSET> __device__ static void read( vec_type& item, const float* __restrict__ in_keys, const uint32 limit) { item.x = (threadIdx.x*3 + OFFSET*3 < limit) ? in_keys[ threadIdx.x*3 + OFFSET*3 ] : 0; item.y = (threadIdx.x*3 + OFFSET*3+1 < limit) ? in_keys[ threadIdx.x*3 + OFFSET*3+1 ] : 0; item.z = (threadIdx.x*3 + OFFSET*3+2 < limit) ? in_keys[ threadIdx.x*3 + OFFSET*3+2 ] : 0; } template <uint32 OFFSET> __device__ static void write( const vec_type item, float* __restrict__ out_keys, const uint32 limit) { if (threadIdx.x*3 + OFFSET*3 < limit) out_keys[ threadIdx.x*3 + OFFSET*3 ] = item.x; if (threadIdx.x*3 + OFFSET*3+1 < limit) out_keys[ threadIdx.x*3 + OFFSET*3+1 ] = item.y; if (threadIdx.x*3 + OFFSET*3+2 < limit) out_keys[ threadIdx.x*3 + OFFSET*3+2 ] = item.z; } }; template <> struct rw<4> { typedef typename Vec<float,4>::type vec_type; template <uint32 OFFSET> __device__ static void read( vec_type& item, const float* __restrict__ in_keys, const uint32 limit) { item.x = (threadIdx.x*4 + 4*OFFSET < limit) ? in_keys[ threadIdx.x*4 + OFFSET*4 ] : 0; item.y = (threadIdx.x*4 + 4*OFFSET+1 < limit) ? in_keys[ threadIdx.x*4 + OFFSET*4+1 ] : 0; item.z = (threadIdx.x*4 + 4*OFFSET+2 < limit) ? in_keys[ threadIdx.x*4 + OFFSET*4+2 ] : 0; item.w = (threadIdx.x*4 + 4*OFFSET+3 < limit) ? 
in_keys[ threadIdx.x*4 + OFFSET*4+3 ] : 0; } template <uint32 OFFSET> __device__ static void write( const vec_type item, float* __restrict__ out_keys, const uint32 limit) { if (threadIdx.x*4 + 4*OFFSET < limit) out_keys[ threadIdx.x*4 + OFFSET*4 ] = item.x; if (threadIdx.x*4 + 4*OFFSET+1 < limit) out_keys[ threadIdx.x*4 + OFFSET*4+1 ] = item.y; if (threadIdx.x*4 + 4*OFFSET+2 < limit) out_keys[ threadIdx.x*4 + OFFSET*4+2 ] = item.z; if (threadIdx.x*4 + 4*OFFSET+3 < limit) out_keys[ threadIdx.x*4 + OFFSET*4+3 ] = item.w; } }; template <uint32 CTA_SIZE, uint32 K, bool CHECKED> __device__ void pbgi_block( typename Vec<float,K>::type* smem, const uint32 src_begin, const uint32 src_end, const uint32 block_offset, const uint32 block_end, PBGI_state state, PBGI_values in_values, PBGI_values out_values) { typedef typename Vec<float,K>::type vec_type; const uint32 thread_id = threadIdx.x; vec_type* x; x = &smem[0]; vec_type* y; y = &smem[0] + CTA_SIZE*1; vec_type* z; z = &smem[0] + CTA_SIZE*2; vec_type* nx; nx = &smem[0] + CTA_SIZE*3; vec_type* ny; ny = &smem[0] + CTA_SIZE*4; vec_type* nz; nz = &smem[0] + CTA_SIZE*5; vec_type* rec_r; rec_r = &smem[0] + CTA_SIZE*6; vec_type* rec_g; rec_g = &smem[0] + CTA_SIZE*7; vec_type* rec_b; rec_b = &smem[0] + CTA_SIZE*8; // read block in shared memory (not caring about overflows) if (thread_id < CTA_SIZE) // help the poor compiler reducing register pressure { if (CHECKED == false || block_offset + thread_id*K + K-1 < block_end) { x[ thread_id ] = reinterpret_cast<vec_type*>(state.x + block_offset)[ thread_id ]; y[ thread_id ] = reinterpret_cast<vec_type*>(state.y + block_offset)[ thread_id ]; z[ thread_id ] = reinterpret_cast<vec_type*>(state.z + block_offset)[ thread_id ]; nx[ thread_id ] = reinterpret_cast<vec_type*>(state.nx + block_offset)[ thread_id ]; ny[ thread_id ] = reinterpret_cast<vec_type*>(state.ny + block_offset)[ thread_id ]; nz[ thread_id ] = reinterpret_cast<vec_type*>(state.nz + block_offset)[ thread_id ]; rec_r[ thread_id ] = reinterpret_cast<vec_type*>(out_values.r + block_offset)[ thread_id ]; rec_g[ thread_id ] = reinterpret_cast<vec_type*>(out_values.g + block_offset)[ thread_id ]; rec_b[ thread_id ] = reinterpret_cast<vec_type*>(out_values.b + block_offset)[ thread_id ]; } else { rw<K>::read<0>( x[ thread_id ], &state.x[ block_offset ], block_end - block_offset ); rw<K>::read<0>( y[ thread_id ], &state.y[ block_offset ], block_end - block_offset ); rw<K>::read<0>( z[ thread_id ], &state.z[ block_offset ], block_end - block_offset ); rw<K>::read<0>( nx[ thread_id ], &state.nx[ block_offset ], block_end - block_offset ); rw<K>::read<0>( ny[ thread_id ], &state.ny[ block_offset ], block_end - block_offset ); rw<K>::read<0>( nz[ thread_id ], &state.nz[ block_offset ], block_end - block_offset ); rw<K>::read<0>( rec_r[ thread_id ], &in_values.r[ block_offset ], block_end - block_offset ); rw<K>::read<0>( rec_g[ thread_id ], &in_values.g[ block_offset ], block_end - block_offset ); rw<K>::read<0>( rec_b[ thread_id ], &in_values.b[ block_offset ], block_end - block_offset ); } } // process block { __syncthreads(); float* x; x = reinterpret_cast<float*>(&smem[0]); float* y; y = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*1); float* z; z = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*2); float* nx; nx = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*3); float* ny; ny = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*4); float* nz; nz = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*5); float* rec_r; rec_r = reinterpret_cast<float*>(&smem[0] + 
CTA_SIZE*6); float* rec_g; rec_g = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*7); float* rec_b; rec_b = reinterpret_cast<float*>(&smem[0] + CTA_SIZE*8); // iterating over batches of N_SENDERS senders at a time #if __CUDA_ARCH__ < 200 const uint32 N_SENDERS = 4; #else const uint32 N_SENDERS = 64; #endif __shared__ float s_x[N_SENDERS], s_y[N_SENDERS], s_z[N_SENDERS], s_nx[N_SENDERS], s_ny[N_SENDERS], s_nz[N_SENDERS], s_r[N_SENDERS], s_g[N_SENDERS], s_b[N_SENDERS]; for (uint32 i = src_begin; i < src_end; i += N_SENDERS) { // load N_SENDERS senders in shared memory (issuing float4 loads) if (thread_id < N_SENDERS/4) { *reinterpret_cast<float4*>(s_x + thread_id*4) = *reinterpret_cast<float4*>(state.x + i + thread_id*4); *reinterpret_cast<float4*>(s_y + thread_id*4) = *reinterpret_cast<float4*>(state.y + i + thread_id*4); *reinterpret_cast<float4*>(s_z + thread_id*4) = *reinterpret_cast<float4*>(state.z + i + thread_id*4); *reinterpret_cast<float4*>(s_nx + thread_id*4) = *reinterpret_cast<float4*>(state.nx + i + thread_id*4); *reinterpret_cast<float4*>(s_ny + thread_id*4) = *reinterpret_cast<float4*>(state.ny + i + thread_id*4); *reinterpret_cast<float4*>(s_nz + thread_id*4) = *reinterpret_cast<float4*>(state.nz + i + thread_id*4); *reinterpret_cast<float4*>(s_r + thread_id*4) = *reinterpret_cast<float4*>(in_values.r + i + thread_id*4); *reinterpret_cast<float4*>(s_g + thread_id*4) = *reinterpret_cast<float4*>(in_values.g + i + thread_id*4); *reinterpret_cast<float4*>(s_b + thread_id*4) = *reinterpret_cast<float4*>(in_values.b + i + thread_id*4); } __syncthreads(); // and compute their contribution to all K receivers per thread for (uint32 k = 0; k < K; ++k) { #if __CUDA_ARCH__ < 200 #pragma unroll #endif for (uint32 c = 0; c < N_SENDERS; ++c) { const float dx = s_x[c] - x[ thread_id + CTA_SIZE*k ]; const float dy = s_y[c] - y[ thread_id + CTA_SIZE*k ]; const float dz = s_z[c] - z[ thread_id + CTA_SIZE*k ]; const float d2 = dx*dx + dy*dy + dz*dz; const float g1 = nx[ thread_id + CTA_SIZE*k ]*dx + ny[ thread_id + CTA_SIZE*k ]*dy + nz[ thread_id + CTA_SIZE*k ]*dz; const float g2 = s_nx[c]*dx + s_ny[c]*dy + s_nz[c]*dz; const float G_tmp = abs( g1*g2 ) * rcp( fmaxf( d2*d2, 1.0e-8f ) ); const float G = (i+c == block_offset + thread_id + CTA_SIZE*k) ? 
1.0f : G_tmp; // if the sender is the receiver the weight should be 1 //__syncthreads(); // helps lowering register usage for wide K rec_r[ thread_id + CTA_SIZE*k ] += s_r[c] * G; rec_g[ thread_id + CTA_SIZE*k ] += s_g[c] * G; rec_b[ thread_id + CTA_SIZE*k ] += s_b[c] * G; } } } __syncthreads(); } // write block to global memory if (thread_id < CTA_SIZE) // help the poor compiler reducing register pressure { if (CHECKED == false || block_offset + thread_id*K + K-1 < block_end) { reinterpret_cast<vec_type*>(out_values.r + block_offset)[ thread_id ] = rec_r[thread_id]; reinterpret_cast<vec_type*>(out_values.g + block_offset)[ thread_id ] = rec_g[thread_id]; reinterpret_cast<vec_type*>(out_values.b + block_offset)[ thread_id ] = rec_b[thread_id]; } else { rw<K>::write<0>( rec_r[ thread_id ], &out_values.r[ block_offset ], block_end - block_offset ); rw<K>::write<0>( rec_g[ thread_id ], &out_values.g[ block_offset ], block_end - block_offset ); rw<K>::write<0>( rec_b[ thread_id ], &out_values.b[ block_offset ], block_end - block_offset ); } } } template <uint32 CTA_SIZE, uint32 K> __global__ void pbgi_kernel( const uint32 n_points, const uint32 src_begin, const uint32 src_end, const uint32 rec_begin, const uint32 rec_end, const uint32 n_blocks, const uint32 n_elements_per_block, PBGI_state state, PBGI_values in_values, PBGI_values out_values) { const uint32 group_size = CTA_SIZE * K; // compile-time constant const uint32 block_id = blockIdx.x; // constant across CTA const uint32 block_begin = rec_begin + block_id * n_elements_per_block; // constant across CTA const uint32 block_end = nih::min( block_begin + n_elements_per_block, rec_end ); // constant across CTA //if (block_begin >= rec_end) // return; typedef typename Vec<float,K>::type vec_type; __shared__ vec_type smem[CTA_SIZE*9]; uint32 block_offset = block_begin; // process all the batches which donīt need overflow checks while (block_offset + group_size <= block_end) { pbgi_block<CTA_SIZE,K,false>( smem, src_begin, src_end, block_offset, block_end, state, in_values, out_values ); block_offset += group_size; } // process the last batch if (block_offset < block_end) { pbgi_block<CTA_SIZE,K,true>( smem, src_begin, src_end, block_offset, block_end, state, in_values, out_values ); } } // check for cuda runtime errors void check_cuda_errors(const uint32 code) { cudaError_t error = cudaGetLastError(); if (error) { fprintf(stderr, "*** error (%u) ***\n %s\n", code, cudaGetErrorString(error)); exit(1); } } template <uint32 CTA_SIZE, uint32 K> void test_pbgi_t(const uint32 n_points) { cudaSetDeviceFlags( cudaDeviceMapHost ); float* arena; //cudaHostAlloc( &arena, sizeof(float)*12*n_points, cudaHostAllocMapped ); arena = (float*)malloc( sizeof(float)*12*n_points ); float* ptr = arena; float* x = ptr; ptr += n_points; float* y = ptr; ptr += n_points; float* z = ptr; ptr += n_points; float* nx = ptr; ptr += n_points; float* ny = ptr; ptr += n_points; float* nz = ptr; ptr += n_points; float* in_r = ptr; ptr += n_points; float* in_g = ptr; ptr += n_points; float* in_b = ptr; ptr += n_points; float* out_r = ptr; ptr += n_points; float* out_g = ptr; ptr += n_points; float* out_b = ptr; ptr += n_points; for (uint32 i = 0; i < n_points; ++i) { x[i] = float(i) / float(n_points); y[i] = 1.0f - float(i) / float(n_points); z[i] = sinf( float(i) / float(n_points) * 2.0f * float(M_PI) ); nx[i] = 1.0f; ny[i] = 0.0f; nz[i] = 0.0f; in_r[i] = fabsf( sinf( float(i) / float(n_points) * 2.0f * float(M_PI) ) ); in_g[i] = fabsf( sinf( float(i) / float(n_points) * 4.0f * 
float(M_PI) ) ); in_b[i] = fabsf( sinf( float(i) / float(n_points) * 8.0f * float(M_PI) ) ); out_r[i] = 0.0f; out_g[i] = 0.0f; out_b[i] = 0.0f; } PBGI_state state; PBGI_values in_values; PBGI_values out_values; // compute the number of blocks we can launch to fill the machine const size_t max_blocks = thrust::detail::device::cuda::arch::max_active_blocks(pbgi_kernel<CTA_SIZE,K>, CTA_SIZE, 0); const uint32 group_size = CTA_SIZE * K; const uint32 n_groups = (n_points + group_size-1) / group_size; const size_t n_blocks = nih::min( max_blocks, n_groups ); // assume we can process 1 billion pairs per kernel launch avoiding timeout const float pairs_per_kernel = 1.0e9f; // compute the number of receivers and the number of senders we want to process per block: // our strategy is to process 4 times as many receivers as we can process concurrently, and // set the number of senders correspondingly to reach our quota uint32 n_receivers = nih::min( n_blocks * group_size * 4u, n_points ); uint32 n_senders = nih::max( uint32( pairs_per_kernel / float(n_receivers) ), 1u ); if (n_senders % 32) n_senders += 32 - (n_senders % 32); uint32 n_elements_per_block = (n_receivers + n_blocks-1) / n_blocks; if (n_elements_per_block % 4 != 0) n_elements_per_block += 4 - (n_elements_per_block % 4); float* cuda_arena; //cudaHostGetDevicePointer( &cuda_arena, arena, 0 ); cudaMalloc( &cuda_arena, sizeof(float)*12*n_points ); cudaMemcpy( cuda_arena, arena, sizeof(float)*12*n_points, cudaMemcpyHostToDevice ); check_cuda_errors( 0 ); ptr = cuda_arena; state.x = ptr; ptr += n_points; state.y = ptr; ptr += n_points; state.z = ptr; ptr += n_points; state.nx = ptr; ptr += n_points; state.ny = ptr; ptr += n_points; state.nz = ptr; ptr += n_points; in_values.r = ptr; ptr += n_points; in_values.g = ptr; ptr += n_points; in_values.b = ptr; ptr += n_points; out_values.r = ptr; ptr += n_points; out_values.g = ptr; ptr += n_points; out_values.b = ptr; ptr += n_points; fprintf(stderr, "test pbgi gpu\n"); fprintf(stderr," points : %u\n", n_points); fprintf(stderr," block size : %u\n", CTA_SIZE); fprintf(stderr," K : %u\n", K); fprintf(stderr," n_blocks : %u\n", n_blocks); fprintf(stderr," pts / block : %u\n", n_elements_per_block); fprintf(stderr," src / kernel : %u\n", n_senders); fprintf(stderr," rec / kernel : %u\n", n_receivers); uint32 min_pairs = uint32(-1); nih::Timer timer; timer.start(); { cudaThreadSynchronize(); uint32 rec_begin = 0; while (rec_begin < n_points) { uint32 rec_end = nih::min( rec_begin + n_receivers, n_points ); if (rec_end + n_receivers/2 > n_points) // if only a few points are missing, rec_end = n_points; // merge them in... n_elements_per_block = (rec_end - rec_begin + n_blocks-1) / n_blocks; if (n_elements_per_block % 4 != 0) n_elements_per_block += 4 - (n_elements_per_block % 4); uint32 sender_begin = 0; while (sender_begin < n_points) { uint32 sender_end = nih::min( sender_begin + n_senders, n_points ); if (sender_end + n_senders/2 > n_points) // if only a few points are missing, sender_end = n_points; // merge them in... 
min_pairs = nih::min( min_pairs, (sender_end - sender_begin)*(rec_end - rec_begin) ); pbgi_kernel<CTA_SIZE,K><<<n_blocks,CTA_SIZE>>>( n_points, sender_begin, sender_end, rec_begin, rec_end, n_blocks, n_elements_per_block, state, in_values, out_values ); cudaThreadSynchronize(); check_cuda_errors( 1 ); sender_begin = sender_end; } rec_begin = rec_end; } cudaThreadSynchronize(); } check_cuda_errors( 1 ); timer.stop(); cudaMemcpy( arena, cuda_arena, sizeof(float)*12*n_points, cudaMemcpyDeviceToHost ); float sum = 0.0f; for (uint32 i = 0; i < n_points; ++i) sum += (out_r[i] + out_g[i] + out_b[i]) / 3.0f; fprintf(stderr," min pairs : %u\n", min_pairs); fprintf(stderr," avg energy : %.3f\n", sum / float(n_points)); fprintf(stderr," time : %.3f s\n", float(timer.seconds())); fprintf(stderr," pairs/s : %.3f G\n", (float(n_points)/1000.0f)*(float(n_points)/1000.0f) / float(timer.seconds()*1000.0f)); //cudaFreeHost( arena ); cudaFree( cuda_arena ); free(arena); } void test_pbgi(const uint32 n_points) { /* test_pbgi_t<32,1>( n_points ); test_pbgi_t<32,2>( n_points ); test_pbgi_t<32,4>( n_points ); test_pbgi_t<64,1>( n_points ); test_pbgi_t<64,2>( n_points ); test_pbgi_t<64,4>( n_points ); test_pbgi_t<96,1>( n_points ); test_pbgi_t<96,2>( n_points ); test_pbgi_t<96,4>( n_points ); test_pbgi_t<128,1>( n_points ); test_pbgi_t<128,2>( n_points ); test_pbgi_t<128,4>( n_points ); test_pbgi_t<256,1>( n_points ); test_pbgi_t<256,2>( n_points );*/ test_pbgi_t<512,1>( n_points ); // test_pbgi_t<512,2>( n_points ); } } // namespace gpu } // namespace pbgi
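The inner loop of pbgi_block accumulates, for every sender/receiver pair, a geometric weight G = |(n_r . d)(n_s . d)| / max((d . d)^2, 1e-8), where d points from the receiver to the sender, and the kernel forces G to 1 when sender and receiver are the same point. A scalar host reference for a single pair is sketched below; the struct and function names are not part of the file and are used only for illustration.

#include <cmath>
#include <algorithm>

struct Pt3 { float x, y, z; };

// Hedged sketch: the per-pair weight accumulated by the pbgi_block kernel,
// written out for one receiver (xr, nr) and one sender (xs, ns) on the host.
inline float pbgi_pair_weight(Pt3 xr, Pt3 nr, Pt3 xs, Pt3 ns)
{
    const float dx = xs.x - xr.x, dy = xs.y - xr.y, dz = xs.z - xr.z;
    const float d2 = dx*dx + dy*dy + dz*dz;
    const float g1 = nr.x*dx + nr.y*dy + nr.z*dz;  // receiver normal . d
    const float g2 = ns.x*dx + ns.y*dy + ns.z*dz;  // sender normal . d
    return std::fabs(g1 * g2) / std::max(d2 * d2, 1.0e-8f);
}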
ccad853b9bafc8ebcfccb6d8fa9a1f8b4eba42c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" // from https://stackoverflow.com/questions/39980645/enable-code-indexing-of-cuda-in-clion #ifdef __JETBRAINS_IDE__ #define __host__ #define __device__ #define __shared__ #define __constant__ #define __global__ // This is slightly mental, but gets it to properly index device function calls like __popc and whatever. #define __HIPCC__ #include <hip/device_functions.h> // These headers are all implicitly present when you compile CUDA with clang. Clion doesn't know that, so // we include them explicitly to make the indexer happy. Doing this when you actually build is, obviously, // a terrible idea :D #include <__clang_cuda_builtin_vars.h> #include <__clang_cuda_intrinsics.h> #include <__clang_cuda_math_forward_declares.h> #include <__clang_cuda_complex_builtins.h> #include <__clang_cuda_cmath.h> #endif #define BLOCK_SIZE 32 __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; float res = 0; //For every value in the filter around the pixel (c, r) for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r) { for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c) { //Find the global image position for this filter position //clamp to boundary of the image int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1)); int image_c = min(max(thread_2D_pos.x + filter_c, 0), static_cast<int>(numCols - 1)); float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]); float filter_value = filter[(filter_r + filterWidth / 2) * filterWidth + filter_c + filterWidth / 2]; res += image_value * filter_value; } } outputChannel[thread_1D_pos] = res; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols 
+ thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; uchar4 color = inputImageRGBA[thread_1D_pos]; redChannel[thread_1D_pos] = color.x; greenChannel[thread_1D_pos] = color.y; blueChannel[thread_1D_pos] = color.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc size_t size = sizeof(float) * filterWidth * filterWidth; checkCudaErrors(hipMalloc(&d_filter, size)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, size, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE, 1); int gridX = (int)ceil(numCols / (float) blockSize.x); int gridY = (int)ceil(numRows / (float) blockSize.y); const dim3 gridSize(gridX, gridY, 1); // Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. 
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated // make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
ccad853b9bafc8ebcfccb6d8fa9a1f8b4eba42c7.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" // from https://stackoverflow.com/questions/39980645/enable-code-indexing-of-cuda-in-clion #ifdef __JETBRAINS_IDE__ #define __host__ #define __device__ #define __shared__ #define __constant__ #define __global__ // This is slightly mental, but gets it to properly index device function calls like __popc and whatever. #define __CUDACC__ #include <device_functions.h> // These headers are all implicitly present when you compile CUDA with clang. Clion doesn't know that, so // we include them explicitly to make the indexer happy. Doing this when you actually build is, obviously, // a terrible idea :D #include <__clang_cuda_builtin_vars.h> #include <__clang_cuda_intrinsics.h> #include <__clang_cuda_math_forward_declares.h> #include <__clang_cuda_complex_builtins.h> #include <__clang_cuda_cmath.h> #endif #define BLOCK_SIZE 32 __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; float res = 0; //For every value in the filter around the pixel (c, r) for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r) { for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c) { //Find the global image position for this filter position //clamp to boundary of the image int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1)); int image_c = min(max(thread_2D_pos.x + filter_c, 0), static_cast<int>(numCols - 1)); float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]); float filter_value = filter[(filter_r + filterWidth / 2) * filterWidth + filter_c + filterWidth / 2]; res += image_value * filter_value; } } outputChannel[thread_1D_pos] = res; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped 
there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; uchar4 color = inputImageRGBA[thread_1D_pos]; redChannel[thread_1D_pos] = color.x; greenChannel[thread_1D_pos] = color.y; blueChannel[thread_1D_pos] = color.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc size_t size = sizeof(float) * filterWidth * filterWidth; checkCudaErrors(cudaMalloc(&d_filter, size)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, size, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE, 1); int gridX = (int)ceil(numCols / (float) blockSize.x); int gridY = (int)ceil(numRows / (float) blockSize.y); const dim3 gridSize(gridX, gridY, 1); // Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Call your convolution kernel here 3 times, once for each color channel. 
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated // make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
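The comment block in both versions of this homework file stresses wrapping every allocation and copy in checkCudaErrors. As a minimal, self-contained sketch of that pattern in plain CUDA: the macro below is only a hypothetical stand-in for the helper presumably shipped in the assignment's utils.h, and the fill kernel is purely illustrative.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Stand-in for the checkCudaErrors helper described in the comments above.
#define checkCudaErrors(call)                                          \
  do {                                                                 \
    cudaError_t err = (call);                                          \
    if (err != cudaSuccess) {                                          \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                      \
              cudaGetErrorString(err), __FILE__, __LINE__);            \
      exit(EXIT_FAILURE);                                              \
    }                                                                  \
  } while (0)

__global__ void fill(unsigned char* p, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] = 255;
}

int main() {
  const int n = 1 << 20;
  unsigned char* d_p = nullptr;
  checkCudaErrors(cudaMalloc(&d_p, n));        // the "safe way" to allocate
  fill<<<(n + 255) / 256, 256>>>(d_p, n);
  checkCudaErrors(cudaGetLastError());         // catches launch-configuration errors
  checkCudaErrors(cudaDeviceSynchronize());    // catches errors raised while the kernel ran
  checkCudaErrors(cudaFree(d_p));
  return 0;
}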
c8c4bfe1c23aec04708af1a854e4648b8d27f219.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>   // needed for time(), used below to seed rand()
#include <hip/hip_runtime.h>

#define N 16

__global__ void add( int *a, int *b, int *c ){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    c[tid] = a[tid] + b[tid];
}

int main( void ) {
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;

    // allocate the memory on the CPU
    a = (int*)malloc( N * sizeof(int) );
    b = (int*)malloc( N * sizeof(int) );
    c = (int*)malloc( N * sizeof(int) );

    // allocate the memory on the GPU
    hipMalloc( (void**)&dev_a, N * sizeof(int) );
    hipMalloc( (void**)&dev_b, N * sizeof(int) );
    hipMalloc( (void**)&dev_c, N * sizeof(int) );

    // fill the arrays 'a' and 'b' on the CPU
    srand( time(NULL) );
    for (int i=0; i<N; i++) {
        a[i] = rand()%256;
        b[i] = rand()%256;
    }

    // copy the arrays 'a' and 'b' to the GPU
    hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice );
    hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice );

    hipLaunchKernelGGL(( add), dim3(4), dim3(4), 0, 0, dev_a, dev_b, dev_c );

    // copy the array 'c' back from the GPU to the CPU
    hipMemcpy( c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost );

    // verify that the GPU did the work we requested
    bool success = true;
    for (int i=0; i<N; i++) {
        if ((a[i] + b[i]) != c[i]) {
            printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
            success = false;
        }
    }
    if (success) printf( "We did it!\n" );

    // free the memory allocated on the GPU
    hipFree( dev_a );
    hipFree( dev_b );
    hipFree( dev_c );

    return 0;
}
c8c4bfe1c23aec04708af1a854e4648b8d27f219.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>   // needed for time(), used below to seed rand()
#include <cuda_runtime.h>

#define N 16

__global__ void add( int *a, int *b, int *c ){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    c[tid] = a[tid] + b[tid];
}

int main( void ) {
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;

    // allocate the memory on the CPU
    a = (int*)malloc( N * sizeof(int) );
    b = (int*)malloc( N * sizeof(int) );
    c = (int*)malloc( N * sizeof(int) );

    // allocate the memory on the GPU
    cudaMalloc( (void**)&dev_a, N * sizeof(int) );
    cudaMalloc( (void**)&dev_b, N * sizeof(int) );
    cudaMalloc( (void**)&dev_c, N * sizeof(int) );

    // fill the arrays 'a' and 'b' on the CPU
    srand( time(NULL) );
    for (int i=0; i<N; i++) {
        a[i] = rand()%256;
        b[i] = rand()%256;
    }

    // copy the arrays 'a' and 'b' to the GPU
    cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice );

    add<<<4, 4>>>( dev_a, dev_b, dev_c );

    // copy the array 'c' back from the GPU to the CPU
    cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost );

    // verify that the GPU did the work we requested
    bool success = true;
    for (int i=0; i<N; i++) {
        if ((a[i] + b[i]) != c[i]) {
            printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
            success = false;
        }
    }
    if (success) printf( "We did it!\n" );

    // free the memory allocated on the GPU
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );

    return 0;
}
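This pair is the smallest illustration of the launch-syntax rewrite hipify performs: a triple-chevron launch becomes hipLaunchKernelGGL with explicit grid, block, shared-memory and stream arguments. A side-by-side sketch of the mapping; the kernel name and launch sizes come from the files above, while the wrapper function is an assumption added for illustration.

#include <cuda_runtime.h>

__global__ void add(int* a, int* b, int* c) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  c[tid] = a[tid] + b[tid];
}

void launch(int* dev_a, int* dev_b, int* dev_c) {
  // CUDA source form: 4 blocks of 4 threads, implicit shared-memory size (0)
  // and default stream (0).
  add<<<4, 4>>>(dev_a, dev_b, dev_c);
  // After hipify the same launch is spelled with every argument explicit:
  //   hipLaunchKernelGGL((add), dim3(4), dim3(4), 0, 0, dev_a, dev_b, dev_c);
}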
05e765b3b93b72c389c30671d45d556dace0ff84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include "functions.h" #include <time.h> __global__ void func_kernel1d(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t offset = idx; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx)}; if (idx< n) { dy[offset] = F0(x, params); } } __global__ void func_kernel2d(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t offset = idx + idy * blockDim.x * gridDim.x; // printf("%d, %d, %d\n", idx, idy, offset); // ensure we are within bounds float x[2] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy)}; if (idx< n && idy<n) { dy[offset] = F1(x, params); } } __global__ void func_kernel3dF2(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; if (idx< n) { dy[offset] = F2(x, params) ; } } __global__ void func_kernel3dF3(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; if (idx< n) { dy[offset] = F3(x, params) ; } } __global__ void func_kernel3dF4(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; if (idx< n) { dy[offset] = F4(x, params) ; } } __global__ void func_kernel3dF5(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; if (idx< n) { dy[offset] = F5(x, params) ; } } __global__ void func_kernel3dF6(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * 
blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // printf("%d, %d, %d, %d, %d\n", idx, idy, idz, offset, n); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; if (idx< n) { dy[offset] = F6(x, params) ; } } __global__ void func_kernel3dF9(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // printf("%d, %d, %d, %d, %d\n", idx, idy, idz, offset, n); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; // printf("%0.10f, %0.10f, %0.10f\n", x[0], x[1],x[2]); // printf("%0.10f, %0.10f, %0.10f\n", a[0], a[1],a[2]); if (idx< n) { dy[offset] = myfunc(x, params) ; } } void cudasafe( hipError_t error, char* message) { if(error!=hipSuccess) { fprintf(stderr,"ERROR: %s : %i\n",message,error); exit(-1); } } double Integrate( int functionCode, // Identifies the function (and dimensionality k) const float *a, // An array of k lower bounds const float *b, // An array of k upper bounds float eps, // A target accuracy const float *params, // Parameters to function float *errorEstimate // Estimated error in integral ) { int mult = 1; // multiplier *errorEstimate = 100; // set error to high value double sum = 0; double sum_temp = 0; while (*errorEstimate > eps) { size_t freeMem = 0; size_t totalMem = 0; hipMemGetInfo(&freeMem, &totalMem); // printf("Memory avaliable: Free: %lu, Total: %lu\n",freeMem, totalMem); const int nsize = 10000000; const int sz = sizeof(float) * nsize; float *devicemem; hipMalloc((void **)&devicemem, sz); hipMemset(devicemem, 0, sz); // zeros all the bytes in devicemem int n; int k; int p = 0 ; switch(functionCode){ case 0: k=1; p=0; n=32*mult; break; case 1: k=2; p=2; n=32*mult; break; case 2: k=3; p=0; n=8*mult; break; case 3: k=3; p=1; n=8*mult; break; case 4: k=3; p=10; n=8*mult; break; case 5: k=3; p=0; n=8*mult; break; case 6: k=3; p=2; n=128*mult; break; case 9: k=3; p=0; n=8*mult; break; default: fprintf(stderr, "Invalid function code."); exit(1); } int n0=n, n1=n, n2=n; // By default use n points in each dimension // Collapse any dimensions we don't use if(k<3){ n2=1; } if(k<2){ n1=1; } // size, in bytes, of each vector size_t bytes = (n0*n1*n2)*sizeof(float); size_t bytes_temp = (pow(2,k)*n0*n1*n2)*sizeof(float); float *y = (float*)malloc(bytes); float *y_temp = (float*)malloc(bytes_temp); float base[3] = {(b[0] - a[0])/n, (b[1] - a[1])/n, (b[2] - a[2])/n}; float base_temp[3] = {(b[0] - a[0])/(n*2), (b[1] - a[1])/(n*2), (b[2] - a[2])/(n*2)}; // printf("base: %0.10f, %0.10f, %0.10f\n", base[0], base[1], base[2]); // allocate memory for each vector on GPU float * dy; float * dy_temp; float * dbase; float * dbase_temp; float * da; float * dparams; // int * dn; hipMalloc(&dy, bytes); hipMalloc(&dy_temp, bytes_temp); hipMalloc(&dbase, 3*sizeof(float)); hipMalloc(&dbase_temp, 3*sizeof(float)); // hipMalloc((void**)&dn, sizeof(int)); hipMalloc(&da, k*sizeof(int)); hipMalloc(&dparams, p*sizeof(float)); hipMemcpy(dbase, base, 3*sizeof(float), hipMemcpyHostToDevice); 
hipMemcpy(dbase_temp, base_temp, 3*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(da, a, k*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dparams, params, p*sizeof(float), hipMemcpyHostToDevice); // hipMemcpy(dn,&n,sizeof(int), hipMemcpyHostToDevice); //kernel execute if (k==1) { // printf("1D\n"); // number of threads in each thread block int blockSize = 32; dim3 dimBlock(blockSize); // number of thread blocks in grid int gridSize = (int) ceil((float)n/blockSize); dim3 dimGrid(gridSize); hipLaunchKernelGGL(( func_kernel1d), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n); int gridSize_temp = (int) ceil((float)n*2.0/blockSize); dim3 dimGrid_temp(gridSize_temp); hipLaunchKernelGGL(( func_kernel1d), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n); } else if (k==2) { // number of threads in each thread block // printf("2D\n"); int blockSize = 32; dim3 dimBlock(blockSize, blockSize); // number of thread blocks in grid int gridSize = (int) ceil((float)n/blockSize); dim3 dimGrid(gridSize, gridSize); hipLaunchKernelGGL(( func_kernel2d), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n); int gridSize_temp = (int) ceil((float)n*2.0/blockSize); dim3 dimGrid_temp(gridSize_temp, gridSize_temp); hipLaunchKernelGGL(( func_kernel2d), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n); } else { // number of threads in each thread block // printf("3D\n"); int blockSize = 8; dim3 dimBlock(blockSize, blockSize, blockSize); // number of thread blocks in grid int gridSize = (int) ceil((float)n/blockSize); dim3 dimGrid(gridSize, gridSize, gridSize); int gridSize_temp = (int) ceil((float)n*2.0/blockSize); dim3 dimGrid_temp(gridSize_temp, gridSize_temp, gridSize_temp); if (functionCode==2) { hipLaunchKernelGGL(( func_kernel3dF2), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n); hipLaunchKernelGGL(( func_kernel3dF2), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n); } else if (functionCode==3) { hipLaunchKernelGGL(( func_kernel3dF3), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n); hipLaunchKernelGGL(( func_kernel3dF3), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n); } else if (functionCode==4) { hipLaunchKernelGGL(( func_kernel3dF4), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n); hipLaunchKernelGGL(( func_kernel3dF4), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n); } else if (functionCode==5) { hipLaunchKernelGGL(( func_kernel3dF5), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n); hipLaunchKernelGGL(( func_kernel3dF5), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n); } else if (functionCode==6) { hipLaunchKernelGGL(( func_kernel3dF6), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n); hipLaunchKernelGGL(( func_kernel3dF6), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n); } else if (functionCode==9) { hipLaunchKernelGGL(( func_kernel3dF9), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n); hipLaunchKernelGGL(( func_kernel3dF9), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n); } else { fprintf(stderr, "Invalid function code."); } } //copy array back hipMemcpy(y, dy, bytes, hipMemcpyDeviceToHost); hipMemcpy(y_temp, dy_temp, bytes_temp, hipMemcpyDeviceToHost); sum = 0; sum_temp = 0; for(uint32_t i=0; i<n0*n1*n2; i++) { sum += y[i]; } 
for(uint32_t i=0; i<pow(2,k)*n0*n1*n2; i++) { sum_temp += y_temp[i]; } for(int j=0; j<k; j++) { sum *= base[j]; sum_temp *= base_temp[j]; } // printf("len: %0.10f\n", pow(2,k)*n0*n1*n2); // printf("sum: %0.10f\n", sum); // printf("sum_temp: %0.10f\n", sum_temp); hipFree(dy); hipFree(dy_temp); hipFree(da); hipFree(dbase); hipFree(dbase_temp); hipFree(dparams); // hipFree(dn); free(y); free(y_temp); hipMemset(devicemem, 0, sz); // zeros all the bytes in devicemem *errorEstimate = fabs(sum - sum_temp); mult += 1; } return sum; } void testmyfunc(void) { // float a[3]={0,0,0}; float a[3]={-1,-1,-1}; float b[3]={2,2,2}; float error; for (int n = 32; n<=1024; n*=2) { double time_spent; clock_t begin, end; begin = clock(); Integrate(9, a, b, n, NULL, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; // printf("0 %d %0.10f", n, time_spent); } } void test0(void) { float a[1]={0}; float b[1]={1}; float error; // for (int n = 32; n<=1024; n*=2) { double time_spent; clock_t begin, end; begin = clock(); float eps = 0.01; double result = Integrate(0, a, b, eps, NULL, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); printf("0 %0.10f\n", time_spent); } void test1(void) { float a[2]={0,0}; float b[2]={1,1}; float params[2]={0.5,0.5}; float error; float eps = 2; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(1, a, b, eps, params, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("1 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } void test2(void) { float exact=9.48557252267795; // Correct to about 6 digits float a[3]={-1,-1,-1}; float b[3]={1,1,1}; float eps = 0.01; float error; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(2, a, b, eps, NULL, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("2 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } void test3(void) { float exact=-7.18387139942142f; // Correct to about 6 digits float a[3]={0,0,0}; float b[3]={5,5,5}; float params[1]={2}; float eps = 0.01; float error; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(3, a, b, eps, params, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("3 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } void test4(void) { float exact=0.677779532970409f; // Correct to about 8 digits float a[3]={-16,-16,-16}; // We're going to cheat, and assume -16=-infinity. float b[3]={1,1,1}; // We're going to use the covariance matrix with ones on the diagonal, and // 0.5 off the diagonal. 
const float PI=3.1415926535897932384626433832795f; float params[10]={ 1.5, -0.5, -0.5, -0.5, 1.5, -0.5, -0.5, -0.5, 1.5, pow(2*PI,-3.0/2.0)*pow(0.5,-0.5) // This is the scale factor }; float eps = 0.01; float error; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(4, a, b, eps, params, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("4 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } void test5(void) { float exact=13.4249394627056; // Correct to about 6 digits float a[3]={0,0,0}; float b[3]={3,3,3}; float eps = 0.01; float error; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(5, a, b, eps, NULL, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("5 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } void test6(void) { float exact= 2.261955088165; float a[3]={-4,-4,-4}; float b[3]={4,4,4}; float params[2]={3,0.01}; float eps = 0.1; float error; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(6, a, b, eps, params, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("6 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } int main( int argc, char* argv[]) { // testmyfunc(); test0(); // works test1(); // works test2(); // works test3(); // works test4(); // works test5(); // works test6(); // works return 0; }
05e765b3b93b72c389c30671d45d556dace0ff84.cu
#include <stdio.h> #include <stdint.h> #include <stdlib.h> #include "functions.h" #include <time.h> __global__ void func_kernel1d(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t offset = idx; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx)}; if (idx< n) { dy[offset] = F0(x, params); } } __global__ void func_kernel2d(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t offset = idx + idy * blockDim.x * gridDim.x; // printf("%d, %d, %d\n", idx, idy, offset); // ensure we are within bounds float x[2] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy)}; if (idx< n && idy<n) { dy[offset] = F1(x, params); } } __global__ void func_kernel3dF2(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; if (idx< n) { dy[offset] = F2(x, params) ; } } __global__ void func_kernel3dF3(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; if (idx< n) { dy[offset] = F3(x, params) ; } } __global__ void func_kernel3dF4(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; if (idx< n) { dy[offset] = F4(x, params) ; } } __global__ void func_kernel3dF5(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; if (idx< n) { dy[offset] = F5(x, params) ; } } __global__ void func_kernel3dF6(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * 
blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // printf("%d, %d, %d, %d, %d\n", idx, idy, idz, offset, n); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; if (idx< n) { dy[offset] = F6(x, params) ; } } __global__ void func_kernel3dF9(float * dy, float* a, float* base, float * params, int n) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y; uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z; uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x; // printf("%d, %d, %d, %d\n", idx, idy, idz, offset); // printf("%d, %d, %d, %d, %d\n", idx, idy, idz, offset, n); // ensure we are within bounds float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)}; // printf("%0.10f, %0.10f, %0.10f\n", x[0], x[1],x[2]); // printf("%0.10f, %0.10f, %0.10f\n", a[0], a[1],a[2]); if (idx< n) { dy[offset] = myfunc(x, params) ; } } void cudasafe( cudaError_t error, char* message) { if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s : %i\n",message,error); exit(-1); } } double Integrate( int functionCode, // Identifies the function (and dimensionality k) const float *a, // An array of k lower bounds const float *b, // An array of k upper bounds float eps, // A target accuracy const float *params, // Parameters to function float *errorEstimate // Estimated error in integral ) { int mult = 1; // multiplier *errorEstimate = 100; // set error to high value double sum = 0; double sum_temp = 0; while (*errorEstimate > eps) { size_t freeMem = 0; size_t totalMem = 0; cudaMemGetInfo(&freeMem, &totalMem); // printf("Memory avaliable: Free: %lu, Total: %lu\n",freeMem, totalMem); const int nsize = 10000000; const int sz = sizeof(float) * nsize; float *devicemem; cudaMalloc((void **)&devicemem, sz); cudaMemset(devicemem, 0, sz); // zeros all the bytes in devicemem int n; int k; int p = 0 ; switch(functionCode){ case 0: k=1; p=0; n=32*mult; break; case 1: k=2; p=2; n=32*mult; break; case 2: k=3; p=0; n=8*mult; break; case 3: k=3; p=1; n=8*mult; break; case 4: k=3; p=10; n=8*mult; break; case 5: k=3; p=0; n=8*mult; break; case 6: k=3; p=2; n=128*mult; break; case 9: k=3; p=0; n=8*mult; break; default: fprintf(stderr, "Invalid function code."); exit(1); } int n0=n, n1=n, n2=n; // By default use n points in each dimension // Collapse any dimensions we don't use if(k<3){ n2=1; } if(k<2){ n1=1; } // size, in bytes, of each vector size_t bytes = (n0*n1*n2)*sizeof(float); size_t bytes_temp = (pow(2,k)*n0*n1*n2)*sizeof(float); float *y = (float*)malloc(bytes); float *y_temp = (float*)malloc(bytes_temp); float base[3] = {(b[0] - a[0])/n, (b[1] - a[1])/n, (b[2] - a[2])/n}; float base_temp[3] = {(b[0] - a[0])/(n*2), (b[1] - a[1])/(n*2), (b[2] - a[2])/(n*2)}; // printf("base: %0.10f, %0.10f, %0.10f\n", base[0], base[1], base[2]); // allocate memory for each vector on GPU float * dy; float * dy_temp; float * dbase; float * dbase_temp; float * da; float * dparams; // int * dn; cudaMalloc(&dy, bytes); cudaMalloc(&dy_temp, bytes_temp); cudaMalloc(&dbase, 3*sizeof(float)); cudaMalloc(&dbase_temp, 3*sizeof(float)); // cudaMalloc((void**)&dn, sizeof(int)); cudaMalloc(&da, k*sizeof(int)); cudaMalloc(&dparams, p*sizeof(float)); cudaMemcpy(dbase, base, 3*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dbase_temp, base_temp, 3*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(da, a, 
k*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dparams, params, p*sizeof(float), cudaMemcpyHostToDevice); // cudaMemcpy(dn,&n,sizeof(int), cudaMemcpyHostToDevice); //kernel execute if (k==1) { // printf("1D\n"); // number of threads in each thread block int blockSize = 32; dim3 dimBlock(blockSize); // number of thread blocks in grid int gridSize = (int) ceil((float)n/blockSize); dim3 dimGrid(gridSize); func_kernel1d<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n); int gridSize_temp = (int) ceil((float)n*2.0/blockSize); dim3 dimGrid_temp(gridSize_temp); func_kernel1d<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n); } else if (k==2) { // number of threads in each thread block // printf("2D\n"); int blockSize = 32; dim3 dimBlock(blockSize, blockSize); // number of thread blocks in grid int gridSize = (int) ceil((float)n/blockSize); dim3 dimGrid(gridSize, gridSize); func_kernel2d<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n); int gridSize_temp = (int) ceil((float)n*2.0/blockSize); dim3 dimGrid_temp(gridSize_temp, gridSize_temp); func_kernel2d<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n); } else { // number of threads in each thread block // printf("3D\n"); int blockSize = 8; dim3 dimBlock(blockSize, blockSize, blockSize); // number of thread blocks in grid int gridSize = (int) ceil((float)n/blockSize); dim3 dimGrid(gridSize, gridSize, gridSize); int gridSize_temp = (int) ceil((float)n*2.0/blockSize); dim3 dimGrid_temp(gridSize_temp, gridSize_temp, gridSize_temp); if (functionCode==2) { func_kernel3dF2<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n); func_kernel3dF2<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n); } else if (functionCode==3) { func_kernel3dF3<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n); func_kernel3dF3<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n); } else if (functionCode==4) { func_kernel3dF4<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n); func_kernel3dF4<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n); } else if (functionCode==5) { func_kernel3dF5<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n); func_kernel3dF5<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n); } else if (functionCode==6) { func_kernel3dF6<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n); func_kernel3dF6<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n); } else if (functionCode==9) { func_kernel3dF9<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n); func_kernel3dF9<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n); } else { fprintf(stderr, "Invalid function code."); } } //copy array back cudaMemcpy(y, dy, bytes, cudaMemcpyDeviceToHost); cudaMemcpy(y_temp, dy_temp, bytes_temp, cudaMemcpyDeviceToHost); sum = 0; sum_temp = 0; for(uint32_t i=0; i<n0*n1*n2; i++) { sum += y[i]; } for(uint32_t i=0; i<pow(2,k)*n0*n1*n2; i++) { sum_temp += y_temp[i]; } for(int j=0; j<k; j++) { sum *= base[j]; sum_temp *= base_temp[j]; } // printf("len: %0.10f\n", pow(2,k)*n0*n1*n2); // printf("sum: %0.10f\n", sum); // printf("sum_temp: %0.10f\n", sum_temp); cudaFree(dy); cudaFree(dy_temp); cudaFree(da); cudaFree(dbase); cudaFree(dbase_temp); cudaFree(dparams); // cudaFree(dn); free(y); free(y_temp); cudaMemset(devicemem, 0, sz); // zeros all the bytes in devicemem *errorEstimate = fabs(sum - sum_temp); mult += 1; } return sum; } void testmyfunc(void) { // float a[3]={0,0,0}; float a[3]={-1,-1,-1}; float b[3]={2,2,2}; float error; for (int n = 32; n<=1024; n*=2) { 
double time_spent; clock_t begin, end; begin = clock(); Integrate(9, a, b, n, NULL, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; // printf("0 %d %0.10f", n, time_spent); } } void test0(void) { float a[1]={0}; float b[1]={1}; float error; // for (int n = 32; n<=1024; n*=2) { double time_spent; clock_t begin, end; begin = clock(); float eps = 0.01; double result = Integrate(0, a, b, eps, NULL, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); printf("0 %0.10f\n", time_spent); } void test1(void) { float a[2]={0,0}; float b[2]={1,1}; float params[2]={0.5,0.5}; float error; float eps = 2; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(1, a, b, eps, params, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("1 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } void test2(void) { float exact=9.48557252267795; // Correct to about 6 digits float a[3]={-1,-1,-1}; float b[3]={1,1,1}; float eps = 0.01; float error; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(2, a, b, eps, NULL, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("2 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } void test3(void) { float exact=-7.18387139942142f; // Correct to about 6 digits float a[3]={0,0,0}; float b[3]={5,5,5}; float params[1]={2}; float eps = 0.01; float error; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(3, a, b, eps, params, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("3 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } void test4(void) { float exact=0.677779532970409f; // Correct to about 8 digits float a[3]={-16,-16,-16}; // We're going to cheat, and assume -16=-infinity. float b[3]={1,1,1}; // We're going to use the covariance matrix with ones on the diagonal, and // 0.5 off the diagonal. 
const float PI=3.1415926535897932384626433832795f; float params[10]={ 1.5, -0.5, -0.5, -0.5, 1.5, -0.5, -0.5, -0.5, 1.5, pow(2*PI,-3.0/2.0)*pow(0.5,-0.5) // This is the scale factor }; float eps = 0.01; float error; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(4, a, b, eps, params, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("4 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } void test5(void) { float exact=13.4249394627056; // Correct to about 6 digits float a[3]={0,0,0}; float b[3]={3,3,3}; float eps = 0.01; float error; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(5, a, b, eps, NULL, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("5 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } void test6(void) { float exact= 2.261955088165; float a[3]={-4,-4,-4}; float b[3]={4,4,4}; float params[2]={3,0.01}; float eps = 0.1; float error; double time_spent; clock_t begin, end; begin = clock(); double result = Integrate(6, a, b, eps, params, &error); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("********\n"); printf("6 %0.10f\n", time_spent); printf("Error: %0.10f\n", error); printf("Result: %0.10f\n", result); } int main( int argc, char* argv[]) { // testmyfunc(); test0(); // works test1(); // works test2(); // works test3(); // works test4(); // works test5(); // works test6(); // works return 0; }
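Integrate() above evaluates the integrand at cell midpoints on an n-points-per-axis grid and again on a 2n grid, takes |sum - sum_temp| as the error estimate, and keeps increasing mult until that estimate falls below eps (note that devicemem is allocated on every pass of the while loop but never freed). A minimal 1-D sketch of the same midpoint-rule refinement; the integrand f(x) = x*x and all names below are illustrative assumptions, not code from the file.

#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

__device__ float f(float x) { return x * x; }   // stand-in integrand

__global__ void midpoints(float* y, float a, float h, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] = f(a + h * (0.5f + i));      // sample the cell midpoint
}

// Midpoint-rule estimate of the integral of f over [a, b] using n cells,
// summed on the host just as Integrate() does.
float integrate1d(float a, float b, int n) {
  float h = (b - a) / n;
  float* d_y;
  float* y = new float[n];
  cudaMalloc(&d_y, n * sizeof(float));
  midpoints<<<(n + 255) / 256, 256>>>(d_y, a, h, n);
  cudaMemcpy(y, d_y, n * sizeof(float), cudaMemcpyDeviceToHost);
  double sum = 0;
  for (int i = 0; i < n; ++i) sum += y[i];
  cudaFree(d_y);
  delete[] y;
  return (float)(sum * h);
}

int main() {
  // Same refinement idea: compare n cells against 2n cells and treat the
  // difference as the error estimate.
  int n = 32;
  float coarse = integrate1d(0.f, 1.f, n);
  float fine = integrate1d(0.f, 1.f, 2 * n);
  printf("estimate %f, error estimate %f\n", fine, fabsf(coarse - fine));
  return 0;
}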
85ec9df9eccfff068db4d8197875dc40505dc89f.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void nextafter_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "nextafter_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return ::nextafter(a, b); }); }); } void heaviside_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(), "heaviside_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a == 0 ? b : static_cast<scalar_t>(a > 0); }); }); } REGISTER_DISPATCH(nextafter_stub, &nextafter_kernel_cuda); REGISTER_DISPATCH(heaviside_stub, &heaviside_kernel_cuda); }} // namespace at::native
85ec9df9eccfff068db4d8197875dc40505dc89f.cu
#include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void nextafter_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "nextafter_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return ::nextafter(a, b); }); }); } void heaviside_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(), "heaviside_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a == 0 ? b : static_cast<scalar_t>(a > 0); }); }); } REGISTER_DISPATCH(nextafter_stub, &nextafter_kernel_cuda); REGISTER_DISPATCH(heaviside_stub, &heaviside_kernel_cuda); }} // namespace at::native
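Apart from the hipify banner, only the Loops.cuh include path changes in this pair; the GPU_LAMBDA bodies are untouched because gpu_kernel_with_scalars hides all launch details. As a standalone illustration of what the heaviside lambda computes element-wise, here is a plain-CUDA sketch; the kernel and wrapper below are assumptions for illustration, not ATen code.

#include <cuda_runtime.h>

// heaviside(a, b): 0 for a < 0, b for a == 0, 1 for a > 0,
// the same expression used in the GPU_LAMBDA above.
template <typename scalar_t>
__global__ void heaviside_kernel(const scalar_t* a, const scalar_t* b,
                                 scalar_t* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    out[i] = a[i] == 0 ? b[i] : static_cast<scalar_t>(a[i] > 0);
  }
}

// Example instantiation for float inputs on a caller-supplied stream.
void heaviside_float(const float* a, const float* b, float* out, int n,
                     cudaStream_t stream) {
  int threads = 256;
  int blocks = (n + threads - 1) / threads;
  heaviside_kernel<float><<<blocks, threads, 0, stream>>>(a, b, out, n);
}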
d3f24ee86803fbcd17f645c58722b604e68a1ed7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>

#include "Device.h"
#include "Slice.h"
#include <cmath>

using std::cout;
using std::endl;

extern __global__ void slice(float* ptrDevPi, int nbSlice);

#define PI 3.14159265358979323846264338327950288419716939937510

Slice::Slice(const Grid& grid, int nbSlice, float tolerance)
{
    // Grid
    {
        this->dg = grid.dg;
        this->db = grid.db;
    }

    this->sizeOctet = db.x * db.y * db.z * sizeof(float); // bytes
    this->nbSlice = nbSlice;
    this->tolerance = tolerance;
    this->pi = 0;

    // MM
    {
        // MM (malloc Device)
        {
            Device::malloc(&ptrDevPi, sizeof(float));
            Device::memclear(ptrDevPi, sizeof(float));
        }

        Device::lastCudaError("AddVector MM (end allocation)"); // temp debug, optional
    }
}

Slice::~Slice(void)
{
    // MM (device free)
    {
        Device::free(ptrDevPi);

        Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug, optional
    }
}

float Slice::run()
{
    Device::lastCudaError("addVecteur (before)"); // temp debug

    hipLaunchKernelGGL(( slice), dim3(dg), dim3(db), sizeOctet, 0, ptrDevPi, nbSlice); // asynchronous

    Device::lastCudaError("addVecteur (after)"); // temp debug

    Device::memcpyDToH(&pi, ptrDevPi, sizeof(float)); // implicit synchronization barrier

    return pi;
}

void Slice::display()
{
    cout << "///////////////////////" << endl;
    cout << "/////TP SLICE - PI/////" << endl;
    cout << "///////////////////////" << endl;
    cout << "Exact Pi : \t" << PI << endl;
    cout << "Estimation : \t" << this->pi << endl;
    cout << "///////////////////////" << endl;
    cout << "//////////END//////////" << endl;
    cout << "///////////////////////" << endl;
}

bool Slice::check()
{
    return ::fabs(this->pi - PI) < this->tolerance;
}
d3f24ee86803fbcd17f645c58722b604e68a1ed7.cu
#include <iostream>

#include "Device.h"
#include "Slice.h"
#include <cmath>

using std::cout;
using std::endl;

extern __global__ void slice(float* ptrDevPi, int nbSlice);

#define PI 3.14159265358979323846264338327950288419716939937510

Slice::Slice(const Grid& grid, int nbSlice, float tolerance)
{
    // Grid
    {
        this->dg = grid.dg;
        this->db = grid.db;
    }

    this->sizeOctet = db.x * db.y * db.z * sizeof(float); // bytes
    this->nbSlice = nbSlice;
    this->tolerance = tolerance;
    this->pi = 0;

    // MM
    {
        // MM (malloc Device)
        {
            Device::malloc(&ptrDevPi, sizeof(float));
            Device::memclear(ptrDevPi, sizeof(float));
        }

        Device::lastCudaError("AddVector MM (end allocation)"); // temp debug, optional
    }
}

Slice::~Slice(void)
{
    // MM (device free)
    {
        Device::free(ptrDevPi);

        Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug, optional
    }
}

float Slice::run()
{
    Device::lastCudaError("addVecteur (before)"); // temp debug

    slice<<<dg, db, sizeOctet>>>(ptrDevPi, nbSlice); // asynchronous

    Device::lastCudaError("addVecteur (after)"); // temp debug

    Device::memcpyDToH(&pi, ptrDevPi, sizeof(float)); // implicit synchronization barrier

    return pi;
}

void Slice::display()
{
    cout << "///////////////////////" << endl;
    cout << "/////TP SLICE - PI/////" << endl;
    cout << "///////////////////////" << endl;
    cout << "Exact Pi : \t" << PI << endl;
    cout << "Estimation : \t" << this->pi << endl;
    cout << "///////////////////////" << endl;
    cout << "//////////END//////////" << endl;
    cout << "///////////////////////" << endl;
}

bool Slice::check()
{
    return std::fabs(this->pi - PI) < this->tolerance;
}
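Slice::run() launches the externally defined slice kernel with sizeOctet bytes of dynamic shared memory, one float per thread in the block. The project's actual kernel is not included in this pair, so the following is only a guess at what such a Pi-by-slices kernel conventionally looks like: a midpoint sum of 4/(1+x^2) over [0,1], a block-level tree reduction in the dynamically sized shared buffer (assuming a power-of-two block size), and one atomicAdd per block. It is a sketch, not the course's kernel.

// Integrates 4/(1+x^2) over [0,1] with nbSlice midpoint rectangles.
__global__ void slice(float* ptrDevPi, int nbSlice) {
  extern __shared__ float tab[];               // sizeOctet bytes supplied at launch
  const int tid = threadIdx.x;
  const int nbThreads = gridDim.x * blockDim.x;
  int i = blockIdx.x * blockDim.x + tid;

  float sum = 0.f;
  const float dx = 1.f / nbSlice;
  while (i < nbSlice) {                        // grid-stride over the slices
    float x = (i + 0.5f) * dx;
    sum += 4.f / (1.f + x * x);
    i += nbThreads;
  }
  tab[tid] = sum;
  __syncthreads();

  // Tree reduction inside the block (blockDim.x assumed to be a power of two).
  for (int half = blockDim.x / 2; half > 0; half /= 2) {
    if (tid < half) tab[tid] += tab[tid + half];
    __syncthreads();
  }
  if (tid == 0) atomicAdd(ptrDevPi, tab[0] * dx);
}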
c8461814dadaea1709cde6d7e86cf0081564bbc5.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>   // malloc, free, EXIT_SUCCESS
#include "hip/hip_runtime.h"

const int N=10000000;

__global__ void add(int *A, int *B, int *R)
{
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    R[id] = A[id]*B[id]*id;
}

int main()
{
    int *h_A, *h_B, *h_R;
    h_A=(int*)malloc(sizeof(int)*N);
    h_B=(int*)malloc(sizeof(int)*N);
    h_R=(int*)malloc(sizeof(int)*N);

    int i;
    for(i=0;i<N;i++){
        h_A[i]=i;
        h_B[i]=i;
        h_R[i]=88;
    }

    int *d_A, *d_B, *d_R;
    hipMalloc(&d_A,N*sizeof(int));
    hipMalloc(&d_B,N*sizeof(int));
    hipMalloc(&d_R,N*sizeof(int));

    hipMemcpy(d_A,h_A,N*sizeof(int), hipMemcpyHostToDevice );
    hipMemcpy(d_B,h_B,N*sizeof(int), hipMemcpyHostToDevice );
    hipMemcpy(d_R,h_R,N*sizeof(int), hipMemcpyHostToDevice );

    dim3 dimBlock( 512, 1, 1 );
    dim3 dimGrid( 1000, 1000 );
    hipLaunchKernelGGL(( add), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_R);
    hipDeviceSynchronize();

    hipMemcpy(h_R,d_R,N*sizeof(int), hipMemcpyDeviceToHost );

    // release all three device buffers
    hipFree( d_R );
    hipFree( d_A );
    hipFree( d_B );

    for(i=0;i<10;i++)
        printf("%d\n",h_R[i]);

    free(h_A);
    free(h_B);
    free(h_R);

    return EXIT_SUCCESS;
}
c8461814dadaea1709cde6d7e86cf0081564bbc5.cu
#include <stdio.h>
#include <stdlib.h>   // malloc, free, EXIT_SUCCESS
#include "cuda.h"

const int N=10000000;

__global__ void add(int *A, int *B, int *R)
{
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    R[id] = A[id]*B[id]*id;
}

int main()
{
    int *h_A, *h_B, *h_R;
    h_A=(int*)malloc(sizeof(int)*N);
    h_B=(int*)malloc(sizeof(int)*N);
    h_R=(int*)malloc(sizeof(int)*N);

    int i;
    for(i=0;i<N;i++){
        h_A[i]=i;
        h_B[i]=i;
        h_R[i]=88;
    }

    int *d_A, *d_B, *d_R;
    cudaMalloc(&d_A,N*sizeof(int));
    cudaMalloc(&d_B,N*sizeof(int));
    cudaMalloc(&d_R,N*sizeof(int));

    cudaMemcpy(d_A,h_A,N*sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy(d_B,h_B,N*sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy(d_R,h_R,N*sizeof(int), cudaMemcpyHostToDevice );

    dim3 dimBlock( 512, 1, 1 );
    dim3 dimGrid( 1000, 1000 );
    add<<<dimGrid, dimBlock>>>(d_A, d_B, d_R);
    cudaThreadSynchronize();

    cudaMemcpy(h_R,d_R,N*sizeof(int), cudaMemcpyDeviceToHost );

    // release all three device buffers
    cudaFree( d_R );
    cudaFree( d_A );
    cudaFree( d_B );

    for(i=0;i<10;i++)
        printf("%d\n",h_R[i]);

    free(h_A);
    free(h_B);
    free(h_R);

    return EXIT_SUCCESS;
}
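With dimGrid(1000, 1000) and only blockIdx.x used, the kernel above writes just the first 1000 * 512 = 512,000 of the 10,000,000 elements (and repeats that work for every blockIdx.y); the untouched entries keep their initial value of 88. A grid-stride sketch that covers all n elements with a bounds guard; it is an illustration, not a drop-in replacement for the file above, and the kernel name is an assumption.

__global__ void multiply_indexed(const int* A, const int* B, int* R, int n) {
  // Each thread walks the array with a stride of the total thread count,
  // so any 1-D launch size covers all n elements exactly once.
  for (int id = blockIdx.x * blockDim.x + threadIdx.x;
       id < n;
       id += gridDim.x * blockDim.x) {
    R[id] = A[id] * B[id] * id;
  }
}

// Example launch: 512 threads per block with a modest 1-D grid; the stride
// loop absorbs whatever remains of the 10M elements.
// multiply_indexed<<<4096, 512>>>(d_A, d_B, d_R, N);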
b79bee0bb1c07252633562108caf7b9bff182ca4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <thrust/device_vector.h> #include <thrust/tuple.h> #include "iou_box3d/iou_utils.cuh" // Parallelize over N*M computations which can each be done // independently __global__ void IoUBox3DKernel( const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> boxes1, const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> boxes2, at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> vols, at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> ious) { const size_t N = boxes1.size(0); const size_t M = boxes2.size(0); const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = gridDim.x * blockDim.x; std::array<FaceVerts, NUM_TRIS> box1_tris{}; std::array<FaceVerts, NUM_TRIS> box2_tris{}; std::array<FaceVerts, NUM_PLANES> box1_planes{}; std::array<FaceVerts, NUM_PLANES> box2_planes{}; for (size_t i = tid; i < N * M; i += stride) { const size_t n = i / M; // box1 index const size_t m = i % M; // box2 index // Convert to array of structs of face vertices i.e. effectively (F, 3, 3) // FaceVerts is a data type defined in iou_utils.cuh GetBoxTris(boxes1[n], box1_tris); GetBoxTris(boxes2[m], box2_tris); // Calculate the position of the center of the box which is used in // several calculations. This requires a tensor as input. const float3 box1_center = BoxCenter(boxes1[n]); const float3 box2_center = BoxCenter(boxes2[m]); // Convert to an array of face vertices GetBoxPlanes(boxes1[n], box1_planes); GetBoxPlanes(boxes2[m], box2_planes); // Get Box Volumes const float box1_vol = BoxVolume(box1_tris, box1_center, NUM_TRIS); const float box2_vol = BoxVolume(box2_tris, box2_center, NUM_TRIS); // Tris in Box1 intersection with Planes in Box2 // Initialize box1 intersecting faces. MAX_TRIS is the // max faces possible in the intersecting shape. // TODO: determine if the value of MAX_TRIS is sufficient or // if we should store the max tris for each NxM computation // and throw an error if any exceeds the max. FaceVerts box1_intersect[MAX_TRIS]; for (int j = 0; j < NUM_TRIS; ++j) { // Initialize the faces from the box box1_intersect[j] = box1_tris[j]; } // Get the count of the actual number of faces in the intersecting shape int box1_count = BoxIntersections(box2_planes, box2_center, box1_intersect); // Tris in Box2 intersection with Planes in Box1 FaceVerts box2_intersect[MAX_TRIS]; for (int j = 0; j < NUM_TRIS; ++j) { box2_intersect[j] = box2_tris[j]; } const int box2_count = BoxIntersections(box1_planes, box1_center, box2_intersect); // If there are overlapping regions in Box2, remove any coplanar faces if (box2_count > 0) { // Identify if any triangles in Box2 are coplanar with Box1 Keep tri2_keep[MAX_TRIS]; for (int j = 0; j < MAX_TRIS; ++j) { // Initialize the valid faces to be true tri2_keep[j].keep = j < box2_count ? 
true : false; } for (int b1 = 0; b1 < box1_count; ++b1) { for (int b2 = 0; b2 < box2_count; ++b2) { const bool is_coplanar = IsCoplanarTriTri(box1_intersect[b1], box2_intersect[b2]); const float area = FaceArea(box1_intersect[b1]); if ((is_coplanar) && (area > aEpsilon)) { tri2_keep[b2].keep = false; } } } // Keep only the non coplanar triangles in Box2 - add them to the // Box1 triangles. for (int b2 = 0; b2 < box2_count; ++b2) { if (tri2_keep[b2].keep) { box1_intersect[box1_count] = box2_intersect[b2]; // box1_count will determine the total faces in the // intersecting shape box1_count++; } } } // Initialize the vol and iou to 0.0 in case there are no triangles // in the intersecting shape. float vol = 0.0; float iou = 0.0; // If there are triangles in the intersecting shape if (box1_count > 0) { // The intersecting shape is a polyhedron made up of the // triangular faces that are all now in box1_intersect. // Calculate the polyhedron center const float3 poly_center = PolyhedronCenter(box1_intersect, box1_count); // Compute intersecting polyhedron volume vol = BoxVolume(box1_intersect, poly_center, box1_count); // Compute IoU iou = vol / (box1_vol + box2_vol - vol); } // Write the volume and IoU to global memory vols[n][m] = vol; ious[n][m] = iou; } } std::tuple<at::Tensor, at::Tensor> IoUBox3DCuda( const at::Tensor& boxes1, // (N, 8, 3) const at::Tensor& boxes2) { // (M, 8, 3) // Check inputs are on the same device at::TensorArg boxes1_t{boxes1, "boxes1", 1}, boxes2_t{boxes2, "boxes2", 2}; at::CheckedFrom c = "IoUBox3DCuda"; at::checkAllSameGPU(c, {boxes1_t, boxes2_t}); at::checkAllSameType(c, {boxes1_t, boxes2_t}); // Set the device for the kernel launch based on the device of boxes1 at::hip::HIPGuardMasqueradingAsCUDA device_guard(boxes1.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); TORCH_CHECK(boxes2.size(2) == boxes1.size(2), "Boxes must have shape (8, 3)"); TORCH_CHECK( (boxes2.size(1) == 8) && (boxes1.size(1) == 8), "Boxes must have shape (8, 3)"); const int64_t N = boxes1.size(0); const int64_t M = boxes2.size(0); auto vols = at::zeros({N, M}, boxes1.options()); auto ious = at::zeros({N, M}, boxes1.options()); if (vols.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(vols, ious); } const size_t blocks = 512; const size_t threads = 256; hipLaunchKernelGGL(( IoUBox3DKernel), dim3(blocks), dim3(threads), 0, stream, boxes1.packed_accessor64<float, 3, at::RestrictPtrTraits>(), boxes2.packed_accessor64<float, 3, at::RestrictPtrTraits>(), vols.packed_accessor64<float, 2, at::RestrictPtrTraits>(), ious.packed_accessor64<float, 2, at::RestrictPtrTraits>()); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(vols, ious); }
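A quick arithmetic check of the identity used at the end of IoUBox3DKernel (the numbers below are chosen for illustration and do not come from the file): with iou = vol / (box1_vol + box2_vol - vol), two unit cubes whose intersection has volume 0.5 give 0.5 / (1.0 + 1.0 - 0.5) = 1/3, and two identical boxes give 1.0 / (1.0 + 1.0 - 1.0) = 1, which is the expected intersection-over-union behaviour.

// Illustrative only: the IoU identity from the kernel, evaluated by hand.
float box1_vol = 1.0f, box2_vol = 1.0f, vol = 0.5f;   // assumed volumes
float iou = vol / (box1_vol + box2_vol - vol);        // 0.5f / 1.5f == 1.0f / 3.0f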
b79bee0bb1c07252633562108caf7b9bff182ca4.cu
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <thrust/device_vector.h> #include <thrust/tuple.h> #include "iou_box3d/iou_utils.cuh" // Parallelize over N*M computations which can each be done // independently __global__ void IoUBox3DKernel( const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> boxes1, const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> boxes2, at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> vols, at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> ious) { const size_t N = boxes1.size(0); const size_t M = boxes2.size(0); const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = gridDim.x * blockDim.x; std::array<FaceVerts, NUM_TRIS> box1_tris{}; std::array<FaceVerts, NUM_TRIS> box2_tris{}; std::array<FaceVerts, NUM_PLANES> box1_planes{}; std::array<FaceVerts, NUM_PLANES> box2_planes{}; for (size_t i = tid; i < N * M; i += stride) { const size_t n = i / M; // box1 index const size_t m = i % M; // box2 index // Convert to array of structs of face vertices i.e. effectively (F, 3, 3) // FaceVerts is a data type defined in iou_utils.cuh GetBoxTris(boxes1[n], box1_tris); GetBoxTris(boxes2[m], box2_tris); // Calculate the position of the center of the box which is used in // several calculations. This requires a tensor as input. const float3 box1_center = BoxCenter(boxes1[n]); const float3 box2_center = BoxCenter(boxes2[m]); // Convert to an array of face vertices GetBoxPlanes(boxes1[n], box1_planes); GetBoxPlanes(boxes2[m], box2_planes); // Get Box Volumes const float box1_vol = BoxVolume(box1_tris, box1_center, NUM_TRIS); const float box2_vol = BoxVolume(box2_tris, box2_center, NUM_TRIS); // Tris in Box1 intersection with Planes in Box2 // Initialize box1 intersecting faces. MAX_TRIS is the // max faces possible in the intersecting shape. // TODO: determine if the value of MAX_TRIS is sufficient or // if we should store the max tris for each NxM computation // and throw an error if any exceeds the max. FaceVerts box1_intersect[MAX_TRIS]; for (int j = 0; j < NUM_TRIS; ++j) { // Initialize the faces from the box box1_intersect[j] = box1_tris[j]; } // Get the count of the actual number of faces in the intersecting shape int box1_count = BoxIntersections(box2_planes, box2_center, box1_intersect); // Tris in Box2 intersection with Planes in Box1 FaceVerts box2_intersect[MAX_TRIS]; for (int j = 0; j < NUM_TRIS; ++j) { box2_intersect[j] = box2_tris[j]; } const int box2_count = BoxIntersections(box1_planes, box1_center, box2_intersect); // If there are overlapping regions in Box2, remove any coplanar faces if (box2_count > 0) { // Identify if any triangles in Box2 are coplanar with Box1 Keep tri2_keep[MAX_TRIS]; for (int j = 0; j < MAX_TRIS; ++j) { // Initialize the valid faces to be true tri2_keep[j].keep = j < box2_count ? 
true : false; } for (int b1 = 0; b1 < box1_count; ++b1) { for (int b2 = 0; b2 < box2_count; ++b2) { const bool is_coplanar = IsCoplanarTriTri(box1_intersect[b1], box2_intersect[b2]); const float area = FaceArea(box1_intersect[b1]); if ((is_coplanar) && (area > aEpsilon)) { tri2_keep[b2].keep = false; } } } // Keep only the non coplanar triangles in Box2 - add them to the // Box1 triangles. for (int b2 = 0; b2 < box2_count; ++b2) { if (tri2_keep[b2].keep) { box1_intersect[box1_count] = box2_intersect[b2]; // box1_count will determine the total faces in the // intersecting shape box1_count++; } } } // Initialize the vol and iou to 0.0 in case there are no triangles // in the intersecting shape. float vol = 0.0; float iou = 0.0; // If there are triangles in the intersecting shape if (box1_count > 0) { // The intersecting shape is a polyhedron made up of the // triangular faces that are all now in box1_intersect. // Calculate the polyhedron center const float3 poly_center = PolyhedronCenter(box1_intersect, box1_count); // Compute intersecting polyhedron volume vol = BoxVolume(box1_intersect, poly_center, box1_count); // Compute IoU iou = vol / (box1_vol + box2_vol - vol); } // Write the volume and IoU to global memory vols[n][m] = vol; ious[n][m] = iou; } } std::tuple<at::Tensor, at::Tensor> IoUBox3DCuda( const at::Tensor& boxes1, // (N, 8, 3) const at::Tensor& boxes2) { // (M, 8, 3) // Check inputs are on the same device at::TensorArg boxes1_t{boxes1, "boxes1", 1}, boxes2_t{boxes2, "boxes2", 2}; at::CheckedFrom c = "IoUBox3DCuda"; at::checkAllSameGPU(c, {boxes1_t, boxes2_t}); at::checkAllSameType(c, {boxes1_t, boxes2_t}); // Set the device for the kernel launch based on the device of boxes1 at::cuda::CUDAGuard device_guard(boxes1.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); TORCH_CHECK(boxes2.size(2) == boxes1.size(2), "Boxes must have shape (8, 3)"); TORCH_CHECK( (boxes2.size(1) == 8) && (boxes1.size(1) == 8), "Boxes must have shape (8, 3)"); const int64_t N = boxes1.size(0); const int64_t M = boxes2.size(0); auto vols = at::zeros({N, M}, boxes1.options()); auto ious = at::zeros({N, M}, boxes1.options()); if (vols.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(vols, ious); } const size_t blocks = 512; const size_t threads = 256; IoUBox3DKernel<<<blocks, threads, 0, stream>>>( boxes1.packed_accessor64<float, 3, at::RestrictPtrTraits>(), boxes2.packed_accessor64<float, 3, at::RestrictPtrTraits>(), vols.packed_accessor64<float, 2, at::RestrictPtrTraits>(), ious.packed_accessor64<float, 2, at::RestrictPtrTraits>()); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(vols, ious); }
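A host-side call sketch for the entry point defined above (an editorial illustration, not code from the repository). The unit-cube corner ordering is an assumption rather than a documented convention of iou_utils.cuh, and the forward declaration simply restates the signature defined in the file.

#include <ATen/ATen.h>
#include <tuple>

// Signature as defined in the file above.
std::tuple<at::Tensor, at::Tensor> IoUBox3DCuda(const at::Tensor& boxes1, const at::Tensor& boxes2);

// Corners of the axis-aligned unit cube [0,1]^3; the ordering here is assumed.
static const float kUnitCube[8][3] = {
    {0, 0, 0}, {1, 0, 0}, {1, 1, 0}, {0, 1, 0},
    {0, 0, 1}, {1, 0, 1}, {1, 1, 1}, {0, 1, 1}};

static at::Tensor unit_cube_boxes(int n) {
  // (1, 8, 3) host tensor copied onto the GPU, repeated n times along dim 0.
  at::Tensor box = at::from_blob((void*)kUnitCube, {1, 8, 3}, at::kFloat).clone().to(at::kCUDA);
  return box.repeat({n, 1, 1});
}

void iou_box3d_example() {
  at::Tensor boxes1 = unit_cube_boxes(1);          // one unit cube
  at::Tensor boxes2 = unit_cube_boxes(1) + 0.5f;   // same cube shifted by 0.5 on every axis
  at::Tensor vols, ious;
  std::tie(vols, ious) = IoUBox3DCuda(boxes1, boxes2);  // each of shape (1, 1)
  // The overlap is a 0.5^3 cube, so vols[0][0] should be 0.125 and
  // ious[0][0] should be 0.125 / (1 + 1 - 0.125).
}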
8f8f8a6bd97818c3d6d9a39d5f6759320808149f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <gauge_field.h> #include <color_spinor_field.h> #include <uint_to_char.h> #include <worker.h> #include <tune_quda.h> #include <jitify_helper.cuh> #include <kernels/dslash_coarse.cuh> namespace quda { #ifdef GPU_MULTIGRID template <typename Float, typename yFloat, typename ghostFloat, int nDim, int Ns, int Nc, int Mc, bool dslash, bool clover, bool dagger, DslashType type> class DslashCoarse : public TunableVectorY { protected: ColorSpinorField &out; const ColorSpinorField &inA; const ColorSpinorField &inB; const GaugeField &Y; const GaugeField &X; const double kappa; const int parity; const int nParity; const int nSrc; const int max_color_col_stride = 8; mutable int color_col_stride; mutable int dim_threads; char *saveOut; long long flops() const { return ((dslash*2*nDim+clover*1)*(8*Ns*Nc*Ns*Nc)-2*Ns*Nc)*nParity*(long long)out.VolumeCB(); } long long bytes() const { return (dslash||clover) * out.Bytes() + dslash*8*inA.Bytes() + clover*inB.Bytes() + nSrc*nParity*(dslash*Y.Bytes()*Y.VolumeCB()/(2*Y.Stride()) + clover*X.Bytes()/2); } unsigned int sharedBytesPerThread() const { return (sizeof(complex<Float>) * Mc); } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneGridDim() const { return false; } // Don't tune the grid dimensions bool tuneAuxDim() const { return true; } // Do tune the aux dimensions unsigned int minThreads() const { return color_col_stride * X.VolumeCB(); } // 4-d volume since this x threads only bool advanceBlockDim(TuneParam &param) const { dim3 grid = param.grid; bool ret = TunableVectorY::advanceBlockDim(param); param.grid.z = grid.z; if (ret) { // we advanced the block.x so we're done return true; } else { // block.x (spacetime) was reset // let's try to advance spin/block-color while(param.block.z <= (unsigned int)(dim_threads * 2 * 2 * (Nc/Mc))) { param.block.z+=dim_threads * 2; if ( (dim_threads*2*2*(Nc/Mc)) % param.block.z == 0) { param.grid.z = (dim_threads * 2 * 2 * (Nc/Mc)) / param.block.z; break; } } // we can advance spin/block-color since this is valid if (param.block.z <= (unsigned int)(dim_threads * 2 * 2 * (Nc/Mc)) && param.block.z <= (unsigned int)deviceProp.maxThreadsDim[2] && param.block.x*param.block.y*param.block.z <= (unsigned int)deviceProp.maxThreadsPerBlock ) { // return true; } else { // we have run off the end so let's reset param.block.z = dim_threads * 2; param.grid.z = 2 * (Nc/Mc); return false; } } } // FIXME: understand why this leads to slower perf and variable correctness //int blockStep() const { return deviceProp.warpSize/4; } //int blockMin() const { return deviceProp.warpSize/4; } // Experimental autotuning of the color column stride bool advanceAux(TuneParam &param) const { #ifdef DOT_PRODUCT_SPLIT // we can only split the dot product on Kepler and later since we need the __shfl instruction if (2*param.aux.x <= max_color_col_stride && Nc % (2*param.aux.x) == 0 && param.block.x % deviceProp.warpSize == 0) { // An x-dimension block size that is not a multiple of the // warp size is incompatible with splitting the dot product // across the warp so we must skip this param.aux.x *= 2; // safe to advance color_col_stride = param.aux.x; // recompute grid size since minThreads() has now been updated param.grid.x = (minThreads()+param.block.x-1)/param.block.x; // check this grid size is valid before returning if (param.grid.x < (unsigned int)deviceProp.maxGridSize[0]) return true; } #endif // reset color column 
stride if too large or not divisible param.aux.x = 1; color_col_stride = param.aux.x; // recompute grid size since minThreads() has now been updated param.grid.x = (minThreads()+param.block.x-1)/param.block.x; if (2*param.aux.y <= nDim && param.block.x*param.block.y*dim_threads*2 <= (unsigned int)deviceProp.maxThreadsPerBlock) { param.aux.y *= 2; dim_threads = param.aux.y; // need to reset z-block/grid size/shared_bytes since dim_threads has changed param.block.z = dim_threads * 2; param.grid.z = 2* (Nc / Mc); param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ? sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param); return true; } else { param.aux.y = 1; dim_threads = param.aux.y; // need to reset z-block/grid size/shared_bytes since // dim_threads has changed. Strictly speaking this isn't needed // since this is the outer dimension to tune, but would be // needed if we added an aux.z tuning dimension param.block.z = dim_threads * 2; param.grid.z = 2* (Nc / Mc); param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ? sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param); return false; } } virtual void initTuneParam(TuneParam &param) const { param.aux = make_int4(1,1,1,1); color_col_stride = param.aux.x; dim_threads = param.aux.y; TunableVectorY::initTuneParam(param); param.block.z = dim_threads * 2; param.grid.z = 2*(Nc/Mc); param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ? sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param); } /** sets default values for when tuning is disabled */ virtual void defaultTuneParam(TuneParam &param) const { param.aux = make_int4(1,1,1,1); color_col_stride = param.aux.x; dim_threads = param.aux.y; TunableVectorY::defaultTuneParam(param); // ensure that the default x block size is divisible by the warpSize param.block.x = deviceProp.warpSize; param.grid.x = (minThreads()+param.block.x-1)/param.block.x; param.block.z = dim_threads * 2; param.grid.z = 2*(Nc/Mc); param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ? sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param); } public: inline DslashCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB, const GaugeField &Y, const GaugeField &X, double kappa, int parity, MemoryLocation *halo_location) : TunableVectorY(out.SiteSubset() * (out.Ndim()==5 ? out.X(4) : 1)), out(out), inA(inA), inB(inB), Y(Y), X(X), kappa(kappa), parity(parity), nParity(out.SiteSubset()), nSrc(out.Ndim()==5 ? 
out.X(4) : 1) { strcpy(aux, "policy_kernel,"); if (out.Location() == QUDA_CUDA_FIELD_LOCATION) { #ifdef JITIFY create_jitify_program("kernels/dslash_coarse.cuh"); #endif } strcat(aux, compile_type_str(out)); strcat(aux, out.AuxString()); strcat(aux, comm_dim_partitioned_string()); // record the location of where each pack buffer is in [2*dim+dir] ordering // 0 - no packing // 1 - pack to local GPU memory // 2 - pack to local mapped CPU memory // 3 - pack to remote mapped GPU memory switch(type) { case DSLASH_INTERIOR: strcat(aux,",interior"); break; case DSLASH_EXTERIOR: strcat(aux,",exterior"); break; case DSLASH_FULL: strcat(aux,",full"); break; } if (doHalo<type>()) { char label[15] = ",halo="; for (int dim=0; dim<4; dim++) { for (int dir=0; dir<2; dir++) { label[2*dim+dir+6] = !comm_dim_partitioned(dim) ? '0' : halo_location[2*dim+dir] == Device ? '1' : halo_location[2*dim+dir] == Host ? '2' : '3'; } } label[14] = '\0'; strcat(aux,label); } } virtual ~DslashCoarse() { } inline void apply(const hipStream_t &stream) { if (out.Location() == QUDA_CPU_FIELD_LOCATION) { if (out.FieldOrder() != QUDA_SPACE_SPIN_COLOR_FIELD_ORDER || Y.FieldOrder() != QUDA_QDP_GAUGE_ORDER) errorQuda("Unsupported field order colorspinor=%d gauge=%d combination\n", inA.FieldOrder(), Y.FieldOrder()); DslashCoarseArg<Float,yFloat,ghostFloat,Ns,Nc,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER,QUDA_QDP_GAUGE_ORDER> arg(out, inA, inB, Y, X, (Float)kappa, parity); coarseDslash<Float,nDim,Ns,Nc,Mc,dslash,clover,dagger,type>(arg); } else { const TuneParam &tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (out.FieldOrder() != QUDA_FLOAT2_FIELD_ORDER || Y.FieldOrder() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported field order colorspinor=%d gauge=%d combination\n", inA.FieldOrder(), Y.FieldOrder()); typedef DslashCoarseArg<Float,yFloat,ghostFloat,Ns,Nc,QUDA_FLOAT2_FIELD_ORDER,QUDA_FLOAT2_GAUGE_ORDER> Arg; Arg arg(out, inA, inB, Y, X, (Float)kappa, parity); #ifdef JITIFY using namespace jitify::reflection; jitify_error = program->kernel("quda::coarseDslashKernel") .instantiate(Type<Float>(),nDim,Ns,Nc,Mc,(int)tp.aux.x,(int)tp.aux.y,dslash,clover,dagger,type,Type<Arg>()) .configure(tp.grid,tp.block,tp.shared_bytes,stream).launch(arg); #else switch (tp.aux.y) { // dimension gather parallelisation case 1: switch (tp.aux.x) { // this is color_col_stride case 1: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,1,1,dslash,clover,dagger,type>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; #ifdef DOT_PRODUCT_SPLIT case 2: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,2,1,dslash,clover,dagger,type>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; case 4: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,4,1,dslash,clover,dagger,type>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; case 8: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,8,1,dslash,clover,dagger,type>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; #endif // DOT_PRODUCT_SPLIT default: errorQuda("Color column stride %d not valid", tp.aux.x); } break; case 2: switch (tp.aux.x) { // this is color_col_stride case 1: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,1,2,dslash,clover,dagger,type>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; #ifdef DOT_PRODUCT_SPLIT case 2: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,2,2,dslash,clover,dagger,type>) , 
dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; case 4: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,4,2,dslash,clover,dagger,type>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; case 8: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,8,2,dslash,clover,dagger,type>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; #endif // DOT_PRODUCT_SPLIT default: errorQuda("Color column stride %d not valid", tp.aux.x); } break; case 4: switch (tp.aux.x) { // this is color_col_stride case 1: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,1,4,dslash,clover,dagger,type>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; #ifdef DOT_PRODUCT_SPLIT case 2: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,2,4,dslash,clover,dagger,type>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; case 4: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,4,4,dslash,clover,dagger,type>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; case 8: hipLaunchKernelGGL(( coarseDslashKernel<Float,nDim,Ns,Nc,Mc,8,4,dslash,clover,dagger,type>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; #endif // DOT_PRODUCT_SPLIT default: errorQuda("Color column stride %d not valid", tp.aux.x); } break; default: errorQuda("Invalid dimension thread splitting %d", tp.aux.y); } #endif } } TuneKey tuneKey() const { return TuneKey(out.VolString(), typeid(*this).name(), aux); } void preTune() { saveOut = new char[out.Bytes()]; hipMemcpy(saveOut, out.V(), out.Bytes(), hipMemcpyDeviceToHost); } void postTune() { hipMemcpy(out.V(), saveOut, out.Bytes(), hipMemcpyHostToDevice); delete[] saveOut; } }; template <typename Float, typename yFloat, typename ghostFloat, int coarseColor, int coarseSpin> inline void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB, const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover, bool dagger, DslashType type, MemoryLocation *halo_location) { const int colors_per_thread = 1; const int nDim = 4; if (dagger) { if (dslash) { if (clover) { if (type == DSLASH_FULL) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,true,true,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else if (type == DSLASH_INTERIOR) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,true,true,DSLASH_INTERIOR> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { errorQuda("Dslash type %d not instantiated", type); } } else { // plain dslash if (type == DSLASH_FULL) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,false,true,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else if (type == DSLASH_INTERIOR) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,false,true,DSLASH_INTERIOR> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { errorQuda("Dslash type %d not instantiated", type); } } } else { if (type == DSLASH_EXTERIOR) errorQuda("Cannot call halo on pure clover kernel"); if (clover) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,false,true,true,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { 
errorQuda("Unsupported dslash=false clover=false"); } } } else { if (dslash) { if (clover) { if (type == DSLASH_FULL) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,true,false,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else if (type == DSLASH_INTERIOR) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,true,false,DSLASH_INTERIOR> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { errorQuda("Dslash type %d not instantiated", type); } } else { // plain dslash if (type == DSLASH_FULL) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,false,false,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else if (type == DSLASH_INTERIOR) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,false,false,DSLASH_INTERIOR> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { errorQuda("Dslash type %d not instantiated", type); } } } else { if (type == DSLASH_EXTERIOR) errorQuda("Cannot call halo on pure clover kernel"); if (clover) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,false,true,false,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { errorQuda("Unsupported dslash=false clover=false"); } } } } // template on the number of coarse colors template <typename Float, typename yFloat, typename ghostFloat> inline void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB, const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover, bool dagger, DslashType type, MemoryLocation *halo_location) { if (Y.FieldOrder() != X.FieldOrder()) errorQuda("Field order mismatch Y = %d, X = %d", Y.FieldOrder(), X.FieldOrder()); if (inA.FieldOrder() != out.FieldOrder()) errorQuda("Field order mismatch inA = %d, out = %d", inA.FieldOrder(), out.FieldOrder()); if (inA.Nspin() != 2) errorQuda("Unsupported number of coarse spins %d\n",inA.Nspin()); #if 0 } else if (inA.Ncolor() == 4) { ApplyCoarse<Float,yFloat,ghostFloat,4,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); #endif if (inA.Ncolor() == 6) { // free field Wilson ApplyCoarse<Float,yFloat,ghostFloat,6,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); #if 0 } else if (inA.Ncolor() == 8) { ApplyCoarse<Float,yFloat,ghostFloat,8,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); } else if (inA.Ncolor() == 12) { ApplyCoarse<Float,yFloat,ghostFloat,12,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); } else if (inA.Ncolor() == 16) { ApplyCoarse<Float,yFloat,ghostFloat,16,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); } else if (inA.Ncolor() == 20) { ApplyCoarse<Float,yFloat,ghostFloat,20,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); #endif } else if (inA.Ncolor() == 24) { ApplyCoarse<Float,yFloat,ghostFloat,24,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); #if 0 } else if (inA.Ncolor() == 28) { ApplyCoarse<Float,yFloat,ghostFloat,28,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); #endif } else if (inA.Ncolor() == 32) { 
ApplyCoarse<Float,yFloat,ghostFloat,32,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); } else { errorQuda("Unsupported number of coarse dof %d\n", Y.Ncolor()); } } // this is the Worker pointer that may have issue additional work // while we're waiting on communication to finish namespace dslash { extern Worker* aux_worker; } #endif // GPU_MULTIGRID enum class DslashCoarsePolicy { DSLASH_COARSE_BASIC, // stage both sends and recvs in host memory using memcpys DSLASH_COARSE_ZERO_COPY_PACK, // zero copy write pack buffers DSLASH_COARSE_ZERO_COPY_READ, // zero copy read halos in dslash kernel DSLASH_COARSE_ZERO_COPY, // full zero copy DSLASH_COARSE_GDR_SEND, // GDR send DSLASH_COARSE_GDR_RECV, // GDR recv DSLASH_COARSE_GDR, // full GDR DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV, // zero copy write and GDR recv DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ, // GDR send and zero copy read DSLASH_COARSE_POLICY_DISABLED }; struct DslashCoarseLaunch { ColorSpinorField &out; const ColorSpinorField &inA; const ColorSpinorField &inB; const GaugeField &Y; const GaugeField &X; double kappa; int parity; bool dslash; bool clover; bool dagger; const int *commDim; const QudaPrecision halo_precision; inline DslashCoarseLaunch(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB, const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover, bool dagger, const int *commDim, QudaPrecision halo_precision) : out(out), inA(inA), inB(inB), Y(Y), X(X), kappa(kappa), parity(parity), dslash(dslash), clover(clover), dagger(dagger), commDim(commDim), halo_precision(halo_precision == QUDA_INVALID_PRECISION ? Y.Precision() : halo_precision) { } /** @brief Execute the coarse dslash using the given policy */ inline void operator()(DslashCoarsePolicy policy) { #ifdef GPU_MULTIGRID if (inA.V() == out.V()) errorQuda("Aliasing pointers"); // check all precisions match QudaPrecision precision = checkPrecision(out, inA, inB); checkPrecision(Y, X); // check all locations match checkLocation(out, inA, inB, Y, X); int comm_sum = 4; if (commDim) for (int i=0; i<4; i++) comm_sum -= (1-commDim[i]); if (comm_sum != 4 && comm_sum != 0) errorQuda("Unsupported comms %d", comm_sum); bool comms = comm_sum; MemoryLocation pack_destination[2*QUDA_MAX_DIM]; // where we will pack the ghost buffer to MemoryLocation halo_location[2*QUDA_MAX_DIM]; // where we load the halo from for (int i=0; i<2*QUDA_MAX_DIM; i++) { pack_destination[i] = (policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV) ? Host : Device; halo_location[i] = (policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_READ || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY || policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ) ? Host : Device; } bool gdr_send = (policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND || policy == DslashCoarsePolicy::DSLASH_COARSE_GDR || policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ) ? true : false; bool gdr_recv = (policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_RECV || policy == DslashCoarsePolicy::DSLASH_COARSE_GDR || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV) ? 
true : false; // disable peer-to-peer if doing a zero-copy policy (temporary) if ( policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_READ || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV || policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ) comm_enable_peer2peer(false); if (dslash && comm_partitioned() && comms) { const int nFace = 1; inA.exchangeGhost((QudaParity)(inA.SiteSubset() == QUDA_PARITY_SITE_SUBSET ? (1 - parity) : 0), nFace, dagger, pack_destination, halo_location, gdr_send, gdr_recv, halo_precision); } if (dslash::aux_worker) dslash::aux_worker->apply(0); if (precision == QUDA_DOUBLE_PRECISION) { #ifdef GPU_MULTIGRID_DOUBLE if (Y.Precision() != QUDA_DOUBLE_PRECISION) errorQuda("Y Precision %d not supported", Y.Precision()); if (halo_precision != QUDA_DOUBLE_PRECISION) errorQuda("Halo precision %d not supported with field precision %d and link precision %d", halo_precision, precision, Y.Precision()); ApplyCoarse<double,double,double>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, comms ? DSLASH_FULL : DSLASH_INTERIOR, halo_location); //if (dslash && comm_partitioned()) ApplyCoarse<double>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, true, halo_location); #else errorQuda("Double precision multigrid has not been enabled"); #endif } else if (precision == QUDA_SINGLE_PRECISION) { if (Y.Precision() == QUDA_SINGLE_PRECISION) { if (halo_precision == QUDA_SINGLE_PRECISION) { ApplyCoarse<float,float,float>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, comms ? DSLASH_FULL : DSLASH_INTERIOR, halo_location); } else { errorQuda("Halo precision %d not supported with field precision %d and link precision %d", halo_precision, precision, Y.Precision()); } } else if (Y.Precision() == QUDA_HALF_PRECISION) { #if QUDA_PRECISION & 2 if (halo_precision == QUDA_HALF_PRECISION) { ApplyCoarse<float,short,short>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, comms ? DSLASH_FULL : DSLASH_INTERIOR, halo_location); } else if (halo_precision == QUDA_QUARTER_PRECISION) { #if QUDA_PRECISION & 1 ApplyCoarse<float,short,char>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, comms ? 
DSLASH_FULL : DSLASH_INTERIOR, halo_location); #else errorQuda("QUDA_PRECISION=%d does not enable quarter precision", QUDA_PRECISION); #endif } else { errorQuda("Halo precision %d not supported with field precision %d and link precision %d", halo_precision, precision, Y.Precision()); } #else errorQuda("QUDA_PRECISION=%d does not enable half precision", QUDA_PRECISION); #endif } else { errorQuda("Unsupported precision %d\n", Y.Precision()); } //if (dslash && comm_partitioned()) ApplyCoarse<float>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, true, halo_location); } else { errorQuda("Unsupported precision %d\n", Y.Precision()); } if (dslash && comm_partitioned() && comms) inA.bufferIndex = (1 - inA.bufferIndex); comm_enable_peer2peer(true); #else errorQuda("Multigrid has not been built"); #endif } }; static bool dslash_init = false; static std::vector<DslashCoarsePolicy> policies(static_cast<int>(DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED), DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED); static int first_active_policy=static_cast<int>(DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED); // string used as a tunekey to ensure we retune if the dslash policy env changes static char policy_string[TuneKey::aux_n]; void enable_policy(DslashCoarsePolicy p){ policies[static_cast<std::size_t>(p)] = p; } void disable_policy(DslashCoarsePolicy p){ policies[static_cast<std::size_t>(p)] = DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED; } class DslashCoarsePolicyTune : public Tunable { DslashCoarseLaunch &dslash; bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. bool tuneAuxDim() const { return true; } // Do tune the aux dimensions. unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } public: inline DslashCoarsePolicyTune(DslashCoarseLaunch &dslash) : dslash(dslash) { if (!dslash_init) { static char *dslash_policy_env = getenv("QUDA_ENABLE_DSLASH_COARSE_POLICY"); if (dslash_policy_env) { // set the policies to tune for explicitly std::stringstream policy_list(dslash_policy_env); int policy_; while (policy_list >> policy_) { DslashCoarsePolicy dslash_policy = static_cast<DslashCoarsePolicy>(policy_); // check this is a valid policy choice if ( (dslash_policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND || dslash_policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_RECV || dslash_policy == DslashCoarsePolicy::DSLASH_COARSE_GDR || dslash_policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV || dslash_policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ) && !comm_gdr_enabled() ) { errorQuda("Cannot select a GDR policy %d unless QUDA_ENABLE_GDR is set", static_cast<int>(dslash_policy)); } enable_policy(dslash_policy); first_active_policy = policy_ < first_active_policy ? 
policy_ : first_active_policy; if (policy_list.peek() == ',') policy_list.ignore(); } if(first_active_policy == static_cast<int>(DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED)) errorQuda("No valid policy found in QUDA_ENABLE_DSLASH_COARSE_POLICY"); } else { first_active_policy = 0; enable_policy(DslashCoarsePolicy::DSLASH_COARSE_BASIC); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_READ); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY); if (comm_gdr_enabled()) { enable_policy(DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_GDR_RECV); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_GDR); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ); } } // construct string specifying which policies have been enabled strcat(policy_string, ",pol="); for (int i = 0; i < (int)DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED; i++) { strcat(policy_string, (int)policies[i] == i ? "1" : "0"); } dslash_init = true; } strcpy(aux, "policy,"); if (dslash.dslash) strcat(aux, "dslash"); strcat(aux, dslash.clover ? "clover," : ","); strcat(aux, dslash.inA.AuxString()); strcat(aux, ",gauge_prec="); char prec_str[8]; i32toa(prec_str, dslash.Y.Precision()); strcat(aux, prec_str); strcat(aux, ",halo_prec="); i32toa(prec_str, dslash.halo_precision); strcat(aux, prec_str); strcat(aux, comm_dim_partitioned_string(dslash.commDim)); strcat(aux, comm_dim_topology_string()); strcat(aux, comm_config_string()); // and change in P2P/GDR will be stored as a separate tunecache entry strcat(aux, policy_string); // any change in policies enabled will be stored as a separate entry int comm_sum = 4; if (dslash.commDim) for (int i = 0; i < 4; i++) comm_sum -= (1 - dslash.commDim[i]); strcat(aux, comm_sum ? 
",full" : ",interior"); // before we do policy tuning we must ensure the kernel // constituents have been tuned since we can't do nested tuning if (getTuning() && getTuneCache().find(tuneKey()) == getTuneCache().end()) { disableProfileCount(); for (auto &i : policies) if(i!= DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED) dslash(i); enableProfileCount(); setPolicyTuning(true); } } virtual ~DslashCoarsePolicyTune() { setPolicyTuning(false); } inline void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (tp.aux.x >= (int)policies.size()) errorQuda("Requested policy that is outside of range"); if (policies[tp.aux.x] == DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED ) errorQuda("Requested policy is disabled"); dslash(policies[tp.aux.x]); } int tuningIter() const { return 10; } bool advanceAux(TuneParam &param) const { while ((unsigned)param.aux.x < policies.size()-1) { param.aux.x++; if(policies[param.aux.x] != DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED) return true; } param.aux.x = 0; return false; } bool advanceTuneParam(TuneParam &param) const { return advanceAux(param); } void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.aux.x = first_active_policy; param.aux.y = 0; param.aux.z = 0; param.aux.w = 0; } void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.aux.x = first_active_policy; param.aux.y = 0; param.aux.z = 0; param.aux.w = 0; } TuneKey tuneKey() const { return TuneKey(dslash.inA.VolString(), typeid(*this).name(), aux); } long long flops() const { int nDim = 4; int Ns = dslash.inA.Nspin(); int Nc = dslash.inA.Ncolor(); int nParity = dslash.inA.SiteSubset(); int volumeCB = dslash.inA.VolumeCB(); return ((dslash.dslash*2*nDim+dslash.clover*1)*(8*Ns*Nc*Ns*Nc)-2*Ns*Nc)*nParity*volumeCB; } long long bytes() const { int nParity = dslash.inA.SiteSubset(); return (dslash.dslash||dslash.clover) * dslash.out.Bytes() + dslash.dslash*8*dslash.inA.Bytes() + dslash.clover*dslash.inB.Bytes() + nParity*(dslash.dslash*dslash.Y.Bytes()*dslash.Y.VolumeCB()/(2*dslash.Y.Stride()) + dslash.clover*dslash.X.Bytes()/2); // multiply Y by volume / stride to correct for pad } }; //Apply the coarse Dirac matrix to a coarse grid vector //out(x) = M*in = X*in - kappa*\sum_mu Y_{-\mu}(x)in(x+mu) + Y^\dagger_mu(x-mu)in(x-mu) // or //out(x) = M^dagger*in = X^dagger*in - kappa*\sum_mu Y^\dagger_{-\mu}(x)in(x+mu) + Y_mu(x-mu)in(x-mu) //Uses the kappa normalization for the Wilson operator. void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB, const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover, bool dagger, const int *commDim, QudaPrecision halo_precision) { DslashCoarseLaunch Dslash(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, commDim, halo_precision); DslashCoarsePolicyTune policy(Dslash); policy.apply(0); }//ApplyCoarse } // namespace quda
8f8f8a6bd97818c3d6d9a39d5f6759320808149f.cu
#include <gauge_field.h> #include <color_spinor_field.h> #include <uint_to_char.h> #include <worker.h> #include <tune_quda.h> #include <jitify_helper.cuh> #include <kernels/dslash_coarse.cuh> namespace quda { #ifdef GPU_MULTIGRID template <typename Float, typename yFloat, typename ghostFloat, int nDim, int Ns, int Nc, int Mc, bool dslash, bool clover, bool dagger, DslashType type> class DslashCoarse : public TunableVectorY { protected: ColorSpinorField &out; const ColorSpinorField &inA; const ColorSpinorField &inB; const GaugeField &Y; const GaugeField &X; const double kappa; const int parity; const int nParity; const int nSrc; const int max_color_col_stride = 8; mutable int color_col_stride; mutable int dim_threads; char *saveOut; long long flops() const { return ((dslash*2*nDim+clover*1)*(8*Ns*Nc*Ns*Nc)-2*Ns*Nc)*nParity*(long long)out.VolumeCB(); } long long bytes() const { return (dslash||clover) * out.Bytes() + dslash*8*inA.Bytes() + clover*inB.Bytes() + nSrc*nParity*(dslash*Y.Bytes()*Y.VolumeCB()/(2*Y.Stride()) + clover*X.Bytes()/2); } unsigned int sharedBytesPerThread() const { return (sizeof(complex<Float>) * Mc); } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneGridDim() const { return false; } // Don't tune the grid dimensions bool tuneAuxDim() const { return true; } // Do tune the aux dimensions unsigned int minThreads() const { return color_col_stride * X.VolumeCB(); } // 4-d volume since this x threads only bool advanceBlockDim(TuneParam &param) const { dim3 grid = param.grid; bool ret = TunableVectorY::advanceBlockDim(param); param.grid.z = grid.z; if (ret) { // we advanced the block.x so we're done return true; } else { // block.x (spacetime) was reset // let's try to advance spin/block-color while(param.block.z <= (unsigned int)(dim_threads * 2 * 2 * (Nc/Mc))) { param.block.z+=dim_threads * 2; if ( (dim_threads*2*2*(Nc/Mc)) % param.block.z == 0) { param.grid.z = (dim_threads * 2 * 2 * (Nc/Mc)) / param.block.z; break; } } // we can advance spin/block-color since this is valid if (param.block.z <= (unsigned int)(dim_threads * 2 * 2 * (Nc/Mc)) && param.block.z <= (unsigned int)deviceProp.maxThreadsDim[2] && param.block.x*param.block.y*param.block.z <= (unsigned int)deviceProp.maxThreadsPerBlock ) { // return true; } else { // we have run off the end so let's reset param.block.z = dim_threads * 2; param.grid.z = 2 * (Nc/Mc); return false; } } } // FIXME: understand why this leads to slower perf and variable correctness //int blockStep() const { return deviceProp.warpSize/4; } //int blockMin() const { return deviceProp.warpSize/4; } // Experimental autotuning of the color column stride bool advanceAux(TuneParam &param) const { #ifdef DOT_PRODUCT_SPLIT // we can only split the dot product on Kepler and later since we need the __shfl instruction if (2*param.aux.x <= max_color_col_stride && Nc % (2*param.aux.x) == 0 && param.block.x % deviceProp.warpSize == 0) { // An x-dimension block size that is not a multiple of the // warp size is incompatible with splitting the dot product // across the warp so we must skip this param.aux.x *= 2; // safe to advance color_col_stride = param.aux.x; // recompute grid size since minThreads() has now been updated param.grid.x = (minThreads()+param.block.x-1)/param.block.x; // check this grid size is valid before returning if (param.grid.x < (unsigned int)deviceProp.maxGridSize[0]) return true; } #endif // reset color column stride if too large or not divisible param.aux.x = 1; color_col_stride = param.aux.x; 
// recompute grid size since minThreads() has now been updated param.grid.x = (minThreads()+param.block.x-1)/param.block.x; if (2*param.aux.y <= nDim && param.block.x*param.block.y*dim_threads*2 <= (unsigned int)deviceProp.maxThreadsPerBlock) { param.aux.y *= 2; dim_threads = param.aux.y; // need to reset z-block/grid size/shared_bytes since dim_threads has changed param.block.z = dim_threads * 2; param.grid.z = 2* (Nc / Mc); param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ? sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param); return true; } else { param.aux.y = 1; dim_threads = param.aux.y; // need to reset z-block/grid size/shared_bytes since // dim_threads has changed. Strictly speaking this isn't needed // since this is the outer dimension to tune, but would be // needed if we added an aux.z tuning dimension param.block.z = dim_threads * 2; param.grid.z = 2* (Nc / Mc); param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ? sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param); return false; } } virtual void initTuneParam(TuneParam &param) const { param.aux = make_int4(1,1,1,1); color_col_stride = param.aux.x; dim_threads = param.aux.y; TunableVectorY::initTuneParam(param); param.block.z = dim_threads * 2; param.grid.z = 2*(Nc/Mc); param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ? sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param); } /** sets default values for when tuning is disabled */ virtual void defaultTuneParam(TuneParam &param) const { param.aux = make_int4(1,1,1,1); color_col_stride = param.aux.x; dim_threads = param.aux.y; TunableVectorY::defaultTuneParam(param); // ensure that the default x block size is divisible by the warpSize param.block.x = deviceProp.warpSize; param.grid.x = (minThreads()+param.block.x-1)/param.block.x; param.block.z = dim_threads * 2; param.grid.z = 2*(Nc/Mc); param.shared_bytes = sharedBytesPerThread()*param.block.x*param.block.y*param.block.z > sharedBytesPerBlock(param) ? sharedBytesPerThread()*param.block.x*param.block.y*param.block.z : sharedBytesPerBlock(param); } public: inline DslashCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB, const GaugeField &Y, const GaugeField &X, double kappa, int parity, MemoryLocation *halo_location) : TunableVectorY(out.SiteSubset() * (out.Ndim()==5 ? out.X(4) : 1)), out(out), inA(inA), inB(inB), Y(Y), X(X), kappa(kappa), parity(parity), nParity(out.SiteSubset()), nSrc(out.Ndim()==5 ? 
out.X(4) : 1) { strcpy(aux, "policy_kernel,"); if (out.Location() == QUDA_CUDA_FIELD_LOCATION) { #ifdef JITIFY create_jitify_program("kernels/dslash_coarse.cuh"); #endif } strcat(aux, compile_type_str(out)); strcat(aux, out.AuxString()); strcat(aux, comm_dim_partitioned_string()); // record the location of where each pack buffer is in [2*dim+dir] ordering // 0 - no packing // 1 - pack to local GPU memory // 2 - pack to local mapped CPU memory // 3 - pack to remote mapped GPU memory switch(type) { case DSLASH_INTERIOR: strcat(aux,",interior"); break; case DSLASH_EXTERIOR: strcat(aux,",exterior"); break; case DSLASH_FULL: strcat(aux,",full"); break; } if (doHalo<type>()) { char label[15] = ",halo="; for (int dim=0; dim<4; dim++) { for (int dir=0; dir<2; dir++) { label[2*dim+dir+6] = !comm_dim_partitioned(dim) ? '0' : halo_location[2*dim+dir] == Device ? '1' : halo_location[2*dim+dir] == Host ? '2' : '3'; } } label[14] = '\0'; strcat(aux,label); } } virtual ~DslashCoarse() { } inline void apply(const cudaStream_t &stream) { if (out.Location() == QUDA_CPU_FIELD_LOCATION) { if (out.FieldOrder() != QUDA_SPACE_SPIN_COLOR_FIELD_ORDER || Y.FieldOrder() != QUDA_QDP_GAUGE_ORDER) errorQuda("Unsupported field order colorspinor=%d gauge=%d combination\n", inA.FieldOrder(), Y.FieldOrder()); DslashCoarseArg<Float,yFloat,ghostFloat,Ns,Nc,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER,QUDA_QDP_GAUGE_ORDER> arg(out, inA, inB, Y, X, (Float)kappa, parity); coarseDslash<Float,nDim,Ns,Nc,Mc,dslash,clover,dagger,type>(arg); } else { const TuneParam &tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (out.FieldOrder() != QUDA_FLOAT2_FIELD_ORDER || Y.FieldOrder() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported field order colorspinor=%d gauge=%d combination\n", inA.FieldOrder(), Y.FieldOrder()); typedef DslashCoarseArg<Float,yFloat,ghostFloat,Ns,Nc,QUDA_FLOAT2_FIELD_ORDER,QUDA_FLOAT2_GAUGE_ORDER> Arg; Arg arg(out, inA, inB, Y, X, (Float)kappa, parity); #ifdef JITIFY using namespace jitify::reflection; jitify_error = program->kernel("quda::coarseDslashKernel") .instantiate(Type<Float>(),nDim,Ns,Nc,Mc,(int)tp.aux.x,(int)tp.aux.y,dslash,clover,dagger,type,Type<Arg>()) .configure(tp.grid,tp.block,tp.shared_bytes,stream).launch(arg); #else switch (tp.aux.y) { // dimension gather parallelisation case 1: switch (tp.aux.x) { // this is color_col_stride case 1: coarseDslashKernel<Float,nDim,Ns,Nc,Mc,1,1,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; #ifdef DOT_PRODUCT_SPLIT case 2: coarseDslashKernel<Float,nDim,Ns,Nc,Mc,2,1,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; case 4: coarseDslashKernel<Float,nDim,Ns,Nc,Mc,4,1,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; case 8: coarseDslashKernel<Float,nDim,Ns,Nc,Mc,8,1,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; #endif // DOT_PRODUCT_SPLIT default: errorQuda("Color column stride %d not valid", tp.aux.x); } break; case 2: switch (tp.aux.x) { // this is color_col_stride case 1: coarseDslashKernel<Float,nDim,Ns,Nc,Mc,1,2,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; #ifdef DOT_PRODUCT_SPLIT case 2: coarseDslashKernel<Float,nDim,Ns,Nc,Mc,2,2,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; case 4: coarseDslashKernel<Float,nDim,Ns,Nc,Mc,4,2,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; case 8: 
coarseDslashKernel<Float,nDim,Ns,Nc,Mc,8,2,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; #endif // DOT_PRODUCT_SPLIT default: errorQuda("Color column stride %d not valid", tp.aux.x); } break; case 4: switch (tp.aux.x) { // this is color_col_stride case 1: coarseDslashKernel<Float,nDim,Ns,Nc,Mc,1,4,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; #ifdef DOT_PRODUCT_SPLIT case 2: coarseDslashKernel<Float,nDim,Ns,Nc,Mc,2,4,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; case 4: coarseDslashKernel<Float,nDim,Ns,Nc,Mc,4,4,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; case 8: coarseDslashKernel<Float,nDim,Ns,Nc,Mc,8,4,dslash,clover,dagger,type> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; #endif // DOT_PRODUCT_SPLIT default: errorQuda("Color column stride %d not valid", tp.aux.x); } break; default: errorQuda("Invalid dimension thread splitting %d", tp.aux.y); } #endif } } TuneKey tuneKey() const { return TuneKey(out.VolString(), typeid(*this).name(), aux); } void preTune() { saveOut = new char[out.Bytes()]; cudaMemcpy(saveOut, out.V(), out.Bytes(), cudaMemcpyDeviceToHost); } void postTune() { cudaMemcpy(out.V(), saveOut, out.Bytes(), cudaMemcpyHostToDevice); delete[] saveOut; } }; template <typename Float, typename yFloat, typename ghostFloat, int coarseColor, int coarseSpin> inline void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB, const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover, bool dagger, DslashType type, MemoryLocation *halo_location) { const int colors_per_thread = 1; const int nDim = 4; if (dagger) { if (dslash) { if (clover) { if (type == DSLASH_FULL) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,true,true,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else if (type == DSLASH_INTERIOR) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,true,true,DSLASH_INTERIOR> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { errorQuda("Dslash type %d not instantiated", type); } } else { // plain dslash if (type == DSLASH_FULL) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,false,true,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else if (type == DSLASH_INTERIOR) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,false,true,DSLASH_INTERIOR> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { errorQuda("Dslash type %d not instantiated", type); } } } else { if (type == DSLASH_EXTERIOR) errorQuda("Cannot call halo on pure clover kernel"); if (clover) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,false,true,true,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { errorQuda("Unsupported dslash=false clover=false"); } } } else { if (dslash) { if (clover) { if (type == DSLASH_FULL) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,true,false,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else if (type == DSLASH_INTERIOR) { 
DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,true,false,DSLASH_INTERIOR> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { errorQuda("Dslash type %d not instantiated", type); } } else { // plain dslash if (type == DSLASH_FULL) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,false,false,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else if (type == DSLASH_INTERIOR) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,true,false,false,DSLASH_INTERIOR> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { errorQuda("Dslash type %d not instantiated", type); } } } else { if (type == DSLASH_EXTERIOR) errorQuda("Cannot call halo on pure clover kernel"); if (clover) { DslashCoarse<Float,yFloat,ghostFloat,nDim,coarseSpin,coarseColor,colors_per_thread,false,true,false,DSLASH_FULL> dslash(out, inA, inB, Y, X, kappa, parity, halo_location); dslash.apply(0); } else { errorQuda("Unsupported dslash=false clover=false"); } } } } // template on the number of coarse colors template <typename Float, typename yFloat, typename ghostFloat> inline void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB, const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover, bool dagger, DslashType type, MemoryLocation *halo_location) { if (Y.FieldOrder() != X.FieldOrder()) errorQuda("Field order mismatch Y = %d, X = %d", Y.FieldOrder(), X.FieldOrder()); if (inA.FieldOrder() != out.FieldOrder()) errorQuda("Field order mismatch inA = %d, out = %d", inA.FieldOrder(), out.FieldOrder()); if (inA.Nspin() != 2) errorQuda("Unsupported number of coarse spins %d\n",inA.Nspin()); #if 0 } else if (inA.Ncolor() == 4) { ApplyCoarse<Float,yFloat,ghostFloat,4,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); #endif if (inA.Ncolor() == 6) { // free field Wilson ApplyCoarse<Float,yFloat,ghostFloat,6,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); #if 0 } else if (inA.Ncolor() == 8) { ApplyCoarse<Float,yFloat,ghostFloat,8,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); } else if (inA.Ncolor() == 12) { ApplyCoarse<Float,yFloat,ghostFloat,12,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); } else if (inA.Ncolor() == 16) { ApplyCoarse<Float,yFloat,ghostFloat,16,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); } else if (inA.Ncolor() == 20) { ApplyCoarse<Float,yFloat,ghostFloat,20,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); #endif } else if (inA.Ncolor() == 24) { ApplyCoarse<Float,yFloat,ghostFloat,24,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); #if 0 } else if (inA.Ncolor() == 28) { ApplyCoarse<Float,yFloat,ghostFloat,28,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); #endif } else if (inA.Ncolor() == 32) { ApplyCoarse<Float,yFloat,ghostFloat,32,2>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, type, halo_location); } else { errorQuda("Unsupported number of coarse dof %d\n", Y.Ncolor()); } } // this is the Worker pointer that may have issue additional work // while we're waiting on communication to finish namespace dslash { extern Worker* 
aux_worker; } #endif // GPU_MULTIGRID enum class DslashCoarsePolicy { DSLASH_COARSE_BASIC, // stage both sends and recvs in host memory using memcpys DSLASH_COARSE_ZERO_COPY_PACK, // zero copy write pack buffers DSLASH_COARSE_ZERO_COPY_READ, // zero copy read halos in dslash kernel DSLASH_COARSE_ZERO_COPY, // full zero copy DSLASH_COARSE_GDR_SEND, // GDR send DSLASH_COARSE_GDR_RECV, // GDR recv DSLASH_COARSE_GDR, // full GDR DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV, // zero copy write and GDR recv DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ, // GDR send and zero copy read DSLASH_COARSE_POLICY_DISABLED }; struct DslashCoarseLaunch { ColorSpinorField &out; const ColorSpinorField &inA; const ColorSpinorField &inB; const GaugeField &Y; const GaugeField &X; double kappa; int parity; bool dslash; bool clover; bool dagger; const int *commDim; const QudaPrecision halo_precision; inline DslashCoarseLaunch(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB, const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover, bool dagger, const int *commDim, QudaPrecision halo_precision) : out(out), inA(inA), inB(inB), Y(Y), X(X), kappa(kappa), parity(parity), dslash(dslash), clover(clover), dagger(dagger), commDim(commDim), halo_precision(halo_precision == QUDA_INVALID_PRECISION ? Y.Precision() : halo_precision) { } /** @brief Execute the coarse dslash using the given policy */ inline void operator()(DslashCoarsePolicy policy) { #ifdef GPU_MULTIGRID if (inA.V() == out.V()) errorQuda("Aliasing pointers"); // check all precisions match QudaPrecision precision = checkPrecision(out, inA, inB); checkPrecision(Y, X); // check all locations match checkLocation(out, inA, inB, Y, X); int comm_sum = 4; if (commDim) for (int i=0; i<4; i++) comm_sum -= (1-commDim[i]); if (comm_sum != 4 && comm_sum != 0) errorQuda("Unsupported comms %d", comm_sum); bool comms = comm_sum; MemoryLocation pack_destination[2*QUDA_MAX_DIM]; // where we will pack the ghost buffer to MemoryLocation halo_location[2*QUDA_MAX_DIM]; // where we load the halo from for (int i=0; i<2*QUDA_MAX_DIM; i++) { pack_destination[i] = (policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV) ? Host : Device; halo_location[i] = (policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_READ || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY || policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ) ? Host : Device; } bool gdr_send = (policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND || policy == DslashCoarsePolicy::DSLASH_COARSE_GDR || policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ) ? true : false; bool gdr_recv = (policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_RECV || policy == DslashCoarsePolicy::DSLASH_COARSE_GDR || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV) ? 
true : false; // disable peer-to-peer if doing a zero-copy policy (temporary) if ( policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_READ || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY || policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV || policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ) comm_enable_peer2peer(false); if (dslash && comm_partitioned() && comms) { const int nFace = 1; inA.exchangeGhost((QudaParity)(inA.SiteSubset() == QUDA_PARITY_SITE_SUBSET ? (1 - parity) : 0), nFace, dagger, pack_destination, halo_location, gdr_send, gdr_recv, halo_precision); } if (dslash::aux_worker) dslash::aux_worker->apply(0); if (precision == QUDA_DOUBLE_PRECISION) { #ifdef GPU_MULTIGRID_DOUBLE if (Y.Precision() != QUDA_DOUBLE_PRECISION) errorQuda("Y Precision %d not supported", Y.Precision()); if (halo_precision != QUDA_DOUBLE_PRECISION) errorQuda("Halo precision %d not supported with field precision %d and link precision %d", halo_precision, precision, Y.Precision()); ApplyCoarse<double,double,double>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, comms ? DSLASH_FULL : DSLASH_INTERIOR, halo_location); //if (dslash && comm_partitioned()) ApplyCoarse<double>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, true, halo_location); #else errorQuda("Double precision multigrid has not been enabled"); #endif } else if (precision == QUDA_SINGLE_PRECISION) { if (Y.Precision() == QUDA_SINGLE_PRECISION) { if (halo_precision == QUDA_SINGLE_PRECISION) { ApplyCoarse<float,float,float>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, comms ? DSLASH_FULL : DSLASH_INTERIOR, halo_location); } else { errorQuda("Halo precision %d not supported with field precision %d and link precision %d", halo_precision, precision, Y.Precision()); } } else if (Y.Precision() == QUDA_HALF_PRECISION) { #if QUDA_PRECISION & 2 if (halo_precision == QUDA_HALF_PRECISION) { ApplyCoarse<float,short,short>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, comms ? DSLASH_FULL : DSLASH_INTERIOR, halo_location); } else if (halo_precision == QUDA_QUARTER_PRECISION) { #if QUDA_PRECISION & 1 ApplyCoarse<float,short,char>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, comms ? 
DSLASH_FULL : DSLASH_INTERIOR, halo_location); #else errorQuda("QUDA_PRECISION=%d does not enable quarter precision", QUDA_PRECISION); #endif } else { errorQuda("Halo precision %d not supported with field precision %d and link precision %d", halo_precision, precision, Y.Precision()); } #else errorQuda("QUDA_PRECISION=%d does not enable half precision", QUDA_PRECISION); #endif } else { errorQuda("Unsupported precision %d\n", Y.Precision()); } //if (dslash && comm_partitioned()) ApplyCoarse<float>(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, true, halo_location); } else { errorQuda("Unsupported precision %d\n", Y.Precision()); } if (dslash && comm_partitioned() && comms) inA.bufferIndex = (1 - inA.bufferIndex); comm_enable_peer2peer(true); #else errorQuda("Multigrid has not been built"); #endif } }; static bool dslash_init = false; static std::vector<DslashCoarsePolicy> policies(static_cast<int>(DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED), DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED); static int first_active_policy=static_cast<int>(DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED); // string used as a tunekey to ensure we retune if the dslash policy env changes static char policy_string[TuneKey::aux_n]; void enable_policy(DslashCoarsePolicy p){ policies[static_cast<std::size_t>(p)] = p; } void disable_policy(DslashCoarsePolicy p){ policies[static_cast<std::size_t>(p)] = DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED; } class DslashCoarsePolicyTune : public Tunable { DslashCoarseLaunch &dslash; bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. bool tuneAuxDim() const { return true; } // Do tune the aux dimensions. unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } public: inline DslashCoarsePolicyTune(DslashCoarseLaunch &dslash) : dslash(dslash) { if (!dslash_init) { static char *dslash_policy_env = getenv("QUDA_ENABLE_DSLASH_COARSE_POLICY"); if (dslash_policy_env) { // set the policies to tune for explicitly std::stringstream policy_list(dslash_policy_env); int policy_; while (policy_list >> policy_) { DslashCoarsePolicy dslash_policy = static_cast<DslashCoarsePolicy>(policy_); // check this is a valid policy choice if ( (dslash_policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND || dslash_policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_RECV || dslash_policy == DslashCoarsePolicy::DSLASH_COARSE_GDR || dslash_policy == DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV || dslash_policy == DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ) && !comm_gdr_enabled() ) { errorQuda("Cannot select a GDR policy %d unless QUDA_ENABLE_GDR is set", static_cast<int>(dslash_policy)); } enable_policy(dslash_policy); first_active_policy = policy_ < first_active_policy ? 
policy_ : first_active_policy; if (policy_list.peek() == ',') policy_list.ignore(); } if(first_active_policy == static_cast<int>(DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED)) errorQuda("No valid policy found in QUDA_ENABLE_DSLASH_COARSE_POLICY"); } else { first_active_policy = 0; enable_policy(DslashCoarsePolicy::DSLASH_COARSE_BASIC); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_READ); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY); if (comm_gdr_enabled()) { enable_policy(DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_GDR_RECV); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_GDR); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_ZERO_COPY_PACK_GDR_RECV); enable_policy(DslashCoarsePolicy::DSLASH_COARSE_GDR_SEND_ZERO_COPY_READ); } } // construct string specifying which policies have been enabled strcat(policy_string, ",pol="); for (int i = 0; i < (int)DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED; i++) { strcat(policy_string, (int)policies[i] == i ? "1" : "0"); } dslash_init = true; } strcpy(aux, "policy,"); if (dslash.dslash) strcat(aux, "dslash"); strcat(aux, dslash.clover ? "clover," : ","); strcat(aux, dslash.inA.AuxString()); strcat(aux, ",gauge_prec="); char prec_str[8]; i32toa(prec_str, dslash.Y.Precision()); strcat(aux, prec_str); strcat(aux, ",halo_prec="); i32toa(prec_str, dslash.halo_precision); strcat(aux, prec_str); strcat(aux, comm_dim_partitioned_string(dslash.commDim)); strcat(aux, comm_dim_topology_string()); strcat(aux, comm_config_string()); // and change in P2P/GDR will be stored as a separate tunecache entry strcat(aux, policy_string); // any change in policies enabled will be stored as a separate entry int comm_sum = 4; if (dslash.commDim) for (int i = 0; i < 4; i++) comm_sum -= (1 - dslash.commDim[i]); strcat(aux, comm_sum ? 
",full" : ",interior"); // before we do policy tuning we must ensure the kernel // constituents have been tuned since we can't do nested tuning if (getTuning() && getTuneCache().find(tuneKey()) == getTuneCache().end()) { disableProfileCount(); for (auto &i : policies) if(i!= DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED) dslash(i); enableProfileCount(); setPolicyTuning(true); } } virtual ~DslashCoarsePolicyTune() { setPolicyTuning(false); } inline void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (tp.aux.x >= (int)policies.size()) errorQuda("Requested policy that is outside of range"); if (policies[tp.aux.x] == DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED ) errorQuda("Requested policy is disabled"); dslash(policies[tp.aux.x]); } int tuningIter() const { return 10; } bool advanceAux(TuneParam &param) const { while ((unsigned)param.aux.x < policies.size()-1) { param.aux.x++; if(policies[param.aux.x] != DslashCoarsePolicy::DSLASH_COARSE_POLICY_DISABLED) return true; } param.aux.x = 0; return false; } bool advanceTuneParam(TuneParam &param) const { return advanceAux(param); } void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.aux.x = first_active_policy; param.aux.y = 0; param.aux.z = 0; param.aux.w = 0; } void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.aux.x = first_active_policy; param.aux.y = 0; param.aux.z = 0; param.aux.w = 0; } TuneKey tuneKey() const { return TuneKey(dslash.inA.VolString(), typeid(*this).name(), aux); } long long flops() const { int nDim = 4; int Ns = dslash.inA.Nspin(); int Nc = dslash.inA.Ncolor(); int nParity = dslash.inA.SiteSubset(); int volumeCB = dslash.inA.VolumeCB(); return ((dslash.dslash*2*nDim+dslash.clover*1)*(8*Ns*Nc*Ns*Nc)-2*Ns*Nc)*nParity*volumeCB; } long long bytes() const { int nParity = dslash.inA.SiteSubset(); return (dslash.dslash||dslash.clover) * dslash.out.Bytes() + dslash.dslash*8*dslash.inA.Bytes() + dslash.clover*dslash.inB.Bytes() + nParity*(dslash.dslash*dslash.Y.Bytes()*dslash.Y.VolumeCB()/(2*dslash.Y.Stride()) + dslash.clover*dslash.X.Bytes()/2); // multiply Y by volume / stride to correct for pad } }; //Apply the coarse Dirac matrix to a coarse grid vector //out(x) = M*in = X*in - kappa*\sum_mu Y_{-\mu}(x)in(x+mu) + Y^\dagger_mu(x-mu)in(x-mu) // or //out(x) = M^dagger*in = X^dagger*in - kappa*\sum_mu Y^\dagger_{-\mu}(x)in(x+mu) + Y_mu(x-mu)in(x-mu) //Uses the kappa normalization for the Wilson operator. void ApplyCoarse(ColorSpinorField &out, const ColorSpinorField &inA, const ColorSpinorField &inB, const GaugeField &Y, const GaugeField &X, double kappa, int parity, bool dslash, bool clover, bool dagger, const int *commDim, QudaPrecision halo_precision) { DslashCoarseLaunch Dslash(out, inA, inB, Y, X, kappa, parity, dslash, clover, dagger, commDim, halo_precision); DslashCoarsePolicyTune policy(Dslash); policy.apply(0); }//ApplyCoarse } // namespace quda
a808ce26a4df54ab60440fe9c2ffcc7b3736b134.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <quda_internal.h> #include <color_spinor_field.h> #include <blas_quda.h> #include <test_util.h> #include <face_quda.h> // include because of nasty globals used in the tests #include <dslash_util.h> // Wilson, clover-improved Wilson, and twisted mass are supported. extern QudaDslashType dslash_type; extern bool tune; extern int device; extern int xdim; extern int ydim; extern int zdim; extern int tdim; extern int gridsize_from_cmdline[]; extern int niter; extern void usage(char** ); const int Nkernels = 31; using namespace quda; cpuColorSpinorField *xH, *yH, *zH, *wH, *vH, *hH, *lH; cudaColorSpinorField *xD, *yD, *zD, *wD, *vD, *hD, *lD; int Nspin; void setPrec(ColorSpinorParam &param, const QudaPrecision precision) { param.precision = precision; if (Nspin == 1 || precision == QUDA_DOUBLE_PRECISION) { param.fieldOrder = QUDA_FLOAT2_FIELD_ORDER; } else { param.fieldOrder = QUDA_FLOAT4_FIELD_ORDER; } } void display_test_info() { printfQuda("running the following test:\n"); printfQuda("S_dimension T_dimension Nspin\n"); printfQuda("%d/%d/%d %d %d\n", xdim, ydim, zdim, tdim, Nspin); printfQuda("Grid partition info: X Y Z T\n"); printfQuda(" %d %d %d %d\n", commDimPartitioned(0), commDimPartitioned(1), commDimPartitioned(2), commDimPartitioned(3)); return; } void initFields(int prec) { // precisions used for the source field in the copyCuda() benchmark QudaPrecision high_aux_prec; QudaPrecision low_aux_prec; ColorSpinorParam param; param.nColor = 3; // set spin according to the type of dslash Nspin = (dslash_type == QUDA_ASQTAD_DSLASH) ? 1 : 4; param.nSpin = Nspin; param.nDim = 4; // number of spacetime dimensions param.pad = 0; // padding must be zero for cpu fields param.siteSubset = QUDA_PARITY_SITE_SUBSET; if (param.siteSubset == QUDA_PARITY_SITE_SUBSET) param.x[0] = xdim/2; else param.x[0] = xdim; param.x[1] = ydim; param.x[2] = zdim; param.x[3] = tdim; param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER; param.gammaBasis = QUDA_DEGRAND_ROSSI_GAMMA_BASIS; param.precision = QUDA_DOUBLE_PRECISION; param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER; param.create = QUDA_ZERO_FIELD_CREATE; vH = new cpuColorSpinorField(param); wH = new cpuColorSpinorField(param); xH = new cpuColorSpinorField(param); yH = new cpuColorSpinorField(param); zH = new cpuColorSpinorField(param); hH = new cpuColorSpinorField(param); lH = new cpuColorSpinorField(param); vH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); wH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); xH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); yH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); zH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); hH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); lH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); // Now set the parameters for the cuda fields param.pad = 0; //LX*LY*LZ/2; if (param.nSpin == 4) param.gammaBasis = QUDA_UKQCD_GAMMA_BASIS; param.create = QUDA_ZERO_FIELD_CREATE; switch(prec) { case 0: setPrec(param, QUDA_HALF_PRECISION); high_aux_prec = QUDA_DOUBLE_PRECISION; low_aux_prec = QUDA_SINGLE_PRECISION; break; case 1: setPrec(param, QUDA_SINGLE_PRECISION); high_aux_prec = QUDA_DOUBLE_PRECISION; low_aux_prec = QUDA_HALF_PRECISION; break; case 2: setPrec(param, QUDA_DOUBLE_PRECISION); high_aux_prec = QUDA_SINGLE_PRECISION; low_aux_prec = QUDA_HALF_PRECISION; break; } checkCudaError(); vD = new cudaColorSpinorField(param); wD = new cudaColorSpinorField(param); xD = new cudaColorSpinorField(param); yD = new cudaColorSpinorField(param); zD = new 
cudaColorSpinorField(param); setPrec(param, high_aux_prec); hD = new cudaColorSpinorField(param); setPrec(param, low_aux_prec); lD = new cudaColorSpinorField(param); // check for successful allocation checkCudaError(); *vD = *vH; *wD = *wH; *xD = *xH; *yD = *yH; *zD = *zH; *hD = *hH; *lD = *lH; } void freeFields() { // release memory delete vD; delete wD; delete xD; delete yD; delete zD; delete hD; delete lD; // release memory delete vH; delete wH; delete xH; delete yH; delete zH; delete hH; delete lH; } double benchmark(int kernel, const int niter) { double a, b, c; quda::Complex a2, b2, c2; hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start, 0); for (int i=0; i < niter; ++i) { switch (kernel) { case 0: copyCuda(*yD, *hD); break; case 1: copyCuda(*yD, *lD); break; case 2: axpbyCuda(a, *xD, b, *yD); break; case 3: xpyCuda(*xD, *yD); break; case 4: axpyCuda(a, *xD, *yD); break; case 5: xpayCuda(*xD, a, *yD); break; case 6: mxpyCuda(*xD, *yD); break; case 7: axCuda(a, *xD); break; case 8: caxpyCuda(a2, *xD, *yD); break; case 9: caxpbyCuda(a2, *xD, b2, *yD); break; case 10: cxpaypbzCuda(*xD, a2, *yD, b2, *zD); break; case 11: axpyBzpcxCuda(a, *xD, *yD, b, *zD, c); break; case 12: axpyZpbxCuda(a, *xD, *yD, *zD, b); break; case 13: caxpbypzYmbwCuda(a2, *xD, b2, *yD, *zD, *wD); break; case 14: cabxpyAxCuda(a, b2, *xD, *yD); break; case 15: caxpbypzCuda(a2, *xD, b2, *yD, *zD); break; case 16: caxpbypczpwCuda(a2, *xD, b2, *yD, c2, *zD, *wD); break; case 17: caxpyXmazCuda(a2, *xD, *yD, *zD); break; // double case 18: normCuda(*xD); break; case 19: reDotProductCuda(*xD, *yD); break; case 20: axpyNormCuda(a, *xD, *yD); break; case 21: xmyNormCuda(*xD, *yD); break; case 22: caxpyNormCuda(a2, *xD, *yD); break; case 23: caxpyXmazNormXCuda(a2, *xD, *yD, *zD); break; case 24: cabxpyAxNormCuda(a, b2, *xD, *yD); break; // double2 case 25: cDotProductCuda(*xD, *yD); break; case 26: xpaycDotzyCuda(*xD, a, *yD, *zD); break; case 27: caxpyDotzyCuda(a2, *xD, *yD, *zD); break; // double3 case 28: cDotProductNormACuda(*xD, *yD); break; case 29: cDotProductNormBCuda(*xD, *yD); break; case 30: caxpbypzYmbwcDotProductUYNormYCuda(a2, *xD, b2, *yD, *zD, *wD, *vD); break; default: errorQuda("Undefined blas kernel %d\n", kernel); } } hipEventRecord(end, 0); hipEventSynchronize(end); float runTime; hipEventElapsedTime(&runTime, start, end); hipEventDestroy(start); hipEventDestroy(end); double secs = runTime / 1000; return secs; } #define ERROR(a) fabs(norm2(*a##D) - norm2(*a##H)) / norm2(*a##H) double test(int kernel) { double a = 1.5, b = 2.5, c = 3.5; quda::Complex a2(a, b), b2(b, -c), c2(a+b, c*a); double error = 0; switch (kernel) { case 0: *hD = *hH; copyCuda(*yD, *hD); yH->copy(*hH); error = ERROR(y); break; case 1: *lD = *lH; copyCuda(*yD, *lD); yH->copy(*lH); error = ERROR(y); break; case 2: *xD = *xH; *yD = *yH; axpbyCuda(a, *xD, b, *yD); axpbyCpu(a, *xH, b, *yH); error = ERROR(y); break; case 3: *xD = *xH; *yD = *yH; xpyCuda(*xD, *yD); xpyCpu(*xH, *yH); error = ERROR(y); break; case 4: *xD = *xH; *yD = *yH; axpyCuda(a, *xD, *yD); axpyCpu(a, *xH, *yH); error = ERROR(y); break; case 5: *xD = *xH; *yD = *yH; xpayCuda(*xD, a, *yD); xpayCpu(*xH, a, *yH); error = ERROR(y); break; case 6: *xD = *xH; *yD = *yH; mxpyCuda(*xD, *yD); mxpyCpu(*xH, *yH); error = ERROR(y); break; case 7: *xD = *xH; axCuda(a, *xD); axCpu(a, *xH); error = ERROR(x); break; case 8: *xD = *xH; *yD = *yH; caxpyCuda(a2, *xD, *yD); caxpyCpu(a2, *xH, *yH); error = ERROR(y); break; case 9: *xD = *xH; *yD = *yH; 
caxpbyCuda(a2, *xD, b2, *yD); caxpbyCpu(a2, *xH, b2, *yH); error = ERROR(y); break; case 10: *xD = *xH; *yD = *yH; *zD = *zH; cxpaypbzCuda(*xD, a2, *yD, b2, *zD); cxpaypbzCpu(*xH, a2, *yH, b2, *zH); error = ERROR(z); break; case 11: *xD = *xH; *yD = *yH; *zD = *zH; axpyBzpcxCuda(a, *xD, *yD, b, *zD, c); axpyBzpcxCpu(a, *xH, *yH, b, *zH, c); error = ERROR(x) + ERROR(y); break; case 12: *xD = *xH; *yD = *yH; *zD = *zH; axpyZpbxCuda(a, *xD, *yD, *zD, b); axpyZpbxCpu(a, *xH, *yH, *zH, b); error = ERROR(x) + ERROR(y); break; case 13: *xD = *xH; *yD = *yH; *zD = *zH; *wD = *wH; caxpbypzYmbwCuda(a2, *xD, b2, *yD, *zD, *wD); caxpbypzYmbwCpu(a2, *xH, b2, *yH, *zH, *wH); error = ERROR(z) + ERROR(y); break; case 14: *xD = *xH; *yD = *yH; cabxpyAxCuda(a, b2, *xD, *yD); cabxpyAxCpu(a, b2, *xH, *yH); error = ERROR(y) + ERROR(x); break; case 15: *xD = *xH; *yD = *yH; *zD = *zH; {caxpbypzCuda(a2, *xD, b2, *yD, *zD); caxpbypzCpu(a2, *xH, b2, *yH, *zH); error = ERROR(z); } break; case 16: *xD = *xH; *yD = *yH; *zD = *zH; *wD = *wH; {caxpbypczpwCuda(a2, *xD, b2, *yD, c2, *zD, *wD); caxpbypczpwCpu(a2, *xH, b2, *yH, c2, *zH, *wH); error = ERROR(w); } break; case 17: *xD = *xH; *yD = *yH; *zD = *zH; {caxpyXmazCuda(a, *xD, *yD, *zD); caxpyXmazCpu(a, *xH, *yH, *zH); error = ERROR(y) + ERROR(x);} break; // double case 18: *xD = *xH; error = fabs(normCuda(*xD) - normCpu(*xH)) / normCpu(*xH); break; case 19: *xD = *xH; *yD = *yH; error = fabs(reDotProductCuda(*xD, *yD) - reDotProductCpu(*xH, *yH)) / fabs(reDotProductCpu(*xH, *yH)); break; case 20: *xD = *xH; *yD = *yH; {double d = axpyNormCuda(a, *xD, *yD); double h = axpyNormCpu(a, *xH, *yH); error = ERROR(y) + fabs(d-h)/fabs(h);} break; case 21: *xD = *xH; *yD = *yH; {double d = xmyNormCuda(*xD, *yD); double h = xmyNormCpu(*xH, *yH); error = ERROR(y) + fabs(d-h)/fabs(h);} break; case 22: *xD = *xH; *yD = *yH; {double d = caxpyNormCuda(a, *xD, *yD); double h = caxpyNormCpu(a, *xH, *yH); error = ERROR(y) + fabs(d-h)/fabs(h);} break; case 23: *xD = *xH; *yD = *yH; *zD = *zH; {double d = caxpyXmazNormXCuda(a, *xD, *yD, *zD); double h = caxpyXmazNormXCpu(a, *xH, *yH, *zH); error = ERROR(y) + ERROR(x) + fabs(d-h)/fabs(h);} break; case 24: *xD = *xH; *yD = *yH; {double d = cabxpyAxNormCuda(a, b2, *xD, *yD); double h = cabxpyAxNormCpu(a, b2, *xH, *yH); error = ERROR(x) + ERROR(y) + fabs(d-h)/fabs(h);} break; // double2 case 25: *xD = *xH; *yD = *yH; error = abs(cDotProductCuda(*xD, *yD) - cDotProductCpu(*xH, *yH)) / abs(cDotProductCpu(*xH, *yH)); break; case 26: *xD = *xH; *yD = *yH; *zD = *zH; { quda::Complex d = xpaycDotzyCuda(*xD, a, *yD, *zD); quda::Complex h = xpaycDotzyCpu(*xH, a, *yH, *zH); error = fabs(norm2(*yD) - norm2(*yH)) / norm2(*yH) + abs(d-h)/abs(h); } break; case 27: *xD = *xH; *yD = *yH; *zD = *zH; {quda::Complex d = caxpyDotzyCuda(a, *xD, *yD, *zD); quda::Complex h = caxpyDotzyCpu(a, *xH, *yH, *zH); error = ERROR(y) + abs(d-h)/abs(h);} break; // double3 case 28: *xD = *xH; *yD = *yH; { double3 d = cDotProductNormACuda(*xD, *yD); double3 h = cDotProductNormACpu(*xH, *yH); error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); } break; case 29: *xD = *xH; *yD = *yH; { double3 d = cDotProductNormBCuda(*xD, *yD); double3 h = cDotProductNormBCpu(*xH, *yH); error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); } break; case 30: *xD = *xH; *yD = *yH; *zD = *zH; *wD = *wH; *vD = *vH; { double3 d = caxpbypzYmbwcDotProductUYNormYCuda(a2, *xD, b2, *yD, *zD, *wD, *vD); double3 h = 
caxpbypzYmbwcDotProductUYNormYCpu(a2, *xH, b2, *yH, *zH, *wH, *vH); error = ERROR(z) + ERROR(y) + fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); } break; default: errorQuda("Undefined blas kernel %d\n", kernel); } return error; } int main(int argc, char** argv) { for (int i = 1; i < argc; i++){ if(process_command_line_option(argc, argv, &i) == 0){ continue; } printfQuda("ERROR: Invalid option:%s\n", argv[i]); usage(argv); } setSpinorSiteSize(24); initCommsQuda(argc, argv, gridsize_from_cmdline, 4); display_test_info(); initQuda(device); char *names[] = { "copyHS", "copyLS", "axpby", "xpy", "axpy", "xpay", "mxpy", "ax", "caxpy", "caxpby", "cxpaypbz", "axpyBzpcx", "axpyZpbx", "caxpbypzYmbw", "cabxpyAx", "caxpbypz", "caxpbypczpw", "caxpyXmaz", "norm", "reDotProduct", "axpyNorm", "xmyNorm", "caxpyNorm", "caxpyXmazNormX", "cabxpyAxNorm", "cDotProduct", "xpaycDotzy", "caxpyDotzy", "cDotProductNormA", "cDotProductNormB", "caxpbypzYmbwcDotProductWYNormY" }; char *prec_str[] = {"half", "single", "double"}; // Only benchmark double precision if supported #if (__COMPUTE_CAPABILITY__ >= 130) int Nprec = 3; #else int Nprec = 2; #endif // enable the tuning quda::setBlasTuning(QUDA_TUNE_YES, QUDA_SILENT); for (int prec = 0; prec < Nprec; prec++) { printfQuda("\nBenchmarking %s precision with %d iterations...\n\n", prec_str[prec], niter); initFields(prec); for (int kernel = 0; kernel < Nkernels; kernel++) { // only benchmark "high precision" copyCuda() if double is supported if ((Nprec < 3) && (kernel == 0)) continue; // do the initial tune benchmark(kernel, 1); // now rerun with more iterations to get accurate speed measurements quda::blas_flops = 0; quda::blas_bytes = 0; double secs = benchmark(kernel, niter); double gflops = (quda::blas_flops*1e-9)/(secs); double gbytes = quda::blas_bytes/(secs*1e9); printfQuda("%-31s: Gflop/s = %6.1f, GB/s = %6.1f\n", names[kernel], gflops, gbytes); } freeFields(); } // clear the error state hipGetLastError(); // lastly check for correctness for (int prec = 0; prec < Nprec; prec++) { printfQuda("\nTesting %s precision...\n\n", prec_str[prec]); initFields(prec); for (int kernel = 0; kernel < Nkernels; kernel++) { // only benchmark "high precision" copyCuda() if double is supported if ((Nprec < 3) && (kernel == 0)) continue; double error = test(kernel); printfQuda("%-35s error = %e, \n", names[kernel], error); } freeFields(); } endQuda(); endCommsQuda(); }
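test() in the blas test above folds device/host agreement into the ERROR(a) macro. As a formula (assuming norm2 returns the squared L2 norm, as the name suggests), the quantity reported per field is:

```latex
% From: #define ERROR(a) fabs(norm2(*a##D) - norm2(*a##H)) / norm2(*a##H)
\[
\mathrm{ERROR}(a) = \frac{\bigl|\,\lVert a_D\rVert^{2} - \lVert a_H\rVert^{2}\bigr|}{\lVert a_H\rVert^{2}}
\]
```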
a808ce26a4df54ab60440fe9c2ffcc7b3736b134.cu
#include <stdio.h> #include <stdlib.h> #include <quda_internal.h> #include <color_spinor_field.h> #include <blas_quda.h> #include <test_util.h> #include <face_quda.h> // include because of nasty globals used in the tests #include <dslash_util.h> // Wilson, clover-improved Wilson, and twisted mass are supported. extern QudaDslashType dslash_type; extern bool tune; extern int device; extern int xdim; extern int ydim; extern int zdim; extern int tdim; extern int gridsize_from_cmdline[]; extern int niter; extern void usage(char** ); const int Nkernels = 31; using namespace quda; cpuColorSpinorField *xH, *yH, *zH, *wH, *vH, *hH, *lH; cudaColorSpinorField *xD, *yD, *zD, *wD, *vD, *hD, *lD; int Nspin; void setPrec(ColorSpinorParam &param, const QudaPrecision precision) { param.precision = precision; if (Nspin == 1 || precision == QUDA_DOUBLE_PRECISION) { param.fieldOrder = QUDA_FLOAT2_FIELD_ORDER; } else { param.fieldOrder = QUDA_FLOAT4_FIELD_ORDER; } } void display_test_info() { printfQuda("running the following test:\n"); printfQuda("S_dimension T_dimension Nspin\n"); printfQuda("%d/%d/%d %d %d\n", xdim, ydim, zdim, tdim, Nspin); printfQuda("Grid partition info: X Y Z T\n"); printfQuda(" %d %d %d %d\n", commDimPartitioned(0), commDimPartitioned(1), commDimPartitioned(2), commDimPartitioned(3)); return; } void initFields(int prec) { // precisions used for the source field in the copyCuda() benchmark QudaPrecision high_aux_prec; QudaPrecision low_aux_prec; ColorSpinorParam param; param.nColor = 3; // set spin according to the type of dslash Nspin = (dslash_type == QUDA_ASQTAD_DSLASH) ? 1 : 4; param.nSpin = Nspin; param.nDim = 4; // number of spacetime dimensions param.pad = 0; // padding must be zero for cpu fields param.siteSubset = QUDA_PARITY_SITE_SUBSET; if (param.siteSubset == QUDA_PARITY_SITE_SUBSET) param.x[0] = xdim/2; else param.x[0] = xdim; param.x[1] = ydim; param.x[2] = zdim; param.x[3] = tdim; param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER; param.gammaBasis = QUDA_DEGRAND_ROSSI_GAMMA_BASIS; param.precision = QUDA_DOUBLE_PRECISION; param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER; param.create = QUDA_ZERO_FIELD_CREATE; vH = new cpuColorSpinorField(param); wH = new cpuColorSpinorField(param); xH = new cpuColorSpinorField(param); yH = new cpuColorSpinorField(param); zH = new cpuColorSpinorField(param); hH = new cpuColorSpinorField(param); lH = new cpuColorSpinorField(param); vH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); wH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); xH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); yH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); zH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); hH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); lH->Source(QUDA_RANDOM_SOURCE, 0, 0, 0); // Now set the parameters for the cuda fields param.pad = 0; //LX*LY*LZ/2; if (param.nSpin == 4) param.gammaBasis = QUDA_UKQCD_GAMMA_BASIS; param.create = QUDA_ZERO_FIELD_CREATE; switch(prec) { case 0: setPrec(param, QUDA_HALF_PRECISION); high_aux_prec = QUDA_DOUBLE_PRECISION; low_aux_prec = QUDA_SINGLE_PRECISION; break; case 1: setPrec(param, QUDA_SINGLE_PRECISION); high_aux_prec = QUDA_DOUBLE_PRECISION; low_aux_prec = QUDA_HALF_PRECISION; break; case 2: setPrec(param, QUDA_DOUBLE_PRECISION); high_aux_prec = QUDA_SINGLE_PRECISION; low_aux_prec = QUDA_HALF_PRECISION; break; } checkCudaError(); vD = new cudaColorSpinorField(param); wD = new cudaColorSpinorField(param); xD = new cudaColorSpinorField(param); yD = new cudaColorSpinorField(param); zD = new cudaColorSpinorField(param); setPrec(param, high_aux_prec); hD = new 
cudaColorSpinorField(param); setPrec(param, low_aux_prec); lD = new cudaColorSpinorField(param); // check for successful allocation checkCudaError(); *vD = *vH; *wD = *wH; *xD = *xH; *yD = *yH; *zD = *zH; *hD = *hH; *lD = *lH; } void freeFields() { // release memory delete vD; delete wD; delete xD; delete yD; delete zD; delete hD; delete lD; // release memory delete vH; delete wH; delete xH; delete yH; delete zH; delete hH; delete lH; } double benchmark(int kernel, const int niter) { double a, b, c; quda::Complex a2, b2, c2; cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, 0); for (int i=0; i < niter; ++i) { switch (kernel) { case 0: copyCuda(*yD, *hD); break; case 1: copyCuda(*yD, *lD); break; case 2: axpbyCuda(a, *xD, b, *yD); break; case 3: xpyCuda(*xD, *yD); break; case 4: axpyCuda(a, *xD, *yD); break; case 5: xpayCuda(*xD, a, *yD); break; case 6: mxpyCuda(*xD, *yD); break; case 7: axCuda(a, *xD); break; case 8: caxpyCuda(a2, *xD, *yD); break; case 9: caxpbyCuda(a2, *xD, b2, *yD); break; case 10: cxpaypbzCuda(*xD, a2, *yD, b2, *zD); break; case 11: axpyBzpcxCuda(a, *xD, *yD, b, *zD, c); break; case 12: axpyZpbxCuda(a, *xD, *yD, *zD, b); break; case 13: caxpbypzYmbwCuda(a2, *xD, b2, *yD, *zD, *wD); break; case 14: cabxpyAxCuda(a, b2, *xD, *yD); break; case 15: caxpbypzCuda(a2, *xD, b2, *yD, *zD); break; case 16: caxpbypczpwCuda(a2, *xD, b2, *yD, c2, *zD, *wD); break; case 17: caxpyXmazCuda(a2, *xD, *yD, *zD); break; // double case 18: normCuda(*xD); break; case 19: reDotProductCuda(*xD, *yD); break; case 20: axpyNormCuda(a, *xD, *yD); break; case 21: xmyNormCuda(*xD, *yD); break; case 22: caxpyNormCuda(a2, *xD, *yD); break; case 23: caxpyXmazNormXCuda(a2, *xD, *yD, *zD); break; case 24: cabxpyAxNormCuda(a, b2, *xD, *yD); break; // double2 case 25: cDotProductCuda(*xD, *yD); break; case 26: xpaycDotzyCuda(*xD, a, *yD, *zD); break; case 27: caxpyDotzyCuda(a2, *xD, *yD, *zD); break; // double3 case 28: cDotProductNormACuda(*xD, *yD); break; case 29: cDotProductNormBCuda(*xD, *yD); break; case 30: caxpbypzYmbwcDotProductUYNormYCuda(a2, *xD, b2, *yD, *zD, *wD, *vD); break; default: errorQuda("Undefined blas kernel %d\n", kernel); } } cudaEventRecord(end, 0); cudaEventSynchronize(end); float runTime; cudaEventElapsedTime(&runTime, start, end); cudaEventDestroy(start); cudaEventDestroy(end); double secs = runTime / 1000; return secs; } #define ERROR(a) fabs(norm2(*a##D) - norm2(*a##H)) / norm2(*a##H) double test(int kernel) { double a = 1.5, b = 2.5, c = 3.5; quda::Complex a2(a, b), b2(b, -c), c2(a+b, c*a); double error = 0; switch (kernel) { case 0: *hD = *hH; copyCuda(*yD, *hD); yH->copy(*hH); error = ERROR(y); break; case 1: *lD = *lH; copyCuda(*yD, *lD); yH->copy(*lH); error = ERROR(y); break; case 2: *xD = *xH; *yD = *yH; axpbyCuda(a, *xD, b, *yD); axpbyCpu(a, *xH, b, *yH); error = ERROR(y); break; case 3: *xD = *xH; *yD = *yH; xpyCuda(*xD, *yD); xpyCpu(*xH, *yH); error = ERROR(y); break; case 4: *xD = *xH; *yD = *yH; axpyCuda(a, *xD, *yD); axpyCpu(a, *xH, *yH); error = ERROR(y); break; case 5: *xD = *xH; *yD = *yH; xpayCuda(*xD, a, *yD); xpayCpu(*xH, a, *yH); error = ERROR(y); break; case 6: *xD = *xH; *yD = *yH; mxpyCuda(*xD, *yD); mxpyCpu(*xH, *yH); error = ERROR(y); break; case 7: *xD = *xH; axCuda(a, *xD); axCpu(a, *xH); error = ERROR(x); break; case 8: *xD = *xH; *yD = *yH; caxpyCuda(a2, *xD, *yD); caxpyCpu(a2, *xH, *yH); error = ERROR(y); break; case 9: *xD = *xH; *yD = *yH; caxpbyCuda(a2, *xD, b2, *yD); caxpbyCpu(a2, *xH, b2, *yH); 
error = ERROR(y); break; case 10: *xD = *xH; *yD = *yH; *zD = *zH; cxpaypbzCuda(*xD, a2, *yD, b2, *zD); cxpaypbzCpu(*xH, a2, *yH, b2, *zH); error = ERROR(z); break; case 11: *xD = *xH; *yD = *yH; *zD = *zH; axpyBzpcxCuda(a, *xD, *yD, b, *zD, c); axpyBzpcxCpu(a, *xH, *yH, b, *zH, c); error = ERROR(x) + ERROR(y); break; case 12: *xD = *xH; *yD = *yH; *zD = *zH; axpyZpbxCuda(a, *xD, *yD, *zD, b); axpyZpbxCpu(a, *xH, *yH, *zH, b); error = ERROR(x) + ERROR(y); break; case 13: *xD = *xH; *yD = *yH; *zD = *zH; *wD = *wH; caxpbypzYmbwCuda(a2, *xD, b2, *yD, *zD, *wD); caxpbypzYmbwCpu(a2, *xH, b2, *yH, *zH, *wH); error = ERROR(z) + ERROR(y); break; case 14: *xD = *xH; *yD = *yH; cabxpyAxCuda(a, b2, *xD, *yD); cabxpyAxCpu(a, b2, *xH, *yH); error = ERROR(y) + ERROR(x); break; case 15: *xD = *xH; *yD = *yH; *zD = *zH; {caxpbypzCuda(a2, *xD, b2, *yD, *zD); caxpbypzCpu(a2, *xH, b2, *yH, *zH); error = ERROR(z); } break; case 16: *xD = *xH; *yD = *yH; *zD = *zH; *wD = *wH; {caxpbypczpwCuda(a2, *xD, b2, *yD, c2, *zD, *wD); caxpbypczpwCpu(a2, *xH, b2, *yH, c2, *zH, *wH); error = ERROR(w); } break; case 17: *xD = *xH; *yD = *yH; *zD = *zH; {caxpyXmazCuda(a, *xD, *yD, *zD); caxpyXmazCpu(a, *xH, *yH, *zH); error = ERROR(y) + ERROR(x);} break; // double case 18: *xD = *xH; error = fabs(normCuda(*xD) - normCpu(*xH)) / normCpu(*xH); break; case 19: *xD = *xH; *yD = *yH; error = fabs(reDotProductCuda(*xD, *yD) - reDotProductCpu(*xH, *yH)) / fabs(reDotProductCpu(*xH, *yH)); break; case 20: *xD = *xH; *yD = *yH; {double d = axpyNormCuda(a, *xD, *yD); double h = axpyNormCpu(a, *xH, *yH); error = ERROR(y) + fabs(d-h)/fabs(h);} break; case 21: *xD = *xH; *yD = *yH; {double d = xmyNormCuda(*xD, *yD); double h = xmyNormCpu(*xH, *yH); error = ERROR(y) + fabs(d-h)/fabs(h);} break; case 22: *xD = *xH; *yD = *yH; {double d = caxpyNormCuda(a, *xD, *yD); double h = caxpyNormCpu(a, *xH, *yH); error = ERROR(y) + fabs(d-h)/fabs(h);} break; case 23: *xD = *xH; *yD = *yH; *zD = *zH; {double d = caxpyXmazNormXCuda(a, *xD, *yD, *zD); double h = caxpyXmazNormXCpu(a, *xH, *yH, *zH); error = ERROR(y) + ERROR(x) + fabs(d-h)/fabs(h);} break; case 24: *xD = *xH; *yD = *yH; {double d = cabxpyAxNormCuda(a, b2, *xD, *yD); double h = cabxpyAxNormCpu(a, b2, *xH, *yH); error = ERROR(x) + ERROR(y) + fabs(d-h)/fabs(h);} break; // double2 case 25: *xD = *xH; *yD = *yH; error = abs(cDotProductCuda(*xD, *yD) - cDotProductCpu(*xH, *yH)) / abs(cDotProductCpu(*xH, *yH)); break; case 26: *xD = *xH; *yD = *yH; *zD = *zH; { quda::Complex d = xpaycDotzyCuda(*xD, a, *yD, *zD); quda::Complex h = xpaycDotzyCpu(*xH, a, *yH, *zH); error = fabs(norm2(*yD) - norm2(*yH)) / norm2(*yH) + abs(d-h)/abs(h); } break; case 27: *xD = *xH; *yD = *yH; *zD = *zH; {quda::Complex d = caxpyDotzyCuda(a, *xD, *yD, *zD); quda::Complex h = caxpyDotzyCpu(a, *xH, *yH, *zH); error = ERROR(y) + abs(d-h)/abs(h);} break; // double3 case 28: *xD = *xH; *yD = *yH; { double3 d = cDotProductNormACuda(*xD, *yD); double3 h = cDotProductNormACpu(*xH, *yH); error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); } break; case 29: *xD = *xH; *yD = *yH; { double3 d = cDotProductNormBCuda(*xD, *yD); double3 h = cDotProductNormBCpu(*xH, *yH); error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); } break; case 30: *xD = *xH; *yD = *yH; *zD = *zH; *wD = *wH; *vD = *vH; { double3 d = caxpbypzYmbwcDotProductUYNormYCuda(a2, *xD, b2, *yD, *zD, *wD, *vD); double3 h = caxpbypzYmbwcDotProductUYNormYCpu(a2, *xH, b2, *yH, *zH, 
*wH, *vH); error = ERROR(z) + ERROR(y) + fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); } break; default: errorQuda("Undefined blas kernel %d\n", kernel); } return error; } int main(int argc, char** argv) { for (int i = 1; i < argc; i++){ if(process_command_line_option(argc, argv, &i) == 0){ continue; } printfQuda("ERROR: Invalid option:%s\n", argv[i]); usage(argv); } setSpinorSiteSize(24); initCommsQuda(argc, argv, gridsize_from_cmdline, 4); display_test_info(); initQuda(device); char *names[] = { "copyHS", "copyLS", "axpby", "xpy", "axpy", "xpay", "mxpy", "ax", "caxpy", "caxpby", "cxpaypbz", "axpyBzpcx", "axpyZpbx", "caxpbypzYmbw", "cabxpyAx", "caxpbypz", "caxpbypczpw", "caxpyXmaz", "norm", "reDotProduct", "axpyNorm", "xmyNorm", "caxpyNorm", "caxpyXmazNormX", "cabxpyAxNorm", "cDotProduct", "xpaycDotzy", "caxpyDotzy", "cDotProductNormA", "cDotProductNormB", "caxpbypzYmbwcDotProductWYNormY" }; char *prec_str[] = {"half", "single", "double"}; // Only benchmark double precision if supported #if (__COMPUTE_CAPABILITY__ >= 130) int Nprec = 3; #else int Nprec = 2; #endif // enable the tuning quda::setBlasTuning(QUDA_TUNE_YES, QUDA_SILENT); for (int prec = 0; prec < Nprec; prec++) { printfQuda("\nBenchmarking %s precision with %d iterations...\n\n", prec_str[prec], niter); initFields(prec); for (int kernel = 0; kernel < Nkernels; kernel++) { // only benchmark "high precision" copyCuda() if double is supported if ((Nprec < 3) && (kernel == 0)) continue; // do the initial tune benchmark(kernel, 1); // now rerun with more iterations to get accurate speed measurements quda::blas_flops = 0; quda::blas_bytes = 0; double secs = benchmark(kernel, niter); double gflops = (quda::blas_flops*1e-9)/(secs); double gbytes = quda::blas_bytes/(secs*1e9); printfQuda("%-31s: Gflop/s = %6.1f, GB/s = %6.1f\n", names[kernel], gflops, gbytes); } freeFields(); } // clear the error state cudaGetLastError(); // lastly check for correctness for (int prec = 0; prec < Nprec; prec++) { printfQuda("\nTesting %s precision...\n\n", prec_str[prec]); initFields(prec); for (int kernel = 0; kernel < Nkernels; kernel++) { // only benchmark "high precision" copyCuda() if double is supported if ((Nprec < 3) && (kernel == 0)) continue; double error = test(kernel); printfQuda("%-35s error = %e, \n", names[kernel], error); } freeFields(); } endQuda(); endCommsQuda(); }
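benchmark() in the CUDA version above uses the standard event-based timing pattern, and main() converts the measured seconds plus the accumulated flop/byte counters into Gflop/s and GB/s. A minimal self-contained sketch of that pattern (the dummy kernel and the counters are placeholders, not QUDA's):

```cuda
#include <cstdio>
#include <cuda_runtime.h>

// Stand-in for whichever blas kernel is being timed.
__global__ void dummy_kernel() {}

// Time niter launches with CUDA events (as benchmark() does) and report
// Gflop/s and GB/s from externally supplied flop/byte counts (as main() does).
double time_kernel(int niter, double flops, double bytes)
{
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);

  cudaEventRecord(start, 0);
  for (int i = 0; i < niter; ++i) dummy_kernel<<<1, 1>>>();
  cudaEventRecord(end, 0);
  cudaEventSynchronize(end);              // wait until all launches have completed

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, end);  // elapsed time in milliseconds
  cudaEventDestroy(start);
  cudaEventDestroy(end);

  double secs = ms / 1000.0;
  printf("Gflop/s = %6.1f, GB/s = %6.1f\n", flops * 1e-9 / secs, bytes / (secs * 1e9));
  return secs;
}
```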
cb6d6b76e7ee4000401186c61239b8be298b4f46.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <clover_field_order.h> #include <tune_quda.h> namespace quda { using namespace clover; #ifdef GPU_CLOVER_DIRAC /** Kernel argument struct */ template <typename Out, typename In> struct CopyCloverArg { Out out; const In in; int volumeCB; CopyCloverArg (const Out &out, const In in, int volume) : out(out), in(in), volumeCB(in.volumeCB) { } }; /** Generic CPU clover reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> void copyClover(CopyCloverArg<Out,In> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { for (int x=0; x<arg.volumeCB; x++) { RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, parity); } } } /** Generic CUDA clover reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> __global__ void copyCloverKernel(CopyCloverArg<Out,In> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; int x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= arg.volumeCB) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, parity); #pragma unroll for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, parity); } template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> class CopyClover : TunableVectorY { CopyCloverArg<Out,In> arg; const CloverField &meta; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.volumeCB; } public: CopyClover(CopyCloverArg<Out,In> &arg, const CloverField &meta) : TunableVectorY(2), arg(arg), meta(meta) { writeAuxString("out_stride=%d,in_stride=%d", arg.out.stride, arg.in.stride); } virtual ~CopyClover() { ; } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); hipLaunchKernelGGL(( copyCloverKernel<FloatOut, FloatIn, length, Out, In>) , dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg); } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 0; } long long bytes() const { return 2*arg.volumeCB*(arg.in.Bytes() + arg.out.Bytes()); } }; template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyClover(OutOrder outOrder, const InOrder inOrder, const CloverField &out, QudaFieldLocation location) { CopyCloverArg<OutOrder,InOrder> arg(outOrder, inOrder, out.Volume()); if (location == QUDA_CPU_FIELD_LOCATION) { copyClover<FloatOut, FloatIn, length, OutOrder, InOrder>(arg); } else if (location == QUDA_CUDA_FIELD_LOCATION) { CopyClover<FloatOut, FloatIn, length, OutOrder, InOrder> cloverCopier(arg, out); cloverCopier.apply(0); } else { errorQuda("Undefined field location %d for copyClover", location); } } template <typename FloatOut, typename FloatIn, int length, typename InOrder> void copyClover(const InOrder &inOrder, CloverField &out, bool inverse, QudaFieldLocation location, FloatOut *Out, float *outNorm) { if (out.isNative()) { const bool override = true; typedef typename clover_mapper<FloatOut>::type C; copyClover<FloatOut,FloatIn,length>(C(out, inverse, Out, outNorm, override), inOrder, out, location); } else if (out.Order() == QUDA_PACKED_CLOVER_ORDER) { copyClover<FloatOut,FloatIn,length> (QDPOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location); } else if (out.Order() == QUDA_QDPJIT_CLOVER_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyClover<FloatOut,FloatIn,length> (QDPJITOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (out.Order() == QUDA_BQCD_CLOVER_ORDER) { errorQuda("BQCD output not supported"); } else { errorQuda("Clover field %d order not supported", out.Order()); } } template <typename FloatOut, typename FloatIn, int length> void copyClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location, FloatOut *Out, FloatIn *In, float *outNorm, float *inNorm) { // reconstruction only supported on FloatN fields currently if (in.isNative()) { const bool override = true; typedef typename clover_mapper<FloatIn>::type C; copyClover<FloatOut,FloatIn,length>(C(in, inverse, In, inNorm, override), out, inverse, location, Out, outNorm); } else if (in.Order() == QUDA_PACKED_CLOVER_ORDER) { copyClover<FloatOut,FloatIn,length> (QDPOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); } else if (in.Order() == QUDA_QDPJIT_CLOVER_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyClover<FloatOut,FloatIn,length> (QDPJITOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (in.Order() == QUDA_BQCD_CLOVER_ORDER) { #ifdef BUILD_BQCD_INTERFACE copyClover<FloatOut,FloatIn,length> (BQCDOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); #else errorQuda("BQCD interface has not been built\n"); #endif 
} else { errorQuda("Clover field %d order not supported", in.Order()); } } #endif // this is the function that is actually called, from here on down we instantiate all required templates void copyGenericClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location, void *Out, void *In, void *outNorm, void *inNorm) { #ifdef GPU_CLOVER_DIRAC if (out.Precision() == QUDA_HALF_PRECISION && out.Order() > 4) errorQuda("Half precision not supported for order %d", out.Order()); if (in.Precision() == QUDA_HALF_PRECISION && in.Order() > 4) errorQuda("Half precision not supported for order %d", in.Order()); if (out.Precision() == QUDA_DOUBLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyClover<double,double,72>(out, in, inverse, location, (double*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<double,float,72>(out, in, inverse, location, (double*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<double,short,72>(out, in, inverse, location, (double*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } else if (out.Precision() == QUDA_SINGLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyClover<float,double,72>(out, in, inverse, location, (float*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<float,float,72>(out, in, inverse, location, (float*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<float,short,72>(out, in, inverse, location, (float*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } else if (out.Precision() == QUDA_HALF_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION){ copyClover<short,double,72>(out, in, inverse, location, (short*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<short,float,72>(out, in, inverse, location, (short*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<short,short,72>(out, in, inverse, location, (short*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } #else errorQuda("Clover has not been built"); #endif } } // namespace quda
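Every instantiation in the clover copy above is templated on length = 72. That count is consistent with the clover term being block-diagonal in chirality and packed as two 6x6 Hermitian blocks per site (an assumption about the layout, not stated in this file):

```latex
% Reals per site: two chiral blocks, each a 6x6 Hermitian matrix
\[
2 \times \bigl(\underbrace{6}_{\text{real diagonal}} + \underbrace{2\times 15}_{\text{complex off-diagonal}}\bigr) = 2 \times 36 = 72
\]
```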
cb6d6b76e7ee4000401186c61239b8be298b4f46.cu
#include <clover_field_order.h> #include <tune_quda.h> namespace quda { using namespace clover; #ifdef GPU_CLOVER_DIRAC /** Kernel argument struct */ template <typename Out, typename In> struct CopyCloverArg { Out out; const In in; int volumeCB; CopyCloverArg (const Out &out, const In in, int volume) : out(out), in(in), volumeCB(in.volumeCB) { } }; /** Generic CPU clover reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> void copyClover(CopyCloverArg<Out,In> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { for (int x=0; x<arg.volumeCB; x++) { RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, parity); } } } /** Generic CUDA clover reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> __global__ void copyCloverKernel(CopyCloverArg<Out,In> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; int x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= arg.volumeCB) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, parity); #pragma unroll for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, parity); } template <typename FloatOut, typename FloatIn, int length, typename Out, typename In> class CopyClover : TunableVectorY { CopyCloverArg<Out,In> arg; const CloverField &meta; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.volumeCB; } public: CopyClover(CopyCloverArg<Out,In> &arg, const CloverField &meta) : TunableVectorY(2), arg(arg), meta(meta) { writeAuxString("out_stride=%d,in_stride=%d", arg.out.stride, arg.in.stride); } virtual ~CopyClover() { ; } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); copyCloverKernel<FloatOut, FloatIn, length, Out, In> <<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg); } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return 0; } long long bytes() const { return 2*arg.volumeCB*(arg.in.Bytes() + arg.out.Bytes()); } }; template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyClover(OutOrder outOrder, const InOrder inOrder, const CloverField &out, QudaFieldLocation location) { CopyCloverArg<OutOrder,InOrder> arg(outOrder, inOrder, out.Volume()); if (location == QUDA_CPU_FIELD_LOCATION) { copyClover<FloatOut, FloatIn, length, OutOrder, InOrder>(arg); } else if (location == QUDA_CUDA_FIELD_LOCATION) { CopyClover<FloatOut, FloatIn, length, OutOrder, InOrder> cloverCopier(arg, out); cloverCopier.apply(0); } else { errorQuda("Undefined field location %d for copyClover", location); } } template <typename FloatOut, typename FloatIn, int length, typename InOrder> void copyClover(const InOrder &inOrder, CloverField &out, bool inverse, QudaFieldLocation location, FloatOut *Out, float *outNorm) { if (out.isNative()) { const bool override = true; typedef typename clover_mapper<FloatOut>::type C; copyClover<FloatOut,FloatIn,length>(C(out, inverse, Out, outNorm, override), inOrder, out, location); } else if (out.Order() == QUDA_PACKED_CLOVER_ORDER) { copyClover<FloatOut,FloatIn,length> (QDPOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location); } else if (out.Order() == QUDA_QDPJIT_CLOVER_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyClover<FloatOut,FloatIn,length> (QDPJITOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (out.Order() == QUDA_BQCD_CLOVER_ORDER) { errorQuda("BQCD output not supported"); } else { errorQuda("Clover field %d order not supported", out.Order()); } } template <typename FloatOut, typename FloatIn, int length> void copyClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location, FloatOut *Out, FloatIn *In, float *outNorm, float *inNorm) { // reconstruction only supported on FloatN fields currently if (in.isNative()) { const bool override = true; typedef typename clover_mapper<FloatIn>::type C; copyClover<FloatOut,FloatIn,length>(C(in, inverse, In, inNorm, override), out, inverse, location, Out, outNorm); } else if (in.Order() == QUDA_PACKED_CLOVER_ORDER) { copyClover<FloatOut,FloatIn,length> (QDPOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); } else if (in.Order() == QUDA_QDPJIT_CLOVER_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyClover<FloatOut,FloatIn,length> (QDPJITOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (in.Order() == QUDA_BQCD_CLOVER_ORDER) { #ifdef BUILD_BQCD_INTERFACE copyClover<FloatOut,FloatIn,length> (BQCDOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm); #else errorQuda("BQCD interface has not been built\n"); #endif } else { errorQuda("Clover 
field %d order not supported", in.Order()); } } #endif // this is the function that is actually called, from here on down we instantiate all required templates void copyGenericClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location, void *Out, void *In, void *outNorm, void *inNorm) { #ifdef GPU_CLOVER_DIRAC if (out.Precision() == QUDA_HALF_PRECISION && out.Order() > 4) errorQuda("Half precision not supported for order %d", out.Order()); if (in.Precision() == QUDA_HALF_PRECISION && in.Order() > 4) errorQuda("Half precision not supported for order %d", in.Order()); if (out.Precision() == QUDA_DOUBLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyClover<double,double,72>(out, in, inverse, location, (double*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<double,float,72>(out, in, inverse, location, (double*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<double,short,72>(out, in, inverse, location, (double*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } else if (out.Precision() == QUDA_SINGLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyClover<float,double,72>(out, in, inverse, location, (float*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<float,float,72>(out, in, inverse, location, (float*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<float,short,72>(out, in, inverse, location, (float*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } else if (out.Precision() == QUDA_HALF_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION){ copyClover<short,double,72>(out, in, inverse, location, (short*)Out, (double*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyClover<short,float,72>(out, in, inverse, location, (short*)Out, (float*)In, (float*)outNorm, (float*)inNorm); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyClover<short,short,72>(out, in, inverse, location, (short*)Out, (short*)In, (float*)outNorm, (float*)inNorm); } } #else errorQuda("Clover has not been built"); #endif } } // namespace quda
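copyCloverKernel above assigns one thread per checkerboard site, takes the parity from the y grid dimension (TunableVectorY(2)), and stages each site through a register array while converting precision. A stripped-down analogue with plain arrays standing in for the accessor objects (the layout, names, and launch shape below are illustrative, not QUDA's):

```cuda
#include <cuda_runtime.h>

// Simplified analogue of copyCloverKernel: one thread per checkerboard site,
// parity from the y grid dimension, values staged in registers while
// converting from FloatIn to FloatOut. The real code goes through In::load /
// Out::save, which also handle field strides and half-precision norms.
template <typename FloatOut, typename FloatIn, int length>
__global__ void convert_copy(FloatOut *out, const FloatIn *in, int volumeCB)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  if (x >= volumeCB) return;
  int parity = blockIdx.y * blockDim.y + threadIdx.y;   // 0 or 1

  const FloatIn *src = in  + (size_t)(parity * volumeCB + x) * length;
  FloatOut      *dst = out + (size_t)(parity * volumeCB + x) * length;

  FloatIn  tmp_in[length];
  FloatOut tmp_out[length];
#pragma unroll
  for (int i = 0; i < length; i++) tmp_in[i] = src[i];
#pragma unroll
  for (int i = 0; i < length; i++) tmp_out[i] = static_cast<FloatOut>(tmp_in[i]);
#pragma unroll
  for (int i = 0; i < length; i++) dst[i] = tmp_out[i];
}

// Example launch mirroring TunableVectorY(2): the y dimension covers both parities.
// convert_copy<float, double, 72><<<dim3((volumeCB + 127) / 128, 2), dim3(128, 1)>>>(out, in, volumeCB);
```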
16b2f53ab3d9a813f6cb1d7800f21edcd4bb2bc0.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <math.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> // includes, kernels #include "vector_reduction_kernel.hip" #include <fstream> #include <string> #include "vector_reduction_gold.cpp" // For simplicity, just to get the idea in this MP, we're fixing the problem size to 512 elements. #define NUM_ELEMENTS 512 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int ReadFile(float*, char* file_name); float computeOnDevice(float* h_data, int array_mem_size); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run naive scan test //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int num_elements = NUM_ELEMENTS; int errorM = 0; const unsigned int array_mem_size = sizeof( float) * num_elements; // allocate host memory to store the input data float* h_data = (float*) malloc( array_mem_size); // * No arguments: Randomly generate input data and compare against the // host's result. // * One argument: Read the input data array from the given file. 
switch(argc-1) { case 1: // One Argument errorM = ReadFile(h_data, argv[1]); if(errorM != 1) { printf("Error reading input file!\n"); exit(1); } break; default: // No Arguments or one argument // initialize the input data on the host to be integer values // between 0 and 1000 for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = floorf(1000*(rand()/(float)RAND_MAX)); } break; } // compute reference solution float reference = 0.0f; computeGold(&reference , h_data, num_elements); // **===-------- Modify the body of this function -----------===** float result = computeOnDevice(h_data, num_elements); // **===-----------------------------------------------------------===** // We can use an epsilon of 0 since values are integral and in a range // that can be exactly represented float epsilon = 0.0f; unsigned int result_regtest = (abs(result - reference) <= epsilon); printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED"); printf( "device: %f host: %f\n", result, reference); // cleanup memory free( h_data); } /*--make your own--*/ int ReadFile(float* M, char* file_name) { std::ifstream file(file_name); for (int i = 0; i < NUM_ELEMENTS; ++i) { if (!(file >> M[i])) return 0; } return 1; } // **===----------------- Modify this function ---------------------===** // Take h_data from host, copies it to device, setup grid and thread // dimentions, excutes kernel function, and copy result of scan back // to h_data. // Note: float* h_data is both the input and the output of this function. float computeOnDevice(float* h_data, int num_elements) { size_t data_size = sizeof(float)*num_elements; float* d_data; hipMalloc(&d_data, data_size); hipMemcpy(d_data, h_data, data_size, hipMemcpyHostToDevice); // as per assignment spec, only 512 elements hipLaunchKernelGGL(( reduction), dim3(1),dim3(num_elements), 0, 0, d_data, num_elements); hipMemcpy(h_data, d_data, data_size, hipMemcpyDeviceToHost); float result= h_data[0]; hipFree(h_data); return result; }
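The host code above launches reduction with a single block of num_elements (512) threads (via hipLaunchKernelGGL in this HIP build) and afterwards reads element 0 back as the sum. The kernel itself comes from vector_reduction_kernel and is not shown here; a shared-memory tree reduction consistent with that launch shape and result convention would look roughly as below. This is an assumption about the unseen kernel, not its actual source; the device code is identical in CUDA and HIP.

```cuda
// Hypothetical single-block tree reduction matching the <<<1, 512>>> launch
// and the convention that the host reads the result from g_data[0].
#define NUM_ELEMENTS 512

__global__ void reduction(float *g_data, int n)
{
  __shared__ float partial[NUM_ELEMENTS];

  int tid = threadIdx.x;
  partial[tid] = (tid < n) ? g_data[tid] : 0.0f;
  __syncthreads();

  // halve the number of active threads each step (requires a power-of-two block size)
  for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (tid < stride) partial[tid] += partial[tid + stride];
    __syncthreads();
  }

  if (tid == 0) g_data[0] = partial[0];   // host copies this element back as the sum
}
```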
16b2f53ab3d9a813f6cb1d7800f21edcd4bb2bc0.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <math.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> // includes, kernels #include "vector_reduction_kernel.cu" #include <fstream> #include <string> #include "vector_reduction_gold.cpp" // For simplicity, just to get the idea in this MP, we're fixing the problem size to 512 elements. #define NUM_ELEMENTS 512 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int ReadFile(float*, char* file_name); float computeOnDevice(float* h_data, int array_mem_size); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run naive scan test //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int num_elements = NUM_ELEMENTS; int errorM = 0; const unsigned int array_mem_size = sizeof( float) * num_elements; // allocate host memory to store the input data float* h_data = (float*) malloc( array_mem_size); // * No arguments: Randomly generate input data and compare against the // host's result. // * One argument: Read the input data array from the given file. 
switch(argc-1) { case 1: // One Argument errorM = ReadFile(h_data, argv[1]); if(errorM != 1) { printf("Error reading input file!\n"); exit(1); } break; default: // No Arguments or one argument // initialize the input data on the host to be integer values // between 0 and 1000 for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = floorf(1000*(rand()/(float)RAND_MAX)); } break; } // compute reference solution float reference = 0.0f; computeGold(&reference , h_data, num_elements); // **===-------- Modify the body of this function -----------===** float result = computeOnDevice(h_data, num_elements); // **===-----------------------------------------------------------===** // We can use an epsilon of 0 since values are integral and in a range // that can be exactly represented float epsilon = 0.0f; unsigned int result_regtest = (abs(result - reference) <= epsilon); printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED"); printf( "device: %f host: %f\n", result, reference); // cleanup memory free( h_data); } /*--make your own--*/ int ReadFile(float* M, char* file_name) { std::ifstream file(file_name); for (int i = 0; i < NUM_ELEMENTS; ++i) { if (!(file >> M[i])) return 0; } return 1; } // **===----------------- Modify this function ---------------------===** // Takes h_data from the host, copies it to the device, sets up the grid and thread // dimensions, executes the kernel function, and copies the result of the reduction back // to h_data. // Note: float* h_data is both the input and the output of this function. float computeOnDevice(float* h_data, int num_elements) { size_t data_size = sizeof(float)*num_elements; float* d_data; cudaMalloc(&d_data, data_size); cudaMemcpy(d_data, h_data, data_size, cudaMemcpyHostToDevice); // as per assignment spec, only 512 elements reduction<<<1,num_elements>>>(d_data, num_elements); cudaMemcpy(h_data, d_data, data_size, cudaMemcpyDeviceToHost); float result= h_data[0]; cudaFree(d_data); // free the device buffer (the malloc'd host pointer is freed by the caller) return result; }
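Both versions of computeOnDevice launch a kernel named reduction that lives in vector_reduction_kernel.cu (hipified to vector_reduction_kernel.hip), which is not reproduced in this corpus. As a point of reference only, a minimal sketch of a kernel that would satisfy the call site above — a single block of num_elements threads, with the sum written back to element 0 — could look like the following; everything beyond the name and signature taken from the call site is an assumption.

__global__ void reduction(float *g_data, int n)
{
    // NUM_ELEMENTS is fixed to 512 in this assignment, so a static shared array suffices.
    __shared__ float partial[512];
    unsigned int tid = threadIdx.x;

    // One element per thread; pad with zeros if the block is wider than the array.
    partial[tid] = (tid < (unsigned int)n) ? g_data[tid] : 0.0f;
    __syncthreads();

    // Classic tree reduction: halve the number of active threads each step.
    // Assumes blockDim.x is a power of two, which holds for the <<<1, 512>>> launch.
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) {
            partial[tid] += partial[tid + stride];
        }
        __syncthreads();
    }

    // Thread 0 writes the total back; the host then reads h_data[0].
    if (tid == 0) {
        g_data[0] = partial[0];
    }
}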
db7933a6ebc93b83d861e0b4130d856ff18d3fe3.hip
// !!! This is a file automatically generated by hipify!!! //=================================================================// // CUDA BFS kernel // Topological-Driven: one node per thread, no atomic instructions // Reference: // Pawan Harish, Accelerating large graph algorithms // on the GPU using CUDA (HiPC 2007) //=================================================================// #include <hip/hip_runtime.h> #include <stdint.h> #include <stdio.h> #include "cudaGraph.h" __global__ void initialize(bool * d_graph_frontier, bool * d_updating_graph_frontier, bool * d_graph_visited, uint32_t * d_graph_property, uint64_t num_vertex) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < num_vertex ) { d_graph_frontier[tid] = false; d_updating_graph_frontier[tid] = false; d_graph_visited[tid] = false; d_graph_property[tid] = MY_INFINITY; } } __global__ void BFS_kernel_1( cudaGraph d_graph, bool * device_graph_frontier, bool * device_updating_graph_frontier, bool * device_graph_visited, uint32_t * device_vpl ) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid<d_graph.vertex_cnt && device_graph_frontier[tid] ) { device_graph_frontier[tid] = false; uint64_t eidx = d_graph.get_firstedge_index(tid); uint64_t sz = d_graph.get_vertex_degree(tid); for (size_t i=eidx; i<sz+eidx; i++) { uint64_t vid = d_graph.get_edge_dest(i); if (device_graph_visited[vid]==false) { device_vpl[vid] = device_vpl[tid]+1; device_updating_graph_frontier[vid] = true; } } } } __global__ void BFS_kernel_2( bool * device_graph_frontier, bool * device_updating_graph_frontier, bool * device_graph_visited, bool * device_over, uint64_t vl_sz ) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < vl_sz && device_updating_graph_frontier[tid] ) { device_graph_frontier[tid] = true; device_graph_visited[tid] = true; device_updating_graph_frontier[tid] = false; *device_over = true; } } void cuda_BFS(uint64_t * vertexlist, uint64_t * degreelist, uint64_t * edgelist, uint32_t * vproplist, uint64_t vertex_cnt, uint64_t edge_cnt, uint64_t root) { uint32_t * device_vpl = 0; bool * device_graph_frontier = 0; bool * device_updating_graph_frontier = 0; bool * device_graph_visited = 0; bool * device_over = 0; float h2d_copy_time = 0; // host to device data transfer time float d2h_copy_time = 0; // device to host data transfer time float kernel_time = 0; // kernel execution time int device; hipGetDevice(&device); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp,device); // Try to use as many threads as possible so that each thread // is processing one vertex. If max thread is reached, // split them into multiple blocks. 
unsigned int num_thread_per_block = (unsigned int) vertex_cnt; if (num_thread_per_block > devProp.maxThreadsPerBlock) num_thread_per_block = devProp.maxThreadsPerBlock; unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block ); // malloc of gpu side cudaErrCheck( hipMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) ); cudaErrCheck( hipMalloc((void**)&device_graph_frontier, vertex_cnt*sizeof(bool)) ); cudaErrCheck( hipMalloc((void**)&device_updating_graph_frontier, vertex_cnt*sizeof(bool)) ); cudaErrCheck( hipMalloc((void**)&device_graph_visited, vertex_cnt*sizeof(bool)) ); cudaErrCheck( hipMalloc((void**)&device_over, sizeof(bool)) ); hipEvent_t start_event, stop_event; cudaErrCheck( hipEventCreate(&start_event) ); cudaErrCheck( hipEventCreate(&stop_event) ); // initialization hipLaunchKernelGGL(( initialize), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_vpl, vertex_cnt); // prepare graph struct // one for host side, one for device side cudaGraph h_graph, d_graph; // here copy only the pointers h_graph.read(vertexlist, degreelist, edgelist, vertex_cnt, edge_cnt); bool true_flag=true; uint32_t zero_flag=0; // memcpy from host to device hipEventRecord(start_event, 0); // copy graph data to device h_graph.cudaGraphCopy(&d_graph); cudaErrCheck( hipMemcpy(&(device_graph_frontier[root]), &true_flag, sizeof(bool), hipMemcpyHostToDevice) ); // set root vertex as the first frontier cudaErrCheck( hipMemcpy(&(device_graph_visited[root]), &true_flag, sizeof(bool), hipMemcpyHostToDevice) ); // set root vertex as visited cudaErrCheck( hipMemcpy(&(device_vpl[root]), &zero_flag, sizeof(uint32_t), hipMemcpyHostToDevice) ); // set root vertex as visited hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&h2d_copy_time, start_event, stop_event); // BFS traversal bool stop; hipEventRecord(start_event, 0); int k=0; do { // Each iteration processes // one level of BFS traversal stop = false; cudaErrCheck( hipMemcpy(device_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ); // step 1 hipLaunchKernelGGL(( BFS_kernel_1), dim3(num_block), dim3(num_thread_per_block), 0, 0, d_graph, device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_vpl); // step 2 hipLaunchKernelGGL(( BFS_kernel_2), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_over, vertex_cnt); cudaErrCheck( hipMemcpy(&stop, device_over, sizeof(bool), hipMemcpyDeviceToHost) ); k++; }while(stop); hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&kernel_time, start_event, stop_event); hipEventRecord(start_event, 0); cudaErrCheck( hipMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t), hipMemcpyDeviceToHost) ); hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&d2h_copy_time, start_event, stop_event); printf("== iteration #: %d\n", k); printf("== host->device copy time: %f ms\n", h2d_copy_time); printf("== device->host copy time: %f ms\n", d2h_copy_time); printf("== kernel time: %f ms\n", kernel_time); hipEventDestroy(start_event); hipEventDestroy(stop_event); // free graph struct on device side d_graph.cudaGraphFree(); cudaErrCheck( hipFree(device_vpl) ); cudaErrCheck( hipFree(device_graph_frontier) ); cudaErrCheck( hipFree(device_updating_graph_frontier) ); cudaErrCheck( hipFree(device_graph_visited) ); }
db7933a6ebc93b83d861e0b4130d856ff18d3fe3.cu
//=================================================================// // CUDA BFS kernel // Topological-Driven: one node per thread, no atomic instructions // Reference: // Pawan Harish, Accelerating large graph algorithms // on the GPU using CUDA (HiPC 2007) //=================================================================// #include <cuda.h> #include <stdint.h> #include <stdio.h> #include "cudaGraph.h" __global__ void initialize(bool * d_graph_frontier, bool * d_updating_graph_frontier, bool * d_graph_visited, uint32_t * d_graph_property, uint64_t num_vertex) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < num_vertex ) { d_graph_frontier[tid] = false; d_updating_graph_frontier[tid] = false; d_graph_visited[tid] = false; d_graph_property[tid] = MY_INFINITY; } } __global__ void BFS_kernel_1( cudaGraph d_graph, bool * device_graph_frontier, bool * device_updating_graph_frontier, bool * device_graph_visited, uint32_t * device_vpl ) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid<d_graph.vertex_cnt && device_graph_frontier[tid] ) { device_graph_frontier[tid] = false; uint64_t eidx = d_graph.get_firstedge_index(tid); uint64_t sz = d_graph.get_vertex_degree(tid); for (size_t i=eidx; i<sz+eidx; i++) { uint64_t vid = d_graph.get_edge_dest(i); if (device_graph_visited[vid]==false) { device_vpl[vid] = device_vpl[tid]+1; device_updating_graph_frontier[vid] = true; } } } } __global__ void BFS_kernel_2( bool * device_graph_frontier, bool * device_updating_graph_frontier, bool * device_graph_visited, bool * device_over, uint64_t vl_sz ) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < vl_sz && device_updating_graph_frontier[tid] ) { device_graph_frontier[tid] = true; device_graph_visited[tid] = true; device_updating_graph_frontier[tid] = false; *device_over = true; } } void cuda_BFS(uint64_t * vertexlist, uint64_t * degreelist, uint64_t * edgelist, uint32_t * vproplist, uint64_t vertex_cnt, uint64_t edge_cnt, uint64_t root) { uint32_t * device_vpl = 0; bool * device_graph_frontier = 0; bool * device_updating_graph_frontier = 0; bool * device_graph_visited = 0; bool * device_over = 0; float h2d_copy_time = 0; // host to device data transfer time float d2h_copy_time = 0; // device to host data transfer time float kernel_time = 0; // kernel execution time int device; cudaGetDevice(&device); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp,device); // Try to use as many threads as possible so that each thread // is processing one vertex. If max thread is reached, // split them into multiple blocks. 
unsigned int num_thread_per_block = (unsigned int) vertex_cnt; if (num_thread_per_block > devProp.maxThreadsPerBlock) num_thread_per_block = devProp.maxThreadsPerBlock; unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block ); // malloc of gpu side cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) ); cudaErrCheck( cudaMalloc((void**)&device_graph_frontier, vertex_cnt*sizeof(bool)) ); cudaErrCheck( cudaMalloc((void**)&device_updating_graph_frontier, vertex_cnt*sizeof(bool)) ); cudaErrCheck( cudaMalloc((void**)&device_graph_visited, vertex_cnt*sizeof(bool)) ); cudaErrCheck( cudaMalloc((void**)&device_over, sizeof(bool)) ); cudaEvent_t start_event, stop_event; cudaErrCheck( cudaEventCreate(&start_event) ); cudaErrCheck( cudaEventCreate(&stop_event) ); // initialization initialize<<<num_block, num_thread_per_block>>>( device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_vpl, vertex_cnt); // prepare graph struct // one for host side, one for device side cudaGraph h_graph, d_graph; // here copy only the pointers h_graph.read(vertexlist, degreelist, edgelist, vertex_cnt, edge_cnt); bool true_flag=true; uint32_t zero_flag=0; // memcpy from host to device cudaEventRecord(start_event, 0); // copy graph data to device h_graph.cudaGraphCopy(&d_graph); cudaErrCheck( cudaMemcpy(&(device_graph_frontier[root]), &true_flag, sizeof(bool), cudaMemcpyHostToDevice) ); // set root vertex as the first frontier cudaErrCheck( cudaMemcpy(&(device_graph_visited[root]), &true_flag, sizeof(bool), cudaMemcpyHostToDevice) ); // set root vertex as visited cudaErrCheck( cudaMemcpy(&(device_vpl[root]), &zero_flag, sizeof(uint32_t), cudaMemcpyHostToDevice) ); // set root vertex as visited cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event); // BFS traversal bool stop; cudaEventRecord(start_event, 0); int k=0; do { // Each iteration processes // one level of BFS traversal stop = false; cudaErrCheck( cudaMemcpy(device_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ); // step 1 BFS_kernel_1<<<num_block, num_thread_per_block>>>(d_graph, device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_vpl); // step 2 BFS_kernel_2<<<num_block, num_thread_per_block>>>( device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_over, vertex_cnt); cudaErrCheck( cudaMemcpy(&stop, device_over, sizeof(bool), cudaMemcpyDeviceToHost) ); k++; }while(stop); cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&kernel_time, start_event, stop_event); cudaEventRecord(start_event, 0); cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t), cudaMemcpyDeviceToHost) ); cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event); printf("== iteration #: %d\n", k); printf("== host->device copy time: %f ms\n", h2d_copy_time); printf("== device->host copy time: %f ms\n", d2h_copy_time); printf("== kernel time: %f ms\n", kernel_time); cudaEventDestroy(start_event); cudaEventDestroy(stop_event); // free graph struct on device side d_graph.cudaGraphFree(); cudaErrCheck( cudaFree(device_vpl) ); cudaErrCheck( cudaFree(device_graph_frontier) ); cudaErrCheck( cudaFree(device_updating_graph_frontier) ); cudaErrCheck( cudaFree(device_graph_visited) ); }
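A host-side reference is useful for validating the device_vpl values copied back into vproplist. The sketch below is illustrative and not part of the original benchmark: it assumes vertexlist holds the first-edge offsets and degreelist the out-degrees (which matches how BFS_kernel_1 walks the graph through get_firstedge_index and get_vertex_degree), and uses 0xFFFFFFFF as a stand-in for the MY_INFINITY constant provided by cudaGraph.h.

#include <stdint.h>
#include <queue>

// Reference level-synchronous BFS over the same CSR arrays passed to cuda_BFS (validation sketch only).
void cpu_BFS_reference(const uint64_t * vertexlist, const uint64_t * degreelist,
                       const uint64_t * edgelist, uint32_t * vproplist,
                       uint64_t vertex_cnt, uint64_t root)
{
    const uint32_t UNREACHED = 0xFFFFFFFFu;   // stand-in for MY_INFINITY
    for (uint64_t v = 0; v < vertex_cnt; v++) vproplist[v] = UNREACHED;

    std::queue<uint64_t> frontier;
    vproplist[root] = 0;
    frontier.push(root);

    while (!frontier.empty()) {
        uint64_t u = frontier.front(); frontier.pop();
        uint64_t first_edge = vertexlist[u];            // assumed: first-edge offset of u
        for (uint64_t e = first_edge; e < first_edge + degreelist[u]; e++) {
            uint64_t w = edgelist[e];
            if (vproplist[w] == UNREACHED) {            // first time w is reached
                vproplist[w] = vproplist[u] + 1;        // one level deeper than u
                frontier.push(w);
            }
        }
    }
}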
0a397395df35debd63411c9c324a0cf599450836.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define BLOCK_SIZE 2 #define GRID_SIZE 2 #define N GRID_SIZE * BLOCK_SIZE __global__ void MatrixMul(float *A, float *B, float *C, int n) { // Each thread computes a single element of C int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; float sum = 0; for (int i = 0; i < n; ++i) { sum += (A[row*n + i] * B[i*n + col]); } C[row*n + col] = sum; printf("\n Block[%d][%d] : Thread[%d][%d] : Product = %.2f\n", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, sum); } int main() { // Perform matrix multiplication C = A*B // where A, B and C are NxN matrices // Restricted to matrices where N = GRID_SIZE*BLOCGRID_SIZE_SIZE; float *hA, *hB, *hC; float *dA, *dB, *dC; int size = N * N * sizeof(float); printf("Executing Matrix Multiplcation\n"); printf("Matrix size: %d x %d\n", N,N); // Allocate memory on the host hA = (float *) malloc(size); hB = (float *) malloc(size); hC = (float *) malloc(size); // Initialize matrices on the host for (int j = 0; j<N; j++){ for (int i = 0; i<N; i++){ hA[j*N + i] = 2; hB[j*N + i] = 1; } } printf("Matrix 1:\n"); for (int j = 0; j<N; j++){ for (int i = 0; i<N; i++){ printf("%.2f ", hA[j*N + i]); } printf("\n"); } printf("\nMatrix 2:\n"); for (int j = 0; j<N; j++){ for (int i = 0; i<N; i++){ printf("%.2f ", hB[j*N + i]); } printf("\n"); } // Allocate memory on the device hipMalloc(&dA, size); hipMalloc(&dB, size); hipMalloc(&dC, size); dim3 threadBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(GRID_SIZE, GRID_SIZE); // Copy matrices from the host to device hipMemcpy(dA, hA, size, hipMemcpyHostToDevice); hipMemcpy(dB, hB, size, hipMemcpyHostToDevice); //Execute the matrix multiplication kernel printf("\n Launching Gird of size ( %dx%d ) with Blocks of size (%d x %d)\n", GRID_SIZE, GRID_SIZE, BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( MatrixMul) , dim3(grid), dim3(threadBlock) , 0, 0, dA, dB, dC, N); // Now copy the GPU result back to CPU hipMemcpy(hC, dC, size, hipMemcpyDeviceToHost); printf("\n The Product of A and B is:\n"); for (int j = 0; j<N; j++){ for (int i = 0; i<N; i++){ printf("%.2f ", hC[j*N + i]); } printf("\n"); } return 0; } /* output: student@B4L0106:~$ nvcc kernel.cu student@B4L0106:~$ ./a.out Executing Matrix Multiplcation Matrix size: 4 x 4< Matrix 1: 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 Matrix 2: 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 Launching Gird of size ( 2x2 ) with Blocks of size (2 x 2) Block[0][0] : Thread[0][0] : Product = 8.00 Block[0][0] : Thread[1][0] : Product = 8.00 Block[0][0] : Thread[0][1] : Product = 8.00 Block[0][0] : Thread[1][1] : Product = 8.00 Block[0][1] : Thread[0][0] : Product = 8.00 Block[0][1] : Thread[1][0] : Product = 8.00 Block[0][1] : Thread[0][1] : Product = 8.00 Block[0][1] : Thread[1][1] : Product = 8.00 Block[1][1] : Thread[0][0] : Product = 8.00 Block[1][1] : Thread[1][0] : Product = 8.00 Block[1][1] : Thread[0][1] : Product = 8.00 Block[1][1] : Thread[1][1] : Product = 8.00 Block[1][0] : Thread[0][0] : Product = 8.00 Block[1][0] : Thread[1][0] : Product = 8.00 Block[1][0] : Thread[0][1] : Product = 8.00 Block[1][0] : Thread[1][1] : Product = 8.00 The Product of A and B is: 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 */
0a397395df35debd63411c9c324a0cf599450836.cu
#include <stdio.h> #include <stdlib.h> #define BLOCK_SIZE 2 #define GRID_SIZE 2 #define N GRID_SIZE * BLOCK_SIZE __global__ void MatrixMul(float *A, float *B, float *C, int n) { // Each thread computes a single element of C int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; float sum = 0; for (int i = 0; i < n; ++i) { sum += (A[row*n + i] * B[i*n + col]); } C[row*n + col] = sum; printf("\n Block[%d][%d] : Thread[%d][%d] : Product = %.2f\n", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, sum); } int main() { // Perform matrix multiplication C = A*B // where A, B and C are NxN matrices // Restricted to matrices where N = GRID_SIZE*BLOCGRID_SIZE_SIZE; float *hA, *hB, *hC; float *dA, *dB, *dC; int size = N * N * sizeof(float); printf("Executing Matrix Multiplcation\n"); printf("Matrix size: %d x %d\n", N,N); // Allocate memory on the host hA = (float *) malloc(size); hB = (float *) malloc(size); hC = (float *) malloc(size); // Initialize matrices on the host for (int j = 0; j<N; j++){ for (int i = 0; i<N; i++){ hA[j*N + i] = 2; hB[j*N + i] = 1; } } printf("Matrix 1:\n"); for (int j = 0; j<N; j++){ for (int i = 0; i<N; i++){ printf("%.2f ", hA[j*N + i]); } printf("\n"); } printf("\nMatrix 2:\n"); for (int j = 0; j<N; j++){ for (int i = 0; i<N; i++){ printf("%.2f ", hB[j*N + i]); } printf("\n"); } // Allocate memory on the device cudaMalloc(&dA, size); cudaMalloc(&dB, size); cudaMalloc(&dC, size); dim3 threadBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(GRID_SIZE, GRID_SIZE); // Copy matrices from the host to device cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice); cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice); //Execute the matrix multiplication kernel printf("\n Launching Gird of size ( %dx%d ) with Blocks of size (%d x %d)\n", GRID_SIZE, GRID_SIZE, BLOCK_SIZE, BLOCK_SIZE); MatrixMul <<<grid, threadBlock >>>(dA, dB, dC, N); // Now copy the GPU result back to CPU cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost); printf("\n The Product of A and B is:\n"); for (int j = 0; j<N; j++){ for (int i = 0; i<N; i++){ printf("%.2f ", hC[j*N + i]); } printf("\n"); } return 0; } /* output: student@B4L0106:~$ nvcc kernel.cu student@B4L0106:~$ ./a.out Executing Matrix Multiplcation Matrix size: 4 x 4< Matrix 1: 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 2.00 Matrix 2: 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 1.00 Launching Gird of size ( 2x2 ) with Blocks of size (2 x 2) Block[0][0] : Thread[0][0] : Product = 8.00 Block[0][0] : Thread[1][0] : Product = 8.00 Block[0][0] : Thread[0][1] : Product = 8.00 Block[0][0] : Thread[1][1] : Product = 8.00 Block[0][1] : Thread[0][0] : Product = 8.00 Block[0][1] : Thread[1][0] : Product = 8.00 Block[0][1] : Thread[0][1] : Product = 8.00 Block[0][1] : Thread[1][1] : Product = 8.00 Block[1][1] : Thread[0][0] : Product = 8.00 Block[1][1] : Thread[1][0] : Product = 8.00 Block[1][1] : Thread[0][1] : Product = 8.00 Block[1][1] : Thread[1][1] : Product = 8.00 Block[1][0] : Thread[0][0] : Product = 8.00 Block[1][0] : Thread[1][0] : Product = 8.00 Block[1][0] : Thread[0][1] : Product = 8.00 Block[1][0] : Thread[1][1] : Product = 8.00 The Product of A and B is: 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 8.00 */
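Since this program only prints the product, a small host-side check against a naive CPU triple loop makes correctness explicit. The helper below is an illustrative addition, not part of either listing; it would be called after the final device-to-host copy as verifyMatMul(hA, hB, hC, N).

#include <math.h>   // fabsf

// Illustrative host-side check of the device product against a naive CPU one.
int verifyMatMul(const float *A, const float *B, const float *C, int n)
{
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            float sum = 0.0f;
            for (int k = 0; k < n; ++k) {
                sum += A[row*n + k] * B[k*n + col];
            }
            if (fabsf(C[row*n + col] - sum) > 1e-3f) {
                return 0;   // mismatch found
            }
        }
    }
    return 1;               // every element matches
}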
f0a56042d02499a50986584a99efcc889df9604a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Alfred Shaker //November 13th 2015 //Tiled matrix multiplication #include <stdlib.h> #include <stdio.h> //tile dimention #define TILE_DIM 32 //kernel function __global__ void tileMatMul(float* matA, float* matB, float* matC, int aRows, int aCols, int bRows, int bCols, int cRows, int cCols) { //define row and column values int Row = blockIdx.y * TILE_DIM + threadIdx.y; int Col = blockIdx.x * TILE_DIM + threadIdx.x; //shared memory arrays __shared__ float sharedMatA[TILE_DIM][TILE_DIM]; __shared__ float sharedMatB[TILE_DIM][TILE_DIM]; float cResultValue = 0.0; //calculate tiled matrix multiplication on shared memory for(int i = 0; i < (aCols-1)/TILE_DIM+1; ++i) { if(Row < aRows && i*TILE_DIM+threadIdx.x < aCols) { sharedMatA[threadIdx.y][threadIdx.x] = matA[Row*aCols + i*TILE_DIM+threadIdx.x]; } else sharedMatA[threadIdx.y][threadIdx.x] = 0.0; if(Col < bCols && i*TILE_DIM+threadIdx.y < cRows) sharedMatB[threadIdx.y][threadIdx.x] = matB[(i*TILE_DIM+threadIdx.y)*bCols+Col]; else sharedMatB[threadIdx.y][threadIdx.x] = 0.0; __syncthreads(); for(int j = 0; j < TILE_DIM; ++j) cResultValue += sharedMatA[threadIdx.y][j] * sharedMatB[j][threadIdx.x]; __syncthreads(); } //put the results in the result matrix if(Row < cRows && Col < cCols) matC[Row*cCols+Col] = cResultValue; } int main() { //define the host matrices float *hMatA, *hMatB, *hMatC; //define device matrices float *dMatA, *dMatB, *dMatC; //define matrix dimentions int aRows = 512; int aCols = 512; int bRows = 512; int bCols = 512; int cRows, cCols; //allocate space for host matrices hMatA = (float *) malloc(sizeof(float)*aRows*aCols); hMatB = (float *) malloc(sizeof(float)*bRows*bCols); //fill up the matrices with reamdom float values //between 0.0 and 1.0 for(int i = 0; i < aRows*aCols; ++i) { hMatA[i] = (float)rand()/(float)(RAND_MAX/1.0); hMatB[i] = (float)rand()/(float)(RAND_MAX/1.0); } //define the dimentions for the result variable cRows = aRows; cCols = bCols; //allocate host result matrix hMatC = (float *) malloc(sizeof(float)*cRows*cCols); //cuda alloate the device matrices hipMalloc((void**)&dMatA, sizeof(float)*aRows*aCols); hipMalloc((void**)&dMatB, sizeof(float)*bRows*bCols); hipMalloc((void**)&dMatC, sizeof(float)*cRows*cCols); //copy data from host to device matrices hipMemcpy(dMatA, hMatA, sizeof(float)*aRows*aCols, hipMemcpyHostToDevice); hipMemcpy(dMatB, hMatB, sizeof(float)*bRows*bCols, hipMemcpyHostToDevice); //define grid and block dimentions dim3 dimGrid((cCols - 1)/TILE_DIM+1, (cRows - 1)/TILE_DIM+1, 1); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); //call kernel function hipLaunchKernelGGL(( tileMatMul), dim3(dimGrid),dim3(dimBlock), 0, 0, dMatA, dMatB, dMatC, aRows, aCols, bRows, bCols, cRows, cCols); //sync the threads hipDeviceSynchronize(); //copy result from device to host hipMemcpy(hMatC, dMatC, sizeof(float)*cRows*cCols, hipMemcpyDeviceToHost); //print first 100 results for(int q = 0; q < 100; ++q) { printf("Result matrix #%d: %f\n",q, hMatC[q]); } //free device variables hipFree(dMatA); hipFree(dMatB); hipFree(dMatC); //free host variables free(hMatA); free(hMatB); free(hMatC); return 0; }
f0a56042d02499a50986584a99efcc889df9604a.cu
//Alfred Shaker //November 13th 2015 //Tiled matrix multiplication #include <stdlib.h> #include <stdio.h> //tile dimention #define TILE_DIM 32 //kernel function __global__ void tileMatMul(float* matA, float* matB, float* matC, int aRows, int aCols, int bRows, int bCols, int cRows, int cCols) { //define row and column values int Row = blockIdx.y * TILE_DIM + threadIdx.y; int Col = blockIdx.x * TILE_DIM + threadIdx.x; //shared memory arrays __shared__ float sharedMatA[TILE_DIM][TILE_DIM]; __shared__ float sharedMatB[TILE_DIM][TILE_DIM]; float cResultValue = 0.0; //calculate tiled matrix multiplication on shared memory for(int i = 0; i < (aCols-1)/TILE_DIM+1; ++i) { if(Row < aRows && i*TILE_DIM+threadIdx.x < aCols) { sharedMatA[threadIdx.y][threadIdx.x] = matA[Row*aCols + i*TILE_DIM+threadIdx.x]; } else sharedMatA[threadIdx.y][threadIdx.x] = 0.0; if(Col < bCols && i*TILE_DIM+threadIdx.y < cRows) sharedMatB[threadIdx.y][threadIdx.x] = matB[(i*TILE_DIM+threadIdx.y)*bCols+Col]; else sharedMatB[threadIdx.y][threadIdx.x] = 0.0; __syncthreads(); for(int j = 0; j < TILE_DIM; ++j) cResultValue += sharedMatA[threadIdx.y][j] * sharedMatB[j][threadIdx.x]; __syncthreads(); } //put the results in the result matrix if(Row < cRows && Col < cCols) matC[Row*cCols+Col] = cResultValue; } int main() { //define the host matrices float *hMatA, *hMatB, *hMatC; //define device matrices float *dMatA, *dMatB, *dMatC; //define matrix dimentions int aRows = 512; int aCols = 512; int bRows = 512; int bCols = 512; int cRows, cCols; //allocate space for host matrices hMatA = (float *) malloc(sizeof(float)*aRows*aCols); hMatB = (float *) malloc(sizeof(float)*bRows*bCols); //fill up the matrices with reamdom float values //between 0.0 and 1.0 for(int i = 0; i < aRows*aCols; ++i) { hMatA[i] = (float)rand()/(float)(RAND_MAX/1.0); hMatB[i] = (float)rand()/(float)(RAND_MAX/1.0); } //define the dimentions for the result variable cRows = aRows; cCols = bCols; //allocate host result matrix hMatC = (float *) malloc(sizeof(float)*cRows*cCols); //cuda alloate the device matrices cudaMalloc((void**)&dMatA, sizeof(float)*aRows*aCols); cudaMalloc((void**)&dMatB, sizeof(float)*bRows*bCols); cudaMalloc((void**)&dMatC, sizeof(float)*cRows*cCols); //copy data from host to device matrices cudaMemcpy(dMatA, hMatA, sizeof(float)*aRows*aCols, cudaMemcpyHostToDevice); cudaMemcpy(dMatB, hMatB, sizeof(float)*bRows*bCols, cudaMemcpyHostToDevice); //define grid and block dimentions dim3 dimGrid((cCols - 1)/TILE_DIM+1, (cRows - 1)/TILE_DIM+1, 1); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); //call kernel function tileMatMul<<<dimGrid,dimBlock>>>(dMatA, dMatB, dMatC, aRows, aCols, bRows, bCols, cRows, cCols); //sync the threads cudaThreadSynchronize(); //copy result from device to host cudaMemcpy(hMatC, dMatC, sizeof(float)*cRows*cCols, cudaMemcpyDeviceToHost); //print first 100 results for(int q = 0; q < 100; ++q) { printf("Result matrix #%d: %f\n",q, hMatC[q]); } //free device variables cudaFree(dMatA); cudaFree(dMatB); cudaFree(dMatC); //free host variables free(hMatA); free(hMatB); free(hMatC); return 0; }
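To see what the shared-memory tiling buys over a naive kernel, the launch can be bracketed with CUDA events. The fragment below is an illustrative drop-in for the kernel call inside main (it reuses the existing dMatA/dMatB/dMatC and dimGrid/dimBlock variables and is not part of the original file).

// Time the tiled kernel with CUDA events (illustrative; replaces the plain launch in main).
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start, 0);
tileMatMul<<<dimGrid, dimBlock>>>(dMatA, dMatB, dMatC,
                                  aRows, aCols, bRows, bCols, cRows, cCols);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);   // wait for the kernel so the elapsed time is meaningful

float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, start, stop);
printf("tileMatMul took %f ms\n", elapsed_ms);

cudaEventDestroy(start);
cudaEventDestroy(stop);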
695b991d8822579e647f137011d9a9c2ca8dce46.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * Week 3 * Parallel Programming * 2011-2012 * University of Birmingham * * This is a first step towards implementing "parallel reduce". * Reducing means using an operation to aggregate the values of * a data type, such as an array or a list. * * For example, to calculate the sum we aggregate addition: * a1 + a2 + a3 + a4 ... * To calculate the maximum we aggregate the max operation: * max (a1, max(a2, max(a3, ... * Note that the order in which the device map, which is parallel, * and the host map, which is sequential, will differ, therefore the * operation needs to be associative. * Operations such as +, * or max are associative, but functions of * two arguments, in general, are not! */ using namespace std; const int ITERS = 500; /* * Optimised GPU convolution kernel, adapted from the reference CPU implementation at http://www.songho.ca/dsp/convolution/convolution.html */ __global__ void convolve_optimised(float* data_in, float* data_out, float* kernel, int kernelSize, int BLOCK_SIZE) { int tx = threadIdx.x; int bk = blockIdx.x; extern __shared__ float data_in_shared[]; int pos = (bk * BLOCK_SIZE) + tx; data_in_shared[tx] = data_in[pos]; if(tx == 0){ for(int i = 0; i < kernelSize - 1; i++){ data_in_shared[BLOCK_SIZE + i] = data_in[(bk * BLOCK_SIZE) + BLOCK_SIZE + i]; } } __syncthreads(); data_out[pos] = 0; for(int i = 0; i < kernelSize; i++){ data_out[pos] += kernel[i] * data_in_shared[tx + i]; } }
695b991d8822579e647f137011d9a9c2ca8dce46.cu
#include "includes.h" /* * Week 3 * Parallel Programming * 2011-2012 * University of Birmingham * * This is a first step towards implementing "parallel reduce". * Reducing means using an operation to aggregate the values of * a data type, such an array or a list. * * For example, to calculate the sum we aggregate addition: * a1 + a2 + a3 + a4 ... * To calculate the maximum we aggregate the max operation: * max (a1, max(a2, max(a3, ... * Note that the order in which the device map, which is parallel, * and the host map, which is sequential, will differ, therefore the * operation needs to be associative. * Operations such as +, * or max are associative, but function of * two arguments, in general, are not! */ using namespace std; const int ITERS = 500; /* * Reference CPU implementation, taken from http://www.songho.ca/dsp/convolution/convolution.html */ __global__ void convolve_optimised(float* data_in, float* data_out, float* kernel, int kernelSize, int BLOCK_SIZE) { int tx = threadIdx.x; int bk = blockIdx.x; extern __shared__ float data_in_shared[]; int pos = (bk * BLOCK_SIZE) + tx; data_in_shared[tx] = data_in[pos]; if(tx == 0){ for(int i = 0; i < kernelSize - 1; i++){ data_in_shared[BLOCK_SIZE + i] = data_in[(bk * BLOCK_SIZE) + BLOCK_SIZE + i]; } } __syncthreads(); data_out[pos] = 0; for(int i = 0; i < kernelSize; i++){ data_out[pos] += kernel[i] * data_in_shared[tx + i]; } }
8724d80be151e50c1e10f377e2b47f63f755e7f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "color_ramps.h" #include "math.h" namespace dart { // -=-=-=-=-=-=-=-=-=- helper -=-=-=-=-=-=-=-=-=- static inline __host__ __device__ unsigned char clamp(int c) { return min(max(0,c),255); } inline __host__ __device__ uchar3 hsv2rgb(float h, float s, float v) { float c = v*s; float hPrime = h/60.0f; float x = c*(1 - fabs(fmodf(hPrime,2) - 1)); float m = v-c; int hPrimeInt = hPrime; switch (hPrimeInt) { case 0: return make_uchar3(255*(c+m),255*(x+m),255*(m)); case 1: return make_uchar3(255*(x+m),255*(c+m),255*(m)); case 2: return make_uchar3(255*(m),255*(c+m),255*(x+m)); case 3: return make_uchar3(255*(m),255*(x+m),255*(c+m)); case 4: return make_uchar3(255*(x+m),255*(m),255*(c+m)); case 5: return make_uchar3(255*(c+m),255*(m),255*(x+m)); } return make_uchar3(0,0,0); } // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- __global__ void gpu_colorRampHeatMap(uchar3 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar3 & imgVal = colored[index]; if (isnan(vals[index])) { imgVal = make_uchar3(0,0,0); return; } const float normVal = (vals[index] - minVal)/(maxVal-minVal); if (normVal < 0.25) { imgVal = make_uchar3(0,clamp(255*(normVal/0.25)),255); } else if (normVal < 0.5) { imgVal = make_uchar3(0,255,clamp(255*((0.5-normVal)/0.25))); } else if (normVal < 0.75) { imgVal = make_uchar3(clamp(255*((normVal - 0.5)/0.25)),255,0); } else { imgVal = make_uchar3(255,clamp(255*(1.0-normVal)/0.25),0); } } __global__ void gpu_colorRampHeatMap(uchar4 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar4 & imgVal = colored[index]; if (isnan(vals[index])) { imgVal = make_uchar4(0,0,0,0); return; } const float normVal = (vals[index] - minVal)/(maxVal-minVal); if (normVal < 0.25) { imgVal = make_uchar4(0,clamp(255*(normVal/0.25)),255,255); } else if (normVal < 0.5) { imgVal = make_uchar4(0,255,clamp(255*((0.5-normVal)/0.25)),255); } else if (normVal < 0.75) { imgVal = make_uchar4(clamp(255*((normVal - 0.5)/0.25)),255,0,255); } else { imgVal = make_uchar4(255,clamp(255*(1.0-normVal)/0.25),0,255); } } __global__ void gpu_colorRampHeatMapUnsat(uchar3 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar3 & imgVal = colored[index]; if (isnan(vals[index])) { imgVal = make_uchar3(255,255,255); return; } const float normVal = fmaxf(0,fminf((vals[index] - minVal)/(maxVal-minVal),1)); const float t = normVal == 1.0 ? 
1.0 : fmodf(normVal,0.25)*4; uchar3 a, b; if (normVal < 0.25) { b = make_uchar3(32,191,139); a = make_uchar3(0x18,0x62,0x93); } else if (normVal < 0.5) { b = make_uchar3(241,232,137); a = make_uchar3(32,191,139); } else if (normVal < 0.75) { b = make_uchar3(198,132,63); a = make_uchar3(241,232,137); } else { b = make_uchar3(0xc0,0x43,0x36); a = make_uchar3(198,132,63); } imgVal = make_uchar3((1-t)*a.x + t*b.x, (1-t)*a.y + t*b.y, (1-t)*a.z + t*b.z); } __global__ void gpu_colorRampHeatMapUnsat(uchar4 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar4 & imgVal = colored[index]; if (isnan(vals[index])) { imgVal = make_uchar4(0,0,0,0); return; } const float normVal = fmaxf(0,fminf((vals[index] - minVal)/(maxVal-minVal),1)); const float t = normVal == 1.0 ? 1.0 : fmodf(normVal,0.25)*4; uchar3 a, b; if (normVal < 0.25) { b = make_uchar3(32,191,139); a = make_uchar3(0x18,0x62,0x93); } else if (normVal < 0.5) { b = make_uchar3(241,232,137); a = make_uchar3(32,191,139); } else if (normVal < 0.75) { b = make_uchar3(198,132,63); a = make_uchar3(241,232,137); } else { b = make_uchar3(0xc0,0x43,0x36); a = make_uchar3(198,132,63); } imgVal = make_uchar4((1-t)*a.x + t*b.x, (1-t)*a.y + t*b.y, (1-t)*a.z + t*b.z,255); } template <bool showZeroLevel> __global__ void gpu_colorRampTopographic(uchar4 * colored, const float * vals, const int width, const int height, const float lineThickness, const float lineSpacing) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar4 & imgVal = colored[index]; if (fabs(vals[index]) < 1.5*lineThickness) { if (showZeroLevel) { float g = clamp(2*255*(fabs(vals[index])-lineThickness)/lineThickness); imgVal = make_uchar4(g,g,g,255); } else { imgVal = make_uchar4(255,255,255,255); } } else { float c = fabs(fmodf(fabs(vals[index])+lineSpacing/2,lineSpacing)-lineSpacing/2); if (c < lineThickness ) { float g; if (showZeroLevel) { g = clamp(192+64*c/lineThickness); } else { g = clamp(64+192*c/lineThickness); } imgVal = make_uchar4(g,g,g,255); } else { imgVal = make_uchar4(255,255,255,255); } } } template <bool norm> __global__ void gpu_colorRamp2DGradient(uchar4 * colored, const float2 * grad, const int width, const int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; float M_PI = std::acos(-1.0); if (x >= width || y >= height) { return; } const int index = x + y*width; uchar4 &imgVal = colored[index]; float2 g = grad[index]; if (norm) { float len = sqrtf(g.x*g.x + g.y*g.y); g = make_float2(g.x/len,g.y/len); } // uchar3 rgb = hsv2rgb(180+180*atan2(g.x,g.y)/M_PI,1,1); uchar3 rgb = hsv2rgb(180+180*atan2(g.x,g.y)/M_PI,1,1); imgVal = make_uchar4(rgb.x,rgb.y,rgb.z,255); } template <bool norm> __global__ void gpu_colorRamp3DGradient(uchar4 * colored, const float3 * grad, const int width, const int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar4 & imgVal = colored[index]; float3 g = grad[index]; if (norm) { float len = sqrtf(g.x*g.x+g.y*g.y+g.z*g.z); g = make_float3(g.x/len,g.y/len,g.z/len); } imgVal = 
make_uchar4(clamp(128-128*g.x),clamp(128-128*g.y),clamp(128-128*g.z),255); } // -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=- void colorRampHeatMap(uchar3 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); hipLaunchKernelGGL(( gpu_colorRampHeatMap), dim3(grid),dim3(block), 0, 0, colored,vals,width,height,minVal,maxVal); } void colorRampHeatMap(uchar4 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); hipLaunchKernelGGL(( gpu_colorRampHeatMap), dim3(grid),dim3(block), 0, 0, colored,vals,width,height,minVal,maxVal); } void colorRampHeatMapUnsat(uchar3 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); hipLaunchKernelGGL(( gpu_colorRampHeatMapUnsat), dim3(grid),dim3(block), 0, 0, colored,vals,width,height,minVal,maxVal); } void colorRampHeatMapUnsat(uchar4 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); hipLaunchKernelGGL(( gpu_colorRampHeatMapUnsat), dim3(grid),dim3(block), 0, 0, colored,vals,width,height,minVal,maxVal); } void colorRampTopographic(uchar4 * colored, const float * vals, const int width, const int height, const float lineThickness, const float lineSpacing, const bool showZeroLevel) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); if (showZeroLevel) { hipLaunchKernelGGL(( gpu_colorRampTopographic<true>), dim3(grid),dim3(block), 0, 0, colored,vals,width,height,lineThickness,lineSpacing); } else { hipLaunchKernelGGL(( gpu_colorRampTopographic<false>), dim3(grid),dim3(block), 0, 0, colored,vals,width,height,lineThickness,lineSpacing); } } void colorRamp2DGradient(uchar4 * color, const float2 * grad, const int width, const int height, const bool normalize) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); if (normalize) { hipLaunchKernelGGL(( gpu_colorRamp2DGradient<true>), dim3(grid),dim3(block), 0, 0, color,grad,width,height); } else { hipLaunchKernelGGL(( gpu_colorRamp2DGradient<false>), dim3(grid),dim3(block), 0, 0, color,grad,width,height); } } void colorRamp3DGradient(uchar4 * color, const float3 * grad, const int width, const int height, const bool normalize) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); if (normalize) { hipLaunchKernelGGL(( gpu_colorRamp3DGradient<true>), dim3(grid),dim3(block), 0, 0, color,grad,width,height); } else { hipLaunchKernelGGL(( gpu_colorRamp3DGradient<false>), dim3(grid),dim3(block), 0, 0, color,grad,width,height); } } }
8724d80be151e50c1e10f377e2b47f63f755e7f2.cu
#include "color_ramps.h" #include "math.h" namespace dart { // -=-=-=-=-=-=-=-=-=- helper -=-=-=-=-=-=-=-=-=- static inline __host__ __device__ unsigned char clamp(int c) { return min(max(0,c),255); } inline __host__ __device__ uchar3 hsv2rgb(float h, float s, float v) { float c = v*s; float hPrime = h/60.0f; float x = c*(1 - fabs(fmodf(hPrime,2) - 1)); float m = v-c; int hPrimeInt = hPrime; switch (hPrimeInt) { case 0: return make_uchar3(255*(c+m),255*(x+m),255*(m)); case 1: return make_uchar3(255*(x+m),255*(c+m),255*(m)); case 2: return make_uchar3(255*(m),255*(c+m),255*(x+m)); case 3: return make_uchar3(255*(m),255*(x+m),255*(c+m)); case 4: return make_uchar3(255*(x+m),255*(m),255*(c+m)); case 5: return make_uchar3(255*(c+m),255*(m),255*(x+m)); } return make_uchar3(0,0,0); } // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- __global__ void gpu_colorRampHeatMap(uchar3 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar3 & imgVal = colored[index]; if (isnan(vals[index])) { imgVal = make_uchar3(0,0,0); return; } const float normVal = (vals[index] - minVal)/(maxVal-minVal); if (normVal < 0.25) { imgVal = make_uchar3(0,clamp(255*(normVal/0.25)),255); } else if (normVal < 0.5) { imgVal = make_uchar3(0,255,clamp(255*((0.5-normVal)/0.25))); } else if (normVal < 0.75) { imgVal = make_uchar3(clamp(255*((normVal - 0.5)/0.25)),255,0); } else { imgVal = make_uchar3(255,clamp(255*(1.0-normVal)/0.25),0); } } __global__ void gpu_colorRampHeatMap(uchar4 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar4 & imgVal = colored[index]; if (isnan(vals[index])) { imgVal = make_uchar4(0,0,0,0); return; } const float normVal = (vals[index] - minVal)/(maxVal-minVal); if (normVal < 0.25) { imgVal = make_uchar4(0,clamp(255*(normVal/0.25)),255,255); } else if (normVal < 0.5) { imgVal = make_uchar4(0,255,clamp(255*((0.5-normVal)/0.25)),255); } else if (normVal < 0.75) { imgVal = make_uchar4(clamp(255*((normVal - 0.5)/0.25)),255,0,255); } else { imgVal = make_uchar4(255,clamp(255*(1.0-normVal)/0.25),0,255); } } __global__ void gpu_colorRampHeatMapUnsat(uchar3 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar3 & imgVal = colored[index]; if (isnan(vals[index])) { imgVal = make_uchar3(255,255,255); return; } const float normVal = fmaxf(0,fminf((vals[index] - minVal)/(maxVal-minVal),1)); const float t = normVal == 1.0 ? 
1.0 : fmodf(normVal,0.25)*4; uchar3 a, b; if (normVal < 0.25) { b = make_uchar3(32,191,139); a = make_uchar3(0x18,0x62,0x93); } else if (normVal < 0.5) { b = make_uchar3(241,232,137); a = make_uchar3(32,191,139); } else if (normVal < 0.75) { b = make_uchar3(198,132,63); a = make_uchar3(241,232,137); } else { b = make_uchar3(0xc0,0x43,0x36); a = make_uchar3(198,132,63); } imgVal = make_uchar3((1-t)*a.x + t*b.x, (1-t)*a.y + t*b.y, (1-t)*a.z + t*b.z); } __global__ void gpu_colorRampHeatMapUnsat(uchar4 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar4 & imgVal = colored[index]; if (isnan(vals[index])) { imgVal = make_uchar4(0,0,0,0); return; } const float normVal = fmaxf(0,fminf((vals[index] - minVal)/(maxVal-minVal),1)); const float t = normVal == 1.0 ? 1.0 : fmodf(normVal,0.25)*4; uchar3 a, b; if (normVal < 0.25) { b = make_uchar3(32,191,139); a = make_uchar3(0x18,0x62,0x93); } else if (normVal < 0.5) { b = make_uchar3(241,232,137); a = make_uchar3(32,191,139); } else if (normVal < 0.75) { b = make_uchar3(198,132,63); a = make_uchar3(241,232,137); } else { b = make_uchar3(0xc0,0x43,0x36); a = make_uchar3(198,132,63); } imgVal = make_uchar4((1-t)*a.x + t*b.x, (1-t)*a.y + t*b.y, (1-t)*a.z + t*b.z,255); } template <bool showZeroLevel> __global__ void gpu_colorRampTopographic(uchar4 * colored, const float * vals, const int width, const int height, const float lineThickness, const float lineSpacing) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar4 & imgVal = colored[index]; if (fabs(vals[index]) < 1.5*lineThickness) { if (showZeroLevel) { float g = clamp(2*255*(fabs(vals[index])-lineThickness)/lineThickness); imgVal = make_uchar4(g,g,g,255); } else { imgVal = make_uchar4(255,255,255,255); } } else { float c = fabs(fmodf(fabs(vals[index])+lineSpacing/2,lineSpacing)-lineSpacing/2); if (c < lineThickness ) { float g; if (showZeroLevel) { g = clamp(192+64*c/lineThickness); } else { g = clamp(64+192*c/lineThickness); } imgVal = make_uchar4(g,g,g,255); } else { imgVal = make_uchar4(255,255,255,255); } } } template <bool norm> __global__ void gpu_colorRamp2DGradient(uchar4 * colored, const float2 * grad, const int width, const int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; float M_PI = std::acos(-1.0); if (x >= width || y >= height) { return; } const int index = x + y*width; uchar4 &imgVal = colored[index]; float2 g = grad[index]; if (norm) { float len = sqrtf(g.x*g.x + g.y*g.y); g = make_float2(g.x/len,g.y/len); } // uchar3 rgb = hsv2rgb(180+180*atan2(g.x,g.y)/M_PI,1,1); uchar3 rgb = hsv2rgb(180+180*atan2(g.x,g.y)/M_PI,1,1); imgVal = make_uchar4(rgb.x,rgb.y,rgb.z,255); } template <bool norm> __global__ void gpu_colorRamp3DGradient(uchar4 * colored, const float3 * grad, const int width, const int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; uchar4 & imgVal = colored[index]; float3 g = grad[index]; if (norm) { float len = sqrtf(g.x*g.x+g.y*g.y+g.z*g.z); g = make_float3(g.x/len,g.y/len,g.z/len); } imgVal = 
make_uchar4(clamp(128-128*g.x),clamp(128-128*g.y),clamp(128-128*g.z),255); } // -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=- void colorRampHeatMap(uchar3 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_colorRampHeatMap<<<grid,block>>>(colored,vals,width,height,minVal,maxVal); } void colorRampHeatMap(uchar4 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_colorRampHeatMap<<<grid,block>>>(colored,vals,width,height,minVal,maxVal); } void colorRampHeatMapUnsat(uchar3 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_colorRampHeatMapUnsat<<<grid,block>>>(colored,vals,width,height,minVal,maxVal); } void colorRampHeatMapUnsat(uchar4 * colored, const float * vals, const int width, const int height, const float minVal, const float maxVal) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_colorRampHeatMapUnsat<<<grid,block>>>(colored,vals,width,height,minVal,maxVal); } void colorRampTopographic(uchar4 * colored, const float * vals, const int width, const int height, const float lineThickness, const float lineSpacing, const bool showZeroLevel) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); if (showZeroLevel) { gpu_colorRampTopographic<true><<<grid,block>>>(colored,vals,width,height,lineThickness,lineSpacing); } else { gpu_colorRampTopographic<false><<<grid,block>>>(colored,vals,width,height,lineThickness,lineSpacing); } } void colorRamp2DGradient(uchar4 * color, const float2 * grad, const int width, const int height, const bool normalize) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); if (normalize) { gpu_colorRamp2DGradient<true><<<grid,block>>>(color,grad,width,height); } else { gpu_colorRamp2DGradient<false><<<grid,block>>>(color,grad,width,height); } } void colorRamp3DGradient(uchar4 * color, const float3 * grad, const int width, const int height, const bool normalize) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); if (normalize) { gpu_colorRamp3DGradient<true><<<grid,block>>>(color,grad,width,height); } else { gpu_colorRamp3DGradient<false><<<grid,block>>>(color,grad,width,height); } } }
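The wrappers above take raw device pointers and launch asynchronously without any error checking, so a caller only needs two device buffers of matching size. A minimal usage sketch of the uchar4 heat-map ramp is shown below; the buffer names, image size, and value range are chosen purely for illustration.

// Illustrative use of dart::colorRampHeatMap; buffer names and ranges are made up.
const int width = 640, height = 480;
float  *d_values;    // e.g. a depth or error map already resident on the device
uchar4 *d_colored;   // RGBA output for visualization
cudaMalloc(&d_values,  width * height * sizeof(float));
cudaMalloc(&d_colored, width * height * sizeof(uchar4));
// ... fill d_values ...

dart::colorRampHeatMap(d_colored, d_values, width, height,
                       /*minVal=*/0.5f, /*maxVal=*/5.0f);
cudaDeviceSynchronize();   // the wrapper returns before the kernel finishes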
e7dac1a6dd941fb8ae39f1e0bc110c547ef05496.hip
// !!! This is a file automatically generated by hipify!!! /* * Software License Agreement (BSD License) * * Copyright (c) 2011, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Author: Anatoly Baskeheev, Itseez Ltd, ([email protected]) */ #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include "internal.hpp" #include "hip/hip_runtime.h" using namespace std; using namespace thrust; namespace pcl { namespace device { struct InSphere { float x_, y_, z_, radius2_; InSphere(float x, float y, float z, float radius) : x_(x), y_(y), z_(z), radius2_(radius * radius) {} __device__ __host__ __forceinline__ bool operator()(const float3& point) const { float dx = point.x - x_; float dy = point.y - y_; float dz = point.z - z_; return (dx * dx + dy * dy + dz * dz) < radius2_; } __device__ __host__ __forceinline__ bool operator()(const float4& point) const { return (*this)(make_float3(point.x, point.y, point.z)); } }; } } #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION == 4000 //workaround of bug in Thrust typedef thrust::counting_iterator<int, thrust::use_default, thrust::use_default, thrust::use_default> It; template<> struct thrust::iterator_difference<It> { typedef int type; }; #endif void pcl::device::bruteForceRadiusSearch(const OctreeImpl::PointCloud& cloud, const OctreeImpl::PointType& query, float radius, DeviceArray<int>& result, DeviceArray<int>& buffer) { typedef OctreeImpl::PointType PointType; if (buffer.size() < cloud.size()) buffer.create(cloud.size()); InSphere cond(query.x, query.y, query.z, radius); device_ptr<const PointType> cloud_ptr((const PointType*)cloud.ptr()); device_ptr<int> res_ptr(buffer.ptr()); counting_iterator<int> first(0); counting_iterator<int> last = first + cloud.size(); //main bottle neck is a kernel call overhead/allocs //work time for 871k points ~0.8ms int count = (int)(thrust::copy_if(first, last, cloud_ptr, res_ptr, cond) - res_ptr); result = DeviceArray<int>(buffer.ptr(), count); }
e7dac1a6dd941fb8ae39f1e0bc110c547ef05496.cu
/* * Software License Agreement (BSD License) * * Copyright (c) 2011, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Author: Anatoly Baskeheev, Itseez Ltd, ([email protected]) */ #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include "internal.hpp" #include "cuda.h" using namespace std; using namespace thrust; namespace pcl { namespace device { struct InSphere { float x_, y_, z_, radius2_; InSphere(float x, float y, float z, float radius) : x_(x), y_(y), z_(z), radius2_(radius * radius) {} __device__ __host__ __forceinline__ bool operator()(const float3& point) const { float dx = point.x - x_; float dy = point.y - y_; float dz = point.z - z_; return (dx * dx + dy * dy + dz * dz) < radius2_; } __device__ __host__ __forceinline__ bool operator()(const float4& point) const { return (*this)(make_float3(point.x, point.y, point.z)); } }; } } #if defined(CUDA_VERSION) && CUDA_VERSION == 4000 //workaround of bug in Thrust typedef thrust::counting_iterator<int, thrust::use_default, thrust::use_default, thrust::use_default> It; template<> struct thrust::iterator_difference<It> { typedef int type; }; #endif void pcl::device::bruteForceRadiusSearch(const OctreeImpl::PointCloud& cloud, const OctreeImpl::PointType& query, float radius, DeviceArray<int>& result, DeviceArray<int>& buffer) { typedef OctreeImpl::PointType PointType; if (buffer.size() < cloud.size()) buffer.create(cloud.size()); InSphere cond(query.x, query.y, query.z, radius); device_ptr<const PointType> cloud_ptr((const PointType*)cloud.ptr()); device_ptr<int> res_ptr(buffer.ptr()); counting_iterator<int> first(0); counting_iterator<int> last = first + cloud.size(); //main bottle neck is a kernel call overhead/allocs //work time for 871k points ~0.8ms int count = (int)(thrust::copy_if(first, last, cloud_ptr, res_ptr, cond) - res_ptr); result = DeviceArray<int>(buffer.ptr(), count); }
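The whole radius search above is one thrust::copy_if over a counting iterator: the point cloud acts as the stencil, InSphere as the predicate, and the output receives the indices of the points inside the sphere. A self-contained sketch of the same stencil pattern outside of PCL is given below; the cloud size, radius, and variable names are illustrative only.

#include <cuda_runtime.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>

// Same shape as the InSphere predicate above, reduced to plain float4 points.
struct InSphereDemo {
    float x_, y_, z_, radius2_;
    __host__ __device__ bool operator()(const float4& p) const {
        float dx = p.x - x_, dy = p.y - y_, dz = p.z - z_;
        return dx * dx + dy * dy + dz * dz < radius2_;
    }
};

int main() {
    thrust::device_vector<float4> cloud(1000, make_float4(0.f, 0.f, 0.f, 0.f));
    thrust::device_vector<int> indices(cloud.size());

    thrust::counting_iterator<int> first(0);
    thrust::counting_iterator<int> last = first + (int)cloud.size();

    InSphereDemo pred{0.f, 0.f, 0.f, 1.f};   // unit sphere around the origin (radius^2 = 1)
    // copy_if with a stencil: index i is kept whenever pred(cloud[i]) is true.
    int count = (int)(thrust::copy_if(first, last, cloud.begin(), indices.begin(), pred)
                      - indices.begin());
    return count == (int)cloud.size() ? 0 : 1;   // every zero point lies inside the unit sphere
}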
c42d9f304caa082f322daea7f88235f558eb9f5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <ATen/hip/HIPApplyUtils.cuh> #ifdef WITH_CUDA #include "../box_iou_rotated/box_iou_rotated_utils.h" #endif // TODO avoid this when pytorch supports "same directory" hipification #ifdef WITH_HIP #include "box_iou_rotated/box_iou_rotated_utils.h" #endif using namespace detectron2; namespace { int const threadsPerBlock = sizeof(unsigned long long) * 8; } template <typename T> __global__ void nms_rotated_cuda_kernel( const int n_boxes, const float iou_threshold, const T* dev_boxes, unsigned long long* dev_mask) { // nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); // Compared to nms_cuda_kernel, where each box is represented with 4 values // (x1, y1, x2, y2), each rotated box is represented with 5 values // (x_center, y_center, width, height, angle_degrees) here. __shared__ T block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const T* cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { // Instead of devIoU used by original horizontal nms, here // we use the single_box_iou_rotated function from box_iou_rotated_utils.h if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 5) > iou_threshold) { t |= 1ULL << i; } } const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } namespace detectron2 { at::Tensor nms_rotated_cuda( // input must be contiguous const at::Tensor& dets, const at::Tensor& scores, float iou_threshold) { // using scalar_t = float; AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor"); AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor"); at::hip::HIPGuardMasqueradingAsCUDA device_guard(dets.device()); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto dets_sorted = dets.index_select(0, order_t); auto dets_num = dets.size(0); const int col_blocks = at::cuda::ATenCeilDiv(static_cast<int>(dets_num), threadsPerBlock); at::Tensor mask = at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); dim3 blocks(col_blocks, col_blocks); dim3 threads(threadsPerBlock); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES( dets_sorted.scalar_type(), "nms_rotated_kernel_cuda", [&] { hipLaunchKernelGGL(( nms_rotated_cuda_kernel<scalar_t>), dim3(blocks), 
dim3(threads), 0, stream, dets_num, iou_threshold, dets_sorted.data_ptr<scalar_t>(), (unsigned long long*)mask.data_ptr<int64_t>()); }); at::Tensor mask_cpu = mask.to(at::kCPU); unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>(); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data_ptr<int64_t>(); int num_to_keep = 0; for (int i = 0; i < dets_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long* p = mask_host + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } AT_CUDA_CHECK(hipGetLastError()); return order_t.index( {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) .to(order_t.device(), keep.scalar_type())}); } } // namespace detectron2
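Aside from the runtime/header renames, the systematic difference between this listing and the .cu original that follows is the kernel-launch spelling. A minimal, self-contained sketch of that correspondence; the scale kernel, launch_scale wrapper, and the __HIPCC__ guard are illustrative choices, not taken from the sources.

#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#endif

__global__ void scale(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n) {
  dim3 threads(256);
  dim3 blocks((n + threads.x - 1) / threads.x);
#ifdef __HIPCC__
  // Spelling emitted by hipify: kernel, grid, block, shared-mem bytes, stream, then the arguments.
  hipLaunchKernelGGL(scale, blocks, threads, 0, 0, d_x, a, n);
#else
  // Native CUDA triple-chevron launch with the same grid, block, shared-mem and stream values.
  scale<<<blocks, threads, 0, 0>>>(d_x, a, n);
#endif
}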
c42d9f304caa082f322daea7f88235f558eb9f5c.cu
// Copyright (c) Facebook, Inc. and its affiliates. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #ifdef WITH_CUDA #include "../box_iou_rotated/box_iou_rotated_utils.h" #endif // TODO avoid this when pytorch supports "same directory" hipification #ifdef WITH_HIP #include "box_iou_rotated/box_iou_rotated_utils.h" #endif using namespace detectron2; namespace { int const threadsPerBlock = sizeof(unsigned long long) * 8; } template <typename T> __global__ void nms_rotated_cuda_kernel( const int n_boxes, const float iou_threshold, const T* dev_boxes, unsigned long long* dev_mask) { // nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); // Compared to nms_cuda_kernel, where each box is represented with 4 values // (x1, y1, x2, y2), each rotated box is represented with 5 values // (x_center, y_center, width, height, angle_degrees) here. __shared__ T block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const T* cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { // Instead of devIoU used by original horizontal nms, here // we use the single_box_iou_rotated function from box_iou_rotated_utils.h if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 5) > iou_threshold) { t |= 1ULL << i; } } const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } namespace detectron2 { at::Tensor nms_rotated_cuda( // input must be contiguous const at::Tensor& dets, const at::Tensor& scores, float iou_threshold) { // using scalar_t = float; AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor"); AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor"); at::cuda::CUDAGuard device_guard(dets.device()); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto dets_sorted = dets.index_select(0, order_t); auto dets_num = dets.size(0); const int col_blocks = at::cuda::ATenCeilDiv(static_cast<int>(dets_num), threadsPerBlock); at::Tensor mask = at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); dim3 blocks(col_blocks, col_blocks); dim3 threads(threadsPerBlock); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES( dets_sorted.scalar_type(), "nms_rotated_kernel_cuda", [&] { nms_rotated_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>( dets_num, iou_threshold, dets_sorted.data_ptr<scalar_t>(), (unsigned long long*)mask.data_ptr<int64_t>()); }); at::Tensor mask_cpu = 
mask.to(at::kCPU); unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>(); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data_ptr<int64_t>(); int num_to_keep = 0; for (int i = 0; i < dets_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long* p = mask_host + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } AT_CUDA_CHECK(cudaGetLastError()); return order_t.index( {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) .to(order_t.device(), keep.scalar_type())}); } } // namespace detectron2
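The kernel above only fills the pairwise-overlap bitmask; the greedy suppression itself runs on the host in the loop over remv. As a cross-check of that logic, here is a sequential reference of the same greedy NMS over score-sorted boxes. The IoU computation is passed in as a callable standing in for single_box_iou_rotated (whose implementation lives in box_iou_rotated_utils.h and is not shown here); the function name is illustrative.

#include <vector>

// Sequential greedy NMS over score-sorted boxes (5 floats per box), equivalent
// in outcome to decoding the 64-bit masks written by nms_rotated_cuda_kernel.
template <typename IouFn>
std::vector<int> nms_rotated_reference(const std::vector<float>& boxes_sorted,
                                       float iou_threshold, IouFn iou) {
  const int n = static_cast<int>(boxes_sorted.size() / 5);
  std::vector<bool> suppressed(n, false);
  std::vector<int> keep;
  for (int i = 0; i < n; ++i) {
    if (suppressed[i]) continue;
    keep.push_back(i);  // highest-scoring box not yet suppressed survives
    for (int j = i + 1; j < n; ++j) {
      // Suppress every lower-scoring box that overlaps the kept box too much.
      if (!suppressed[j] && iou(&boxes_sorted[i * 5], &boxes_sorted[j * 5]) > iou_threshold)
        suppressed[j] = true;
    }
  }
  return keep;  // indices into the sorted order, like the kernel's keep_out
}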
d15d4a782994e75a023b374f3add5315454ec3ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <torch/extension.h> #include <ATen/ATen.h> #include "fast_lsh_cumulation.h" #include "fast_lsh_cumulation_cuda.h" #include "common_cuda.h" #include "common.h" #include <vector> ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// std::vector<at::Tensor> fast_hash_ver1_kernel( at::Tensor query_mask, at::Tensor query_vector, at::Tensor key_mask, at::Tensor key_vector, int num_hash_f, int hash_code_len, bool use_cuda ) { int batch_size = query_vector.size(0); int num_query = query_vector.size(1); int num_key = key_vector.size(1); int vector_dim = query_vector.size(2); int num_hash_per_part = vector_dim / hash_code_len; int num_part = max(1, ceil_divide(num_hash_f, num_hash_per_part)); at::Tensor Dmat = 2 * at::randint(0, 2, {batch_size, 3, num_part, vector_dim}, query_mask.options()) - 1; at::Tensor query_hash_code = at::zeros({batch_size, num_query, num_hash_f}, query_mask.options()); at::Tensor key_hash_code = at::zeros({batch_size, num_key, num_hash_f}, key_mask.options()); int *query_mask_ptr = query_mask.data_ptr<int>(); float *query_vector_ptr = query_vector.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); float *key_vector_ptr = key_vector.data_ptr<float>(); int *Dmat_ptr = Dmat.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); if (use_cuda) { { dim3 threads(vector_dim); dim3 blocks(num_part, num_query, batch_size); int shared_mem = vector_dim * sizeof(float); hipLaunchKernelGGL(( fast_hash_ver1_cuda_kernel), dim3(blocks), dim3(threads), shared_mem, 0, query_mask_ptr, query_vector_ptr, Dmat_ptr, query_hash_code_ptr, batch_size, num_query, vector_dim, num_part, num_hash_f, hash_code_len ); } { dim3 threads(vector_dim); dim3 blocks(num_part, num_key, batch_size); int shared_mem = vector_dim * sizeof(float); hipLaunchKernelGGL(( fast_hash_ver1_cuda_kernel), dim3(blocks), dim3(threads), shared_mem, 0, key_mask_ptr, key_vector_ptr, Dmat_ptr, key_hash_code_ptr, batch_size, num_key, vector_dim, num_part, num_hash_f, hash_code_len ); } } return {query_hash_code, key_hash_code}; } at::Tensor lsh_cumulation_ver1_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); at::Tensor hashtable_value = at::empty({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int threads_x = WARP_SIZE; int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE; int block_x_step1 = num_key / threads_y; int block_x_step2 = num_query / threads_y; int block_y = batch_size; dim3 threads(threads_x, threads_y); dim3 blocks_step1(block_x_step1, block_y); dim3 blocks_step2(block_x_step2, block_y); int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *value_ptr = value.data_ptr<float>(); float 
*hashtable_value_ptr = hashtable_value.data_ptr<float>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) { hipMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float)); hipLaunchKernelGGL(( lsh_cumulation_ver1_step1_cuda_kernel), dim3(blocks_step1), dim3(threads), 0, 0, key_mask_ptr, key_hash_code_ptr, value_ptr, hashtable_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_key, value_dim, value_offset ); hipLaunchKernelGGL(( lsh_cumulation_ver1_step2_cuda_kernel), dim3(blocks_step2), dim3(threads), 0, 0, query_mask_ptr, query_hash_code_ptr, hashtable_value_ptr, cumulation_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_query, value_dim, value_offset ); } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver1_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor hashtable_value = at::zeros({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int threads_x = WARP_SIZE; int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE; int block_x_step1 = num_key / threads_y; int block_x_step2 = num_query / threads_y; int block_y = batch_size; dim3 threads(threads_x, threads_y); dim3 blocks_step1(block_x_step1, block_y); dim3 blocks_step2(block_x_step2, block_y); int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); float *value_ptr = value.data_ptr<float>(); float *hashtable_value_ptr = hashtable_value.data_ptr<float>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) { for (int weight_idx = 0; weight_idx < weight_dim; weight_idx++) { hipMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float)); hipLaunchKernelGGL(( lsh_weighted_cumulation_ver1_step1_cuda_kernel), dim3(blocks_step1), dim3(threads), 0, 0, key_mask_ptr, key_hash_code_ptr, key_weight_ptr, value_ptr, hashtable_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_key, value_dim, weight_dim, value_offset, weight_idx ); hipLaunchKernelGGL(( lsh_weighted_cumulation_ver1_step2_cuda_kernel), dim3(blocks_step2), dim3(threads), 0, 0, query_mask_ptr, query_hash_code_ptr, query_weight_ptr, hashtable_value_ptr, cumulation_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_query, value_dim, weight_dim, value_offset, weight_idx ); } } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver2_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool 
use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options()); at::Tensor key_sorted_idxes = at::zeros({batch_size, num_hash_f, num_key}, query_hash_code.options()); at::Tensor query_info = at::zeros({batch_size, num_query, 2, num_hash_f}, query_hash_code.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); float *value_ptr = value.data_ptr<float>(); int *count_sort_table_ptr = count_sort_table.data_ptr<int>(); int *key_sorted_idxes_ptr = key_sorted_idxes.data_ptr<int>(); int *query_info_ptr = query_info.data_ptr<int>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); { dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks_step13(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); dim3 blocks_step2(num_hash_f, batch_size); int shared_mem = hashtable_capacity * sizeof(float); hipLaunchKernelGGL(( count_sort_step1_cuda_kernel), dim3(blocks_step13), dim3(threads_step13), 0, 0, key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); hipLaunchKernelGGL(( count_sort_step2_cuda_kernel), dim3(blocks_step2), dim3(threads_step2), shared_mem, 0, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity ); hipLaunchKernelGGL(( count_sort_step3_cuda_kernel), dim3(blocks_step13), dim3(threads_step13), 0, 0, key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, key_sorted_idxes_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); } { dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); hipLaunchKernelGGL(( extract_query_info_cuda_kernel), dim3(blocks), dim3(threads), 0, 0, query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, query_info_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); } { dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); dim3 blocks(num_query, num_hash_f, batch_size); int shared_mem = (weight_dim + WARP_SIZE) * sizeof(float); hipLaunchKernelGGL(( lsh_weighted_cumulation_ver2_step2_cuda_kernel), dim3(blocks), dim3(threads), shared_mem, 0, query_mask_ptr, query_info_ptr, key_sorted_idxes_ptr, query_weight_ptr, key_weight_ptr, value_ptr, cumulation_value_ptr, batch_size, num_hash_f, num_query, num_key, value_dim, weight_dim ); } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver3_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); 
int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options()); at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options()); at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); float *value_ptr = value.data_ptr<float>(); int *count_sort_table_ptr = count_sort_table.data_ptr<int>(); int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>(); int *key_info_ptr = key_info.data_ptr<int>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); { dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); dim3 blocks_step2(num_hash_f, batch_size); int shared_mem = hashtable_capacity * sizeof(float); hipLaunchKernelGGL(( count_sort_step1_cuda_kernel), dim3(blocks_step13), dim3(threads_step13), 0, 0, query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); hipLaunchKernelGGL(( count_sort_step2_cuda_kernel), dim3(blocks_step2), dim3(threads_step2), shared_mem, 0, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity ); hipLaunchKernelGGL(( count_sort_step3_cuda_kernel), dim3(blocks_step13), dim3(threads_step13), 0, 0, query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, query_sorted_idxes_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); } { dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); hipLaunchKernelGGL(( extract_query_info_cuda_kernel), dim3(blocks), dim3(threads), 0, 0, key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, key_info_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); } { dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); dim3 blocks(num_key, num_hash_f, batch_size); int shared_mem = (weight_dim + value_dim + WARP_SIZE) * sizeof(float); hipLaunchKernelGGL(( lsh_weighted_cumulation_ver3_step2_cuda_kernel), dim3(blocks), dim3(threads), shared_mem, 0, query_sorted_idxes_ptr, key_mask_ptr, key_info_ptr, query_weight_ptr, key_weight_ptr, value_ptr, cumulation_value_ptr, batch_size, num_hash_f, num_query, num_key, value_dim, weight_dim ); } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver4_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, 
query_hash_code.options()); at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options()); at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); float *value_ptr = value.data_ptr<float>(); int *count_sort_table_ptr = count_sort_table.data_ptr<int>(); int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>(); int *key_info_ptr = key_info.data_ptr<int>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); { dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); dim3 blocks_step2(num_hash_f, batch_size); int shared_mem = hashtable_capacity * sizeof(float); hipLaunchKernelGGL(( count_sort_step1_cuda_kernel), dim3(blocks_step13), dim3(threads_step13), 0, 0, query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); hipLaunchKernelGGL(( count_sort_step2_cuda_kernel), dim3(blocks_step2), dim3(threads_step2), shared_mem, 0, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity ); hipLaunchKernelGGL(( count_sort_step3_cuda_kernel), dim3(blocks_step13), dim3(threads_step13), 0, 0, query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, query_sorted_idxes_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); } { dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); hipLaunchKernelGGL(( extract_query_info_cuda_kernel), dim3(blocks), dim3(threads), 0, 0, key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, key_info_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); } { dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); dim3 blocks(num_key, batch_size); int shared_mem = (weight_dim + value_dim + 2 * num_hash_f) * sizeof(float); hipLaunchKernelGGL(( lsh_weighted_cumulation_ver4_step2_cuda_kernel), dim3(blocks), dim3(threads), shared_mem, 0, query_sorted_idxes_ptr, key_mask_ptr, key_info_ptr, query_weight_ptr, key_weight_ptr, value_ptr, cumulation_value_ptr, batch_size, num_hash_f, num_query, num_key, value_dim, weight_dim ); } } return cumulation_value; }
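Every launch above is sized with WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK and ceil_divide, which come from common.h / common_cuda.h and are not part of this listing. The definitions below are assumptions, written down only so the launch geometry can be read off the wrappers; the real constants may differ (WARP_SIZE, for instance, is 64 on most AMD wavefronts).

// Assumed helper definitions -- not taken from the listing above.
#define WARP_SIZE 32                   // assumption; 64 on most AMD GPUs
#define OPTIMAL_THREADS_PER_BLOCK 256  // assumption

// Integer ceiling division, used e.g. to split num_hash_f hashes into parts.
inline int ceil_divide(int a, int b) { return (a + b - 1) / b; }

// With these values, the step-1 launch in lsh_cumulation_ver1_kernel becomes
//   threads = (32, 8)  and  blocks = (num_key / 8, batch_size),
// i.e. 8 keys per block and a WARP_SIZE-wide slice of the value dimension per
// pass (the outer loop advances value_offset by WARP_SIZE).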
d15d4a782994e75a023b374f3add5315454ec3ae.cu
#include <torch/extension.h> #include <ATen/ATen.h> #include "fast_lsh_cumulation.h" #include "fast_lsh_cumulation_cuda.h" #include "common_cuda.h" #include "common.h" #include <vector> ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// std::vector<at::Tensor> fast_hash_ver1_kernel( at::Tensor query_mask, at::Tensor query_vector, at::Tensor key_mask, at::Tensor key_vector, int num_hash_f, int hash_code_len, bool use_cuda ) { int batch_size = query_vector.size(0); int num_query = query_vector.size(1); int num_key = key_vector.size(1); int vector_dim = query_vector.size(2); int num_hash_per_part = vector_dim / hash_code_len; int num_part = max(1, ceil_divide(num_hash_f, num_hash_per_part)); at::Tensor Dmat = 2 * at::randint(0, 2, {batch_size, 3, num_part, vector_dim}, query_mask.options()) - 1; at::Tensor query_hash_code = at::zeros({batch_size, num_query, num_hash_f}, query_mask.options()); at::Tensor key_hash_code = at::zeros({batch_size, num_key, num_hash_f}, key_mask.options()); int *query_mask_ptr = query_mask.data_ptr<int>(); float *query_vector_ptr = query_vector.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); float *key_vector_ptr = key_vector.data_ptr<float>(); int *Dmat_ptr = Dmat.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); if (use_cuda) { { dim3 threads(vector_dim); dim3 blocks(num_part, num_query, batch_size); int shared_mem = vector_dim * sizeof(float); fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>( query_mask_ptr, query_vector_ptr, Dmat_ptr, query_hash_code_ptr, batch_size, num_query, vector_dim, num_part, num_hash_f, hash_code_len ); } { dim3 threads(vector_dim); dim3 blocks(num_part, num_key, batch_size); int shared_mem = vector_dim * sizeof(float); fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>( key_mask_ptr, key_vector_ptr, Dmat_ptr, key_hash_code_ptr, batch_size, num_key, vector_dim, num_part, num_hash_f, hash_code_len ); } } return {query_hash_code, key_hash_code}; } at::Tensor lsh_cumulation_ver1_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); at::Tensor hashtable_value = at::empty({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int threads_x = WARP_SIZE; int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE; int block_x_step1 = num_key / threads_y; int block_x_step2 = num_query / threads_y; int block_y = batch_size; dim3 threads(threads_x, threads_y); dim3 blocks_step1(block_x_step1, block_y); dim3 blocks_step2(block_x_step2, block_y); int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *value_ptr = value.data_ptr<float>(); float *hashtable_value_ptr = hashtable_value.data_ptr<float>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); for (int value_offset = 0; value_offset < 
value_dim; value_offset = value_offset + WARP_SIZE) { cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float)); lsh_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>( key_mask_ptr, key_hash_code_ptr, value_ptr, hashtable_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_key, value_dim, value_offset ); lsh_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>( query_mask_ptr, query_hash_code_ptr, hashtable_value_ptr, cumulation_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_query, value_dim, value_offset ); } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver1_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor hashtable_value = at::zeros({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int threads_x = WARP_SIZE; int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE; int block_x_step1 = num_key / threads_y; int block_x_step2 = num_query / threads_y; int block_y = batch_size; dim3 threads(threads_x, threads_y); dim3 blocks_step1(block_x_step1, block_y); dim3 blocks_step2(block_x_step2, block_y); int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); float *value_ptr = value.data_ptr<float>(); float *hashtable_value_ptr = hashtable_value.data_ptr<float>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) { for (int weight_idx = 0; weight_idx < weight_dim; weight_idx++) { cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float)); lsh_weighted_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>( key_mask_ptr, key_hash_code_ptr, key_weight_ptr, value_ptr, hashtable_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_key, value_dim, weight_dim, value_offset, weight_idx ); lsh_weighted_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>( query_mask_ptr, query_hash_code_ptr, query_weight_ptr, hashtable_value_ptr, cumulation_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_query, value_dim, weight_dim, value_offset, weight_idx ); } } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver2_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, 
hashtable_capacity}, query_hash_code.options()); at::Tensor key_sorted_idxes = at::zeros({batch_size, num_hash_f, num_key}, query_hash_code.options()); at::Tensor query_info = at::zeros({batch_size, num_query, 2, num_hash_f}, query_hash_code.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); float *value_ptr = value.data_ptr<float>(); int *count_sort_table_ptr = count_sort_table.data_ptr<int>(); int *key_sorted_idxes_ptr = key_sorted_idxes.data_ptr<int>(); int *query_info_ptr = query_info.data_ptr<int>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); { dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks_step13(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); dim3 blocks_step2(num_hash_f, batch_size); int shared_mem = hashtable_capacity * sizeof(float); count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>( key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>( count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity ); count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>( key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, key_sorted_idxes_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); } { dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); extract_query_info_cuda_kernel<<<blocks, threads>>>( query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, query_info_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); } { dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); dim3 blocks(num_query, num_hash_f, batch_size); int shared_mem = (weight_dim + WARP_SIZE) * sizeof(float); lsh_weighted_cumulation_ver2_step2_cuda_kernel<<<blocks, threads, shared_mem>>>( query_mask_ptr, query_info_ptr, key_sorted_idxes_ptr, query_weight_ptr, key_weight_ptr, value_ptr, cumulation_value_ptr, batch_size, num_hash_f, num_query, num_key, value_dim, weight_dim ); } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver3_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options()); at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options()); at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if 
(use_cuda) { int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); float *value_ptr = value.data_ptr<float>(); int *count_sort_table_ptr = count_sort_table.data_ptr<int>(); int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>(); int *key_info_ptr = key_info.data_ptr<int>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); { dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); dim3 blocks_step2(num_hash_f, batch_size); int shared_mem = hashtable_capacity * sizeof(float); count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>( query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>( count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity ); count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>( query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, query_sorted_idxes_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); } { dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); extract_query_info_cuda_kernel<<<blocks, threads>>>( key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, key_info_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); } { dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); dim3 blocks(num_key, num_hash_f, batch_size); int shared_mem = (weight_dim + value_dim + WARP_SIZE) * sizeof(float); lsh_weighted_cumulation_ver3_step2_cuda_kernel<<<blocks, threads, shared_mem>>>( query_sorted_idxes_ptr, key_mask_ptr, key_info_ptr, query_weight_ptr, key_weight_ptr, value_ptr, cumulation_value_ptr, batch_size, num_hash_f, num_query, num_key, value_dim, weight_dim ); } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver4_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options()); at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options()); at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); 
float *value_ptr = value.data_ptr<float>(); int *count_sort_table_ptr = count_sort_table.data_ptr<int>(); int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>(); int *key_info_ptr = key_info.data_ptr<int>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); { dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); dim3 blocks_step2(num_hash_f, batch_size); int shared_mem = hashtable_capacity * sizeof(float); count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>( query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>( count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity ); count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>( query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, query_sorted_idxes_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); } { dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); extract_query_info_cuda_kernel<<<blocks, threads>>>( key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, key_info_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); } { dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); dim3 blocks(num_key, batch_size); int shared_mem = (weight_dim + value_dim + 2 * num_hash_f) * sizeof(float); lsh_weighted_cumulation_ver4_step2_cuda_kernel<<<blocks, threads, shared_mem>>>( query_sorted_idxes_ptr, key_mask_ptr, key_info_ptr, query_weight_ptr, key_weight_ptr, value_ptr, cumulation_value_ptr, batch_size, num_hash_f, num_query, num_key, value_dim, weight_dim ); } } return cumulation_value; }
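For orientation, a hypothetical host-side caller of the wrappers above. Tensor shapes are inferred from the size() calls inside the wrappers, and the dtypes (int32 masks and hash codes, float32 vectors and values) follow from the data_ptr<int>() / data_ptr<float>() accesses; the function name and the all-ones masks are illustrative only.

#include <vector>
#include <ATen/ATen.h>
#include "fast_lsh_cumulation.h"

// query_vec: [B, Nq, D] float32 CUDA, key_vec: [B, Nk, D] float32 CUDA,
// value: [B, Nk, Dv] float32 CUDA.
at::Tensor lsh_cumulation_example(at::Tensor query_vec, at::Tensor key_vec, at::Tensor value,
                                  int num_hash_f, int hash_code_len, int hashtable_capacity) {
  auto int_opts = query_vec.options().dtype(at::kInt);
  // Masks flag valid (non-padded) positions; data_ptr<int>() in the wrappers implies int32.
  at::Tensor query_mask = at::ones({query_vec.size(0), query_vec.size(1)}, int_opts);
  at::Tensor key_mask = at::ones({key_vec.size(0), key_vec.size(1)}, int_opts);
  // Hash both sides, then scatter values into per-hash tables and gather per query.
  std::vector<at::Tensor> codes = fast_hash_ver1_kernel(query_mask, query_vec, key_mask, key_vec,
                                                        num_hash_f, hash_code_len, /*use_cuda=*/true);
  return lsh_cumulation_ver1_kernel(query_mask, codes[0], key_mask, codes[1],
                                    value, hashtable_capacity, /*use_cuda=*/true);
}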
d6d0b6eb7a0745b31069e4afc907215e8247f3c1.hip
// !!! This is a file automatically generated by hipify!!! #include <map> #include <string> #include <hip/hip_runtime.h> #include <fstream> #include "file_manager.h" #include "simulation_structs.h" #include "helpers_math.h" #include "helpers.h" __device__ int3 calculate_3d_idx(double3 position, double3 cell_length, int3 cells_dim, double3 boundary_min) { int3 particle_idx = double3ToInt3((position - boundary_min) / cell_length); particle_idx.x = particle_idx.x % (int)cells_dim.x; particle_idx.y = particle_idx.y % (int)cells_dim.y; particle_idx.z = particle_idx.z % (int)cells_dim.z; return particle_idx; } __global__ void binning_cell_parallel(Particle* particles, int* cell_list, int* particle_list, size_t N, size_t num_particles, double3 boundary_min, double3 boundary_max, double3 cells_dim, double3 cell_length) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { // reset list cell_list[tid] = -1; // calculate cell coordinate from id // (determines index order in cell_list) int z_idx = tid / (cells_dim.x * cells_dim.y); int tid_z = tid - (z_idx * cells_dim.x * cells_dim.y); int y_idx = tid_z / cells_dim.x; int x_idx = tid_z % (int)cells_dim.x; int3 current_cell = make_int3(x_idx, y_idx, z_idx); for (int i = 0; i < num_particles; i++) { int3 cell = calculate_3d_idx(particles[i].pos, cell_length, double3ToInt3(cells_dim), boundary_min); if (cell == current_cell) { particle_list[i] = cell_list[tid]; cell_list[tid] = i; } } } } __global__ void reset_binning_particle_parallel(int* cell_list, size_t N) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { // reset list cell_list[tid] = -1; } } __device__ int3 boundary_conditions(int3 neighbor_cell_idx, int3 cells_dim) { // boundary conditions if (neighbor_cell_idx.x < 0) { neighbor_cell_idx.x += cells_dim.x; } if (neighbor_cell_idx.y < 0) { neighbor_cell_idx.y += cells_dim.y; } if (neighbor_cell_idx.z < 0) { neighbor_cell_idx.z += cells_dim.z; } if (neighbor_cell_idx.x >= cells_dim.x) { neighbor_cell_idx.x -= cells_dim.x; } if (neighbor_cell_idx.y >= cells_dim.y) { neighbor_cell_idx.y -= cells_dim.y; } if (neighbor_cell_idx.z >= cells_dim.z) { neighbor_cell_idx.z -= cells_dim.z; } return neighbor_cell_idx; } __global__ void binning_particle_parallel(Particle* particles, int* cell_list, int* particle_list, size_t N, size_t num_particles, double3 boundary_min, double3 boundary_max, int3 cells_dim, double3 cell_length) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < num_particles) { Particle& particle = particles[tid]; double3 domain = boundary_max - boundary_min; int3 particle_cell_idx = calculate_3d_idx(particle.pos, cell_length, cells_dim, boundary_min); // make flat index out of int3 index int particle_flat_cell_idx = particle_cell_idx.x + particle_cell_idx.y * cells_dim.x + particle_cell_idx.z * cells_dim.x * cells_dim.y; /* atomic: particle_list[tid] = cell_list[particle_flat_cell_idx]; cell_list[particle_flat_cell_idx] = tid; */ int old = atomicExch(&cell_list[particle_flat_cell_idx], tid); particle_list[tid] = old; } } __global__ void pre_integration(Particle* particles, double3* force, double deltaTime, size_t N, double3 boundary_min, double3 boundary_max) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { Particle& particle = particles[tid]; double3 position = particle.pos + deltaTime * particle.vel + (force[tid] * deltaTime * deltaTime) / (2 * particle.mass); double3 domain = boundary_max - boundary_min; //check if particles left the domain position.x = (position.x < 
boundary_min.x) ? boundary_max.x - fmod(abs(position.x), abs(domain.x)) : position.x; position.x = (position.x > boundary_max.x) ? boundary_min.x + fmod(abs(position.x), abs(domain.x)) : position.x; position.y = (position.y < boundary_min.y) ? boundary_max.y - fmod(abs(position.y), abs(domain.y)) : position.y; position.y = (position.y > boundary_max.y) ? boundary_min.y + fmod(abs(position.y), abs(domain.y)) : position.y; position.z = (position.z < boundary_min.z) ? boundary_max.z - fmod(abs(position.z), abs(domain.z)) : position.z; position.z = (position.z > boundary_max.z) ? boundary_min.z + fmod(abs(position.z), abs(domain.z)) : position.z; particle.pos = position; particle.vel += force[tid] * deltaTime / (2 * particle.mass); } } __global__ void post_integration(Particle* particles, double3* force, double deltaTime, size_t N) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { Particle& particle = particles[tid]; particle.vel += force[tid] * deltaTime / (2 * particle.mass); } } __device__ double3 calculate_force(Particle& particle, Particle& neighbor_particle, double3 domain, double eps, double s6, double r_cut) { double3 force_tmp{ 0.0, 0.0, 0.0 }; double3 dist = particle.pos - neighbor_particle.pos; // correct dist as maximal distance is now 0.5*domain double3 correction = make_double3(domain.x * round(dist.x / domain.x), domain.y * round(dist.y / domain.y), domain.z * round(dist.z / domain.z)); dist = dist - correction; double dist_norm_sq = norm_sq(dist); // use square norm for optimization if (dist_norm_sq <= r_cut * r_cut) { double t = 1.0 / dist_norm_sq; double f1 = 24 * eps * t; // multiply t instead of divide by dist_norm_sq double f2 = s6 * t * t * t; double f3 = 2 * f2 - 1; force_tmp += f1 * f2 * f3 * dist; } return force_tmp; } __global__ void calculate_forces_brute_force(Particle* particles, double3* force, double eps, double s6, size_t N, double3 boundary_min, double3 boundary_max, double r_cut) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { Particle& particle = particles[tid]; double3 force_tmp{ 0.0, 0.0, 0.0 }; double3 domain = boundary_max - boundary_min; for (int i = 0; i < N; i++) { if (i == tid) continue; // do force calculation force_tmp += calculate_force(particle, particles[i], domain, eps, s6, r_cut); } force[tid] = force_tmp; } } __global__ void calculate_forces_particle_binning(Particle* particles, double3* force, double eps, double s6, size_t N, double3 boundary_min, double3 boundary_max, double r_cut, double3 cell_length, int3 cells_dim, int* cell_list, int* particles_list) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { Particle& particle = particles[tid]; double3 force_tmp{ 0.0, 0.0, 0.0 }; double3 domain = boundary_max - boundary_min; int3 particle_cell_idx = calculate_3d_idx(particle.pos, cell_length, cells_dim, boundary_min); for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { for (int z = -1; z <= 1; z++) { int3 neighbor_cell_idx = particle_cell_idx + make_int3(x, y, z); // boundary conditions neighbor_cell_idx = boundary_conditions(neighbor_cell_idx, cells_dim); // make flat index out of int3 index int neighbor_flat_cell_idx = neighbor_cell_idx.x + neighbor_cell_idx.y * cells_dim.x + neighbor_cell_idx.z * cells_dim.x * cells_dim.y; int neighbor_particle_idx = cell_list[neighbor_flat_cell_idx]; while (neighbor_particle_idx != -1) { //check if particle_idx is this particle if (neighbor_particle_idx == tid) { neighbor_particle_idx = particles_list[neighbor_particle_idx]; continue; } 
Particle neighbor_particle = particles[neighbor_particle_idx]; // do force calculation force_tmp += calculate_force(particle, neighbor_particle, domain, eps, s6, r_cut); // follow linked list neighbor_particle_idx = particles_list[neighbor_particle_idx]; } } } } force[tid] = force_tmp; } } __global__ void calculate_forces_cell_binning(Particle* particles, double3* force, double eps, double s6, size_t N, double3 boundary_min, double3 boundary_max, double r_cut, double3 cell_length, int3 cells_dim, int* cell_list, int* particles_list, size_t num_particles) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { int first_cell_particle_idx = cell_list[tid]; // no particles in this cell if (first_cell_particle_idx == -1) { return; } double3 domain = boundary_max - boundary_min; int z_idx = tid / (cells_dim.x * cells_dim.y); int tid_z = tid - (z_idx * cells_dim.x * cells_dim.y); int y_idx = tid_z / cells_dim.x; int x_idx = tid_z % (int)cells_dim.x; int3 current_cell = make_int3(x_idx, y_idx, z_idx); // reset force for each particle in the cell int cell_particle_idx = first_cell_particle_idx; while (cell_particle_idx != -1) { force[cell_particle_idx] = { 0,0,0 }; cell_particle_idx = particles_list[cell_particle_idx]; } // iterate over all neighbor cells for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { for (int z = -1; z <= 1; z++) { int3 neighbor_cell_idx = current_cell + make_int3(x, y, z); // check boundary conditions neighbor_cell_idx = boundary_conditions(neighbor_cell_idx, cells_dim); // make flat index out of int3 index int neighbor_flat_cell_idx = neighbor_cell_idx.x + neighbor_cell_idx.y * cells_dim.x + neighbor_cell_idx.z * cells_dim.x * cells_dim.y; int neighbor_particle_idx = cell_list[neighbor_flat_cell_idx]; cell_particle_idx = first_cell_particle_idx; int first_neighbor_cell_particle_idx = neighbor_particle_idx; //iterate over all particles in cell_particle_idx while (cell_particle_idx != -1) { Particle& particle = particles[cell_particle_idx]; // reset neighbor_particle_idx with first particle of the neighbor cell neighbor_particle_idx = first_neighbor_cell_particle_idx; // iterate over all neighbor particles while (neighbor_particle_idx != -1) { Particle neighbor_particle = particles[neighbor_particle_idx]; //check if particle_idx is the current particle if (neighbor_particle_idx == cell_particle_idx) { neighbor_particle_idx = particles_list[neighbor_particle_idx]; continue; } // do force calculation force[cell_particle_idx] += calculate_force(particle, neighbor_particle, domain, eps, s6, r_cut); // move on to next neighbor particle in this neighbor cell neighbor_particle_idx = particles_list[neighbor_particle_idx]; } // follow linked list cell_particle_idx = particles_list[cell_particle_idx]; } } } } } } __global__ void calculate_forces_neighbor_lists(Particle* particles, double3* force, double eps, double s6, size_t N, double3 domain, double r_cut, double3 cell_length, int3 cells_dim, int* cell_list, int* particles_list, int neighborlist_length, int* neighborlists, int* num_neighs) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { Particle& particle = particles[tid]; double3 force_tmp{ 0.0, 0.0, 0.0 }; for (int i = 0; i < num_neighs[tid]; i++) { int neighbor_particle_idx = neighborlists[tid * neighborlist_length + i]; Particle neighbor_particle = particles[neighbor_particle_idx]; // do force calculation force_tmp += calculate_force(particle, neighbor_particle, domain, eps, s6, r_cut); } force[tid] = force_tmp; } } __global__ void 
neighbor_lists(Particle* particles, int* cell_list, int* particle_list, size_t N, double3 boundary_min, double3 boundary_max, int3 cells_dim, double3 cell_length, int neighborlist_length, int* neighborlists, int* num_neighs, int r_cut, int r_skin) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { int index = 0; Particle& particle = particles[tid]; double3 domain = boundary_max - boundary_min; int3 particle_cell_idx = calculate_3d_idx(particle.pos, cell_length, cells_dim, boundary_min); for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { for (int z = -1; z <= 1; z++) { int3 neighbor_cell_idx = particle_cell_idx + make_int3(x, y, z); // boundary conditions neighbor_cell_idx = boundary_conditions(neighbor_cell_idx, cells_dim); // make flat index out of int3 index int neighbor_flat_cell_idx = neighbor_cell_idx.x + neighbor_cell_idx.y * cells_dim.x + neighbor_cell_idx.z * cells_dim.x * cells_dim.y; int neighbor_particle_idx = cell_list[neighbor_flat_cell_idx]; while (neighbor_particle_idx != -1) { //check if particle_idx is this particle if (neighbor_particle_idx == tid) { neighbor_particle_idx = particle_list[neighbor_particle_idx]; continue; } // skin: Particle neighbor_particle = particles[neighbor_particle_idx]; double3 dist = particle.pos - neighbor_particle.pos; double3 correction = make_double3(domain.x * round(dist.x / domain.x), domain.y * round(dist.y / domain.y), domain.z * round(dist.z / domain.z)); dist = dist - correction; double dist_norm = norm(dist); if (dist_norm <= r_cut + r_skin) { // add to neighbor list for current particle neighborlists[tid * neighborlist_length + index] = neighbor_particle_idx; index++; // check if neighbor list is full if (index == neighborlist_length) goto Lbreak_loops; } // follow linked list neighbor_particle_idx = particle_list[neighbor_particle_idx]; } } } } Lbreak_loops: num_neighs[tid] = index; } } int main() { FileManager file_manager("./input", "./output", "blocks_big.par"); Parameters params = file_manager.readParams(); std::vector<Particle> particles = file_manager.readInitialState(params.part_input_file, params.boundary_min, params.boundary_max); size_t num_particles = particles.size(); double3 domain = params.boundary_max - params.boundary_min; double s6 = pow(params.sigma, 6); // binning structures std::vector<int> particle_list(num_particles, -1); size_t num_cells = params.cells_dim.x * params.cells_dim.y * params.cells_dim.z; double3 cell_length = domain / params.cells_dim; std::vector<int> cell_list(num_cells, -1); // Allocate memory on device Particle* d_particles; double3 *d_force; int *d_particle_list, *d_cell_list; size_t bytes_vec = sizeof(double3) * num_particles; size_t bytes_struct = sizeof(Particle) * num_particles; size_t bytes_particle_list = sizeof(int) * num_particles; size_t bytes_cell_list = sizeof(int) * num_cells; checkError(hipMalloc((void**)&d_particle_list, bytes_particle_list)); checkError(hipMalloc((void**)&d_cell_list, bytes_cell_list)); checkError(hipMalloc((void**)&d_particles, bytes_struct)); checkError(hipMalloc(&d_force, bytes_vec)); // Copy data to GPU checkError(hipMemcpy(d_particles, particles.data(), bytes_struct, hipMemcpyHostToDevice)); checkError(hipMemcpy(d_particle_list, particle_list.data(), bytes_particle_list, hipMemcpyHostToDevice)); checkError(hipMemcpy(d_cell_list, cell_list.data(), bytes_cell_list, hipMemcpyHostToDevice)); dim3 threads_per_block(params.threads_per_block); dim3 numBlocks((num_particles + threads_per_block.x - 1) / threads_per_block.x); dim3 
numBlocksCell((num_cells + threads_per_block.x - 1) / threads_per_block.x); //binning_cell_parallel<<<numBlocksCell, threads_per_block>>>(d_particles, d_cell_list, d_particle_list, num_cells, num_particles, params.boundary_min, params.boundary_max, params.cells_dim, cell_length); // OR hipLaunchKernelGGL(( reset_binning_particle_parallel), dim3(numBlocksCell), dim3(threads_per_block), 0, 0, d_cell_list, num_cells); hipLaunchKernelGGL(( binning_particle_parallel), dim3(numBlocks), dim3(threads_per_block), 0, 0, d_particles, d_cell_list, d_particle_list, num_cells, num_particles, params.boundary_min, params.boundary_max, double3ToInt3(params.cells_dim), cell_length); checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); // DEBUG binning /*checkError(hipMemcpy(particle_list.data(), d_particle_list, bytes_particle_list, hipMemcpyDeviceToHost)); checkError(hipMemcpy(cell_list.data(), d_cell_list, bytes_cell_list, hipMemcpyDeviceToHost)); for (int i = 0; i < cell_list.size(); i++) { if (cell_list.at(i) == -1) continue; std::cout << "Cell " << i << ": " << cell_list.at(i) << std::endl; } for (int i = 0; i < particle_list.size(); i++) { std::cout << "Particle " << i << ": " << particle_list.at(i) << std::endl; }*/ int *d_neighborlists, *d_num_neighs; checkError(hipMalloc((void**)&d_neighborlists, sizeof(int) * num_particles * params.neighborlist_length)); checkError(hipMalloc((void**)&d_num_neighs, sizeof(int) * num_particles)); hipLaunchKernelGGL(( neighbor_lists), dim3(numBlocks), dim3(threads_per_block), 0, 0, d_particles, d_cell_list, d_particle_list, num_particles, params.boundary_min, params.boundary_max, double3ToInt3(params.cells_dim), cell_length, params.neighborlist_length, d_neighborlists, d_num_neighs, params.r_cut, params.r_skin); checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); // Initialize forces hipLaunchKernelGGL(( calculate_forces_brute_force), dim3(numBlocks), dim3(threads_per_block), 0, 0, d_particles, d_force, params.eps, s6, num_particles, params.boundary_min, params.boundary_max, params.r_cut); checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); std::cout << "Simulation started ..." 
<< std::endl; size_t iterations = (size_t) (params.time_end / params.time_step + 1); for (size_t i = 0; i < iterations; i++) { // Save simulation states at given frequncies if (i % params.part_out_freq == 0) { checkError(hipMemcpy(particles.data(), d_particles, bytes_struct, hipMemcpyDeviceToHost)); file_manager.bufferOutput(particles); } if (i % params.vtk_out_freq == 0) { checkError(hipMemcpy(particles.data(), d_particles, bytes_struct, hipMemcpyDeviceToHost)); file_manager.bufferVTK(particles); } // Verlet scheme: Integrate position, calculate forces, and integrate velocity hipLaunchKernelGGL(( pre_integration), dim3(numBlocks), dim3(threads_per_block), 0, 0, d_particles, d_force, params.time_step, num_particles, params.boundary_min, params.boundary_max); checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); if (i % params.bin_freq == 0) { hipLaunchKernelGGL(( binning_cell_parallel), dim3(numBlocksCell), dim3(threads_per_block), 0, 0, d_particles, d_cell_list, d_particle_list, num_cells, num_particles, params.boundary_min, params.boundary_max, params.cells_dim, cell_length); // OR //reset_binning_particle_parallel << <numBlocksCell, threads_per_block >> > (d_particles, d_cell_list, d_particle_list, num_cells, num_particles, params.boundary_min, params.boundary_max, params.cells_dim, cell_length); //binning_particle_parallel << <numBlocks, threads_per_block >> > (d_particles, d_cell_list, d_particle_list, num_cells, num_particles, params.boundary_min, params.boundary_max, double3ToInt3(params.cells_dim), cell_length); checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); neighbor_lists << <numBlocks, threads_per_block >> > (d_particles, d_cell_list, d_particle_list, num_particles, params.boundary_min, params.boundary_max, double3ToInt3(params.cells_dim), cell_length, params.neighborlist_length, d_neighborlists, d_num_neighs, params.r_cut, params.r_skin); checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); } // choose one: //calculate_forces_brute_force<<<numBlocks, threads_per_block>>>(d_particles, d_force, params.eps, s6, num_particles, params.boundary_min, params.boundary_max, params.r_cut); //calculate_forces_particle_binning<<<numBlocks, threads_per_block>>>(d_particles, d_force, params.eps, s6, num_particles, params.boundary_min, params.boundary_max, params.r_cut, cell_length, double3ToInt3(params.cells_dim), d_cell_list, d_particle_list); hipLaunchKernelGGL(( calculate_forces_cell_binning), dim3(numBlocksCell), dim3(threads_per_block), 0, 0, d_particles, d_force, params.eps, s6, num_cells, params.boundary_min, params.boundary_max, params.r_cut, cell_length, double3ToInt3(params.cells_dim), d_cell_list, d_particle_list, num_particles); //calculate_forces_neighbor_lists<<<numBlocks, threads_per_block>>>(d_particles, d_force, params.eps, s6, num_particles, domain, params.r_cut, cell_length, double3ToInt3(params.cells_dim), d_cell_list, d_particle_list, params.neighborlist_length, d_neighborlists, d_num_neighs); checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); hipLaunchKernelGGL(( post_integration), dim3(numBlocks), dim3(threads_per_block), 0, 0, d_particles, d_force, params.time_step, num_particles); checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); } std::cout << "Simulation finished!" << std::endl; file_manager.write(params.part_out_name, params.vtk_out_name); checkError(hipFree(d_force)); checkError(hipFree(d_particles)); }
d6d0b6eb7a0745b31069e4afc907215e8247f3c1.cu
#include <map> #include <string> #include <cuda_runtime.h> #include <fstream> #include "file_manager.h" #include "simulation_structs.h" #include "helpers_math.h" #include "helpers.h" __device__ int3 calculate_3d_idx(double3 position, double3 cell_length, int3 cells_dim, double3 boundary_min) { int3 particle_idx = double3ToInt3((position - boundary_min) / cell_length); particle_idx.x = particle_idx.x % (int)cells_dim.x; particle_idx.y = particle_idx.y % (int)cells_dim.y; particle_idx.z = particle_idx.z % (int)cells_dim.z; return particle_idx; } __global__ void binning_cell_parallel(Particle* particles, int* cell_list, int* particle_list, size_t N, size_t num_particles, double3 boundary_min, double3 boundary_max, double3 cells_dim, double3 cell_length) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { // reset list cell_list[tid] = -1; // calculate cell coordinate from id // (determines index order in cell_list) int z_idx = tid / (cells_dim.x * cells_dim.y); int tid_z = tid - (z_idx * cells_dim.x * cells_dim.y); int y_idx = tid_z / cells_dim.x; int x_idx = tid_z % (int)cells_dim.x; int3 current_cell = make_int3(x_idx, y_idx, z_idx); for (int i = 0; i < num_particles; i++) { int3 cell = calculate_3d_idx(particles[i].pos, cell_length, double3ToInt3(cells_dim), boundary_min); if (cell == current_cell) { particle_list[i] = cell_list[tid]; cell_list[tid] = i; } } } } __global__ void reset_binning_particle_parallel(int* cell_list, size_t N) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { // reset list cell_list[tid] = -1; } } __device__ int3 boundary_conditions(int3 neighbor_cell_idx, int3 cells_dim) { // boundary conditions if (neighbor_cell_idx.x < 0) { neighbor_cell_idx.x += cells_dim.x; } if (neighbor_cell_idx.y < 0) { neighbor_cell_idx.y += cells_dim.y; } if (neighbor_cell_idx.z < 0) { neighbor_cell_idx.z += cells_dim.z; } if (neighbor_cell_idx.x >= cells_dim.x) { neighbor_cell_idx.x -= cells_dim.x; } if (neighbor_cell_idx.y >= cells_dim.y) { neighbor_cell_idx.y -= cells_dim.y; } if (neighbor_cell_idx.z >= cells_dim.z) { neighbor_cell_idx.z -= cells_dim.z; } return neighbor_cell_idx; } __global__ void binning_particle_parallel(Particle* particles, int* cell_list, int* particle_list, size_t N, size_t num_particles, double3 boundary_min, double3 boundary_max, int3 cells_dim, double3 cell_length) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < num_particles) { Particle& particle = particles[tid]; double3 domain = boundary_max - boundary_min; int3 particle_cell_idx = calculate_3d_idx(particle.pos, cell_length, cells_dim, boundary_min); // make flat index out of int3 index int particle_flat_cell_idx = particle_cell_idx.x + particle_cell_idx.y * cells_dim.x + particle_cell_idx.z * cells_dim.x * cells_dim.y; /* atomic: particle_list[tid] = cell_list[particle_flat_cell_idx]; cell_list[particle_flat_cell_idx] = tid; */ int old = atomicExch(&cell_list[particle_flat_cell_idx], tid); particle_list[tid] = old; } } __global__ void pre_integration(Particle* particles, double3* force, double deltaTime, size_t N, double3 boundary_min, double3 boundary_max) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { Particle& particle = particles[tid]; double3 position = particle.pos + deltaTime * particle.vel + (force[tid] * deltaTime * deltaTime) / (2 * particle.mass); double3 domain = boundary_max - boundary_min; //check if particles left the domain position.x = (position.x < boundary_min.x) ? 
boundary_max.x - fmod(abs(position.x), abs(domain.x)) : position.x; position.x = (position.x > boundary_max.x) ? boundary_min.x + fmod(abs(position.x), abs(domain.x)) : position.x; position.y = (position.y < boundary_min.y) ? boundary_max.y - fmod(abs(position.y), abs(domain.y)) : position.y; position.y = (position.y > boundary_max.y) ? boundary_min.y + fmod(abs(position.y), abs(domain.y)) : position.y; position.z = (position.z < boundary_min.z) ? boundary_max.z - fmod(abs(position.z), abs(domain.z)) : position.z; position.z = (position.z > boundary_max.z) ? boundary_min.z + fmod(abs(position.z), abs(domain.z)) : position.z; particle.pos = position; particle.vel += force[tid] * deltaTime / (2 * particle.mass); } } __global__ void post_integration(Particle* particles, double3* force, double deltaTime, size_t N) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { Particle& particle = particles[tid]; particle.vel += force[tid] * deltaTime / (2 * particle.mass); } } __device__ double3 calculate_force(Particle& particle, Particle& neighbor_particle, double3 domain, double eps, double s6, double r_cut) { double3 force_tmp{ 0.0, 0.0, 0.0 }; double3 dist = particle.pos - neighbor_particle.pos; // correct dist as maximal distance is now 0.5*domain double3 correction = make_double3(domain.x * round(dist.x / domain.x), domain.y * round(dist.y / domain.y), domain.z * round(dist.z / domain.z)); dist = dist - correction; double dist_norm_sq = norm_sq(dist); // use square norm for optimization if (dist_norm_sq <= r_cut * r_cut) { double t = 1.0 / dist_norm_sq; double f1 = 24 * eps * t; // multiply t instead of divide by dist_norm_sq double f2 = s6 * t * t * t; double f3 = 2 * f2 - 1; force_tmp += f1 * f2 * f3 * dist; } return force_tmp; } __global__ void calculate_forces_brute_force(Particle* particles, double3* force, double eps, double s6, size_t N, double3 boundary_min, double3 boundary_max, double r_cut) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { Particle& particle = particles[tid]; double3 force_tmp{ 0.0, 0.0, 0.0 }; double3 domain = boundary_max - boundary_min; for (int i = 0; i < N; i++) { if (i == tid) continue; // do force calculation force_tmp += calculate_force(particle, particles[i], domain, eps, s6, r_cut); } force[tid] = force_tmp; } } __global__ void calculate_forces_particle_binning(Particle* particles, double3* force, double eps, double s6, size_t N, double3 boundary_min, double3 boundary_max, double r_cut, double3 cell_length, int3 cells_dim, int* cell_list, int* particles_list) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { Particle& particle = particles[tid]; double3 force_tmp{ 0.0, 0.0, 0.0 }; double3 domain = boundary_max - boundary_min; int3 particle_cell_idx = calculate_3d_idx(particle.pos, cell_length, cells_dim, boundary_min); for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { for (int z = -1; z <= 1; z++) { int3 neighbor_cell_idx = particle_cell_idx + make_int3(x, y, z); // boundary conditions neighbor_cell_idx = boundary_conditions(neighbor_cell_idx, cells_dim); // make flat index out of int3 index int neighbor_flat_cell_idx = neighbor_cell_idx.x + neighbor_cell_idx.y * cells_dim.x + neighbor_cell_idx.z * cells_dim.x * cells_dim.y; int neighbor_particle_idx = cell_list[neighbor_flat_cell_idx]; while (neighbor_particle_idx != -1) { //check if particle_idx is this particle if (neighbor_particle_idx == tid) { neighbor_particle_idx = particles_list[neighbor_particle_idx]; continue; } Particle 
neighbor_particle = particles[neighbor_particle_idx]; // do force calculation force_tmp += calculate_force(particle, neighbor_particle, domain, eps, s6, r_cut); // follow linked list neighbor_particle_idx = particles_list[neighbor_particle_idx]; } } } } force[tid] = force_tmp; } } __global__ void calculate_forces_cell_binning(Particle* particles, double3* force, double eps, double s6, size_t N, double3 boundary_min, double3 boundary_max, double r_cut, double3 cell_length, int3 cells_dim, int* cell_list, int* particles_list, size_t num_particles) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { int first_cell_particle_idx = cell_list[tid]; // no particles in this cell if (first_cell_particle_idx == -1) { return; } double3 domain = boundary_max - boundary_min; int z_idx = tid / (cells_dim.x * cells_dim.y); int tid_z = tid - (z_idx * cells_dim.x * cells_dim.y); int y_idx = tid_z / cells_dim.x; int x_idx = tid_z % (int)cells_dim.x; int3 current_cell = make_int3(x_idx, y_idx, z_idx); // reset force for each particle in the cell int cell_particle_idx = first_cell_particle_idx; while (cell_particle_idx != -1) { force[cell_particle_idx] = { 0,0,0 }; cell_particle_idx = particles_list[cell_particle_idx]; } // iterate over all neighbor cells for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { for (int z = -1; z <= 1; z++) { int3 neighbor_cell_idx = current_cell + make_int3(x, y, z); // check boundary conditions neighbor_cell_idx = boundary_conditions(neighbor_cell_idx, cells_dim); // make flat index out of int3 index int neighbor_flat_cell_idx = neighbor_cell_idx.x + neighbor_cell_idx.y * cells_dim.x + neighbor_cell_idx.z * cells_dim.x * cells_dim.y; int neighbor_particle_idx = cell_list[neighbor_flat_cell_idx]; cell_particle_idx = first_cell_particle_idx; int first_neighbor_cell_particle_idx = neighbor_particle_idx; //iterate over all particles in cell_particle_idx while (cell_particle_idx != -1) { Particle& particle = particles[cell_particle_idx]; // reset neighbor_particle_idx with first particle of the neighbor cell neighbor_particle_idx = first_neighbor_cell_particle_idx; // iterate over all neighbor particles while (neighbor_particle_idx != -1) { Particle neighbor_particle = particles[neighbor_particle_idx]; //check if particle_idx is the current particle if (neighbor_particle_idx == cell_particle_idx) { neighbor_particle_idx = particles_list[neighbor_particle_idx]; continue; } // do force calculation force[cell_particle_idx] += calculate_force(particle, neighbor_particle, domain, eps, s6, r_cut); // move on to next neighbor particle in this neighbor cell neighbor_particle_idx = particles_list[neighbor_particle_idx]; } // follow linked list cell_particle_idx = particles_list[cell_particle_idx]; } } } } } } __global__ void calculate_forces_neighbor_lists(Particle* particles, double3* force, double eps, double s6, size_t N, double3 domain, double r_cut, double3 cell_length, int3 cells_dim, int* cell_list, int* particles_list, int neighborlist_length, int* neighborlists, int* num_neighs) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { Particle& particle = particles[tid]; double3 force_tmp{ 0.0, 0.0, 0.0 }; for (int i = 0; i < num_neighs[tid]; i++) { int neighbor_particle_idx = neighborlists[tid * neighborlist_length + i]; Particle neighbor_particle = particles[neighbor_particle_idx]; // do force calculation force_tmp += calculate_force(particle, neighbor_particle, domain, eps, s6, r_cut); } force[tid] = force_tmp; } } __global__ void 
neighbor_lists(Particle* particles, int* cell_list, int* particle_list, size_t N, double3 boundary_min, double3 boundary_max, int3 cells_dim, double3 cell_length, int neighborlist_length, int* neighborlists, int* num_neighs, double r_cut, double r_skin) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { int index = 0; Particle& particle = particles[tid]; double3 domain = boundary_max - boundary_min; int3 particle_cell_idx = calculate_3d_idx(particle.pos, cell_length, cells_dim, boundary_min); for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { for (int z = -1; z <= 1; z++) { int3 neighbor_cell_idx = particle_cell_idx + make_int3(x, y, z); // boundary conditions neighbor_cell_idx = boundary_conditions(neighbor_cell_idx, cells_dim); // make flat index out of int3 index int neighbor_flat_cell_idx = neighbor_cell_idx.x + neighbor_cell_idx.y * cells_dim.x + neighbor_cell_idx.z * cells_dim.x * cells_dim.y; int neighbor_particle_idx = cell_list[neighbor_flat_cell_idx]; while (neighbor_particle_idx != -1) { //check if particle_idx is this particle if (neighbor_particle_idx == tid) { neighbor_particle_idx = particle_list[neighbor_particle_idx]; continue; } // skin: accept neighbors out to r_cut + r_skin so the list stays valid until the next rebuild Particle neighbor_particle = particles[neighbor_particle_idx]; double3 dist = particle.pos - neighbor_particle.pos; double3 correction = make_double3(domain.x * round(dist.x / domain.x), domain.y * round(dist.y / domain.y), domain.z * round(dist.z / domain.z)); dist = dist - correction; double dist_norm = norm(dist); if (dist_norm <= r_cut + r_skin) { // add to neighbor list for current particle neighborlists[tid * neighborlist_length + index] = neighbor_particle_idx; index++; // check if neighbor list is full if (index == neighborlist_length) goto Lbreak_loops; } // follow linked list neighbor_particle_idx = particle_list[neighbor_particle_idx]; } } } } Lbreak_loops: num_neighs[tid] = index; } } int main() { FileManager file_manager("./input", "./output", "blocks_big.par"); Parameters params = file_manager.readParams(); std::vector<Particle> particles = file_manager.readInitialState(params.part_input_file, params.boundary_min, params.boundary_max); size_t num_particles = particles.size(); double3 domain = params.boundary_max - params.boundary_min; double s6 = pow(params.sigma, 6); // binning structures std::vector<int> particle_list(num_particles, -1); size_t num_cells = params.cells_dim.x * params.cells_dim.y * params.cells_dim.z; double3 cell_length = domain / params.cells_dim; std::vector<int> cell_list(num_cells, -1); // Allocate memory on device Particle* d_particles; double3 *d_force; int *d_particle_list, *d_cell_list; size_t bytes_vec = sizeof(double3) * num_particles; size_t bytes_struct = sizeof(Particle) * num_particles; size_t bytes_particle_list = sizeof(int) * num_particles; size_t bytes_cell_list = sizeof(int) * num_cells; checkError(cudaMalloc((void**)&d_particle_list, bytes_particle_list)); checkError(cudaMalloc((void**)&d_cell_list, bytes_cell_list)); checkError(cudaMalloc((void**)&d_particles, bytes_struct)); checkError(cudaMalloc(&d_force, bytes_vec)); // Copy data to GPU checkError(cudaMemcpy(d_particles, particles.data(), bytes_struct, cudaMemcpyHostToDevice)); checkError(cudaMemcpy(d_particle_list, particle_list.data(), bytes_particle_list, cudaMemcpyHostToDevice)); checkError(cudaMemcpy(d_cell_list, cell_list.data(), bytes_cell_list, cudaMemcpyHostToDevice)); dim3 threads_per_block(params.threads_per_block); dim3 numBlocks((num_particles + threads_per_block.x - 1) / threads_per_block.x);
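// Launch geometry: numBlocks gives one thread per particle and, just below, numBlocksCell gives one thread per cell; both round up so the last partial block is still launched, and the kernels guard against the overshoot with their tid < N checks.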
dim3 numBlocksCell((num_cells + threads_per_block.x - 1) / threads_per_block.x); //binning_cell_parallel<<<numBlocksCell, threads_per_block>>>(d_particles, d_cell_list, d_particle_list, num_cells, num_particles, params.boundary_min, params.boundary_max, params.cells_dim, cell_length); // OR reset_binning_particle_parallel<<<numBlocksCell, threads_per_block>>>(d_cell_list, num_cells); binning_particle_parallel<<<numBlocks, threads_per_block>>>(d_particles, d_cell_list, d_particle_list, num_cells, num_particles, params.boundary_min, params.boundary_max, double3ToInt3(params.cells_dim), cell_length); checkError(cudaPeekAtLastError()); checkError(cudaDeviceSynchronize()); // DEBUG binning /*checkError(cudaMemcpy(particle_list.data(), d_particle_list, bytes_particle_list, cudaMemcpyDeviceToHost)); checkError(cudaMemcpy(cell_list.data(), d_cell_list, bytes_cell_list, cudaMemcpyDeviceToHost)); for (int i = 0; i < cell_list.size(); i++) { if (cell_list.at(i) == -1) continue; std::cout << "Cell " << i << ": " << cell_list.at(i) << std::endl; } for (int i = 0; i < particle_list.size(); i++) { std::cout << "Particle " << i << ": " << particle_list.at(i) << std::endl; }*/ int *d_neighborlists, *d_num_neighs; checkError(cudaMalloc((void**)&d_neighborlists, sizeof(int) * num_particles * params.neighborlist_length)); checkError(cudaMalloc((void**)&d_num_neighs, sizeof(int) * num_particles)); neighbor_lists<<<numBlocks, threads_per_block>>>(d_particles, d_cell_list, d_particle_list, num_particles, params.boundary_min, params.boundary_max, double3ToInt3(params.cells_dim), cell_length, params.neighborlist_length, d_neighborlists, d_num_neighs, params.r_cut, params.r_skin); checkError(cudaPeekAtLastError()); checkError(cudaDeviceSynchronize()); // Initialize forces calculate_forces_brute_force<<<numBlocks, threads_per_block>>>(d_particles, d_force, params.eps, s6, num_particles, params.boundary_min, params.boundary_max, params.r_cut); checkError(cudaPeekAtLastError()); checkError(cudaDeviceSynchronize()); std::cout << "Simulation started ..." 
<< std::endl; size_t iterations = (size_t) (params.time_end / params.time_step + 1); for (size_t i = 0; i < iterations; i++) { // Save simulation states at given frequncies if (i % params.part_out_freq == 0) { checkError(cudaMemcpy(particles.data(), d_particles, bytes_struct, cudaMemcpyDeviceToHost)); file_manager.bufferOutput(particles); } if (i % params.vtk_out_freq == 0) { checkError(cudaMemcpy(particles.data(), d_particles, bytes_struct, cudaMemcpyDeviceToHost)); file_manager.bufferVTK(particles); } // Verlet scheme: Integrate position, calculate forces, and integrate velocity pre_integration<<<numBlocks, threads_per_block>>>(d_particles, d_force, params.time_step, num_particles, params.boundary_min, params.boundary_max); checkError(cudaPeekAtLastError()); checkError(cudaDeviceSynchronize()); if (i % params.bin_freq == 0) { binning_cell_parallel<<<numBlocksCell, threads_per_block>>>(d_particles, d_cell_list, d_particle_list, num_cells, num_particles, params.boundary_min, params.boundary_max, params.cells_dim, cell_length); // OR //reset_binning_particle_parallel << <numBlocksCell, threads_per_block >> > (d_particles, d_cell_list, d_particle_list, num_cells, num_particles, params.boundary_min, params.boundary_max, params.cells_dim, cell_length); //binning_particle_parallel << <numBlocks, threads_per_block >> > (d_particles, d_cell_list, d_particle_list, num_cells, num_particles, params.boundary_min, params.boundary_max, double3ToInt3(params.cells_dim), cell_length); checkError(cudaPeekAtLastError()); checkError(cudaDeviceSynchronize()); neighbor_lists << <numBlocks, threads_per_block >> > (d_particles, d_cell_list, d_particle_list, num_particles, params.boundary_min, params.boundary_max, double3ToInt3(params.cells_dim), cell_length, params.neighborlist_length, d_neighborlists, d_num_neighs, params.r_cut, params.r_skin); checkError(cudaPeekAtLastError()); checkError(cudaDeviceSynchronize()); } // choose one: //calculate_forces_brute_force<<<numBlocks, threads_per_block>>>(d_particles, d_force, params.eps, s6, num_particles, params.boundary_min, params.boundary_max, params.r_cut); //calculate_forces_particle_binning<<<numBlocks, threads_per_block>>>(d_particles, d_force, params.eps, s6, num_particles, params.boundary_min, params.boundary_max, params.r_cut, cell_length, double3ToInt3(params.cells_dim), d_cell_list, d_particle_list); calculate_forces_cell_binning<<<numBlocksCell, threads_per_block>>>(d_particles, d_force, params.eps, s6, num_cells, params.boundary_min, params.boundary_max, params.r_cut, cell_length, double3ToInt3(params.cells_dim), d_cell_list, d_particle_list, num_particles); //calculate_forces_neighbor_lists<<<numBlocks, threads_per_block>>>(d_particles, d_force, params.eps, s6, num_particles, domain, params.r_cut, cell_length, double3ToInt3(params.cells_dim), d_cell_list, d_particle_list, params.neighborlist_length, d_neighborlists, d_num_neighs); checkError(cudaPeekAtLastError()); checkError(cudaDeviceSynchronize()); post_integration<<<numBlocks, threads_per_block>>>(d_particles, d_force, params.time_step, num_particles); checkError(cudaPeekAtLastError()); checkError(cudaDeviceSynchronize()); } std::cout << "Simulation finished!" << std::endl; file_manager.write(params.part_out_name, params.vtk_out_name); checkError(cudaFree(d_force)); checkError(cudaFree(d_particles)); }
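// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources above): the neighbor-list
// and force kernels all apply the minimum-image convention
//     dist -= domain * round(dist / domain)
// so the separation used for the cutoff tests is never larger than half the
// domain in any direction. The helper below repeats that arithmetic for a single
// component on the host; <cmath> and the sample numbers in the comments are
// assumptions for the example, not values from the simulation input.
#include <cmath>

static double minimum_image_1d(double xi, double xj, double length) {
    double d = xi - xj;                       // raw separation
    d -= length * std::round(d / length);     // wrap into [-length/2, length/2]
    return d;
}

// Example: length = 10.0, xi = 9.5, xj = 0.5 gives a raw separation of 9.0,
// and minimum_image_1d returns -1.0 -- the two positions are periodic images
// one length unit apart, which is the distance the r_cut / r_skin tests should see.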
8a73981fd3e0aa830493c0a246f82728a738199b.hip
// !!! This is a file automatically generated by hipify!!! #include "cuda_utils.h" #include <algorithm> #include <cassert> #include <cstdint> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <math.h> #include <stdlib.h> #include <vector> #include <iomanip> #include <iostream> #include <ctime> __device__ inline float H(float arg) { return -(arg) * log2f(arg); } __device__ inline float H2(int arg1, int arg2, float p) { return H((arg1 + p) / (arg1 + arg2 + 1.0f)) + H((arg2 + 1.0f - p) / (arg1 + arg2 + 1.0f)); } __device__ float compute_gig_1_2(const uint32_t* const __restrict__ v1_1, const uint32_t* const __restrict__ v1_2, const uint32_t* const __restrict__ v2_1, const uint32_t* const __restrict__ v2_2, const uint32_t* const __restrict__ ds_1, int num_objects, float p) { // how many objects having [d][x][y], where d [0, 1] is descriptive value, // x [0, 2] is v1 value and y [0, 2] v2 value int count[2][3][3] = {0}; #pragma unroll for (int obj = 0; obj < (num_objects / 32); obj++) { #pragma unroll for (int d = 0; d <= 1; d++) { #pragma unroll for (int v1 = 0; v1 <= 2; v1++) { #pragma unroll for (int v2 = 0; v2 <= 2; v2++) { // No need to put __ldg here. uint32_t d_b = (d == 1) ? ds_1[obj] : ~ds_1[obj]; uint32_t v1_b = (v1 == 0) ? (~v1_1[obj] & ~v1_2[obj]) : (v1 == 1 ? v1_1[obj] : v1_2[obj]); uint32_t v2_b = (v2 == 0) ? (~v2_1[obj] & ~v2_2[obj]) : (v2 == 1 ? v2_1[obj] : v2_2[obj]); count[d][v1][v2] += __popc(d_b & v1_b & v2_b); } } } } // Do the same thing, but only for the rest of the bits. if (num_objects % 32) { uint32_t rest = (~0u) >> (32 - num_objects % 32); int last_obj_ind = num_objects / 32; #pragma unroll for (int d = 0; d <= 1; d++) { #pragma unroll for (int v1 = 0; v1 <= 2; v1++) { #pragma unroll for (int v2 = 0; v2 <= 2; v2++) { uint32_t d_b = (d == 1) ? ds_1[last_obj_ind] : ~ds_1[last_obj_ind]; uint32_t v1_b = (v1 == 0) ? (~v1_1[last_obj_ind] & ~v1_2[last_obj_ind]) : (v1 == 1 ? v1_1[last_obj_ind] : v1_2[last_obj_ind]); uint32_t v2_b = (v2 == 0) ? (~v2_1[last_obj_ind] & ~v2_2[last_obj_ind]) : (v2 == 1 ? v2_1[last_obj_ind] : v2_2[last_obj_ind]); count[d][v1][v2] += __popc(d_b & v1_b & v2_b & rest); } } } } int sum_n2_n3 = 0; int sum_n2_n3_2 = 0; #pragma unroll for (int i = 0; i <= 2; i++) { #pragma unroll for (int v2 = 0; v2 <= 2; v2++) { sum_n2_n3 += count[0][i][v2]; sum_n2_n3_2 += count[1][i][v2]; } } const float h_p = H2(sum_n2_n3, sum_n2_n3_2, p); float ig1 = h_p, ig2 = h_p; #pragma unroll for (int v = 0; v <= 2; v++) { int sum3_1 = 0, sum3_2 = 0, sum2_1 = 0, sum2_2 = 0; #pragma unroll for (int v2 = 0; v2 <= 2; v2++) { sum3_1 += count[0][v][v2]; sum3_2 += count[1][v][v2]; sum2_1 += count[0][v2][v]; sum2_2 += count[1][v2][v]; } ig1 -= (sum3_1 + sum3_2) * H2(sum3_1, sum3_2, p); ig2 -= (sum2_1 + sum2_2) * H2(sum2_1, sum2_2, p); } float ig12 = h_p; #pragma unroll for (int v1 = 0; v1 <= 2; v1++) { #pragma unroll for (int v2 = 0; v2 <= 2; v2++) { ig12 -= (count[0][v1][v2] + count[1][v1][v2]) * H2(count[0][v1][v2], count[1][v1][v2], p); } } auto result = ig12 - ((ig1 > ig2) ? 
ig1 : ig2); return result; } struct GigStruct { float gig; int v1, v2; bool operator<(GigStruct other) const { return gig > other.gig; }; friend std::ostream& operator<<(std::ostream& os, const GigStruct& gig) { return os << std::fixed << std::setprecision(3) << gig.gig << " " << gig.v1 << " " << gig.v2; } }; #define OFFSET(var_number, objects_count) \ ((var_number) * (32 - ((objects_count) % 32))) __global__ void compute_gig_kernel(const uint32_t* const __restrict__ vars_1, const uint32_t* const __restrict__ vars_2, const uint32_t* const __restrict__ ds_1, int num_objects, int num_vars, GigStruct* const __restrict__ r_gig, float threshold, float p, int* __restrict__ atomic_counter) { const int v1_p = blockDim.x * blockIdx.x + threadIdx.x; const int v2_p = blockDim.y * blockIdx.y + threadIdx.y; if (v1_p >= v2_p || v1_p >= num_vars || v2_p >= num_vars) return; const auto ind1 = (v1_p * num_objects + OFFSET(v1_p, num_objects)) / 32; const auto ind2 = (v2_p * num_objects + OFFSET(v2_p, num_objects)) / 32; assert((v1_p * num_objects + OFFSET(v1_p, num_objects)) % 32 == 0); assert((v2_p * num_objects + OFFSET(v2_p, num_objects)) % 32 == 0); const auto result = compute_gig_1_2(&vars_1[ind1], &vars_2[ind1], &vars_1[ind2], &vars_2[ind2], ds_1, num_objects, p); if (!(result > threshold)) return; // not large enough result. const int index = atomicAdd(atomic_counter, 1); r_gig[index] = {result, v1_p, v2_p}; } int getCounterValue(int* atomic_counter_d) { int result = -1; checkCudaErrors(hipMemcpy(&result, atomic_counter_d, sizeof(int), hipMemcpyDeviceToHost)); return result; } int* getDeviceCounter() { int* atomic_counter_d; int init = 0; checkCudaErrors(hipMalloc((void**)&atomic_counter_d, sizeof(int))); checkCudaErrors( hipMemcpy(atomic_counter_d, &init, sizeof(int), hipMemcpyHostToDevice)); checkCudaErrors(hipDeviceSynchronize()); return atomic_counter_d; } template <typename Allocator> void setBit(std::vector<uint32_t, Allocator>& bitmask, int index) { bitmask.at(index / 32) |= (1u << (index % 32)); } int getBitmaskSize(int objects) { return (objects + 31) / 32; } struct KernelLaunchInfo { int num_vars; int num_objects; float threshold; float a_priori; KernelLaunchInfo(int num_vars, int num_objects, float threshold, float a_priori) : num_vars(num_vars), num_objects(num_objects), threshold(threshold), a_priori(a_priori) {} }; using pinnedBitmaskVector = std::vector<uint32_t, pinned_allocator<uint32_t>>; std::vector<GigStruct, pinned_allocator<GigStruct> > launch_kernel( pinnedBitmaskVector ds, pinnedBitmaskVector vars_1, pinnedBitmaskVector vars_2, KernelLaunchInfo info, int result_size) { const auto vars1_d = make_device_vector(vars_1), vars2_d = make_device_vector(vars_2); const auto ds1_d = make_device_vector(ds); auto gigs_d = make_device_vector<GigStruct>(result_size); auto* atomic_counter_d = getDeviceCounter(); const int BlockSize = 16; // It is possible that it will be faster with 32. 
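// Launch geometry used below: a 2D grid over (v1, v2) variable pairs with 16 x 16 = 256 threads per block; compute_gig_kernel returns early unless v1_p < v2_p, so each unordered pair of variables is scored exactly once.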
auto gridSize = (info.num_vars + BlockSize - 1) / BlockSize; dim3 grid(gridSize, gridSize); dim3 block(BlockSize, BlockSize); hipLaunchKernelGGL(( compute_gig_kernel), dim3(grid), dim3(block), 0, 0, vars1_d.dev(), vars2_d.dev(), ds1_d.dev(), info.num_objects, info.num_vars, gigs_d.dev(), info.threshold, info.a_priori, atomic_counter_d); checkCudaErrors(hipDeviceSynchronize()); const int computed_result_size = getCounterValue(atomic_counter_d); if (computed_result_size > result_size) printf("%d %d ub happend\n", computed_result_size, result_size); auto gigs = make_vector<GigStruct, pinned_allocator<GigStruct>> (gigs_d, ::min(result_size, computed_result_size)); std::sort(gigs.begin(), gigs.end()); return gigs; } float calculateThreshold(const pinnedBitmaskVector& ds, pinnedBitmaskVector vars1, pinnedBitmaskVector vars2, int num_vars, int num_objects, int normal_result_size, float experimentSize, float a_priori) { KernelLaunchInfo info(num_vars, num_objects, -std::numeric_limits<float>::infinity(), a_priori); int current_result_size = (num_vars * (num_vars - 1)) / 2; // want to get gig for every // unique pair in smaller vars. auto gigs = launch_kernel(ds, std::move(vars1), std::move(vars2), info, current_result_size); const float cutoffPercent = experimentSize * experimentSize; int cutoffLevel = normal_result_size * cutoffPercent; return gigs[cutoffLevel].gig; } std::vector<int> getIndexes(int ofSize, int outOfSize) { std::vector<int> indexes(outOfSize); std::iota(indexes.begin(), indexes.end(), 0); std::random_shuffle(indexes.begin(), indexes.end()); indexes.resize(ofSize); std::sort(indexes.begin(), indexes.end()); return indexes; } void init() { hipDeviceSetCacheConfig(hipFuncCachePreferL1); std::srand(time(nullptr)); } // Function for fast reading int. uint32_t getInt() { char c; while(true) { c = getchar_unlocked(); if ('0' <= c && c <= '9') break; // got digit } uint32_t result = c - '0'; while (true) { c = getchar_unlocked(); if (!('0' <= c && c <= '9')) break; // finished reading digits result *= 10; result += c - '0'; } return result; } int un; int main() { init(); std::ios_base::sync_with_stdio(false); int num_objects, num_vars, result_size; float a_priori; const float sampleSizeMultiplier = 0.10; // 10% un = scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori); const int LastBitmaskElement = num_vars * num_objects + OFFSET(num_vars - 1, num_objects); const auto varsBitmaskSize = getBitmaskSize(LastBitmaskElement); std::vector<uint32_t, pinned_allocator<uint32_t>> vars_1(varsBitmaskSize); std::vector<uint32_t, pinned_allocator<uint32_t>> vars_2(varsBitmaskSize); const auto DescriptiveMaskSize = getBitmaskSize(num_objects); pinnedBitmaskVector ds(DescriptiveMaskSize); const int smallVarsRow = num_vars * sampleSizeMultiplier; const int sampleVarsSize = smallVarsRow * num_objects; std::vector<int8_t> small_vars(sampleVarsSize, -1); auto indexes = getIndexes(smallVarsRow, num_vars); // Firstly read num_object descriptive values. for (int i = 0; i < num_objects; ++i) { int d; d = getInt(); if (d == 1) setBit(ds, i); assert(d == 1 || d == 0); } int smallIndex = 0; // Then read num_vars lines of num_objects vars. 
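// Storage layout for the reading loop below: each variable is kept as two bitmasks over the objects (bit set in vars_1 for value 1, in vars_2 for value 2, neither bit for value 0), and OFFSET(j, num_objects) pads every row so that variable j starts on a 32-bit word boundary -- that alignment is what lets compute_gig_1_2 walk a row with whole-word __popc counts.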
for (int j = 0; j < num_vars; ++j) { bool useForSampling = false; if (smallIndex < indexes.size() && indexes[smallIndex] == j) { useForSampling = true; } for (int i = 0; i < num_objects; ++i) { int var; var = getInt(); const int offset = OFFSET(j, num_objects); if (var == 1) setBit(vars_1, j * num_objects + i + offset); else if (var == 2) setBit(vars_2, j * num_objects + i + offset); else assert(var == 0); if (useForSampling) { const int newIndex = smallIndex * num_objects + i; assert(small_vars.at(newIndex) == -1); small_vars.at(newIndex) = var; } } if (useForSampling) smallIndex++; assert(smallIndex == smallVarsRow && smallIndex == indexes.size() && "not all small_vars filled"); } for (int i = 0; i < indexes.size(); i++) { // random shuffle values of each type. std::random_shuffle(small_vars.begin() + i * num_objects, small_vars.begin() + (i + 1) * num_objects); } const int lastSmallBitmaskElement = sampleVarsSize * num_objects + OFFSET(sampleVarsSize - 1, num_objects); auto smallVarsBitmaskSize = getBitmaskSize(lastSmallBitmaskElement); pinnedBitmaskVector small_vars_1(smallVarsBitmaskSize), small_vars_2(smallVarsBitmaskSize); int varNum = 0; for (int i = 0; i < small_vars.size(); i++) { if (i % num_objects == 0) varNum++; int var = small_vars.at(i); if (var == 1) setBit(small_vars_1, i + OFFSET(varNum, num_objects)); else if (var == 2) setBit(small_vars_2, i + OFFSET(varNum, num_objects)); else assert(var == 0); } const float threshold = calculateThreshold( ds, std::move(small_vars_1), std::move(small_vars_2), smallVarsRow, num_objects, result_size, sampleSizeMultiplier, a_priori); KernelLaunchInfo info(num_vars, num_objects, threshold, a_priori); // We should not get more than 2.25x results greater than threshold. const float EXTRA_ELEMENTS_MULTIPLIER = 2.5; auto gigs = launch_kernel(std::move(ds), std::move(vars_1), std::move(vars_2), info, result_size * EXTRA_ELEMENTS_MULTIPLIER); if (gigs.size() > result_size) gigs.resize(result_size); for (int i = 0; i < gigs.size(); ++i) std::cout << gigs[i] << '\n'; return 0; }
8a73981fd3e0aa830493c0a246f82728a738199b.cu
#include "cuda_utils.h" #include <algorithm> #include <cassert> #include <cstdint> #include <cuda_runtime.h> #include <helper_cuda.h> #include <math.h> #include <stdlib.h> #include <vector> #include <iomanip> #include <iostream> #include <ctime> __device__ inline float H(float arg) { return -(arg) * log2f(arg); } __device__ inline float H2(int arg1, int arg2, float p) { return H((arg1 + p) / (arg1 + arg2 + 1.0f)) + H((arg2 + 1.0f - p) / (arg1 + arg2 + 1.0f)); } __device__ float compute_gig_1_2(const uint32_t* const __restrict__ v1_1, const uint32_t* const __restrict__ v1_2, const uint32_t* const __restrict__ v2_1, const uint32_t* const __restrict__ v2_2, const uint32_t* const __restrict__ ds_1, int num_objects, float p) { // how many objects having [d][x][y], where d [0, 1] is descriptive value, // x [0, 2] is v1 value and y [0, 2] v2 value int count[2][3][3] = {0}; #pragma unroll for (int obj = 0; obj < (num_objects / 32); obj++) { #pragma unroll for (int d = 0; d <= 1; d++) { #pragma unroll for (int v1 = 0; v1 <= 2; v1++) { #pragma unroll for (int v2 = 0; v2 <= 2; v2++) { // No need to put __ldg here. uint32_t d_b = (d == 1) ? ds_1[obj] : ~ds_1[obj]; uint32_t v1_b = (v1 == 0) ? (~v1_1[obj] & ~v1_2[obj]) : (v1 == 1 ? v1_1[obj] : v1_2[obj]); uint32_t v2_b = (v2 == 0) ? (~v2_1[obj] & ~v2_2[obj]) : (v2 == 1 ? v2_1[obj] : v2_2[obj]); count[d][v1][v2] += __popc(d_b & v1_b & v2_b); } } } } // Do the same thing, but only for the rest of the bits. if (num_objects % 32) { uint32_t rest = (~0u) >> (32 - num_objects % 32); int last_obj_ind = num_objects / 32; #pragma unroll for (int d = 0; d <= 1; d++) { #pragma unroll for (int v1 = 0; v1 <= 2; v1++) { #pragma unroll for (int v2 = 0; v2 <= 2; v2++) { uint32_t d_b = (d == 1) ? ds_1[last_obj_ind] : ~ds_1[last_obj_ind]; uint32_t v1_b = (v1 == 0) ? (~v1_1[last_obj_ind] & ~v1_2[last_obj_ind]) : (v1 == 1 ? v1_1[last_obj_ind] : v1_2[last_obj_ind]); uint32_t v2_b = (v2 == 0) ? (~v2_1[last_obj_ind] & ~v2_2[last_obj_ind]) : (v2 == 1 ? v2_1[last_obj_ind] : v2_2[last_obj_ind]); count[d][v1][v2] += __popc(d_b & v1_b & v2_b & rest); } } } } int sum_n2_n3 = 0; int sum_n2_n3_2 = 0; #pragma unroll for (int i = 0; i <= 2; i++) { #pragma unroll for (int v2 = 0; v2 <= 2; v2++) { sum_n2_n3 += count[0][i][v2]; sum_n2_n3_2 += count[1][i][v2]; } } const float h_p = H2(sum_n2_n3, sum_n2_n3_2, p); float ig1 = h_p, ig2 = h_p; #pragma unroll for (int v = 0; v <= 2; v++) { int sum3_1 = 0, sum3_2 = 0, sum2_1 = 0, sum2_2 = 0; #pragma unroll for (int v2 = 0; v2 <= 2; v2++) { sum3_1 += count[0][v][v2]; sum3_2 += count[1][v][v2]; sum2_1 += count[0][v2][v]; sum2_2 += count[1][v2][v]; } ig1 -= (sum3_1 + sum3_2) * H2(sum3_1, sum3_2, p); ig2 -= (sum2_1 + sum2_2) * H2(sum2_1, sum2_2, p); } float ig12 = h_p; #pragma unroll for (int v1 = 0; v1 <= 2; v1++) { #pragma unroll for (int v2 = 0; v2 <= 2; v2++) { ig12 -= (count[0][v1][v2] + count[1][v1][v2]) * H2(count[0][v1][v2], count[1][v1][v2], p); } } auto result = ig12 - ((ig1 > ig2) ? 
ig1 : ig2); return result; } struct GigStruct { float gig; int v1, v2; bool operator<(GigStruct other) const { return gig > other.gig; }; friend std::ostream& operator<<(std::ostream& os, const GigStruct& gig) { return os << std::fixed << std::setprecision(3) << gig.gig << " " << gig.v1 << " " << gig.v2; } }; #define OFFSET(var_number, objects_count) \ ((var_number) * (32 - ((objects_count) % 32))) __global__ void compute_gig_kernel(const uint32_t* const __restrict__ vars_1, const uint32_t* const __restrict__ vars_2, const uint32_t* const __restrict__ ds_1, int num_objects, int num_vars, GigStruct* const __restrict__ r_gig, float threshold, float p, int* __restrict__ atomic_counter) { const int v1_p = blockDim.x * blockIdx.x + threadIdx.x; const int v2_p = blockDim.y * blockIdx.y + threadIdx.y; if (v1_p >= v2_p || v1_p >= num_vars || v2_p >= num_vars) return; const auto ind1 = (v1_p * num_objects + OFFSET(v1_p, num_objects)) / 32; const auto ind2 = (v2_p * num_objects + OFFSET(v2_p, num_objects)) / 32; assert((v1_p * num_objects + OFFSET(v1_p, num_objects)) % 32 == 0); assert((v2_p * num_objects + OFFSET(v2_p, num_objects)) % 32 == 0); const auto result = compute_gig_1_2(&vars_1[ind1], &vars_2[ind1], &vars_1[ind2], &vars_2[ind2], ds_1, num_objects, p); if (!(result > threshold)) return; // not large enough result. const int index = atomicAdd(atomic_counter, 1); r_gig[index] = {result, v1_p, v2_p}; } int getCounterValue(int* atomic_counter_d) { int result = -1; checkCudaErrors(cudaMemcpy(&result, atomic_counter_d, sizeof(int), cudaMemcpyDeviceToHost)); return result; } int* getDeviceCounter() { int* atomic_counter_d; int init = 0; checkCudaErrors(cudaMalloc((void**)&atomic_counter_d, sizeof(int))); checkCudaErrors( cudaMemcpy(atomic_counter_d, &init, sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaDeviceSynchronize()); return atomic_counter_d; } template <typename Allocator> void setBit(std::vector<uint32_t, Allocator>& bitmask, int index) { bitmask.at(index / 32) |= (1u << (index % 32)); } int getBitmaskSize(int objects) { return (objects + 31) / 32; } struct KernelLaunchInfo { int num_vars; int num_objects; float threshold; float a_priori; KernelLaunchInfo(int num_vars, int num_objects, float threshold, float a_priori) : num_vars(num_vars), num_objects(num_objects), threshold(threshold), a_priori(a_priori) {} }; using pinnedBitmaskVector = std::vector<uint32_t, pinned_allocator<uint32_t>>; std::vector<GigStruct, pinned_allocator<GigStruct> > launch_kernel( pinnedBitmaskVector ds, pinnedBitmaskVector vars_1, pinnedBitmaskVector vars_2, KernelLaunchInfo info, int result_size) { const auto vars1_d = make_device_vector(vars_1), vars2_d = make_device_vector(vars_2); const auto ds1_d = make_device_vector(ds); auto gigs_d = make_device_vector<GigStruct>(result_size); auto* atomic_counter_d = getDeviceCounter(); const int BlockSize = 16; // It is possible that it will be faster with 32. 
auto gridSize = (info.num_vars + BlockSize - 1) / BlockSize; dim3 grid(gridSize, gridSize); dim3 block(BlockSize, BlockSize); compute_gig_kernel<<<grid, block>>>(vars1_d.dev(), vars2_d.dev(), ds1_d.dev(), info.num_objects, info.num_vars, gigs_d.dev(), info.threshold, info.a_priori, atomic_counter_d); checkCudaErrors(cudaDeviceSynchronize()); const int computed_result_size = getCounterValue(atomic_counter_d); if (computed_result_size > result_size) printf("%d %d ub happend\n", computed_result_size, result_size); auto gigs = make_vector<GigStruct, pinned_allocator<GigStruct>> (gigs_d, std::min(result_size, computed_result_size)); std::sort(gigs.begin(), gigs.end()); return gigs; } float calculateThreshold(const pinnedBitmaskVector& ds, pinnedBitmaskVector vars1, pinnedBitmaskVector vars2, int num_vars, int num_objects, int normal_result_size, float experimentSize, float a_priori) { KernelLaunchInfo info(num_vars, num_objects, -std::numeric_limits<float>::infinity(), a_priori); int current_result_size = (num_vars * (num_vars - 1)) / 2; // want to get gig for every // unique pair in smaller vars. auto gigs = launch_kernel(ds, std::move(vars1), std::move(vars2), info, current_result_size); const float cutoffPercent = experimentSize * experimentSize; int cutoffLevel = normal_result_size * cutoffPercent; return gigs[cutoffLevel].gig; } std::vector<int> getIndexes(int ofSize, int outOfSize) { std::vector<int> indexes(outOfSize); std::iota(indexes.begin(), indexes.end(), 0); std::random_shuffle(indexes.begin(), indexes.end()); indexes.resize(ofSize); std::sort(indexes.begin(), indexes.end()); return indexes; } void init() { cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); std::srand(time(nullptr)); } // Function for fast reading int. uint32_t getInt() { char c; while(true) { c = getchar_unlocked(); if ('0' <= c && c <= '9') break; // got digit } uint32_t result = c - '0'; while (true) { c = getchar_unlocked(); if (!('0' <= c && c <= '9')) break; // finished reading digits result *= 10; result += c - '0'; } return result; } int un; int main() { init(); std::ios_base::sync_with_stdio(false); int num_objects, num_vars, result_size; float a_priori; const float sampleSizeMultiplier = 0.10; // 10% un = scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori); const int LastBitmaskElement = num_vars * num_objects + OFFSET(num_vars - 1, num_objects); const auto varsBitmaskSize = getBitmaskSize(LastBitmaskElement); std::vector<uint32_t, pinned_allocator<uint32_t>> vars_1(varsBitmaskSize); std::vector<uint32_t, pinned_allocator<uint32_t>> vars_2(varsBitmaskSize); const auto DescriptiveMaskSize = getBitmaskSize(num_objects); pinnedBitmaskVector ds(DescriptiveMaskSize); const int smallVarsRow = num_vars * sampleSizeMultiplier; const int sampleVarsSize = smallVarsRow * num_objects; std::vector<int8_t> small_vars(sampleVarsSize, -1); auto indexes = getIndexes(smallVarsRow, num_vars); // Firstly read num_object descriptive values. for (int i = 0; i < num_objects; ++i) { int d; d = getInt(); if (d == 1) setBit(ds, i); assert(d == 1 || d == 0); } int smallIndex = 0; // Then read num_vars lines of num_objects vars. 
for (int j = 0; j < num_vars; ++j) { bool useForSampling = false; if (smallIndex < indexes.size() && indexes[smallIndex] == j) { useForSampling = true; } for (int i = 0; i < num_objects; ++i) { int var; var = getInt(); const int offset = OFFSET(j, num_objects); if (var == 1) setBit(vars_1, j * num_objects + i + offset); else if (var == 2) setBit(vars_2, j * num_objects + i + offset); else assert(var == 0); if (useForSampling) { const int newIndex = smallIndex * num_objects + i; assert(small_vars.at(newIndex) == -1); small_vars.at(newIndex) = var; } } if (useForSampling) smallIndex++; assert(smallIndex == smallVarsRow && smallIndex == indexes.size() && "not all small_vars filled"); } for (int i = 0; i < indexes.size(); i++) { // random shuffle values of each type. std::random_shuffle(small_vars.begin() + i * num_objects, small_vars.begin() + (i + 1) * num_objects); } const int lastSmallBitmaskElement = sampleVarsSize * num_objects + OFFSET(sampleVarsSize - 1, num_objects); auto smallVarsBitmaskSize = getBitmaskSize(lastSmallBitmaskElement); pinnedBitmaskVector small_vars_1(smallVarsBitmaskSize), small_vars_2(smallVarsBitmaskSize); int varNum = 0; for (int i = 0; i < small_vars.size(); i++) { if (i % num_objects == 0) varNum++; int var = small_vars.at(i); if (var == 1) setBit(small_vars_1, i + OFFSET(varNum, num_objects)); else if (var == 2) setBit(small_vars_2, i + OFFSET(varNum, num_objects)); else assert(var == 0); } const float threshold = calculateThreshold( ds, std::move(small_vars_1), std::move(small_vars_2), smallVarsRow, num_objects, result_size, sampleSizeMultiplier, a_priori); KernelLaunchInfo info(num_vars, num_objects, threshold, a_priori); // We should not get more than 2.25x results greater than threshold. const float EXTRA_ELEMENTS_MULTIPLIER = 2.5; auto gigs = launch_kernel(std::move(ds), std::move(vars_1), std::move(vars_2), info, result_size * EXTRA_ELEMENTS_MULTIPLIER); if (gigs.size() > result_size) gigs.resize(result_size); for (int i = 0; i < gigs.size(); ++i) std::cout << gigs[i] << '\n'; return 0; }
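// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original program): the OFFSET macro pads
// each variable's row of object bits so that every row begins on a 32-bit word
// boundary. The helper below redoes the same index arithmetic on the host; the
// num_objects = 40 figures in the comment are a worked example only.
static int padded_bit_index(int var, int obj, int num_objects) {
    // Same formula the reading loop in main() uses before calling setBit().
    return var * num_objects + obj + OFFSET(var, num_objects);
}

// Example with num_objects = 40: OFFSET(var, 40) = 24 * var, so a row occupies
// 40 data bits + 24 padding bits = 64 bits = two 32-bit words, and variable v
// starts at word padded_bit_index(v, 0, 40) / 32 = 2 * v -- exactly the ind1 /
// ind2 word indices computed in compute_gig_kernel.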
855e86a8939fd3349548f156dbe83f55b94e4b9c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include <vector> #include <algorithm> #include <thread> #include <memory> #include <random> #include <gflags/gflags.h> #include <cub/grid/grid_barrier.cuh> #include <groute/event_pool.h> #include <groute/fused_distributed_worklist.h> #include <groute/fused_worker.h> #include <groute/cta_work.h> #include <groute/graphs/csr_graph.h> #include <groute/graphs/traversal_algo.h> #include <groute/graphs/fused_solver.h> #include <utils/parser.h> #include <utils/utils.h> #include <utils/stopwatch.h> #include "pr_common.h" namespace pr { namespace opt { struct RankData { index_t node; rank_t rank; __host__ __device__ __forceinline__ RankData(index_t node, rank_t rank) : node(node), rank(rank) { } __host__ __device__ __forceinline__ RankData() : node(UINT_MAX), rank(-1.0f) { } }; typedef index_t local_work_t; typedef RankData remote_work_t; template< typename TGraph, typename RankDatum, typename ResidualDatum> struct PageRankWorkNP { template<typename WorkSource> __device__ static void work( const WorkSource& work_source, groute::dev::CircularWorklist<local_work_t>& rwl_in, groute::dev::CircularWorklist<remote_work_t>& rwl_out, const TGraph& graph, RankDatum& current_ranks, ResidualDatum& residual ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<rank_t> np_local = { 0, 0, 0.0 }; if (i < work_size) { index_t node = work_source.get_work(i); rank_t res = atomicExch(residual.get_item_ptr(node), 0); if (res > 0) { current_ranks[node] += res; np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - 
np_local.start; if (np_local.size > 0) // Just in case { rank_t update = res * ALPHA / np_local.size; np_local.meta_data = update; } } } groute::dev::CTAWorkScheduler<rank_t>::template schedule( np_local, [&graph, &residual, &rwl_in, &rwl_out](index_t edge, rank_t update) { index_t dest = graph.edge_dest(edge); rank_t prev = atomicAdd(residual.get_item_ptr(dest), update); if (graph.owns(dest)) { if (prev + update > EPSILON && prev <= EPSILON) { rwl_in.prepend_warp(dest); } } else { if (prev == 0) // no EPSILON check for remote nodes { rwl_out.append_warp( RankData(dest, atomicExch(residual.get_item_ptr(dest), 0))); } } } ); } } }; template< typename TGraph, typename RankDatum, typename ResidualDatum> struct PageRankWork { template<typename WorkSource> __device__ static void work( const WorkSource& work_source, groute::dev::CircularWorklist<local_work_t>& rwl_in, groute::dev::CircularWorklist<remote_work_t>& rwl_out, const TGraph& graph, RankDatum& current_ranks, ResidualDatum& residual ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.get_work(i); rank_t res = atomicExch(residual.get_item_ptr(node), 0); if (res == 0) continue; // might happen if work_source has duplicates current_ranks[node] += res; index_t begin_edge = graph.begin_edge(node), end_edge = graph.end_edge(node), out_degree = end_edge - begin_edge; if (out_degree == 0) continue; rank_t update = res * ALPHA / out_degree; for (index_t edge = begin_edge; edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); rank_t prev = atomicAdd(residual.get_item_ptr(dest), update); if (graph.owns(dest)) { if (prev + update > EPSILON && prev <= EPSILON) { rwl_in.prepend_warp(dest); } } else { if (prev == 0) // no EPSILON check for remote nodes { rwl_out.append_warp( RankData(dest, atomicExch(residual.get_item_ptr(dest), 0))); } } } } } }; template< typename TGraph, typename RankDatum, typename ResidualDatum> __global__ void PageRankFusedInit(TGraph graph, RankDatum current_ranks, ResidualDatum residual, groute::dev::CircularWorklist<local_work_t> rwl_in, // prepending work here groute::dev::CircularWorklist<remote_work_t> rwl_out, // appending work here volatile int* host_high_work_counter, volatile int* host_low_work_counter, volatile int * send_signal_ptr, cub::GridBarrier gbar) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; index_t start_node = graph.owned_start_node(); index_t end_node = start_node + graph.owned_nnodes(); // Do init step 1 // for (index_t node = start_node + tid; node < end_node; node += nthreads) { current_ranks[node] = 1.0 - ALPHA; index_t begin_edge = graph.begin_edge(node), end_edge = graph.end_edge(node), out_degree = end_edge - begin_edge; if (out_degree == 0) continue; rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree; for (index_t edge = begin_edge; edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); if (graph.owns(dest)) { atomicAdd(residual.get_item_ptr(dest), update); } else // we only append remote nodes, since all owned nodes are processed at step 2 { // Write directly to remote out without atomics rwl_out.append_warp(RankData(dest, update)); } } } gbar.Sync(); int prev_start; // Transmit work if (GTID == 0) { uint32_t remote_work_count = rwl_out.get_alloc_count_and_sync(); if (remote_work_count > 0) IncreaseHostFlag(send_signal_ptr, remote_work_count); prev_start = rwl_in.get_start(); } gbar.Sync(); // Do init step 2 // 
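// Init step 2 below reuses the ordinary PageRankWork functor over all owned nodes: the residual each node accumulated during step 1 is folded into current_ranks and pushed on, prepending owned destinations that cross EPSILON to rwl_in and appending remote destinations to rwl_out.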
PageRankWork<TGraph, RankDatum, ResidualDatum>::work( groute::dev::WorkSourceRange<index_t>( graph.owned_start_node(), graph.owned_nnodes()), rwl_in, rwl_out, graph, current_ranks, residual ); gbar.Sync(); // Transmit and report work if (GTID == 0) { uint32_t remote_work_count = rwl_out.get_alloc_count_and_sync(); if (remote_work_count > 0) IncreaseHostFlag(send_signal_ptr, remote_work_count); __threadfence(); // Report work *host_high_work_counter = rwl_in.get_start_diff(prev_start) - graph.owned_nnodes(); *host_low_work_counter = 0; } } struct SplitOps { private: groute::graphs::dev::CSRGraphSeg m_graph_seg; groute::graphs::dev::GraphDatum<rank_t> m_residual; public: template<typename...UnusedData> SplitOps( const groute::graphs::dev::CSRGraphSeg& graph_seg, const groute::graphs::dev::GraphDatum<rank_t>& residual, const groute::graphs::dev::GraphDatumSeg<rank_t>& current_ranks, UnusedData&... data) : m_graph_seg(graph_seg), m_residual(residual) { } SplitOps( const groute::graphs::dev::CSRGraphSeg& graph_seg, const groute::graphs::dev::GraphDatum<rank_t>& residual) : m_graph_seg(graph_seg), m_residual(residual) { } __device__ __forceinline__ groute::opt::SplitFlags on_receive(const remote_work_t& work) { if (m_graph_seg.owns(work.node)) { rank_t prev = atomicAdd(m_residual.get_item_ptr(work.node), work.rank); return (prev + work.rank > EPSILON && prev < EPSILON) ? groute::opt::SF_Take : groute::opt::SF_None; } return groute::opt::SF_Pass; } __device__ __forceinline__ bool is_high_prio(const local_work_t& work, const rank_t& global_prio) { return true; // NOTE: Can soft-priority be supported for PR? } __device__ __forceinline__ groute::opt::SplitFlags on_send(local_work_t work) { return (m_graph_seg.owns(work)) ? groute::opt::SF_Take : groute::opt::SF_Pass; } __device__ __forceinline__ remote_work_t pack(local_work_t work) { return RankData(work, atomicExch(m_residual.get_item_ptr(work), 0)); } __device__ __forceinline__ local_work_t unpack(const remote_work_t& work) { return work.node; } }; /* * The per-device Page Rank problem */ template< typename TGraph, template <typename> class ResidualDatum, template <typename> class RankDatum> struct FusedProblem { TGraph m_graph; ResidualDatum<rank_t> m_residual; RankDatum<rank_t> m_current_ranks; typedef PageRankWork<TGraph, RankDatum<rank_t>, ResidualDatum<rank_t>> WorkType; typedef PageRankWorkNP<TGraph, RankDatum<rank_t>, ResidualDatum<rank_t>> WorkTypeNP; FusedProblem( const TGraph& graph, const ResidualDatum<rank_t>& residual, const RankDatum<rank_t>& current_ranks) : m_graph(graph), m_residual(residual), m_current_ranks(current_ranks) { } // Initial init. 
Called before a global CPU+GPU barrier void Init(groute::Stream& stream) const { GROUTE_CUDA_CHECK( hipMemsetAsync( m_residual.data_ptr, 0, m_residual.size * sizeof(rank_t), stream.cuda_stream)); } bool DoFusedInit(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low, groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out, int fused_chunk_size, rank_t global_prio, volatile int *high_work_counter, volatile int *low_work_counter, uint32_t *kernel_internal_counter, volatile int *send_signal_ptr, cub::GridBarrierLifetime& barrier_lifetime, dim3 grid_dims, dim3 block_dims, groute::Stream& stream) { hipLaunchKernelGGL(( PageRankFusedInit) , dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream , m_graph, m_current_ranks, m_residual, rwl_in->DeviceObject(), rwl_out->DeviceObject(), high_work_counter, low_work_counter, send_signal_ptr, barrier_lifetime ); return true; } void DoFusedWork(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low, groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out, int fused_chunk_size, rank_t global_prio, volatile int *high_work_counter, volatile int *low_work_counter, uint32_t *kernel_internal_counter, volatile int *send_signal_ptr, cub::GridBarrierLifetime& barrier_lifetime, dim3 grid_dims, dim3 block_dims, groute::Stream& stream) { if (FLAGS_iteration_fusion) { if (FLAGS_cta_np) { groute::FusedWork < groute::NeverStop, local_work_t, remote_work_t, rank_t, SplitOps, WorkTypeNP, TGraph, RankDatum<rank_t>, ResidualDatum<rank_t> > << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( lwl_high->DeviceObject(), lwl_low->DeviceObject(), rwl_in->DeviceObject(), rwl_out->DeviceObject(), fused_chunk_size, global_prio, high_work_counter, low_work_counter, kernel_internal_counter, send_signal_ptr, barrier_lifetime, pr::opt::SplitOps(m_graph, m_residual), m_graph, m_current_ranks, m_residual ); } else { groute::FusedWork < groute::NeverStop, local_work_t, remote_work_t, rank_t, SplitOps, WorkType, TGraph, RankDatum<rank_t>, ResidualDatum<rank_t> > << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( lwl_high->DeviceObject(), lwl_low->DeviceObject(), rwl_in->DeviceObject(), rwl_out->DeviceObject(), fused_chunk_size, global_prio, high_work_counter, low_work_counter, kernel_internal_counter, send_signal_ptr, barrier_lifetime, pr::opt::SplitOps(m_graph, m_residual), m_graph, m_current_ranks, m_residual ); } } else { if (FLAGS_cta_np) { groute::FusedWork < groute::RunNTimes<1>, local_work_t, remote_work_t, rank_t, SplitOps, WorkTypeNP, TGraph, RankDatum<rank_t>, ResidualDatum<rank_t> > << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( lwl_high->DeviceObject(), lwl_low->DeviceObject(), rwl_in->DeviceObject(), rwl_out->DeviceObject(), fused_chunk_size, global_prio, high_work_counter, low_work_counter, kernel_internal_counter, send_signal_ptr, barrier_lifetime, pr::opt::SplitOps(m_graph, m_residual), m_graph, m_current_ranks, m_residual ); } else { groute::FusedWork < groute::RunNTimes<1>, local_work_t, remote_work_t, rank_t, SplitOps, WorkType, TGraph, RankDatum<rank_t>, ResidualDatum<rank_t> > << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( lwl_high->DeviceObject(), lwl_low->DeviceObject(), rwl_in->DeviceObject(), rwl_out->DeviceObject(), fused_chunk_size, global_prio, high_work_counter, low_work_counter, kernel_internal_counter, send_signal_ptr, barrier_lifetime, pr::opt::SplitOps(m_graph, m_residual), m_graph, 
m_current_ranks, m_residual ); } } } }; struct Algo { static const char* NameLower() { return "pr"; } static const char* Name() { return "PR"; } static void Init( groute::graphs::traversal::Context<pr::opt::Algo>& context, groute::graphs::multi::CSRGraphAllocator& graph_manager, groute::router::Router<remote_work_t>& worklist_router, groute::opt::DistributedWorklist<local_work_t, remote_work_t, SplitOps>& distributed_worklist) { distributed_worklist.ReportHighPrioWork(context.host_graph.nnodes, 0, "Host", groute::Device::Host, true); // PR starts with all nodes } template< typename TGraphAllocator, template <typename> class ResidualDatum, template <typename> class RankDatum, typename...UnusedData> static std::vector<rank_t> Gather( TGraphAllocator& graph_allocator, ResidualDatum<rank_t>& residual, RankDatum<rank_t>& current_ranks, UnusedData&... data) { graph_allocator.GatherDatum(current_ranks); return current_ranks.GetHostData(); } template< template <typename> class ResidualDatum, template <typename> class RankDatum, typename...UnusedData> static std::vector<rank_t> Host( groute::graphs::host::CSRGraph& graph, ResidualDatum<rank_t>& residual, RankDatum<rank_t>& current_ranks, UnusedData&... data) { return PageRankHost(graph); } static int Output(const char *file, const std::vector<rank_t>& ranks) { return PageRankOutput(file, ranks); } static int CheckErrors(std::vector<rank_t>& ranks, std::vector<rank_t>& regression) { return PageRankCheckErrors(ranks, regression); } }; } } bool TestPageRankAsyncMultiOptimized(int ngpus) { typedef groute::graphs::multi::CSRGraphAllocator GraphAllocator; typedef groute::graphs::multi::NodeOutputGlobalDatum<rank_t> ResidualDatum; typedef groute::graphs::multi::NodeOutputLocalDatum<rank_t> RankDatum; typedef pr::opt::FusedProblem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatumSeg> ProblemType; typedef groute::graphs::traversal::FusedSolver< pr::opt::Algo, ProblemType, pr::opt::local_work_t , pr::opt::remote_work_t, rank_t, pr::opt::SplitOps, groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatumSeg<rank_t>, groute::graphs::dev::GraphDatum<rank_t>> SolverType; groute::graphs::traversal::__MultiRunner__Opt__ < pr::opt::Algo, ProblemType, SolverType, pr::opt::SplitOps, pr::opt::local_work_t, pr::opt::remote_work_t, ResidualDatum, RankDatum > runner; ResidualDatum residual; RankDatum current_ranks; return runner(ngpus, residual, current_ranks); }
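The two device work structs above, PageRankWork and PageRankWorkNP, implement the same push-based residual scheme: drain a node's residual with atomicExch, fold it into current_ranks, and push res * ALPHA / out_degree to every neighbour, scheduling a neighbour only when its residual first crosses EPSILON. The serial host sketch below shows that invariant in isolation; it is not part of the project, and the ALPHA and EPSILON values are illustrative stand-ins for the constants defined in pr_common.h.

// Hedged sketch (not part of this project): serial push-based PageRank on a tiny CSR graph.
// ALPHA and EPSILON stand in for the constants the kernels take from pr_common.h;
// the values below are illustrative.
#include <cstdio>
#include <vector>

int main() {
    const float ALPHA = 0.85f, EPSILON = 0.01f;
    // 4-node toy CSR graph: 0->{1,2}, 1->{2}, 2->{3}, 3->{0}
    std::vector<int> row_start = {0, 2, 3, 4, 5};
    std::vector<int> edge_dst  = {1, 2, 2, 3, 0};
    const int n = 4;
    std::vector<float> rank(n, 1.0f - ALPHA), residual(n, 0.0f);
    std::vector<int> worklist;

    // Init step, as in PageRankFusedInit: every node pushes (1 - ALPHA) * ALPHA / out_degree.
    for (int u = 0; u < n; ++u) {
        int deg = row_start[u + 1] - row_start[u];
        if (deg == 0) continue;
        float update = (1.0f - ALPHA) * ALPHA / deg;
        for (int e = row_start[u]; e < row_start[u + 1]; ++e) {
            int v = edge_dst[e];
            float prev = residual[v];
            residual[v] += update;
            if (prev + update > EPSILON && prev <= EPSILON) worklist.push_back(v);
        }
    }

    // Relaxation loop: the same drain/push rule as PageRankWork::work.
    while (!worklist.empty()) {
        int u = worklist.back(); worklist.pop_back();
        float res = residual[u]; residual[u] = 0.0f;    // serial stand-in for atomicExch
        if (res == 0.0f) continue;                      // worklist may hold duplicates
        rank[u] += res;
        int deg = row_start[u + 1] - row_start[u];
        if (deg == 0) continue;
        float update = res * ALPHA / deg;
        for (int e = row_start[u]; e < row_start[u + 1]; ++e) {
            int v = edge_dst[e];
            float prev = residual[v];
            residual[v] += update;                      // serial stand-in for atomicAdd
            if (prev + update > EPSILON && prev <= EPSILON) worklist.push_back(v);
        }
    }
    for (int u = 0; u < n; ++u) printf("rank[%d] = %f\n", u, rank[u]);
    return 0;
}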
855e86a8939fd3349548f156dbe83f55b94e4b9c.cu
// Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include <vector> #include <algorithm> #include <thread> #include <memory> #include <random> #include <gflags/gflags.h> #include <cub/grid/grid_barrier.cuh> #include <groute/event_pool.h> #include <groute/fused_distributed_worklist.h> #include <groute/fused_worker.h> #include <groute/cta_work.h> #include <groute/graphs/csr_graph.h> #include <groute/graphs/traversal_algo.h> #include <groute/graphs/fused_solver.h> #include <utils/parser.h> #include <utils/utils.h> #include <utils/stopwatch.h> #include "pr_common.h" namespace pr { namespace opt { struct RankData { index_t node; rank_t rank; __host__ __device__ __forceinline__ RankData(index_t node, rank_t rank) : node(node), rank(rank) { } __host__ __device__ __forceinline__ RankData() : node(UINT_MAX), rank(-1.0f) { } }; typedef index_t local_work_t; typedef RankData remote_work_t; template< typename TGraph, typename RankDatum, typename ResidualDatum> struct PageRankWorkNP { template<typename WorkSource> __device__ static void work( const WorkSource& work_source, groute::dev::CircularWorklist<local_work_t>& rwl_in, groute::dev::CircularWorklist<remote_work_t>& rwl_out, const TGraph& graph, RankDatum& current_ranks, ResidualDatum& residual ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<rank_t> np_local = { 0, 0, 0.0 }; if (i < work_size) { index_t node = work_source.get_work(i); rank_t res = atomicExch(residual.get_item_ptr(node), 0); if (res > 0) { current_ranks[node] += res; np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; if (np_local.size > 0) // Just in case { rank_t update = res * ALPHA / np_local.size; 
np_local.meta_data = update; } } } groute::dev::CTAWorkScheduler<rank_t>::template schedule( np_local, [&graph, &residual, &rwl_in, &rwl_out](index_t edge, rank_t update) { index_t dest = graph.edge_dest(edge); rank_t prev = atomicAdd(residual.get_item_ptr(dest), update); if (graph.owns(dest)) { if (prev + update > EPSILON && prev <= EPSILON) { rwl_in.prepend_warp(dest); } } else { if (prev == 0) // no EPSILON check for remote nodes { rwl_out.append_warp( RankData(dest, atomicExch(residual.get_item_ptr(dest), 0))); } } } ); } } }; template< typename TGraph, typename RankDatum, typename ResidualDatum> struct PageRankWork { template<typename WorkSource> __device__ static void work( const WorkSource& work_source, groute::dev::CircularWorklist<local_work_t>& rwl_in, groute::dev::CircularWorklist<remote_work_t>& rwl_out, const TGraph& graph, RankDatum& current_ranks, ResidualDatum& residual ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.get_work(i); rank_t res = atomicExch(residual.get_item_ptr(node), 0); if (res == 0) continue; // might happen if work_source has duplicates current_ranks[node] += res; index_t begin_edge = graph.begin_edge(node), end_edge = graph.end_edge(node), out_degree = end_edge - begin_edge; if (out_degree == 0) continue; rank_t update = res * ALPHA / out_degree; for (index_t edge = begin_edge; edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); rank_t prev = atomicAdd(residual.get_item_ptr(dest), update); if (graph.owns(dest)) { if (prev + update > EPSILON && prev <= EPSILON) { rwl_in.prepend_warp(dest); } } else { if (prev == 0) // no EPSILON check for remote nodes { rwl_out.append_warp( RankData(dest, atomicExch(residual.get_item_ptr(dest), 0))); } } } } } }; template< typename TGraph, typename RankDatum, typename ResidualDatum> __global__ void PageRankFusedInit(TGraph graph, RankDatum current_ranks, ResidualDatum residual, groute::dev::CircularWorklist<local_work_t> rwl_in, // prepending work here groute::dev::CircularWorklist<remote_work_t> rwl_out, // appending work here volatile int* host_high_work_counter, volatile int* host_low_work_counter, volatile int * send_signal_ptr, cub::GridBarrier gbar) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; index_t start_node = graph.owned_start_node(); index_t end_node = start_node + graph.owned_nnodes(); // Do init step 1 // for (index_t node = start_node + tid; node < end_node; node += nthreads) { current_ranks[node] = 1.0 - ALPHA; index_t begin_edge = graph.begin_edge(node), end_edge = graph.end_edge(node), out_degree = end_edge - begin_edge; if (out_degree == 0) continue; rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree; for (index_t edge = begin_edge; edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); if (graph.owns(dest)) { atomicAdd(residual.get_item_ptr(dest), update); } else // we only append remote nodes, since all owned nodes are processed at step 2 { // Write directly to remote out without atomics rwl_out.append_warp(RankData(dest, update)); } } } gbar.Sync(); int prev_start; // Transmit work if (GTID == 0) { uint32_t remote_work_count = rwl_out.get_alloc_count_and_sync(); if (remote_work_count > 0) IncreaseHostFlag(send_signal_ptr, remote_work_count); prev_start = rwl_in.get_start(); } gbar.Sync(); // Do init step 2 // PageRankWork<TGraph, RankDatum, ResidualDatum>::work( groute::dev::WorkSourceRange<index_t>( 
graph.owned_start_node(), graph.owned_nnodes()), rwl_in, rwl_out, graph, current_ranks, residual ); gbar.Sync(); // Transmit and report work if (GTID == 0) { uint32_t remote_work_count = rwl_out.get_alloc_count_and_sync(); if (remote_work_count > 0) IncreaseHostFlag(send_signal_ptr, remote_work_count); __threadfence(); // Report work *host_high_work_counter = rwl_in.get_start_diff(prev_start) - graph.owned_nnodes(); *host_low_work_counter = 0; } } struct SplitOps { private: groute::graphs::dev::CSRGraphSeg m_graph_seg; groute::graphs::dev::GraphDatum<rank_t> m_residual; public: template<typename...UnusedData> SplitOps( const groute::graphs::dev::CSRGraphSeg& graph_seg, const groute::graphs::dev::GraphDatum<rank_t>& residual, const groute::graphs::dev::GraphDatumSeg<rank_t>& current_ranks, UnusedData&... data) : m_graph_seg(graph_seg), m_residual(residual) { } SplitOps( const groute::graphs::dev::CSRGraphSeg& graph_seg, const groute::graphs::dev::GraphDatum<rank_t>& residual) : m_graph_seg(graph_seg), m_residual(residual) { } __device__ __forceinline__ groute::opt::SplitFlags on_receive(const remote_work_t& work) { if (m_graph_seg.owns(work.node)) { rank_t prev = atomicAdd(m_residual.get_item_ptr(work.node), work.rank); return (prev + work.rank > EPSILON && prev < EPSILON) ? groute::opt::SF_Take : groute::opt::SF_None; } return groute::opt::SF_Pass; } __device__ __forceinline__ bool is_high_prio(const local_work_t& work, const rank_t& global_prio) { return true; // NOTE: Can soft-priority be supported for PR? } __device__ __forceinline__ groute::opt::SplitFlags on_send(local_work_t work) { return (m_graph_seg.owns(work)) ? groute::opt::SF_Take : groute::opt::SF_Pass; } __device__ __forceinline__ remote_work_t pack(local_work_t work) { return RankData(work, atomicExch(m_residual.get_item_ptr(work), 0)); } __device__ __forceinline__ local_work_t unpack(const remote_work_t& work) { return work.node; } }; /* * The per-device Page Rank problem */ template< typename TGraph, template <typename> class ResidualDatum, template <typename> class RankDatum> struct FusedProblem { TGraph m_graph; ResidualDatum<rank_t> m_residual; RankDatum<rank_t> m_current_ranks; typedef PageRankWork<TGraph, RankDatum<rank_t>, ResidualDatum<rank_t>> WorkType; typedef PageRankWorkNP<TGraph, RankDatum<rank_t>, ResidualDatum<rank_t>> WorkTypeNP; FusedProblem( const TGraph& graph, const ResidualDatum<rank_t>& residual, const RankDatum<rank_t>& current_ranks) : m_graph(graph), m_residual(residual), m_current_ranks(current_ranks) { } // Initial init. 
Called before a global CPU+GPU barrier void Init(groute::Stream& stream) const { GROUTE_CUDA_CHECK( cudaMemsetAsync( m_residual.data_ptr, 0, m_residual.size * sizeof(rank_t), stream.cuda_stream)); } bool DoFusedInit(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low, groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out, int fused_chunk_size, rank_t global_prio, volatile int *high_work_counter, volatile int *low_work_counter, uint32_t *kernel_internal_counter, volatile int *send_signal_ptr, cub::GridBarrierLifetime& barrier_lifetime, dim3 grid_dims, dim3 block_dims, groute::Stream& stream) { PageRankFusedInit <<< grid_dims, block_dims, 0, stream.cuda_stream >>> ( m_graph, m_current_ranks, m_residual, rwl_in->DeviceObject(), rwl_out->DeviceObject(), high_work_counter, low_work_counter, send_signal_ptr, barrier_lifetime ); return true; } void DoFusedWork(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low, groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out, int fused_chunk_size, rank_t global_prio, volatile int *high_work_counter, volatile int *low_work_counter, uint32_t *kernel_internal_counter, volatile int *send_signal_ptr, cub::GridBarrierLifetime& barrier_lifetime, dim3 grid_dims, dim3 block_dims, groute::Stream& stream) { if (FLAGS_iteration_fusion) { if (FLAGS_cta_np) { groute::FusedWork < groute::NeverStop, local_work_t, remote_work_t, rank_t, SplitOps, WorkTypeNP, TGraph, RankDatum<rank_t>, ResidualDatum<rank_t> > << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( lwl_high->DeviceObject(), lwl_low->DeviceObject(), rwl_in->DeviceObject(), rwl_out->DeviceObject(), fused_chunk_size, global_prio, high_work_counter, low_work_counter, kernel_internal_counter, send_signal_ptr, barrier_lifetime, pr::opt::SplitOps(m_graph, m_residual), m_graph, m_current_ranks, m_residual ); } else { groute::FusedWork < groute::NeverStop, local_work_t, remote_work_t, rank_t, SplitOps, WorkType, TGraph, RankDatum<rank_t>, ResidualDatum<rank_t> > << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( lwl_high->DeviceObject(), lwl_low->DeviceObject(), rwl_in->DeviceObject(), rwl_out->DeviceObject(), fused_chunk_size, global_prio, high_work_counter, low_work_counter, kernel_internal_counter, send_signal_ptr, barrier_lifetime, pr::opt::SplitOps(m_graph, m_residual), m_graph, m_current_ranks, m_residual ); } } else { if (FLAGS_cta_np) { groute::FusedWork < groute::RunNTimes<1>, local_work_t, remote_work_t, rank_t, SplitOps, WorkTypeNP, TGraph, RankDatum<rank_t>, ResidualDatum<rank_t> > << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( lwl_high->DeviceObject(), lwl_low->DeviceObject(), rwl_in->DeviceObject(), rwl_out->DeviceObject(), fused_chunk_size, global_prio, high_work_counter, low_work_counter, kernel_internal_counter, send_signal_ptr, barrier_lifetime, pr::opt::SplitOps(m_graph, m_residual), m_graph, m_current_ranks, m_residual ); } else { groute::FusedWork < groute::RunNTimes<1>, local_work_t, remote_work_t, rank_t, SplitOps, WorkType, TGraph, RankDatum<rank_t>, ResidualDatum<rank_t> > << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( lwl_high->DeviceObject(), lwl_low->DeviceObject(), rwl_in->DeviceObject(), rwl_out->DeviceObject(), fused_chunk_size, global_prio, high_work_counter, low_work_counter, kernel_internal_counter, send_signal_ptr, barrier_lifetime, pr::opt::SplitOps(m_graph, m_residual), m_graph, m_current_ranks, m_residual ); } 
} } }; struct Algo { static const char* NameLower() { return "pr"; } static const char* Name() { return "PR"; } static void Init( groute::graphs::traversal::Context<pr::opt::Algo>& context, groute::graphs::multi::CSRGraphAllocator& graph_manager, groute::router::Router<remote_work_t>& worklist_router, groute::opt::DistributedWorklist<local_work_t, remote_work_t, SplitOps>& distributed_worklist) { distributed_worklist.ReportHighPrioWork(context.host_graph.nnodes, 0, "Host", groute::Device::Host, true); // PR starts with all nodes } template< typename TGraphAllocator, template <typename> class ResidualDatum, template <typename> class RankDatum, typename...UnusedData> static std::vector<rank_t> Gather( TGraphAllocator& graph_allocator, ResidualDatum<rank_t>& residual, RankDatum<rank_t>& current_ranks, UnusedData&... data) { graph_allocator.GatherDatum(current_ranks); return current_ranks.GetHostData(); } template< template <typename> class ResidualDatum, template <typename> class RankDatum, typename...UnusedData> static std::vector<rank_t> Host( groute::graphs::host::CSRGraph& graph, ResidualDatum<rank_t>& residual, RankDatum<rank_t>& current_ranks, UnusedData&... data) { return PageRankHost(graph); } static int Output(const char *file, const std::vector<rank_t>& ranks) { return PageRankOutput(file, ranks); } static int CheckErrors(std::vector<rank_t>& ranks, std::vector<rank_t>& regression) { return PageRankCheckErrors(ranks, regression); } }; } } bool TestPageRankAsyncMultiOptimized(int ngpus) { typedef groute::graphs::multi::CSRGraphAllocator GraphAllocator; typedef groute::graphs::multi::NodeOutputGlobalDatum<rank_t> ResidualDatum; typedef groute::graphs::multi::NodeOutputLocalDatum<rank_t> RankDatum; typedef pr::opt::FusedProblem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatumSeg> ProblemType; typedef groute::graphs::traversal::FusedSolver< pr::opt::Algo, ProblemType, pr::opt::local_work_t , pr::opt::remote_work_t, rank_t, pr::opt::SplitOps, groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatumSeg<rank_t>, groute::graphs::dev::GraphDatum<rank_t>> SolverType; groute::graphs::traversal::__MultiRunner__Opt__ < pr::opt::Algo, ProblemType, SolverType, pr::opt::SplitOps, pr::opt::local_work_t, pr::opt::remote_work_t, ResidualDatum, RankDatum > runner; ResidualDatum residual; RankDatum current_ranks; return runner(ngpus, residual, current_ranks); }
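SplitOps is the routing glue between devices: on_send marks non-owned nodes for forwarding, pack drains the sender's residual into a RankData message, and on_receive folds that rank into the receiver's residual, taking the node into the local worklist only when the addition pushes the residual past EPSILON. Below is a hedged serial sketch of that handshake; the Partition type and its fields are invented for illustration and are not Groute APIs.

// Hedged sketch (illustrative only): the pack / on_receive handshake of pr::opt::SplitOps,
// replayed serially for two partitions. Partition is a made-up stand-in for the
// CSRGraphSeg + GraphDatum pair, not a Groute type.
#include <cstdio>
#include <unordered_map>

struct RankData { int node; float rank; };

struct Partition {
    int lo, hi;                                   // owned node range [lo, hi)
    std::unordered_map<int, float> residual;      // this partition's residual copy
    bool owns(int node) const { return node >= lo && node < hi; }

    // SplitOps::pack: drain the residual and ship it as a message.
    RankData pack(int node) {
        float r = residual[node];
        residual[node] = 0.0f;                    // serial stand-in for atomicExch
        return RankData{node, r};
    }
    // SplitOps::on_receive: fold the rank in; "take" only on an EPSILON crossing.
    bool on_receive(const RankData& msg, float EPSILON) {
        float prev = residual[msg.node];
        residual[msg.node] = prev + msg.rank;     // serial stand-in for atomicAdd
        return prev + msg.rank > EPSILON && prev < EPSILON;
    }
};

int main() {
    const float EPSILON = 0.01f;
    Partition a{0, 4}, b{4, 8};
    a.residual[5] = 0.02f;        // a accumulated residual for node 5, which b owns
    RankData msg = a.pack(5);     // a does not own node 5, so on_send would pass it on
    bool take = b.on_receive(msg, EPSILON);
    printf("node %d carries rank %.3f, take=%d\n", msg.node, msg.rank, (int)take);
    return 0;
}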
d5f0bf7c02f8e5c14e81ee11b25795ac8c729328.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <string> #include <math.h> #include <chrono> const int WHITE = 1; const int BLACK = -1; const int EMPTY = 0; const int LEFT = -1; const int STRAIGHT = 0; const int RIGHT = 1; //int turnCount = 0; //int gameCount = 0; ///////////////////////// Helpers //////////////////////////////////// __device__ __host__ void writeValueToFieldList(int* list, int index, int x, int y, int xDim, int yDim, int newValue) { *(list + xDim * yDim * index + x * yDim + y) = newValue; } __device__ __host__ int* getFieldFromList(int* list, int index, int xDim, int yDim) { return list + xDim * yDim * index; } __device__ __host__ int getValueAt(int x, int y, int* field, int xDim, int yDim) { return field[x * yDim + y]; } __device__ __host__ void setValueAt(int x, int y, int newValue, int* field, int xDim, int yDim) { field[x * yDim + y] = newValue; } __device__ __host__ void copyArrayTo(int* field, int* loc, int xDim, int yDim) { for (int x = 0; x < xDim; x++) { for (int y = 0; y < yDim; y++) { setValueAt(x, y, getValueAt(x, y, field, xDim, yDim), loc, xDim, yDim); } } } __device__ __host__ int* addFieldToList(int* list, int listSize, int* field, int xDim, int yDim) { //int* newList = (int*) realloc(list, (listSize + 1) * xDim * yDim * sizeof(int)); int* newList = (int*) malloc((listSize + 1) * xDim * yDim * sizeof(int)); memcpy(newList, list, listSize * xDim * yDim * sizeof(int)); free(list); int* tmp = getFieldFromList(newList, listSize, xDim, yDim); copyArrayTo(field, tmp, xDim, yDim); return newList; } __device__ __host__ int* arrayCopy(int* array, int xDim, int yDim) { int* copied = (int*) malloc(xDim * yDim * sizeof(int)); for (int x = 0; x < xDim; x++) { for (int y = 0; y < yDim; y++) { setValueAt(x, y, getValueAt(x, y, array, xDim, yDim), copied, xDim, yDim); } } return copied; } __device__ __host__ void printTabs(int count) { for (int i = 0; i < count ; i++) { printf("| "); } } __device__ __host__ void printField(int* field, int xDim, int yDim, int tabs) { for (int x = 0; x < xDim; x++) { printTabs(tabs); printf("|"); for (int y = 0; y < yDim; y++) { int position = getValueAt(x, y, field, xDim, yDim); printf(" % d", position); } printf(" |\n"); } } bool fieldEquals(int* fieldOne, int* fieldTwo, int xDim, int yDim) { for (int x = 0; x < xDim; x++) { for (int y = 0; y < yDim; y++) { if (getValueAt(x, y,fieldOne, xDim, yDim) != getValueAt(x, y, fieldTwo, xDim, yDim)) return false; } } return true; } ////////////////////////////////////////////////////////////////////////////// // Returns true if the given player can win __device__ bool executeTurn(int* field, int xDim, int yDim, int player, int depth) { /* if (depth <= 3) { printTabs(depth); printf("%d's turn:\n", player); printField(field, xDim, yDim, depth); } */ printf("Hey 1\n"); int moveCount = 0; for (int x = 0; x < xDim; x++) { for (int y = 0; y < yDim; y++) { if (getValueAt(x, y, field, xDim, yDim) == player) { printf("Hey 2\n"); // Try all movements for (int dir = -1; dir <= 1; dir++) { int xNew = x - player; int yNew = y + dir; // Check if the new position is inside the field boundaries if (xNew >= 0 && xNew < xDim && yNew >= 0 && yNew < yDim) { // Store state of the target position int newPosition = getValueAt(xNew, yNew, field, xDim, yDim); printf("Hey 3\n"); // Check if the turn is legal int moveAllowed = abs(dir) + (newPosition * player); // Zero if you move straight and the target position is empty // or you move to
the right/left and the target position is occupied by an enemy // True if moveAllowed == 0 if (!moveAllowed) { //moveCount++; //printTabs(depth); //printf("%d moving from %d,%d to %d,%d\n\n", player, x, y, xNew, yNew); // Set new positions setValueAt(x, y, EMPTY, field, xDim, yDim); setValueAt(xNew, yNew, player, field, xDim, yDim); printf("Hey 4\n"); // Check if a win condition is reached if (xNew == (1 - player)/2 * (xDim - 1)) { // Revert changes setValueAt(x, y, player, field, xDim, yDim); setValueAt(xNew, yNew, newPosition, field, xDim, yDim); printf("Hey 5\n"); return true; } // Check if the enemy cannot win after this turn, then return true: If you execute this turn, you will win bool canEnemyWin = executeTurn(field, xDim, yDim, -player, depth + 1); // Revert changes setValueAt(x, y, player, field, xDim, yDim); setValueAt(xNew, yNew, newPosition, field, xDim, yDim); printf("Hey 6\n"); if (!canEnemyWin) { //printTabs(depth); //printf("--> %d wins\n\n", player); return true; } } } } } } } //if (moveCount == 0) { //gameCount++; //turnCount += depth; //} //printf("\n\n"); printf("Hey 7\n"); return false; } __global__ void kernel(int* fields, int fieldCount, int xDim, int yDim, int player, bool* result) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < fieldCount; i += stride) { *(result + i) = false; bool canWin = executeTurn(getFieldFromList(fields, i, xDim, yDim), xDim, yDim, player, 0); if (canWin) { *(result + i) = true; printf("Wins: %d\n", i); } else { *(result + i) = false; } } } int main(void) { /* int xDim = 3, yDim = 3; int* field = (int*) malloc(xDim * yDim * sizeof(int)); memset(field, 0, xDim * yDim * sizeof(int)); // Init array with zeros // Init player positions -> one row per player for (int i = 0; i < yDim; i++) { setValueAt(0, i, BLACK, field, xDim, yDim); //setValueAt(1, i, BLACK, field, xDim, yDim); //setValueAt(xDim - 2, i, WHITE, field, xDim, yDim); setValueAt(xDim - 1, i, WHITE, field, xDim, yDim); } printField(field, xDim, yDim, 0); printf("\n\n"); */ /* int player = WHITE; auto startTime = std::chrono::high_resolution_clock::now(); bool result = executeTurn(field, xDim, yDim, WHITE, 1); auto endTime = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count(); // Outputs char* playerText = (player == WHITE) ? "White" : "Black"; char* resultText = result ? " wins" : " loses"; std::cout << std::endl << std::endl; std::cout << playerText << resultText << std::endl << std::endl; std::cout << "Metrics:" << std::endl; std::cout << " Elapsed time: " << duration << " microseconds" << std::endl; std::cout << " Game count: " << gameCount << std::endl; std::cout << " Turn count: " << turnCount << std::endl; // Delete field free(field); */ /* int turnCount = 0; int player = WHITE; int* turnList = determineTurns(field, xDim, yDim, player, &turnCount); for (int i = 0; i < 1; i++) { int* newList = (int*) malloc(1); int newListSize = 0; for (int j = 0; j < turnCount; j++) { std::cout << "Iteration" << std::endl; int count; int* tempField = getFieldFromList(turnList, j, xDim, yDim); std::cout << "Field created" << std::endl; printField(tempField, xDim, yDim, 1); int* turns = determineTurns(tempField, xDim, yDim, player, &count); std::cout << "I'm here!" 
<< std::endl; int* tempList = (int*) malloc(1); int tempListSize = 0; for (int k = 0; k < newListSize + count; k++) { if (k < newListSize) { addFieldToList(tempList, k, getFieldFromList(newList, k, xDim, yDim), xDim, yDim); } else { addFieldToList(tempList, k, getFieldFromList(turns, k, xDim, yDim), xDim, yDim); } tempListSize++; } newList = tempList; newListSize = tempListSize; //free(tempList); std::cout << "I'm here! 2" << std::endl; } turnList = newList; free(newList); turnCount = newListSize; player = -player; std::cout << "I'm here! 3" << std::endl; } std::cout << "Turn count: " << turnCount << std::endl; for (int i = 0; i < turnCount; i++) { std::cout << std::endl; printField(getFieldFromList(turnList, i, xDim, yDim), xDim, yDim, 0); } */ std::ifstream file("output.txt"); if (!file.is_open()) { return -1; } int xDim = file.get() - '0'; file.get(); int yDim = file.get() - '0'; //file.get(); int* turnList = (int*) malloc(1); int totalFieldCount = 0; char pos; int x = 0, y = 0; int* field = turnList; while (file.get(pos)) { if (pos == ';') { turnList = (int*) realloc(turnList, (totalFieldCount + 1) * xDim * yDim * sizeof(int)); x = 0; y = 0; field = turnList + totalFieldCount * xDim * yDim; totalFieldCount++; } else { if (pos == '0') { setValueAt(x, y, EMPTY, field, xDim, yDim); } else if (pos == 'S') { setValueAt(x, y, BLACK, field, xDim, yDim); } else if (pos == 'W') { setValueAt(x, y, WHITE, field, xDim, yDim); } y++; if (y >= yDim) { x++; y = 0; } //printField(field, xDim, yDim, 0); } } file.close(); // Supposing there's no ';' at the end of the file printf("Field count: %d\n", totalFieldCount); /* for (int i = 0; i < totalFieldCount; i++) { std::cout << std::endl; printField(getFieldFromList(turnList, i, xDim, yDim), xDim, yDim, 0); } */ int* deviceFieldList; hipMallocManaged(&deviceFieldList, totalFieldCount * xDim * yDim * sizeof(int)); hipMemcpy(deviceFieldList, turnList, totalFieldCount * xDim * yDim * sizeof(int), hipMemcpyHostToDevice); /* for (int i = 0; i < totalFieldCount; i++) { std::cout << std::endl; printField(getFieldFromList(deviceFieldList, i, xDim, yDim), xDim, yDim, 0); } */ bool* result; hipMallocManaged(&result, totalFieldCount * sizeof(bool)); hipMemset(result, false, totalFieldCount * sizeof(bool)); int blockSize = 256; int numBlocks = (totalFieldCount + blockSize - 1) / blockSize; hipLaunchKernelGGL(( kernel), dim3(numBlocks), dim3(blockSize), 0, 0, deviceFieldList, totalFieldCount, xDim, yDim, WHITE, result); hipDeviceSynchronize(); for (int i = 0; i < totalFieldCount; i++) { int r = *(result + i); printf("%d: %s\n", i, r ? "true" : "false"); } //printf("%d: %s\n", i, *result ? 
"true" : "false"); hipFree(deviceFieldList); hipFree(result); free(turnList); } int* determineTurns(int* field, int xDim, int yDim, int player, int* count) { printf("Before list init\n"); int* list = (int*) malloc(1); printf("After after init\n"); int i = 0; for (int x = 0; x < xDim; x++) { for (int y = 0; y < yDim; y++) { if (getValueAt(x, y, field, xDim, yDim) == player) { // Try all movements for (int dir = -1; dir <= 1; dir++) { int xNew = x - player; int yNew = y + dir; // Check if the new position is inside the field boundaries if (xNew >= 0 && xNew < xDim && yNew >= 0 && yNew < yDim) { // Store state of the target position int newPosition = getValueAt(xNew, yNew, field, xDim, yDim); // Check if the turn is legal int moveAllowed = abs(dir) + (newPosition * player); // Zero if you move straight and the target position is empty // or you move to the right/left and the target position is occupied by an enemy // True if moveAllowed == 0 if (!moveAllowed) { printf("Moving\n"); // Set new positions setValueAt(x, y, EMPTY, field, xDim, yDim); setValueAt(xNew, yNew, player, field, xDim, yDim); printf("Positions set\n"); list = (int*) realloc(list, (i+1) * xDim * yDim * sizeof(int)); printf("Memory allocated\n"); copyArrayTo(field, getFieldFromList(list, i, xDim, yDim), xDim, yDim); i++; printf("Array copied\n"); // Revert changes setValueAt(x, y, player, field, xDim, yDim); setValueAt(xNew, yNew, newPosition, field, xDim, yDim); printf("Reverted changes\n\n"); } } } } } } *count = i; return list; }
d5f0bf7c02f8e5c14e81ee11b25795ac8c729328.cu
#include <iostream> #include <fstream> #include <string> #include <math.h> #include <chrono> const int WHITE = 1; const int BLACK = -1; const int EMPTY = 0; const int LEFT = -1; const int STRAIGHT = 0; const int RIGHT = 1; //int turnCount = 0; //int gameCount = 0; ///////////////////////// Helpers //////////////////////////////////// __device__ __host__ void writeValueToFieldList(int* list, int index, int x, int y, int xDim, int yDim, int newValue) { *(list + xDim * yDim * index + x * yDim + y) = newValue; } __device__ __host__ int* getFieldFromList(int* list, int index, int xDim, int yDim) { return list + xDim * yDim * index; } __device__ __host__ int getValueAt(int x, int y, int* field, int xDim, int yDim) { return field[x * yDim + y]; } __device__ __host__ void setValueAt(int x, int y, int newValue, int* field, int xDim, int yDim) { field[x * yDim + y] = newValue; } __device__ __host__ void copyArrayTo(int* field, int* loc, int xDim, int yDim) { for (int x = 0; x < xDim; x++) { for (int y = 0; y < yDim; y++) { setValueAt(x, y, getValueAt(x, y, field, xDim, yDim), loc, xDim, yDim); } } } __device__ __host__ int* addFieldToList(int* list, int listSize, int* field, int xDim, int yDim) { //int* newList = (int*) realloc(list, (listSize + 1) * xDim * yDim * sizeof(int)); int* newList = (int*) malloc((listSize + 1) * xDim * yDim * sizeof(int)); memcpy(newList, list, listSize * xDim * yDim * sizeof(int)); free(list); int* tmp = getFieldFromList(newList, listSize, xDim, yDim); copyArrayTo(field, tmp, xDim, yDim); return newList; } __device__ __host__ int* arrayCopy(int* array, int xDim, int yDim) { int* copied = (int*) malloc(xDim * yDim * sizeof(int)); for (int x = 0; x < xDim; x++) { for (int y = 0; y < yDim; y++) { setValueAt(x, y, getValueAt(x, y, array, xDim, yDim), copied, xDim, yDim); } } return copied; } __device__ __host__ void printTabs(int count) { for (int i = 0; i < count ; i++) { printf("| "); } } __device__ __host__ void printField(int* field, int xDim, int yDim, int tabs) { for (int x = 0; x < xDim; x++) { printTabs(tabs); printf("|"); for (int y = 0; y < yDim; y++) { int position = getValueAt(x, y, field, xDim, yDim); printf(" % d", position); } printf(" |\n"); } } bool fieldEquals(int* fieldOne, int* fieldTwo, int xDim, int yDim) { for (int x = 0; x < xDim; x++) { for (int y = 0; y < yDim; y++) { if (getValueAt(x, y,fieldOne, xDim, yDim) != getValueAt(x, y, fieldTwo, xDim, yDim)) return false; } } return true; } ////////////////////////////////////////////////////////////////////////////// // Returns true if the given player can win __device__ bool executeTurn(int* field, int xDim, int yDim, int player, int depth) { /* if (depth <= 3) { printTabs(depth); printf("%d's turn:\n", player); printField(field, xDim, yDim, depth); } */ printf("Hey 1\n"); int moveCount = 0; for (int x = 0; x < xDim; x++) { for (int y = 0; y < yDim; y++) { if (getValueAt(x, y, field, xDim, yDim) == player) { printf("Hey 2\n"); // Try all movements for (int dir = -1; dir <= 1; dir++) { int xNew = x - player; int yNew = y + dir; // Check if the new position is inside the field boundaries if (xNew >= 0 && xNew < xDim && yNew >= 0 && yNew < yDim) { // Store state of the target position int newPosition = getValueAt(xNew, yNew, field, xDim, yDim); printf("Hey 3\n"); // Check if the turn is legal int moveAllowed = abs(dir) + (newPosition * player); // Zero if you move straight and the target position is empty // or you move to the right/left and the target position is occupied by an enemy // True if moveAllowed
== 0 if (!moveAllowed) { //moveCount++; //printTabs(depth); //printf("%d moving from %d,%d to %d,%d\n\n", player, x, y, xNew, yNew); // Set new positions setValueAt(x, y, EMPTY, field, xDim, yDim); setValueAt(xNew, yNew, player, field, xDim, yDim); printf("Hey 4\n"); // Check if a win condition is reached if (xNew == (1 - player)/2 * (xDim - 1)) { // Revert changes setValueAt(x, y, player, field, xDim, yDim); setValueAt(xNew, yNew, newPosition, field, xDim, yDim); printf("Hey 5\n"); return true; } // Check if the enemy cannot win after this turn, then return true: If you execute this turn, you will win bool canEnemyWin = executeTurn(field, xDim, yDim, -player, depth + 1); // Revert changes setValueAt(x, y, player, field, xDim, yDim); setValueAt(xNew, yNew, newPosition, field, xDim, yDim); printf("Hey 6\n"); if (!canEnemyWin) { //printTabs(depth); //printf("--> %d wins\n\n", player); return true; } } } } } } } //if (moveCount == 0) { //gameCount++; //turnCount += depth; //} //printf("\n\n"); printf("Hey 7\n"); return false; } __global__ void kernel(int* fields, int fieldCount, int xDim, int yDim, int player, bool* result) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < fieldCount; i += stride) { *(result + i) = false; bool canWin = executeTurn(getFieldFromList(fields, i, xDim, yDim), xDim, yDim, player, 0); if (canWin) { *(result + i) = true; printf("Wins: %d\n", i); } else { *(result + i) = false; } } } int main(void) { /* int xDim = 3, yDim = 3; int* field = (int*) malloc(xDim * yDim * sizeof(int)); memset(field, 0, xDim * yDim * sizeof(int)); // Init array with zeros // Init player positions -> one row per player for (int i = 0; i < yDim; i++) { setValueAt(0, i, BLACK, field, xDim, yDim); //setValueAt(1, i, BLACK, field, xDim, yDim); //setValueAt(xDim - 2, i, WHITE, field, xDim, yDim); setValueAt(xDim - 1, i, WHITE, field, xDim, yDim); } printField(field, xDim, yDim, 0); printf("\n\n"); */ /* int player = WHITE; auto startTime = std::chrono::high_resolution_clock::now(); bool result = executeTurn(field, xDim, yDim, WHITE, 1); auto endTime = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count(); // Outputs char* playerText = (player == WHITE) ? "White" : "Black"; char* resultText = result ? " wins" : " loses"; std::cout << std::endl << std::endl; std::cout << playerText << resultText << std::endl << std::endl; std::cout << "Metrics:" << std::endl; std::cout << " Elapsed time: " << duration << " microseconds" << std::endl; std::cout << " Game count: " << gameCount << std::endl; std::cout << " Turn count: " << turnCount << std::endl; // Delete field free(field); */ /* int turnCount = 0; int player = WHITE; int* turnList = determineTurns(field, xDim, yDim, player, &turnCount); for (int i = 0; i < 1; i++) { int* newList = (int*) malloc(1); int newListSize = 0; for (int j = 0; j < turnCount; j++) { std::cout << "Iteration" << std::endl; int count; int* tempField = getFieldFromList(turnList, j, xDim, yDim); std::cout << "Field created" << std::endl; printField(tempField, xDim, yDim, 1); int* turns = determineTurns(tempField, xDim, yDim, player, &count); std::cout << "I'm here!" 
<< std::endl; int* tempList = (int*) malloc(1); int tempListSize = 0; for (int k = 0; k < newListSize + count; k++) { if (k < newListSize) { addFieldToList(tempList, k, getFieldFromList(newList, k, xDim, yDim), xDim, yDim); } else { addFieldToList(tempList, k, getFieldFromList(turns, k, xDim, yDim), xDim, yDim); } tempListSize++; } newList = tempList; newListSize = tempListSize; //free(tempList); std::cout << "I'm here! 2" << std::endl; } turnList = newList; free(newList); turnCount = newListSize; player = -player; std::cout << "I'm here! 3" << std::endl; } std::cout << "Turn count: " << turnCount << std::endl; for (int i = 0; i < turnCount; i++) { std::cout << std::endl; printField(getFieldFromList(turnList, i, xDim, yDim), xDim, yDim, 0); } */ std::ifstream file("output.txt"); if (!file.is_open()) { return -1; } int xDim = file.get() - '0'; file.get(); int yDim = file.get() - '0'; //file.get(); int* turnList = (int*) malloc(1); int totalFieldCount = 0; char pos; int x = 0, y = 0; int* field = turnList; while (file.get(pos)) { if (pos == ';') { turnList = (int*) realloc(turnList, (totalFieldCount + 1) * xDim * yDim * sizeof(int)); x = 0; y = 0; field = turnList + totalFieldCount * xDim * yDim; totalFieldCount++; } else { if (pos == '0') { setValueAt(x, y, EMPTY, field, xDim, yDim); } else if (pos == 'S') { setValueAt(x, y, BLACK, field, xDim, yDim); } else if (pos == 'W') { setValueAt(x, y, WHITE, field, xDim, yDim); } y++; if (y >= yDim) { x++; y = 0; } //printField(field, xDim, yDim, 0); } } file.close(); // Supposing there's no ';' at the end of the file printf("Field count: %d\n", totalFieldCount); /* for (int i = 0; i < totalFieldCount; i++) { std::cout << std::endl; printField(getFieldFromList(turnList, i, xDim, yDim), xDim, yDim, 0); } */ int* deviceFieldList; cudaMallocManaged(&deviceFieldList, totalFieldCount * xDim * yDim * sizeof(int)); cudaMemcpy(deviceFieldList, turnList, totalFieldCount * xDim * yDim * sizeof(int), cudaMemcpyHostToDevice); /* for (int i = 0; i < totalFieldCount; i++) { std::cout << std::endl; printField(getFieldFromList(deviceFieldList, i, xDim, yDim), xDim, yDim, 0); } */ bool* result; cudaMallocManaged(&result, totalFieldCount * sizeof(bool)); cudaMemset(result, false, totalFieldCount * sizeof(bool)); int blockSize = 256; int numBlocks = (totalFieldCount + blockSize - 1) / blockSize; kernel<<<numBlocks, blockSize>>>(deviceFieldList, totalFieldCount, xDim, yDim, WHITE, result); cudaDeviceSynchronize(); for (int i = 0; i < totalFieldCount; i++) { int r = *(result + i); printf("%d: %s\n", i, r ? "true" : "false"); } //printf("%d: %s\n", i, *result ? 
"true" : "false"); cudaFree(deviceFieldList); cudaFree(result); free(turnList); } int* determineTurns(int* field, int xDim, int yDim, int player, int* count) { printf("Before list init\n"); int* list = (int*) malloc(1); printf("After after init\n"); int i = 0; for (int x = 0; x < xDim; x++) { for (int y = 0; y < yDim; y++) { if (getValueAt(x, y, field, xDim, yDim) == player) { // Try all movements for (int dir = -1; dir <= 1; dir++) { int xNew = x - player; int yNew = y + dir; // Check if the new position is inside the field boundaries if (xNew >= 0 && xNew < xDim && yNew >= 0 && yNew < yDim) { // Store state of the target position int newPosition = getValueAt(xNew, yNew, field, xDim, yDim); // Check if the turn is legal int moveAllowed = abs(dir) + (newPosition * player); // Zero if you move straight and the target position is empty // or you move to the right/left and the target position is occupied by an enemy // True if moveAllowed == 0 if (!moveAllowed) { printf("Moving\n"); // Set new positions setValueAt(x, y, EMPTY, field, xDim, yDim); setValueAt(xNew, yNew, player, field, xDim, yDim); printf("Positions set\n"); list = (int*) realloc(list, (i+1) * xDim * yDim * sizeof(int)); printf("Memory allocated\n"); copyArrayTo(field, getFieldFromList(list, i, xDim, yDim), xDim, yDim); i++; printf("Array copied\n"); // Revert changes setValueAt(x, y, player, field, xDim, yDim); setValueAt(xNew, yNew, newPosition, field, xDim, yDim); printf("Reverted changes\n\n"); } } } } } } *count = i; return list; }